From a491e6a71a8cf862d77defd740a4ee8d65d8292a Mon Sep 17 00:00:00 2001 From: benedettadavico Date: Wed, 11 Dec 2024 10:28:47 +0100 Subject: [PATCH 01/64] update changelog for crunch --- CHANGELOG.md | 142 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bda75be82a..27ff4a5e56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,148 @@ Post 1.0.0 release, the changelog format is based on [Keep a Changelog](https:// ## [Unreleased] +## [2024.14-crunch] (2024-12-11) + +- Merge/release/2024.14-crunch ([#5242]) +- bugfix: added explicit openapi servers to account for route prefixes ([#5237]) +- Further config score adjustments ([#5225]) +- feature: remve any filtering on node semver ([#5224]) +- Backport #5218 ([#5220]) +- Derive serialize for UserAgent (#5210) ([#5217]) +- dont consider legacy nodes for rewarded set selection ([#5215]) +- introduce UNSTABLE endpoints for returning network monitor run details ([#5214]) +- Nmv2 add debug config ([#5212]) +- nym-api NMv1 adjustments ([#5209]) +- adjusted config score penalty calculation ([#5206]) +- Fix backwards compat mac generation ([#5202]) +- merge crunch into develop ([#5199]) +- Update Security disclosure email, public key and policy ([#5195]) +- Guard storage access with cache ([#5193]) +- chore: apply 1.84 linter suggestions ([#5192]) +- improvement: make internal gateway clients use the same topology cache ([#5191]) +- Bugfix/credential proxy sequencing ([#5187]) +- Add monitor_run and testing_route indexes ([#5182]) +- Add indexes to monitor run and testing route ([#5181]) +- bugfix: fixed nym-node config migrations (again) ([#5179]) +- bugfix: use default value for verloc config when deserialising missing values ([#5177]) +- Remove peers with no allowed ip from storage ([#5175]) +- Move two minor jobs to free tier github hosted runners ([#5169]) +- Add support for DELETE to nym-http-api-client ([#5166]) +- Fix env var name 
([#5165]) +- Add strum::EnumIter for TicketType ([#5164]) +- Add export_to_env to NymNetworkDetails ([#5162]) +- bugfix: correctly expose ecash-related data on nym-api ([#5155]) +- fix: validator-rewarder GH job ([#5151]) +- build(deps): bump cross-spawn from 7.0.3 to 7.0.6 in /testnet-faucet ([#5150]) +- build(deps): bump mikefarah/yq from 4.44.3 to 4.44.5 ([#5149]) +- start session collection for exit gateways ([#5148]) +- add version to clientStatsReport ([#5147]) +- update serde_json_path due to compilation issue ([#5144]) +- chore: remove standalone legacy mixnode/gateway binaries ([#5135]) +- [Product Data] Set up country reporting from vpn-client ([#5134]) +- removed ci-nym-api-tests.yml which was running outdated (and broken) tests ([#5133]) +- CI: reduce jobs running on cluster ([#5132]) +- [DOCS/operators]: Release changes v2024.13-magura & Tokenomics pages v1.0 ([#5128]) +- NS Agent auth with NS API ([#5127]) +- [Product Data] Config deserialization bug fix ([#5126]) +- bugfix: don't send empty BankMsg in ecash contract ([#5121]) +- [Product data] Data consumption with ecash ticket ([#5120]) +- feat: add GH workflow for nym-validator-rewarder ([#5119]) +- feat: add Dockerfile and add env vars for clap arguments ([#5118]) +- feature: config score ([#5117]) +- [Product Data] Add stats reporting configuration in client config ([#5115]) +- Correct IPv6 address generation ([#5113]) +- feature: rewarding for ticketbook issuance ([#5112]) +- Add granular log on nym-node ([#5111]) +- Send mixnet packet stats using task client ([#5109]) +- Expose time range ([#5108]) +- [Product Data] Client-side stats collection ([#5107]) +- chore: ecash contract migration to remove unused 'redemption_gateway_share' ([#5104]) +- [Product Data] Better unique user count on gateways ([#5084]) +- feat: add nym node GH workflow ([#5080]) +- IPv6 support for wireguard ([#5059]) +- Node Status API ([#5050]) +- Authenticator CLI client mode ([#5044]) +- Integrate nym-credential-proxy 
into workspace ([#5027]) +- [Product Data] Introduce data persistence on gateways ([#5022]) +- Bump the patch-updates group across 1 directory with 10 updates ([#5011]) +- build(deps): bump once_cell from 1.19.0 to 1.20.2 ([#4952]) +- Create TaskStatusEvent trait instead of piggybacking on Error ([#4919]) +- build(deps): bump lazy_static from 1.4.0 to 1.5.0 ([#4913]) +- Sync code with .env in build.rs ([#4876]) +- build(deps): bump axios from 1.6.0 to 1.7.5 in /nym-api/tests ([#4790]) +- Bump elliptic from 6.5.4 to 6.5.7 in /testnet-faucet ([#4768]) + +[#5242]: https://github.com/nymtech/nym/pull/5242 +[#5237]: https://github.com/nymtech/nym/pull/5237 +[#5225]: https://github.com/nymtech/nym/pull/5225 +[#5224]: https://github.com/nymtech/nym/pull/5224 +[#5220]: https://github.com/nymtech/nym/pull/5220 +[#5217]: https://github.com/nymtech/nym/pull/5217 +[#5215]: https://github.com/nymtech/nym/pull/5215 +[#5214]: https://github.com/nymtech/nym/pull/5214 +[#5212]: https://github.com/nymtech/nym/pull/5212 +[#5209]: https://github.com/nymtech/nym/pull/5209 +[#5206]: https://github.com/nymtech/nym/pull/5206 +[#5202]: https://github.com/nymtech/nym/pull/5202 +[#5199]: https://github.com/nymtech/nym/pull/5199 +[#5195]: https://github.com/nymtech/nym/pull/5195 +[#5193]: https://github.com/nymtech/nym/pull/5193 +[#5192]: https://github.com/nymtech/nym/pull/5192 +[#5191]: https://github.com/nymtech/nym/pull/5191 +[#5187]: https://github.com/nymtech/nym/pull/5187 +[#5182]: https://github.com/nymtech/nym/pull/5182 +[#5181]: https://github.com/nymtech/nym/pull/5181 +[#5179]: https://github.com/nymtech/nym/pull/5179 +[#5177]: https://github.com/nymtech/nym/pull/5177 +[#5175]: https://github.com/nymtech/nym/pull/5175 +[#5169]: https://github.com/nymtech/nym/pull/5169 +[#5166]: https://github.com/nymtech/nym/pull/5166 +[#5165]: https://github.com/nymtech/nym/pull/5165 +[#5164]: https://github.com/nymtech/nym/pull/5164 +[#5162]: https://github.com/nymtech/nym/pull/5162 +[#5155]: 
https://github.com/nymtech/nym/pull/5155 +[#5151]: https://github.com/nymtech/nym/pull/5151 +[#5150]: https://github.com/nymtech/nym/pull/5150 +[#5149]: https://github.com/nymtech/nym/pull/5149 +[#5148]: https://github.com/nymtech/nym/pull/5148 +[#5147]: https://github.com/nymtech/nym/pull/5147 +[#5144]: https://github.com/nymtech/nym/pull/5144 +[#5135]: https://github.com/nymtech/nym/pull/5135 +[#5134]: https://github.com/nymtech/nym/pull/5134 +[#5133]: https://github.com/nymtech/nym/pull/5133 +[#5132]: https://github.com/nymtech/nym/pull/5132 +[#5128]: https://github.com/nymtech/nym/pull/5128 +[#5127]: https://github.com/nymtech/nym/pull/5127 +[#5126]: https://github.com/nymtech/nym/pull/5126 +[#5121]: https://github.com/nymtech/nym/pull/5121 +[#5120]: https://github.com/nymtech/nym/pull/5120 +[#5119]: https://github.com/nymtech/nym/pull/5119 +[#5118]: https://github.com/nymtech/nym/pull/5118 +[#5117]: https://github.com/nymtech/nym/pull/5117 +[#5115]: https://github.com/nymtech/nym/pull/5115 +[#5113]: https://github.com/nymtech/nym/pull/5113 +[#5112]: https://github.com/nymtech/nym/pull/5112 +[#5111]: https://github.com/nymtech/nym/pull/5111 +[#5109]: https://github.com/nymtech/nym/pull/5109 +[#5108]: https://github.com/nymtech/nym/pull/5108 +[#5107]: https://github.com/nymtech/nym/pull/5107 +[#5104]: https://github.com/nymtech/nym/pull/5104 +[#5084]: https://github.com/nymtech/nym/pull/5084 +[#5080]: https://github.com/nymtech/nym/pull/5080 +[#5059]: https://github.com/nymtech/nym/pull/5059 +[#5050]: https://github.com/nymtech/nym/pull/5050 +[#5044]: https://github.com/nymtech/nym/pull/5044 +[#5027]: https://github.com/nymtech/nym/pull/5027 +[#5022]: https://github.com/nymtech/nym/pull/5022 +[#5011]: https://github.com/nymtech/nym/pull/5011 +[#4952]: https://github.com/nymtech/nym/pull/4952 +[#4919]: https://github.com/nymtech/nym/pull/4919 +[#4913]: https://github.com/nymtech/nym/pull/4913 +[#4876]: https://github.com/nymtech/nym/pull/4876 +[#4790]: 
https://github.com/nymtech/nym/pull/4790 +[#4768]: https://github.com/nymtech/nym/pull/4768 + ## [2024.13-magura-drift] (2024-11-29) - Optimised syncing bandwidth information to storage From c26d4f24fc367779e30a2ab9cdaf028b73cc536d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bogdan-=C8=98tefan=20Neac=C5=9Fu?= Date: Fri, 13 Dec 2024 10:38:25 +0200 Subject: [PATCH 02/64] Add conversion unit tests for auth msg (#5251) * Add conversion unit tests for auth msg * Fix remaining bad mac conversions --- common/authenticator-requests/src/lib.rs | 1 + common/authenticator-requests/src/util.rs | 71 ++ .../src/v2/registration.rs | 14 +- .../authenticator-requests/src/v2/request.rs | 2 +- .../authenticator-requests/src/v2/response.rs | 8 +- .../src/v3/conversion.rs | 576 +++++++++++++- .../src/v3/registration.rs | 14 +- .../authenticator-requests/src/v3/request.rs | 2 +- .../authenticator-requests/src/v3/response.rs | 10 +- common/authenticator-requests/src/v3/topup.rs | 2 +- .../src/v4/conversion.rs | 701 +++++++++++++++--- .../src/v4/registration.rs | 14 +- .../authenticator-requests/src/v4/request.rs | 4 +- .../authenticator-requests/src/v4/response.rs | 12 +- common/authenticator-requests/src/v4/topup.rs | 2 +- .../src/lib.rs | 2 +- .../authenticator/src/mixnet_listener.rs | 14 +- 17 files changed, 1304 insertions(+), 145 deletions(-) create mode 100644 common/authenticator-requests/src/util.rs diff --git a/common/authenticator-requests/src/lib.rs b/common/authenticator-requests/src/lib.rs index 27164417e8..ed987a9a50 100644 --- a/common/authenticator-requests/src/lib.rs +++ b/common/authenticator-requests/src/lib.rs @@ -8,6 +8,7 @@ pub mod v3; pub mod v4; mod error; +mod util; pub use error::Error; pub use v4 as latest; diff --git a/common/authenticator-requests/src/util.rs b/common/authenticator-requests/src/util.rs new file mode 100644 index 0000000000..cb1269f10c --- /dev/null +++ b/common/authenticator-requests/src/util.rs @@ -0,0 +1,71 @@ +// Copyright 2024 - Nym 
Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +#[cfg(test)] +pub(crate) mod tests { + pub(crate) const CREDENTIAL_BYTES: [u8; 1245] = [ + 0, 0, 4, 133, 96, 179, 223, 185, 136, 23, 213, 166, 59, 203, 66, 69, 209, 181, 227, 254, + 16, 102, 98, 237, 59, 119, 170, 111, 31, 194, 51, 59, 120, 17, 115, 229, 79, 91, 11, 139, + 154, 2, 212, 23, 68, 70, 167, 3, 240, 54, 224, 171, 221, 1, 69, 48, 60, 118, 119, 249, 123, + 35, 172, 227, 131, 96, 232, 209, 187, 123, 4, 197, 102, 90, 96, 45, 125, 135, 140, 99, 1, + 151, 17, 131, 143, 157, 97, 107, 139, 232, 212, 87, 14, 115, 253, 255, 166, 167, 186, 43, + 90, 96, 173, 105, 120, 40, 10, 163, 250, 224, 214, 200, 178, 4, 160, 16, 130, 59, 76, 193, + 39, 240, 3, 101, 141, 209, 183, 226, 186, 207, 56, 210, 187, 7, 164, 240, 164, 205, 37, 81, + 184, 214, 193, 195, 90, 205, 238, 225, 195, 104, 12, 123, 203, 57, 233, 243, 215, 145, 195, + 196, 57, 38, 125, 172, 18, 47, 63, 165, 110, 219, 180, 40, 58, 116, 92, 254, 160, 98, 48, + 92, 254, 232, 107, 184, 80, 234, 60, 160, 235, 249, 76, 41, 38, 165, 28, 40, 136, 74, 48, + 166, 50, 245, 23, 201, 140, 101, 79, 93, 235, 128, 186, 146, 126, 180, 134, 43, 13, 186, + 19, 195, 48, 168, 201, 29, 216, 95, 176, 198, 132, 188, 64, 39, 212, 150, 32, 52, 53, 38, + 228, 199, 122, 226, 217, 75, 40, 191, 151, 48, 164, 242, 177, 79, 14, 122, 105, 151, 85, + 88, 199, 162, 17, 96, 103, 83, 178, 128, 9, 24, 30, 74, 108, 241, 85, 240, 166, 97, 241, + 85, 199, 11, 198, 226, 234, 70, 107, 145, 28, 208, 114, 51, 12, 234, 108, 101, 202, 112, + 48, 185, 22, 159, 67, 109, 49, 27, 149, 90, 109, 32, 226, 112, 7, 201, 208, 209, 104, 31, + 97, 134, 204, 145, 27, 181, 206, 181, 106, 32, 110, 136, 115, 249, 201, 111, 5, 245, 203, + 71, 121, 169, 126, 151, 178, 236, 59, 221, 195, 48, 135, 115, 6, 50, 227, 74, 97, 107, 107, + 213, 90, 2, 203, 154, 138, 47, 128, 52, 134, 128, 224, 51, 65, 240, 90, 8, 55, 175, 180, + 178, 204, 206, 168, 110, 51, 57, 189, 169, 48, 169, 136, 121, 99, 51, 170, 178, 214, 74, 1, + 96, 
151, 167, 25, 173, 180, 171, 155, 10, 55, 142, 234, 190, 113, 90, 79, 80, 244, 71, 166, + 30, 235, 113, 150, 133, 1, 218, 17, 109, 111, 223, 24, 216, 177, 41, 2, 204, 65, 221, 212, + 207, 236, 144, 6, 65, 224, 55, 42, 1, 1, 161, 134, 118, 127, 111, 220, 110, 127, 240, 71, + 223, 129, 12, 93, 20, 220, 60, 56, 71, 146, 184, 95, 132, 69, 28, 56, 53, 192, 213, 22, + 119, 230, 152, 225, 182, 188, 163, 219, 37, 175, 247, 73, 14, 247, 38, 72, 243, 1, 48, 131, + 59, 8, 13, 96, 143, 185, 127, 241, 161, 217, 24, 149, 193, 40, 16, 30, 202, 151, 28, 119, + 240, 153, 101, 156, 61, 193, 72, 245, 199, 181, 12, 231, 65, 166, 67, 142, 121, 207, 202, + 58, 197, 113, 188, 248, 42, 124, 105, 48, 161, 241, 55, 209, 36, 194, 27, 63, 233, 144, + 189, 85, 117, 234, 9, 139, 46, 31, 206, 114, 95, 131, 29, 240, 13, 81, 142, 140, 133, 33, + 30, 41, 141, 37, 80, 217, 95, 221, 76, 115, 86, 201, 165, 51, 252, 9, 28, 209, 1, 48, 150, + 74, 248, 212, 187, 222, 66, 210, 3, 200, 19, 217, 171, 184, 42, 148, 53, 150, 57, 50, 6, + 227, 227, 62, 49, 42, 148, 148, 157, 82, 191, 58, 24, 34, 56, 98, 120, 89, 105, 176, 85, + 15, 253, 241, 41, 153, 195, 136, 1, 48, 142, 126, 213, 101, 223, 79, 133, 230, 105, 38, + 161, 149, 2, 21, 136, 150, 42, 72, 218, 85, 146, 63, 223, 58, 108, 186, 183, 248, 62, 20, + 47, 34, 113, 160, 177, 204, 181, 16, 24, 212, 224, 35, 84, 51, 168, 56, 136, 11, 1, 48, + 135, 242, 62, 149, 230, 178, 32, 224, 119, 26, 234, 163, 237, 224, 114, 95, 112, 140, 170, + 150, 96, 125, 136, 221, 180, 78, 18, 11, 12, 184, 2, 198, 217, 119, 43, 69, 4, 172, 109, + 55, 183, 40, 131, 172, 161, 88, 183, 101, 1, 48, 173, 216, 22, 73, 42, 255, 211, 93, 249, + 87, 159, 115, 61, 91, 55, 130, 17, 216, 60, 34, 122, 55, 8, 244, 244, 153, 151, 57, 5, 144, + 178, 55, 249, 64, 211, 168, 34, 148, 56, 89, 92, 203, 70, 124, 219, 152, 253, 165, 0, 32, + 203, 116, 63, 7, 240, 222, 82, 86, 11, 149, 167, 72, 224, 55, 190, 66, 201, 65, 168, 184, + 96, 47, 194, 241, 168, 124, 7, 74, 214, 250, 37, 76, 32, 218, 69, 122, 
103, 215, 145, 169, + 24, 212, 229, 168, 106, 10, 144, 31, 13, 25, 178, 242, 250, 106, 159, 40, 48, 163, 165, 61, + 130, 57, 146, 4, 73, 32, 254, 233, 125, 135, 212, 29, 111, 4, 177, 114, 15, 210, 170, 82, + 108, 110, 62, 166, 81, 209, 106, 176, 156, 14, 133, 242, 60, 127, 120, 242, 28, 97, 0, 1, + 32, 103, 93, 109, 89, 240, 91, 1, 84, 150, 50, 206, 157, 203, 49, 220, 120, 234, 175, 234, + 150, 126, 225, 94, 163, 164, 199, 138, 114, 62, 99, 106, 112, 1, 32, 171, 40, 220, 82, 241, + 203, 76, 146, 111, 139, 182, 179, 237, 182, 115, 75, 128, 201, 107, 43, 214, 0, 135, 217, + 160, 68, 150, 232, 144, 114, 237, 98, 32, 30, 134, 232, 59, 93, 163, 253, 244, 13, 202, 52, + 147, 168, 83, 121, 123, 95, 21, 210, 209, 225, 223, 143, 49, 10, 205, 238, 1, 22, 83, 81, + 70, 1, 32, 26, 76, 6, 234, 160, 50, 139, 102, 161, 232, 155, 106, 130, 171, 226, 210, 233, + 178, 85, 247, 71, 123, 55, 53, 46, 67, 148, 137, 156, 207, 208, 107, 1, 32, 102, 31, 4, 98, + 110, 156, 144, 61, 229, 140, 198, 84, 196, 238, 128, 35, 131, 182, 137, 125, 241, 95, 69, + 131, 170, 27, 2, 144, 75, 72, 242, 102, 3, 32, 121, 80, 45, 173, 56, 65, 218, 27, 40, 251, + 197, 32, 169, 104, 123, 110, 90, 78, 153, 166, 38, 9, 129, 228, 99, 8, 1, 116, 142, 233, + 162, 69, 32, 216, 169, 159, 116, 95, 12, 63, 176, 195, 6, 183, 123, 135, 75, 61, 112, 106, + 83, 235, 176, 41, 27, 248, 48, 71, 165, 170, 12, 92, 103, 103, 81, 32, 58, 74, 75, 145, + 192, 94, 153, 69, 80, 128, 241, 3, 16, 117, 192, 86, 161, 103, 44, 174, 211, 196, 182, 124, + 55, 11, 107, 142, 49, 88, 6, 41, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 0, 37, 139, 240, 0, 0, + 0, 0, 0, 0, 0, 1, + ]; + pub(crate) const RECIPIENT: &str = "CytBseW6yFXUMzz4SGAKdNLGR7q3sJLLYxyBGvutNEQV.4QXYyEVc5fUDjmmi8PrHN9tdUFV4PCvSJE1278cHyvoe@4sBbL1ngf1vtNqykydQKTFh26sQCw888GpUqvPvyNB4f"; +} diff --git 
a/common/authenticator-requests/src/v2/registration.rs b/common/authenticator-requests/src/v2/registration.rs index b7bf2d8430..f3aa22d749 100644 --- a/common/authenticator-requests/src/v2/registration.rs +++ b/common/authenticator-requests/src/v2/registration.rs @@ -29,7 +29,7 @@ pub type Taken = Option; pub const BANDWIDTH_CAP_PER_DAY: u64 = 1024 * 1024 * 1024; // 1 GB -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct InitMessage { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, @@ -41,7 +41,7 @@ impl InitMessage { } } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct FinalMessage { /// Gateway client data pub gateway_client: GatewayClient, @@ -50,28 +50,28 @@ pub struct FinalMessage { pub credential: Option, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct RegistrationData { pub nonce: u64, pub gateway_data: GatewayClient, pub wg_port: u16, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct RegistredData { pub pub_key: PeerPublicKey, pub private_ip: IpAddr, pub wg_port: u16, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct RemainingBandwidthData { pub available_bandwidth: i64, } /// Client that wants to register sends its PublicKey bytes mac digest encrypted with a DH shared secret. 
/// Gateway/Nym node can then verify pub_key payload using the same process -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct GatewayClient { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, @@ -147,7 +147,7 @@ impl GatewayClient { // TODO: change the inner type into generic array of size HmacSha256::OutputSize // TODO2: rely on our internal crypto/hmac -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct ClientMac(Vec); impl fmt::Display for ClientMac { diff --git a/common/authenticator-requests/src/v2/request.rs b/common/authenticator-requests/src/v2/request.rs index 6943085af7..abd1e5ebff 100644 --- a/common/authenticator-requests/src/v2/request.rs +++ b/common/authenticator-requests/src/v2/request.rs @@ -87,7 +87,7 @@ impl AuthenticatorRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub enum AuthenticatorRequestData { Initial(InitMessage), Final(Box), diff --git a/common/authenticator-requests/src/v2/response.rs b/common/authenticator-requests/src/v2/response.rs index ab05dfcd35..1b389de43f 100644 --- a/common/authenticator-requests/src/v2/response.rs +++ b/common/authenticator-requests/src/v2/response.rs @@ -100,28 +100,28 @@ impl AuthenticatorResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub enum AuthenticatorResponseData { PendingRegistration(PendingRegistrationResponse), Registered(RegisteredResponse), RemainingBandwidth(RemainingBandwidthResponse), } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct PendingRegistrationResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: RegistrationData, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct 
RegisteredResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: RegistredData, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct RemainingBandwidthResponse { pub request_id: u64, pub reply_to: Recipient, diff --git a/common/authenticator-requests/src/v3/conversion.rs b/common/authenticator-requests/src/v3/conversion.rs index fe0699ab8b..1a55576a0d 100644 --- a/common/authenticator-requests/src/v3/conversion.rs +++ b/common/authenticator-requests/src/v3/conversion.rs @@ -19,6 +19,24 @@ impl From for v3::request::AuthenticatorReque } } +impl TryFrom for v2::request::AuthenticatorRequest { + type Error = crate::Error; + + fn try_from( + authenticator_request: v3::request::AuthenticatorRequest, + ) -> Result { + Ok(Self { + protocol: Protocol { + version: 2, + service_provider_type: ServiceProviderType::Authenticator, + }, + data: authenticator_request.data.try_into()?, + reply_to: authenticator_request.reply_to, + request_id: authenticator_request.request_id, + }) + } +} + impl From for v3::request::AuthenticatorRequestData { fn from(authenticator_request_data: v2::request::AuthenticatorRequestData) -> Self { match authenticator_request_data { @@ -35,6 +53,29 @@ impl From for v3::request::AuthenticatorR } } +impl TryFrom for v2::request::AuthenticatorRequestData { + type Error = crate::Error; + + fn try_from( + authenticator_request_data: v3::request::AuthenticatorRequestData, + ) -> Result { + match authenticator_request_data { + v3::request::AuthenticatorRequestData::Initial(init_msg) => Ok( + v2::request::AuthenticatorRequestData::Initial(init_msg.into()), + ), + v3::request::AuthenticatorRequestData::Final(gw_client) => Ok( + v2::request::AuthenticatorRequestData::Final(gw_client.into()), + ), + v3::request::AuthenticatorRequestData::QueryBandwidth(pub_key) => Ok( + v2::request::AuthenticatorRequestData::QueryBandwidth(pub_key), + ), + 
v3::request::AuthenticatorRequestData::TopUpBandwidth(_) => Err( + Self::Error::Conversion("no top up bandwidth variant in v2".to_string()), + ), + } + } +} + impl From for v3::registration::InitMessage { fn from(init_msg: v2::registration::InitMessage) -> Self { Self { @@ -43,6 +84,14 @@ impl From for v3::registration::InitMessage { } } +impl From for v2::registration::InitMessage { + fn from(init_msg: v3::registration::InitMessage) -> Self { + Self { + pub_key: init_msg.pub_key, + } + } +} + impl From> for Box { fn from(gw_client: Box) -> Self { Box::new(v3::registration::FinalMessage { @@ -52,6 +101,15 @@ impl From> for Box> for Box { + fn from(gw_client: Box) -> Self { + Box::new(v2::registration::FinalMessage { + gateway_client: gw_client.gateway_client.into(), + credential: gw_client.credential, + }) + } +} + impl From for v3::registration::GatewayClient { fn from(gw_client: v2::registration::GatewayClient) -> Self { Self { @@ -93,7 +151,10 @@ impl TryFrom for v2::response::Authenticato Ok(Self { data: authenticator_response.data.try_into()?, reply_to: authenticator_response.reply_to, - protocol: authenticator_response.protocol, + protocol: Protocol { + version: 2, + service_provider_type: authenticator_response.protocol.service_provider_type, + }, }) } } @@ -101,7 +162,10 @@ impl TryFrom for v2::response::Authenticato impl From for v3::response::AuthenticatorResponse { fn from(value: v2::response::AuthenticatorResponse) -> Self { Self { - protocol: value.protocol, + protocol: Protocol { + version: 3, + service_provider_type: value.protocol.service_provider_type, + }, data: value.data.into(), reply_to: value.reply_to, } @@ -270,3 +334,511 @@ impl From for v3::registration::Remain } } } + +#[cfg(test)] +mod tests { + use std::{net::IpAddr, str::FromStr}; + + use nym_credentials_interface::CredentialSpendingData; + use nym_crypto::asymmetric::encryption::PrivateKey; + use nym_sphinx::addressing::Recipient; + use nym_wireguard_types::PeerPublicKey; + use 
x25519_dalek::PublicKey; + + use super::*; + use crate::util::tests::{CREDENTIAL_BYTES, RECIPIENT}; + + #[test] + fn upgrade_initial_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v2::request::AuthenticatorRequest::new_initial_request( + v2::registration::InitMessage::new(pub_key), + reply_to, + ); + let upgraded_msg = v3::request::AuthenticatorRequest::from(msg); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v3::request::AuthenticatorRequestData::Initial(v3::registration::InitMessage { + pub_key + }) + ); + } + + #[test] + fn downgrade_initial_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v3::request::AuthenticatorRequest::new_initial_request( + v3::registration::InitMessage::new(pub_key), + reply_to, + ); + let downgraded_msg = v2::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 2, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v2::request::AuthenticatorRequestData::Initial(v2::registration::InitMessage { + pub_key + }) + ); + } + + #[test] + fn upgrade_final_req() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let private_ip = IpAddr::from_str("10.10.10.10").unwrap(); + let nonce = 42; + let gateway_client = v2::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ); + let credential = Some(CredentialSpendingData::try_from_bytes(&CREDENTIAL_BYTES).unwrap()); + let final_message = v2::registration::FinalMessage { 
+ gateway_client, + credential: credential.clone(), + }; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = + v2::request::AuthenticatorRequest::new_final_request(final_message, reply_to); + let upgraded_msg = v3::request::AuthenticatorRequest::from(msg); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v3::request::AuthenticatorRequestData::Final(Box::new( + v3::registration::FinalMessage { + gateway_client: v3::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ), + credential + } + )) + ); + } + + #[test] + fn downgrade_final_req() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let private_ip = IpAddr::from_str("10.10.10.10").unwrap(); + let nonce = 42; + let gateway_client = v3::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ); + let credential = Some(CredentialSpendingData::try_from_bytes(&CREDENTIAL_BYTES).unwrap()); + let final_message = v3::registration::FinalMessage { + gateway_client, + credential: credential.clone(), + }; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = + v3::request::AuthenticatorRequest::new_final_request(final_message, reply_to); + let upgraded_msg = v2::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 2, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v2::request::AuthenticatorRequestData::Final(Box::new( + v2::registration::FinalMessage { + gateway_client: v2::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ), + credential + 
} + )) + ); + } + + #[test] + fn upgrade_query_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v2::request::AuthenticatorRequest::new_query_request(pub_key, reply_to); + let upgraded_msg = v3::request::AuthenticatorRequest::from(msg); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v3::request::AuthenticatorRequestData::QueryBandwidth(pub_key) + ); + } + + #[test] + fn downgrade_query_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v3::request::AuthenticatorRequest::new_query_request(pub_key, reply_to); + let downgraded_msg = v2::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 2, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v2::request::AuthenticatorRequestData::QueryBandwidth(pub_key) + ); + } + + #[test] + fn downgrade_topup_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let credential = CredentialSpendingData::try_from_bytes(&CREDENTIAL_BYTES).unwrap(); + let top_up_message = v3::topup::TopUpMessage { + pub_key, + credential, + }; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = + v3::request::AuthenticatorRequest::new_topup_request(top_up_message, reply_to); + assert!(v2::request::AuthenticatorRequest::try_from(msg).is_err()); + } + + #[test] + fn upgrade_pending_reg_resp() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let private_ip = IpAddr::from_str("10.10.10.10").unwrap(); + let nonce = 42; + 
let wg_port = 51822; + let gateway_data = v2::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ); + let registration_data = v2::registration::RegistrationData { + nonce, + gateway_data, + wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v2::response::AuthenticatorResponse::new_pending_registration_success( + registration_data, + request_id, + reply_to, + ); + let upgraded_msg = v3::response::AuthenticatorResponse::from(msg); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v3::response::AuthenticatorResponseData::PendingRegistration( + v3::response::PendingRegistrationResponse { + request_id, + reply_to, + reply: v3::registration::RegistrationData { + nonce, + gateway_data: v3::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ), + wg_port, + } + } + ) + ); + } + + #[test] + fn downgrade_pending_reg_resp() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let private_ip = IpAddr::from_str("10.10.10.10").unwrap(); + let nonce = 42; + let wg_port = 51822; + let gateway_data = v3::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ); + let registration_data = v3::registration::RegistrationData { + nonce, + gateway_data, + wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v3::response::AuthenticatorResponse::new_pending_registration_success( + registration_data, + request_id, + reply_to, + ); + let downgraded_msg = v2::response::AuthenticatorResponse::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { 
+ version: 2, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v2::response::AuthenticatorResponseData::PendingRegistration( + v2::response::PendingRegistrationResponse { + request_id, + reply_to, + reply: v2::registration::RegistrationData { + nonce, + gateway_data: v2::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ip, + nonce, + ), + wg_port, + } + } + ) + ); + } + + #[test] + fn upgrade_registered_resp() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let private_ip = IpAddr::from_str("10.10.10.10").unwrap(); + let wg_port = 51822; + let registred_data = v2::registration::RegistredData { + pub_key, + private_ip, + wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v2::response::AuthenticatorResponse::new_registered( + registred_data, + reply_to, + request_id, + ); + let upgraded_msg = v3::response::AuthenticatorResponse::from(msg); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v3::response::AuthenticatorResponseData::Registered(v3::response::RegisteredResponse { + request_id, + reply_to, + reply: v3::registration::RegistredData { + wg_port, + pub_key, + private_ip + } + }) + ); + } + + #[test] + fn downgrade_registered_resp() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let private_ip = IpAddr::from_str("10.10.10.10").unwrap(); + let wg_port = 51822; + let registred_data = v3::registration::RegistredData { + pub_key, + private_ip, + wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v3::response::AuthenticatorResponse::new_registered( + registred_data, + reply_to, + request_id, + ); + let downgraded_msg = 
v2::response::AuthenticatorResponse::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 2, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v2::response::AuthenticatorResponseData::Registered(v2::response::RegisteredResponse { + request_id, + reply_to, + reply: v2::registration::RegistredData { + wg_port, + pub_key, + private_ip + } + }) + ); + } + + #[test] + fn upgrade_remaining_bandwidth_resp() { + let available_bandwidth = 42; + let remaining_bandwidth_data = Some(v2::registration::RemainingBandwidthData { + available_bandwidth, + }); + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v2::response::AuthenticatorResponse::new_remaining_bandwidth( + remaining_bandwidth_data, + reply_to, + request_id, + ); + let upgraded_msg = v3::response::AuthenticatorResponse::from(msg); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v3::response::AuthenticatorResponseData::RemainingBandwidth( + v3::response::RemainingBandwidthResponse { + request_id, + reply_to, + reply: Some(v3::registration::RemainingBandwidthData { + available_bandwidth, + }) + } + ) + ); + } + + #[test] + fn downgrade_remaining_bandwidth_resp() { + let available_bandwidth = 42; + let remaining_bandwidth_data = Some(v3::registration::RemainingBandwidthData { + available_bandwidth, + }); + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v3::response::AuthenticatorResponse::new_remaining_bandwidth( + remaining_bandwidth_data, + reply_to, + request_id, + ); + let downgraded_msg = v2::response::AuthenticatorResponse::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 2, + service_provider_type: ServiceProviderType::Authenticator + } 
+ ); + assert_eq!( + downgraded_msg.data, + v2::response::AuthenticatorResponseData::RemainingBandwidth( + v2::response::RemainingBandwidthResponse { + request_id, + reply_to, + reply: Some(v2::registration::RemainingBandwidthData { + available_bandwidth, + }) + } + ) + ); + } + + #[test] + fn downgrade_topup_resp() { + let available_bandwidth = 42; + let remaining_bandwidth_data = v3::registration::RemainingBandwidthData { + available_bandwidth, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v3::response::AuthenticatorResponse::new_topup_bandwidth( + remaining_bandwidth_data, + reply_to, + request_id, + ); + assert!(v2::response::AuthenticatorResponse::try_from(msg).is_err()); + } +} diff --git a/common/authenticator-requests/src/v3/registration.rs b/common/authenticator-requests/src/v3/registration.rs index 37234f7e1f..d9fac785a8 100644 --- a/common/authenticator-requests/src/v3/registration.rs +++ b/common/authenticator-requests/src/v3/registration.rs @@ -29,7 +29,7 @@ pub type Taken = Option; pub const BANDWIDTH_CAP_PER_DAY: u64 = 250 * 1024 * 1024 * 1024; // 250 GB -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct InitMessage { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, @@ -41,7 +41,7 @@ impl InitMessage { } } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct FinalMessage { /// Gateway client data pub gateway_client: GatewayClient, @@ -50,28 +50,28 @@ pub struct FinalMessage { pub credential: Option, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct RegistrationData { pub nonce: u64, pub gateway_data: GatewayClient, pub wg_port: u16, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct 
RegistredData { pub pub_key: PeerPublicKey, pub private_ip: IpAddr, pub wg_port: u16, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct RemainingBandwidthData { pub available_bandwidth: i64, } /// Client that wants to register sends its PublicKey bytes mac digest encrypted with a DH shared secret. /// Gateway/Nym node can then verify pub_key payload using the same process -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct GatewayClient { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, @@ -147,7 +147,7 @@ impl GatewayClient { // TODO: change the inner type into generic array of size HmacSha256::OutputSize // TODO2: rely on our internal crypto/hmac -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct ClientMac(Vec); impl fmt::Display for ClientMac { diff --git a/common/authenticator-requests/src/v3/request.rs b/common/authenticator-requests/src/v3/request.rs index 32db17aed2..9a7940e1cc 100644 --- a/common/authenticator-requests/src/v3/request.rs +++ b/common/authenticator-requests/src/v3/request.rs @@ -106,7 +106,7 @@ impl AuthenticatorRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub enum AuthenticatorRequestData { Initial(InitMessage), Final(Box), diff --git a/common/authenticator-requests/src/v3/response.rs b/common/authenticator-requests/src/v3/response.rs index 370fc64671..ca44fb19f6 100644 --- a/common/authenticator-requests/src/v3/response.rs +++ b/common/authenticator-requests/src/v3/response.rs @@ -120,7 +120,7 @@ impl AuthenticatorResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub enum AuthenticatorResponseData { PendingRegistration(PendingRegistrationResponse), Registered(RegisteredResponse), @@ -128,28 +128,28 @@ pub enum 
AuthenticatorResponseData { TopUpBandwidth(TopUpBandwidthResponse), } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct PendingRegistrationResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: RegistrationData, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct RegisteredResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: RegistredData, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct RemainingBandwidthResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: Option, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct TopUpBandwidthResponse { pub request_id: u64, pub reply_to: Recipient, diff --git a/common/authenticator-requests/src/v3/topup.rs b/common/authenticator-requests/src/v3/topup.rs index 31a61a0659..1163d07f12 100644 --- a/common/authenticator-requests/src/v3/topup.rs +++ b/common/authenticator-requests/src/v3/topup.rs @@ -5,7 +5,7 @@ use nym_credentials_interface::CredentialSpendingData; use nym_wireguard_types::PeerPublicKey; use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct TopUpMessage { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, diff --git a/common/authenticator-requests/src/v4/conversion.rs b/common/authenticator-requests/src/v4/conversion.rs index 7b79b5f087..86a1c8791b 100644 --- a/common/authenticator-requests/src/v4/conversion.rs +++ b/common/authenticator-requests/src/v4/conversion.rs @@ -3,128 +3,131 @@ use nym_service_provider_requests_common::{Protocol, ServiceProviderType}; -use crate::{v2, v3, v4}; +use crate::{v3, v4}; -impl From for v4::request::AuthenticatorRequest { - fn 
from(authenticator_request: v3::request::AuthenticatorRequest) -> Self { - Self { +impl TryFrom for v4::request::AuthenticatorRequest { + type Error = crate::Error; + fn try_from( + authenticator_request: v3::request::AuthenticatorRequest, + ) -> Result { + Ok(Self { protocol: Protocol { version: 4, service_provider_type: ServiceProviderType::Authenticator, }, - data: authenticator_request.data.into(), + data: authenticator_request.data.try_into()?, reply_to: authenticator_request.reply_to, request_id: authenticator_request.request_id, - } - } -} - -impl From for v4::request::AuthenticatorRequestData { - fn from(authenticator_request_data: v3::request::AuthenticatorRequestData) -> Self { - match authenticator_request_data { - v3::request::AuthenticatorRequestData::Initial(init_msg) => { - v4::request::AuthenticatorRequestData::Initial(init_msg.into()) - } - v3::request::AuthenticatorRequestData::Final(gw_client) => { - v4::request::AuthenticatorRequestData::Final(gw_client.into()) - } - v3::request::AuthenticatorRequestData::QueryBandwidth(pub_key) => { - v4::request::AuthenticatorRequestData::QueryBandwidth(pub_key) - } - v3::request::AuthenticatorRequestData::TopUpBandwidth(top_up_message) => { - v4::request::AuthenticatorRequestData::TopUpBandwidth(top_up_message.into()) - } - } - } -} - -impl From for v4::registration::InitMessage { - fn from(init_msg: v3::registration::InitMessage) -> Self { - Self { - pub_key: init_msg.pub_key, - } - } -} - -impl From> for Box { - fn from(gw_client: Box) -> Self { - Box::new(v4::registration::FinalMessage { - gateway_client: gw_client.gateway_client.into(), - credential: gw_client.credential, }) } } -impl From> for Box { - fn from(top_up_message: Box) -> Self { - Box::new(v4::topup::TopUpMessage { - pub_key: top_up_message.pub_key, - credential: top_up_message.credential, +impl TryFrom for v3::request::AuthenticatorRequest { + type Error = crate::Error; + fn try_from( + authenticator_request: 
v4::request::AuthenticatorRequest, + ) -> Result { + Ok(Self { + protocol: Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator, + }, + data: authenticator_request.data.try_into()?, + reply_to: authenticator_request.reply_to, + request_id: authenticator_request.request_id, }) } } -impl From for v4::registration::GatewayClient { - fn from(gw_client: v2::registration::GatewayClient) -> Self { - Self { - pub_key: gw_client.pub_key, - private_ips: gw_client.private_ip.into(), - mac: gw_client.mac.into(), +impl TryFrom for v4::request::AuthenticatorRequestData { + type Error = crate::Error; + fn try_from( + authenticator_request_data: v3::request::AuthenticatorRequestData, + ) -> Result { + match authenticator_request_data { + v3::request::AuthenticatorRequestData::Initial(init_msg) => Ok( + v4::request::AuthenticatorRequestData::Initial(init_msg.into()), + ), + v3::request::AuthenticatorRequestData::Final(_) => Err(Self::Error::Conversion( + "mac hash breaking change".to_string(), + )), + v3::request::AuthenticatorRequestData::QueryBandwidth(pub_key) => Ok( + v4::request::AuthenticatorRequestData::QueryBandwidth(pub_key), + ), + v3::request::AuthenticatorRequestData::TopUpBandwidth(top_up_message) => Ok( + v4::request::AuthenticatorRequestData::TopUpBandwidth(top_up_message.into()), + ), } } } -impl From for v4::registration::GatewayClient { - fn from(gw_client: v3::registration::GatewayClient) -> Self { - Self { - pub_key: gw_client.pub_key, - private_ips: gw_client.private_ip.into(), - mac: gw_client.mac.into(), +impl TryFrom for v3::request::AuthenticatorRequestData { + type Error = crate::Error; + fn try_from( + authenticator_request_data: v4::request::AuthenticatorRequestData, + ) -> Result { + match authenticator_request_data { + v4::request::AuthenticatorRequestData::Initial(init_msg) => Ok( + v3::request::AuthenticatorRequestData::Initial(init_msg.into()), + ), + v4::request::AuthenticatorRequestData::Final(_) => 
Err(Self::Error::Conversion( + "mac hash breaking change".to_string(), + )), + v4::request::AuthenticatorRequestData::QueryBandwidth(pub_key) => Ok( + v3::request::AuthenticatorRequestData::QueryBandwidth(pub_key), + ), + v4::request::AuthenticatorRequestData::TopUpBandwidth(top_up_message) => Ok( + v3::request::AuthenticatorRequestData::TopUpBandwidth(top_up_message.into()), + ), } } } -impl From for v3::registration::GatewayClient { - fn from(gw_client: v4::registration::GatewayClient) -> Self { +impl From for v4::registration::InitMessage { + fn from(init_msg: v3::registration::InitMessage) -> Self { Self { - pub_key: gw_client.pub_key, - private_ip: gw_client.private_ips.ipv4.into(), - mac: gw_client.mac.into(), + pub_key: init_msg.pub_key, } } } -impl From for v2::registration::GatewayClient { - fn from(gw_client: v4::registration::GatewayClient) -> Self { +impl From for v3::registration::InitMessage { + fn from(init_msg: v4::registration::InitMessage) -> Self { Self { - pub_key: gw_client.pub_key, - private_ip: gw_client.private_ips.ipv4.into(), - mac: gw_client.mac.into(), + pub_key: init_msg.pub_key, } } } -impl From for v4::registration::ClientMac { - fn from(mac: v2::registration::ClientMac) -> Self { - Self::new(mac.to_vec()) - } -} - -impl From for v4::registration::ClientMac { - fn from(mac: v3::registration::ClientMac) -> Self { - Self::new(mac.to_vec()) +impl From> for Box { + fn from(top_up_message: Box) -> Self { + Box::new(v4::topup::TopUpMessage { + pub_key: top_up_message.pub_key, + credential: top_up_message.credential, + }) } } -impl From for v3::registration::ClientMac { - fn from(mac: v4::registration::ClientMac) -> Self { - Self::new(mac.to_vec()) +impl From> for Box { + fn from(top_up_message: Box) -> Self { + Box::new(v3::topup::TopUpMessage { + pub_key: top_up_message.pub_key, + credential: top_up_message.credential, + }) } } -impl From for v2::registration::ClientMac { - fn from(mac: v4::registration::ClientMac) -> Self { - 
Self::new(mac.to_vec()) +impl TryFrom for v4::response::AuthenticatorResponse { + type Error = crate::Error; + fn try_from(value: v3::response::AuthenticatorResponse) -> Result { + Ok(Self { + protocol: Protocol { + version: 4, + service_provider_type: value.protocol.service_provider_type, + }, + data: value.data.try_into()?, + reply_to: value.reply_to, + }) } } @@ -137,11 +140,40 @@ impl TryFrom for v3::response::Authenticato Ok(Self { data: authenticator_response.data.try_into()?, reply_to: authenticator_response.reply_to, - protocol: authenticator_response.protocol, + protocol: Protocol { + version: 3, + service_provider_type: authenticator_response.protocol.service_provider_type, + }, }) } } +impl TryFrom for v4::response::AuthenticatorResponseData { + type Error = crate::Error; + fn try_from( + authenticator_response_data: v3::response::AuthenticatorResponseData, + ) -> Result { + match authenticator_response_data { + v3::response::AuthenticatorResponseData::PendingRegistration(_) => Err( + Self::Error::Conversion("mac hash breaking change".to_string()), + ), + + v3::response::AuthenticatorResponseData::Registered(registered_response) => Ok( + v4::response::AuthenticatorResponseData::Registered(registered_response.into()), + ), + + v3::response::AuthenticatorResponseData::RemainingBandwidth( + remaining_bandwidth_response, + ) => Ok(v4::response::AuthenticatorResponseData::RemainingBandwidth( + remaining_bandwidth_response.into(), + )), + v3::response::AuthenticatorResponseData::TopUpBandwidth(top_up_response) => Ok( + v4::response::AuthenticatorResponseData::TopUpBandwidth(top_up_response.into()), + ), + } + } +} + impl TryFrom for v3::response::AuthenticatorResponseData { type Error = crate::Error; @@ -149,13 +181,10 @@ impl TryFrom for v3::response::Authenti authenticator_response_data: v4::response::AuthenticatorResponseData, ) -> Result { match authenticator_response_data { - v4::response::AuthenticatorResponseData::PendingRegistration( - 
pending_registration_response, - ) => Ok( - v3::response::AuthenticatorResponseData::PendingRegistration( - pending_registration_response.into(), - ), + v4::response::AuthenticatorResponseData::PendingRegistration(_) => Err( + Self::Error::Conversion("mac hash breaking change".to_string()), ), + v4::response::AuthenticatorResponseData::Registered(registered_response) => Ok( v3::response::AuthenticatorResponseData::Registered(registered_response.into()), ), @@ -173,8 +202,8 @@ impl TryFrom for v3::response::Authenti } } -impl From for v3::response::PendingRegistrationResponse { - fn from(value: v4::response::PendingRegistrationResponse) -> Self { +impl From for v3::response::RegisteredResponse { + fn from(value: v4::response::RegisteredResponse) -> Self { Self { request_id: value.request_id, reply_to: value.reply_to, @@ -183,8 +212,8 @@ impl From for v3::response::PendingRe } } -impl From for v3::response::RegisteredResponse { - fn from(value: v4::response::RegisteredResponse) -> Self { +impl From for v4::response::RegisteredResponse { + fn from(value: v3::response::RegisteredResponse) -> Self { Self { request_id: value.request_id, reply_to: value.reply_to, @@ -193,6 +222,16 @@ impl From for v3::response::RegisteredResponse } } +impl From for v4::response::RemainingBandwidthResponse { + fn from(value: v3::response::RemainingBandwidthResponse) -> Self { + Self { + request_id: value.request_id, + reply_to: value.reply_to, + reply: value.reply.map(Into::into), + } + } +} + impl From for v3::response::RemainingBandwidthResponse { fn from(value: v4::response::RemainingBandwidthResponse) -> Self { Self { @@ -203,11 +242,31 @@ impl From for v3::response::RemainingB } } -impl From for v3::registration::RegistrationData { - fn from(value: v4::registration::RegistrationData) -> Self { +impl From for v4::response::TopUpBandwidthResponse { + fn from(value: v3::response::TopUpBandwidthResponse) -> Self { + Self { + request_id: value.request_id, + reply_to: value.reply_to, + 
reply: value.reply.into(), + } + } +} + +impl From for v3::response::TopUpBandwidthResponse { + fn from(value: v4::response::TopUpBandwidthResponse) -> Self { Self { - nonce: value.nonce, - gateway_data: value.gateway_data.into(), + request_id: value.request_id, + reply_to: value.reply_to, + reply: value.reply.into(), + } + } +} + +impl From for v4::registration::RegistredData { + fn from(value: v3::registration::RegistredData) -> Self { + Self { + pub_key: value.pub_key, + private_ips: value.private_ip.into(), wg_port: value.wg_port, } } @@ -223,6 +282,14 @@ impl From for v3::registration::RegistredData { } } +impl From for v4::registration::RemainingBandwidthData { + fn from(value: v3::registration::RemainingBandwidthData) -> Self { + Self { + available_bandwidth: value.available_bandwidth, + } + } +} + impl From for v3::registration::RemainingBandwidthData { fn from(value: v4::registration::RemainingBandwidthData) -> Self { Self { @@ -230,3 +297,441 @@ impl From for v3::registration::Remain } } } + +#[cfg(test)] +mod tests { + use std::{ + net::{Ipv4Addr, Ipv6Addr}, + str::FromStr, + }; + + use nym_credentials_interface::CredentialSpendingData; + use nym_crypto::asymmetric::encryption::PrivateKey; + use nym_sphinx::addressing::Recipient; + use nym_wireguard_types::PeerPublicKey; + use x25519_dalek::PublicKey; + + use super::*; + use crate::util::tests::{CREDENTIAL_BYTES, RECIPIENT}; + + #[test] + fn upgrade_initial_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v3::request::AuthenticatorRequest::new_initial_request( + v3::registration::InitMessage::new(pub_key), + reply_to, + ); + let upgraded_msg = v4::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 4, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + 
v4::request::AuthenticatorRequestData::Initial(v4::registration::InitMessage { + pub_key + }) + ); + } + + #[test] + fn downgrade_initial_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v4::request::AuthenticatorRequest::new_initial_request( + v4::registration::InitMessage::new(pub_key), + reply_to, + ); + let downgraded_msg = v3::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v3::request::AuthenticatorRequestData::Initial(v3::registration::InitMessage { + pub_key + }) + ); + } + + #[test] + fn upgrade_final_req() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let ipv4 = Ipv4Addr::from_str("10.10.10.10").unwrap(); + let nonce = 42; + let gateway_client = v3::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + ipv4.into(), + nonce, + ); + let credential = Some(CredentialSpendingData::try_from_bytes(&CREDENTIAL_BYTES).unwrap()); + let final_message = v3::registration::FinalMessage { + gateway_client, + credential: credential.clone(), + }; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = + v3::request::AuthenticatorRequest::new_final_request(final_message, reply_to); + assert!(v4::request::AuthenticatorRequest::try_from(msg).is_err()); + } + + #[test] + fn downgrade_final_req() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let ipv4 = Ipv4Addr::from_str("10.10.10.10").unwrap(); + let private_ips = + v4::registration::IpPair::new(ipv4, 
Ipv6Addr::from_str("fc01::10").unwrap()); + let nonce = 42; + let gateway_client = v4::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ips, + nonce, + ); + let credential = Some(CredentialSpendingData::try_from_bytes(&CREDENTIAL_BYTES).unwrap()); + let final_message = v4::registration::FinalMessage { + gateway_client, + credential: credential.clone(), + }; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = + v4::request::AuthenticatorRequest::new_final_request(final_message, reply_to); + assert!(v3::request::AuthenticatorRequest::try_from(msg).is_err()); + } + + #[test] + fn upgrade_query_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v3::request::AuthenticatorRequest::new_query_request(pub_key, reply_to); + let upgraded_msg = v4::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 4, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v4::request::AuthenticatorRequestData::QueryBandwidth(pub_key) + ); + } + + #[test] + fn downgrade_query_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = v4::request::AuthenticatorRequest::new_query_request(pub_key, reply_to); + let downgraded_msg = v3::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v3::request::AuthenticatorRequestData::QueryBandwidth(pub_key) + ); + } + + #[test] + fn downgrade_topup_req() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let credential = 
CredentialSpendingData::try_from_bytes(&CREDENTIAL_BYTES).unwrap(); + let top_up_message = v4::topup::TopUpMessage { + pub_key, + credential: credential.clone(), + }; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let (msg, _) = + v4::request::AuthenticatorRequest::new_topup_request(top_up_message, reply_to); + let downgraded_msg = v3::request::AuthenticatorRequest::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v3::request::AuthenticatorRequestData::TopUpBandwidth(Box::new( + v3::topup::TopUpMessage { + pub_key, + credential + } + )) + ); + } + + #[test] + fn upgrade_pending_reg_resp() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let ipv4 = Ipv4Addr::from_str("10.10.10.10").unwrap(); + let nonce = 42; + let wg_port = 51822; + let gateway_data = v3::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + ipv4.into(), + nonce, + ); + let registration_data = v3::registration::RegistrationData { + nonce, + gateway_data, + wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v3::response::AuthenticatorResponse::new_pending_registration_success( + registration_data, + request_id, + reply_to, + ); + assert!(v4::response::AuthenticatorResponse::try_from(msg).is_err()); + } + + #[test] + fn downgrade_pending_reg_resp() { + let mut rng = rand::thread_rng(); + + let local_secret = PrivateKey::new(&mut rng); + let remote_secret = x25519_dalek::StaticSecret::random_from_rng(&mut rng); + let ipv4 = Ipv4Addr::from_str("10.10.10.10").unwrap(); + let private_ips = + v4::registration::IpPair::new(ipv4, Ipv6Addr::from_str("fc01::10").unwrap()); + let nonce = 42; + let wg_port = 51822; + 
let gateway_data = v4::registration::GatewayClient::new( + &local_secret, + (&remote_secret).into(), + private_ips, + nonce, + ); + let registration_data = v4::registration::RegistrationData { + nonce, + gateway_data, + wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v4::response::AuthenticatorResponse::new_pending_registration_success( + registration_data, + request_id, + reply_to, + ); + assert!(v3::response::AuthenticatorResponse::try_from(msg).is_err()); + } + + #[test] + fn upgrade_registered_resp() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let ipv4 = Ipv4Addr::from_str("10.1.10.10").unwrap(); + let private_ips = + v4::registration::IpPair::new(ipv4, Ipv6Addr::from_str("fc01::a0a").unwrap()); + let wg_port = 51822; + let registred_data = v3::registration::RegistredData { + pub_key, + private_ip: ipv4.into(), + wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v3::response::AuthenticatorResponse::new_registered( + registred_data, + reply_to, + request_id, + ); + let upgraded_msg = v4::response::AuthenticatorResponse::try_from(msg).unwrap(); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 4, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v4::response::AuthenticatorResponseData::Registered(v4::response::RegisteredResponse { + request_id, + reply_to, + reply: v4::registration::RegistredData { + wg_port, + pub_key, + private_ips + } + }) + ); + } + + #[test] + fn downgrade_registered_resp() { + let pub_key = PeerPublicKey::new(PublicKey::from([0; 32])); + let ipv4 = Ipv4Addr::from_str("10.10.10.10").unwrap(); + let private_ips = + v4::registration::IpPair::new(ipv4, Ipv6Addr::from_str("fc01::10").unwrap()); + let wg_port = 51822; + let registred_data = v4::registration::RegistredData { + pub_key, + private_ips, + 
wg_port, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v4::response::AuthenticatorResponse::new_registered( + registred_data, + reply_to, + request_id, + ); + let downgraded_msg = v3::response::AuthenticatorResponse::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v3::response::AuthenticatorResponseData::Registered(v3::response::RegisteredResponse { + request_id, + reply_to, + reply: v3::registration::RegistredData { + wg_port, + pub_key, + private_ip: ipv4.into() + } + }) + ); + } + + #[test] + fn upgrade_remaining_bandwidth_resp() { + let available_bandwidth = 42; + let remaining_bandwidth_data = Some(v3::registration::RemainingBandwidthData { + available_bandwidth, + }); + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v3::response::AuthenticatorResponse::new_remaining_bandwidth( + remaining_bandwidth_data, + reply_to, + request_id, + ); + let upgraded_msg = v4::response::AuthenticatorResponse::try_from(msg).unwrap(); + + assert_eq!( + upgraded_msg.protocol, + Protocol { + version: 4, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + upgraded_msg.data, + v4::response::AuthenticatorResponseData::RemainingBandwidth( + v4::response::RemainingBandwidthResponse { + request_id, + reply_to, + reply: Some(v4::registration::RemainingBandwidthData { + available_bandwidth, + }) + } + ) + ); + } + + #[test] + fn downgrade_remaining_bandwidth_resp() { + let available_bandwidth = 42; + let remaining_bandwidth_data = Some(v4::registration::RemainingBandwidthData { + available_bandwidth, + }); + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v4::response::AuthenticatorResponse::new_remaining_bandwidth( + 
remaining_bandwidth_data, + reply_to, + request_id, + ); + let downgraded_msg = v3::response::AuthenticatorResponse::try_from(msg).unwrap(); + + assert_eq!( + downgraded_msg.protocol, + Protocol { + version: 3, + service_provider_type: ServiceProviderType::Authenticator + } + ); + assert_eq!( + downgraded_msg.data, + v3::response::AuthenticatorResponseData::RemainingBandwidth( + v3::response::RemainingBandwidthResponse { + request_id, + reply_to, + reply: Some(v3::registration::RemainingBandwidthData { + available_bandwidth, + }) + } + ) + ); + } + + #[test] + fn downgrade_topup_resp() { + let available_bandwidth = 42; + let remaining_bandwidth_data = v4::registration::RemainingBandwidthData { + available_bandwidth, + }; + let request_id = 123; + let reply_to = Recipient::try_from_base58_string(RECIPIENT).unwrap(); + + let msg = v4::response::AuthenticatorResponse::new_topup_bandwidth( + remaining_bandwidth_data, + reply_to, + request_id, + ); + assert!(v3::response::AuthenticatorResponse::try_from(msg).is_err()); + } +} diff --git a/common/authenticator-requests/src/v4/registration.rs b/common/authenticator-requests/src/v4/registration.rs index a4a49c66a5..2595922f00 100644 --- a/common/authenticator-requests/src/v4/registration.rs +++ b/common/authenticator-requests/src/v4/registration.rs @@ -81,7 +81,7 @@ impl From for IpPair { } } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct InitMessage { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, @@ -93,7 +93,7 @@ impl InitMessage { } } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct FinalMessage { /// Gateway client data pub gateway_client: GatewayClient, @@ -102,28 +102,28 @@ pub struct FinalMessage { pub credential: Option, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct 
RegistrationData { pub nonce: u64, pub gateway_data: GatewayClient, pub wg_port: u16, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct RegistredData { pub pub_key: PeerPublicKey, pub private_ips: IpPair, pub wg_port: u16, } -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct RemainingBandwidthData { pub available_bandwidth: i64, } /// Client that wants to register sends its PublicKey bytes mac digest encrypted with a DH shared secret. /// Gateway/Nym node can then verify pub_key payload using the same process -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct GatewayClient { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, @@ -199,7 +199,7 @@ impl GatewayClient { // TODO: change the inner type into generic array of size HmacSha256::OutputSize // TODO2: rely on our internal crypto/hmac -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct ClientMac(Vec); impl fmt::Display for ClientMac { diff --git a/common/authenticator-requests/src/v4/request.rs b/common/authenticator-requests/src/v4/request.rs index aa4862b057..25cf0e0f1f 100644 --- a/common/authenticator-requests/src/v4/request.rs +++ b/common/authenticator-requests/src/v4/request.rs @@ -20,7 +20,7 @@ fn generate_random() -> u64 { rng.next_u64() } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct AuthenticatorRequest { pub protocol: Protocol, pub data: AuthenticatorRequestData, @@ -106,7 +106,7 @@ impl AuthenticatorRequest { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub enum AuthenticatorRequestData { Initial(InitMessage), Final(Box), diff --git a/common/authenticator-requests/src/v4/response.rs 
b/common/authenticator-requests/src/v4/response.rs index 370fc64671..9743e8db43 100644 --- a/common/authenticator-requests/src/v4/response.rs +++ b/common/authenticator-requests/src/v4/response.rs @@ -10,7 +10,7 @@ use crate::make_bincode_serializer; use super::VERSION; -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct AuthenticatorResponse { pub protocol: Protocol, pub data: AuthenticatorResponseData, @@ -120,7 +120,7 @@ impl AuthenticatorResponse { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub enum AuthenticatorResponseData { PendingRegistration(PendingRegistrationResponse), Registered(RegisteredResponse), @@ -128,28 +128,28 @@ pub enum AuthenticatorResponseData { TopUpBandwidth(TopUpBandwidthResponse), } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct PendingRegistrationResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: RegistrationData, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct RegisteredResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: RegistredData, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct RemainingBandwidthResponse { pub request_id: u64, pub reply_to: Recipient, pub reply: Option, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct TopUpBandwidthResponse { pub request_id: u64, pub reply_to: Recipient, diff --git a/common/authenticator-requests/src/v4/topup.rs b/common/authenticator-requests/src/v4/topup.rs index 31a61a0659..1163d07f12 100644 --- a/common/authenticator-requests/src/v4/topup.rs +++ b/common/authenticator-requests/src/v4/topup.rs @@ -5,7 +5,7 @@ use 
nym_credentials_interface::CredentialSpendingData; use nym_wireguard_types::PeerPublicKey; use serde::{Deserialize, Serialize}; -#[derive(Serialize, Deserialize, Debug, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] pub struct TopUpMessage { /// Base64 encoded x25519 public key pub pub_key: PeerPublicKey, diff --git a/common/service-provider-requests-common/src/lib.rs b/common/service-provider-requests-common/src/lib.rs index f9f0564e1d..3b294baba7 100644 --- a/common/service-provider-requests-common/src/lib.rs +++ b/common/service-provider-requests-common/src/lib.rs @@ -11,7 +11,7 @@ pub enum ServiceProviderType { Authenticator = 2, } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Protocol { pub version: u8, pub service_provider_type: ServiceProviderType, diff --git a/service-providers/authenticator/src/mixnet_listener.rs b/service-providers/authenticator/src/mixnet_listener.rs index 4235a9da15..753bcd2fbf 100644 --- a/service-providers/authenticator/src/mixnet_listener.rs +++ b/service-providers/authenticator/src/mixnet_listener.rs @@ -184,7 +184,12 @@ impl MixnetListener { v2::response::AuthenticatorResponse::new_pending_registration_success( v2::registration::RegistrationData { nonce: registration_data.nonce, - gateway_data: registration_data.gateway_data.clone().into(), + gateway_data: v2::registration::GatewayClient::new( + self.keypair().private_key(), + remote_public.inner(), + registration_data.gateway_data.private_ips.ipv4.into(), + registration_data.nonce, + ), wg_port: registration_data.wg_port, }, request_id, @@ -199,7 +204,12 @@ impl MixnetListener { v3::response::AuthenticatorResponse::new_pending_registration_success( v3::registration::RegistrationData { nonce: registration_data.nonce, - gateway_data: registration_data.gateway_data.clone().into(), + gateway_data: v3::registration::GatewayClient::new( + self.keypair().private_key(), + remote_public.inner(), 
+ registration_data.gateway_data.private_ips.ipv4.into(), + registration_data.nonce, + ), wg_port: registration_data.wg_port, }, request_id, From b8130443601195fb582d00a5d1794dae45bad816 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Fri, 13 Dec 2024 09:09:56 +0000 Subject: [PATCH 03/64] bugfix: make sure to apply gateway score filtering when choosing initial node (#5256) * bugfix: make sure to apply gateway score filtering when choosing initial node * mixfetch build fix --- .../src/cli_helpers/client_add_gateway.rs | 9 ++++-- .../src/cli_helpers/client_init.rs | 9 ++++-- common/client-core/src/init/helpers.rs | 32 ++++++------------- common/wasm/client-core/src/helpers.rs | 3 +- sdk/lib/socks5-listener/src/lib.rs | 2 +- sdk/rust/nym-sdk/src/mixnet/client.rs | 11 ++++++- wasm/client/src/client.rs | 1 + wasm/mix-fetch/src/client.rs | 1 + 8 files changed, 38 insertions(+), 30 deletions(-) diff --git a/common/client-core/src/cli_helpers/client_add_gateway.rs b/common/client-core/src/cli_helpers/client_add_gateway.rs index 022db8b9c3..56b1c3ad8e 100644 --- a/common/client-core/src/cli_helpers/client_add_gateway.rs +++ b/common/client-core/src/cli_helpers/client_add_gateway.rs @@ -115,8 +115,13 @@ where hardcoded_topology.get_gateways() } else { let mut rng = rand::thread_rng(); - crate::init::helpers::current_gateways(&mut rng, &core.client.nym_api_urls, user_agent) - .await? + crate::init::helpers::current_gateways( + &mut rng, + &core.client.nym_api_urls, + user_agent, + core.debug.topology.minimum_gateway_performance, + ) + .await? 
}; // since we're registering with a brand new gateway, diff --git a/common/client-core/src/cli_helpers/client_init.rs b/common/client-core/src/cli_helpers/client_init.rs index 0e3f3bf234..060c1192da 100644 --- a/common/client-core/src/cli_helpers/client_init.rs +++ b/common/client-core/src/cli_helpers/client_init.rs @@ -170,8 +170,13 @@ where hardcoded_topology.get_gateways() } else { let mut rng = rand::thread_rng(); - crate::init::helpers::current_gateways(&mut rng, &core.client.nym_api_urls, user_agent) - .await? + crate::init::helpers::current_gateways( + &mut rng, + &core.client.nym_api_urls, + user_agent, + core.debug.topology.minimum_gateway_performance, + ) + .await? }; let gateway_setup = GatewaySetup::New { diff --git a/common/client-core/src/init/helpers.rs b/common/client-core/src/init/helpers.rs index 60c692df9f..68b3b8d457 100644 --- a/common/client-core/src/init/helpers.rs +++ b/common/client-core/src/init/helpers.rs @@ -7,7 +7,7 @@ use futures::{SinkExt, StreamExt}; use log::{debug, info, trace, warn}; use nym_crypto::asymmetric::identity; use nym_gateway_client::GatewayClient; -use nym_topology::{gateway, mix}; +use nym_topology::gateway; use nym_validator_client::client::IdentityKeyRef; use nym_validator_client::UserAgent; use rand::{seq::SliceRandom, Rng}; @@ -82,6 +82,7 @@ pub async fn current_gateways( rng: &mut R, nym_apis: &[Url], user_agent: Option, + minimum_performance: u8, ) -> Result, ClientCoreError> { let nym_api = nym_apis .choose(rng) @@ -95,41 +96,26 @@ pub async fn current_gateways( log::debug!("Fetching list of gateways from: {nym_api}"); let gateways = client.get_all_basic_entry_assigned_nodes().await?; - log::debug!("Found {} gateways", gateways.len()); + info!("nym api reports {} gateways", gateways.len()); + log::trace!("Gateways: {:#?}", gateways); let valid_gateways = gateways .iter() + .filter(|g| g.performance.round_to_integer() >= minimum_performance) .filter_map(|gateway| gateway.try_into().ok()) .collect::>(); 
log::debug!("After checking validity: {}", valid_gateways.len()); log::trace!("Valid gateways: {:#?}", valid_gateways); - log::info!("nym-api reports {} valid gateways", valid_gateways.len()); + log::info!( + "and {} after validity and performance filtering", + valid_gateways.len() + ); Ok(valid_gateways) } -pub async fn current_mixnodes( - rng: &mut R, - nym_apis: &[Url], -) -> Result, ClientCoreError> { - let nym_api = nym_apis - .choose(rng) - .ok_or(ClientCoreError::ListOfNymApisIsEmpty)?; - let client = nym_validator_client::client::NymApiClient::new(nym_api.clone()); - - log::trace!("Fetching list of mixnodes from: {nym_api}"); - - let mixnodes = client.get_all_basic_active_mixing_assigned_nodes().await?; - let valid_mixnodes = mixnodes - .iter() - .filter_map(|mixnode| mixnode.try_into().ok()) - .collect::>(); - - Ok(valid_mixnodes) -} - #[cfg(not(target_arch = "wasm32"))] async fn connect(endpoint: &str) -> Result { match tokio::time::timeout(CONN_TIMEOUT, connect_async(endpoint)).await { diff --git a/common/wasm/client-core/src/helpers.rs b/common/wasm/client-core/src/helpers.rs index eee064e2cc..04d6960377 100644 --- a/common/wasm/client-core/src/helpers.rs +++ b/common/wasm/client-core/src/helpers.rs @@ -121,9 +121,10 @@ pub async fn setup_gateway_from_api( force_tls: bool, chosen_gateway: Option, nym_apis: &[Url], + minimum_performance: u8, ) -> Result { let mut rng = thread_rng(); - let gateways = current_gateways(&mut rng, nym_apis, None).await?; + let gateways = current_gateways(&mut rng, nym_apis, None, minimum_performance).await?; setup_gateway_wasm(client_store, force_tls, chosen_gateway, &gateways).await } diff --git a/sdk/lib/socks5-listener/src/lib.rs b/sdk/lib/socks5-listener/src/lib.rs index bc5350718c..3019402182 100644 --- a/sdk/lib/socks5-listener/src/lib.rs +++ b/sdk/lib/socks5-listener/src/lib.rs @@ -275,7 +275,7 @@ where specification: GatewaySelectionSpecification::UniformRemote { must_use_tls: false, }, - available_gateways: 
current_gateways(&mut rng, &nym_apis, None).await?, + available_gateways: current_gateways(&mut rng, &nym_apis, None, 50).await?, }); eprintln!("starting the socks5 client"); diff --git a/sdk/rust/nym-sdk/src/mixnet/client.rs b/sdk/rust/nym-sdk/src/mixnet/client.rs index 54885b24eb..118885a650 100644 --- a/sdk/rust/nym-sdk/src/mixnet/client.rs +++ b/sdk/rust/nym-sdk/src/mixnet/client.rs @@ -457,7 +457,16 @@ where let user_agent = self.user_agent.clone(); let mut rng = OsRng; - let available_gateways = current_gateways(&mut rng, &nym_api_endpoints, user_agent).await?; + let available_gateways = current_gateways( + &mut rng, + &nym_api_endpoints, + user_agent, + self.config + .debug_config + .topology + .minimum_gateway_performance, + ) + .await?; Ok(GatewaySetup::New { specification: selection_spec, diff --git a/wasm/client/src/client.rs b/wasm/client/src/client.rs index 645fda613c..4bb9f2c2f2 100644 --- a/wasm/client/src/client.rs +++ b/wasm/client/src/client.rs @@ -165,6 +165,7 @@ impl NymClientBuilder { self.force_tls, user_chosen, &nym_api_endpoints, + self.config.base.debug.topology.minimum_gateway_performance, ) .await? 
}; diff --git a/wasm/mix-fetch/src/client.rs b/wasm/mix-fetch/src/client.rs index 9e4bff5e32..e905d01aad 100644 --- a/wasm/mix-fetch/src/client.rs +++ b/wasm/mix-fetch/src/client.rs @@ -102,6 +102,7 @@ impl MixFetchClientBuilder { self.force_tls, user_chosen, &nym_api_endpoints, + self.config.base.debug.topology.minimum_gateway_performance, ) .await?; From 684d7ac1a2d4ea3a2bd55982b89474aa4773b39c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Fri, 13 Dec 2024 10:03:43 +0000 Subject: [PATCH 04/64] removed legacy socks5 listener (#5259) --- .../workflows/publish-nyms5-android-apk.yml | 8 - Cargo.lock | 245 ------------ Cargo.toml | 5 - sdk/lib/socks5-listener/Cargo.toml | 39 -- sdk/lib/socks5-listener/Makefile | 47 --- .../socks5-listener/build-android-darwin.sh | 10 - sdk/lib/socks5-listener/build-android.sh | 78 ---- sdk/lib/socks5-listener/socks5_c.h | 108 ----- sdk/lib/socks5-listener/src/android.rs | 97 ----- .../src/bin/generate-headers.rs | 6 - sdk/lib/socks5-listener/src/config/mod.rs | 101 ----- .../socks5-listener/src/config/persistence.rs | 26 -- .../socks5-listener/src/config/template.rs | 111 ----- sdk/lib/socks5-listener/src/lib.rs | 378 ------------------ sdk/lib/socks5-listener/src/persistence.rs | 65 --- 15 files changed, 1324 deletions(-) delete mode 100644 sdk/lib/socks5-listener/Cargo.toml delete mode 100644 sdk/lib/socks5-listener/Makefile delete mode 100755 sdk/lib/socks5-listener/build-android-darwin.sh delete mode 100755 sdk/lib/socks5-listener/build-android.sh delete mode 100644 sdk/lib/socks5-listener/socks5_c.h delete mode 100644 sdk/lib/socks5-listener/src/android.rs delete mode 100644 sdk/lib/socks5-listener/src/bin/generate-headers.rs delete mode 100644 sdk/lib/socks5-listener/src/config/mod.rs delete mode 100644 sdk/lib/socks5-listener/src/config/persistence.rs delete mode 100644 sdk/lib/socks5-listener/src/config/template.rs delete mode 100644 sdk/lib/socks5-listener/src/lib.rs delete mode 100644 
sdk/lib/socks5-listener/src/persistence.rs diff --git a/.github/workflows/publish-nyms5-android-apk.yml b/.github/workflows/publish-nyms5-android-apk.yml index ab665ea660..b4518d57c3 100644 --- a/.github/workflows/publish-nyms5-android-apk.yml +++ b/.github/workflows/publish-nyms5-android-apk.yml @@ -56,14 +56,6 @@ jobs: rustup target add aarch64-linux-android \ x86_64-linux-android - - name: Build lib nym-socks5-listener - working-directory: sdk/lib/socks5-listener/ - env: - RELEASE: true - RUSTFLAGS: "-C link-args=-Wl,--hash-style=gnu" - # build for arm64 and x86_64 - run: ./build-android.sh aarch64 x86_64 - - name: Build APKs (unsigned) working-directory: nym-connect/native/android env: diff --git a/Cargo.lock b/Cargo.lock index 4b69324120..c7c85e0d9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -140,23 +140,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" -[[package]] -name = "android_log-sys" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ecc8056bf6ab9892dcd53216c83d1597487d7dacac16c8df6b877d127df9937" - -[[package]] -name = "android_logger" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b07e8e73d720a1f2e4b6014766e6039fd2e96a4fa44e2a78d0e1fa2ff49826" -dependencies = [ - "android_log-sys", - "env_filter", - "log", -] - [[package]] name = "android_system_properties" version = "0.1.5" @@ -981,12 +964,6 @@ dependencies = [ "serde", ] -[[package]] -name = "cesu8" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" - [[package]] name = "cfg-if" version = "1.0.0" @@ -1182,16 +1159,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "combine" -version = "4.6.7" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" -dependencies = [ - "bytes", - "memchr", -] - [[package]] name = "comfy-table" version = "7.1.1" @@ -2482,26 +2449,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "ext-trait" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d772df1c1a777963712fb68e014235e80863d6a91a85c4e06ba2d16243a310e5" -dependencies = [ - "ext-trait-proc_macros", -] - -[[package]] -name = "ext-trait-proc_macros" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab7934152eaf26aa5aa9f7371408ad5af4c31357073c9e84c3b9d7f11ad639a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "extension-storage" version = "1.3.0-rc.0" @@ -2518,21 +2465,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "extension-traits" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a296e5a895621edf9fa8329c83aa1cb69a964643e36cf54d8d7a69b789089537" -dependencies = [ - "ext-trait", -] - -[[package]] -name = "extern-c" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320bea982e85d42441eb25c49b41218e7eaa2657e8f90bc4eca7437376751e23" - [[package]] name = "eyre" version = "0.6.12" @@ -3730,28 +3662,6 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" -[[package]] -name = "jni" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" -dependencies = [ - "cesu8", - "cfg-if", - "combine", - "jni-sys", - "log", - "thiserror", - "walkdir", - "windows-sys 0.45.0", -] - -[[package]] -name = "jni-sys" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" - [[package]] name = "js-sys" version = "0.3.72" @@ -3941,22 +3851,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "macro_rules_attribute" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf0c9b980bf4f3a37fd7b1c066941dd1b1d0152ce6ee6e8fe8c49b9f6810d862" -dependencies = [ - "macro_rules_attribute-proc_macro", - "paste", -] - -[[package]] -name = "macro_rules_attribute-proc_macro" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58093314a45e00c77d5c508f76e77c3396afbbc0d01506e7fae47b018bac2b1d" - [[package]] name = "macroific" version = "1.3.1" @@ -6394,28 +6288,6 @@ dependencies = [ "url", ] -[[package]] -name = "nym-socks5-listener" -version = "0.1.0" -dependencies = [ - "android_logger", - "anyhow", - "futures", - "jni", - "lazy_static", - "log", - "nym-bin-common", - "nym-client-core", - "nym-config", - "nym-credential-storage", - "nym-crypto", - "nym-socks5-client-core", - "rand", - "safer-ffi", - "serde", - "tokio", -] - [[package]] name = "nym-socks5-proxy-helpers" version = "0.1.0" @@ -7548,16 +7420,6 @@ dependencies = [ "log", ] -[[package]] -name = "prettyplease" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" -dependencies = [ - "proc-macro2", - "syn 1.0.109", -] - [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -8455,38 +8317,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" -[[package]] -name = "safer-ffi" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435fdd58b61a6f1d8545274c1dfa458e905ff68c166e65e294a0130ef5e675bd" -dependencies = [ - "extern-c", - "inventory", - "libc", - 
"macro_rules_attribute", - "paste", - "safer_ffi-proc_macros", - "scopeguard", - "stabby", - "uninit", - "unwind_safe", - "with_builtin_macros", -] - -[[package]] -name = "safer_ffi-proc_macros" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f25be5ba5f319542edb31925517e0380245ae37df50a9752cdbc05ef948156" -dependencies = [ - "macro_rules_attribute", - "prettyplease", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "same-file" version = "1.0.6" @@ -8919,12 +8749,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2-const-stable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f179d4e11094a893b82fff208f74d448a7512f99f5a0acbd5c679b705f83ed9" - [[package]] name = "sharded-slab" version = "0.1.7" @@ -9325,40 +9149,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "stabby" -version = "36.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311d6bcf0070c462ff626122ec2246f42bd2acd44b28908eedbfd07d500c7d99" -dependencies = [ - "rustversion", - "stabby-abi", -] - -[[package]] -name = "stabby-abi" -version = "36.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6daae1a0707399f56d27fce7f212e50e31d215112a447e1bbcd837ae1bf5f49" -dependencies = [ - "rustversion", - "sha2-const-stable", - "stabby-macros", -] - -[[package]] -name = "stabby-macros" -version = "36.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43cf89a0cc9131279235baf8599b0e073fbcb096419204de0cc5d1a48ae73f74" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "rand", - "syn 1.0.109", -] - [[package]] name = "stable-pattern" version = "0.1.0" @@ -10677,15 +10467,6 @@ dependencies = [ "weedle2", ] -[[package]] -name = "uninit" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3e130f2ed46ca5d8ec13c7ff95836827f92f5f5f37fd2b2bf16f33c408d98bb6" -dependencies = [ - "extension-traits", -] - [[package]] name = "universal-hash" version = "0.5.1" @@ -10708,12 +10489,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" -[[package]] -name = "unwind_safe" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0976c77def3f1f75c4ef892a292c31c0bbe9e3d0702c63044d7c76db298171a3" - [[package]] name = "url" version = "2.5.2" @@ -11441,26 +11216,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "with_builtin_macros" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a59d55032495429b87f9d69954c6c8602e4d3f3e0a747a12dea6b0b23de685da" -dependencies = [ - "with_builtin_macros-proc_macros", -] - -[[package]] -name = "with_builtin_macros-proc_macros" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15bd7679c15e22924f53aee34d4e448c45b674feb6129689af88593e129f8f42" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "wyz" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index d833eb8fe3..52ef268311 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,7 +107,6 @@ members = [ "sdk/ffi/cpp", "sdk/ffi/go", "sdk/ffi/shared", - "sdk/lib/socks5-listener", "sdk/rust/nym-sdk", "service-providers/authenticator", "service-providers/common", @@ -417,10 +416,6 @@ web-sys = "0.3.72" [profile.dev.package.sqlx-macros] opt-level = 3 -[profile.release.package.nym-socks5-listener] -strip = true -codegen-units = 1 - [profile.release.package.nym-client-wasm] # lto = true opt-level = 'z' diff --git a/sdk/lib/socks5-listener/Cargo.toml b/sdk/lib/socks5-listener/Cargo.toml deleted file mode 100644 index ac8c4d3e81..0000000000 --- a/sdk/lib/socks5-listener/Cargo.toml +++ /dev/null @@ 
-1,39 +0,0 @@ -[package] -name = "nym-socks5-listener" -version = "0.1.0" -edition = "2021" -license.workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[[bin]] -name = "generate-headers" -required-features = ["headers"] - -[lib] -crate-type = ["cdylib", "staticlib", "rlib"] - -[dependencies] -anyhow = { workspace = true } -futures = { workspace = true } -lazy_static = { workspace = true } -nym-bin-common = { path = "../../../common/bin-common"} -nym-client-core = { path = "../../../common/client-core", default-features = false } -nym-config-common = { path = "../../../common/config", package = "nym-config" } -nym-credential-storage = { path = "../../../common/credential-storage" } -nym-crypto = { path = "../../../common/crypto" } -nym-socks5-client-core = { path = "../../../common/socks5-client-core", default-features = false } -serde = { workspace = true } -tokio = { workspace = true, features = ["sync", "time"] } -log = { workspace = true } -rand = { workspace = true } - -safer-ffi = { workspace = true } - -[target.'cfg(target_os="android")'.dependencies] -jni = { version = "0.21", default-features = false } -android_logger = "0.14.1" - - -[features] -headers = ["safer-ffi/headers"] diff --git a/sdk/lib/socks5-listener/Makefile b/sdk/lib/socks5-listener/Makefile deleted file mode 100644 index 85edf5b352..0000000000 --- a/sdk/lib/socks5-listener/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -UNAME_S := $(shell uname -s) - -# Default arch -ANDROID_TOOLCHAIN_ARCH=linux-x86_64 -ifeq ($(UNAME_S),Linux) - ANDROID_TOOLCHAIN_ARCH=linux-x86_64 -endif -ifeq ($(UNAME_S),Darwin) - ANDROID_TOOLCHAIN_ARCH=darwin-x86_64 -endif - -ANDROID_TOOLCHAIN=$(NDK_HOME)/toolchains/llvm/prebuilt/$(ANDROID_TOOLCHAIN_ARCH) -ANDROID_API=33 -ANDROID_TARGET_CC=$(ANDROID_TOOLCHAIN)/bin/x86_64-linux-android$(ANDROID_API)-clang -ANDROID_TARGET_AR=$(ANDROID_TOOLCHAIN)/bin/llvm-ar 
-ANDROID_TARGET_RANLIB=$(ANDROID_TOOLCHAIN)/bin/llvm-ranlib -ANDROID_CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER=$(ANDROID_TOOLCHAIN)/bin/x86_64-linux-android$(ANDROID_API)-clang - -# Common environment -ANDROID_ENV = \ - TARGET_CC=$(ANDROID_TARGET_CC) \ - TARGET_AR=$(ANDROID_TARGET_AR) \ - TARGET_RANLIB=$(ANDROID_TARGET_RANLIB) \ - CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER=$(ANDROID_CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER) - -ANDROID_TARGET=x86_64-linux-android - -none: - @echo "No default target. Please specify a target." - -gen-headers: - cargo run --features headers --bin generate-headers - -#gen-header: -# cbindgen --lang c --crate socks5-c --output socks5_c.h - -android: - $(ANDROID_ENV) cargo build --lib --target $(ANDROID_TARGET) - -android-release: - $(ANDROID_ENV) cargo build --lib --target $(ANDROID_TARGET) --release - -ios: - cargo lipo - -ios-release: - cargo lipo --release diff --git a/sdk/lib/socks5-listener/build-android-darwin.sh b/sdk/lib/socks5-listener/build-android-darwin.sh deleted file mode 100755 index 73a152076f..0000000000 --- a/sdk/lib/socks5-listener/build-android-darwin.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -export API=33 -export TOOLCHAIN="$NDK_HOME/toolchains/llvm/prebuilt/darwin-x86_64" -export TARGET_CC="$TOOLCHAIN/bin/x86_64-linux-android$API-clang" -export TARGET_AR="$TOOLCHAIN/bin/llvm-ar" -export TARGET_RANLIB="$TOOLCHAIN/bin/llvm-ranlib" -export CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="$TOOLCHAIN/bin/x86_64-linux-android$API-clang" - -cargo build --lib --target x86_64-linux-android --release diff --git a/sdk/lib/socks5-listener/build-android.sh b/sdk/lib/socks5-listener/build-android.sh deleted file mode 100755 index 8f199d347a..0000000000 --- a/sdk/lib/socks5-listener/build-android.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash -# -# Usage -# -# build-android.sh [ARCH ...] 
-# -# This script builds the lib for android and moves the shared -# objects (*.so) into the right app's directories -# -# ARCH: -# - aarch64 (arm 64) -# - x86_64 (classic PC 64) -# - i686 (x86) -# - armv7 -# -# ⚠ to build for release set the env var `RELEASE=true` - -set -E -set -o pipefail -trap 'catch $? ${FUNCNAME[0]:-main} $LINENO' ERR - -# ANSI style codes -RED="\e[38;5;1m" # red -GRN="\e[38;5;2m" # green -YLW="\e[38;5;3m" # yellow -BLD="\e[1m" # bold -RS="\e[0m" # style reset -# bold variants -B_RED="$BLD$RED" -B_GRN="$BLD$GRN" -B_YLW="$BLD$YLW" - -catch() { - echo -e " $B_RED✗$RS unexpected error, $BLD$2$RS [$BLD$1$RS] L#$BLD$3$RS" - exit 1 -} - -export API=33 -export TOOLCHAIN="$NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64" -export TARGET_AR="$TOOLCHAIN/bin/llvm-ar" -export TARGET_RANLIB="$TOOLCHAIN/bin/llvm-ranlib" -export CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="$TOOLCHAIN/bin/x86_64-linux-android$API-clang" -export CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="$TOOLCHAIN/bin/aarch64-linux-android$API-clang" - -# arch mapping between Rust targets (keys) and Android ABIs (values) -# https://developer.android.com/ndk/guides/abis.html -declare -A arch_map=([x86_64]="x86_64" [aarch64]="arm64-v8a" [armv7]="armeabi-v7a" [i686]="x86") - -output_dir=../../../target -jni_dir=../../../nym-connect/native/android/app/src/main/jniLibs -lib=libnym_socks5_listener.so - -build () { - abi="${arch_map[$1]}" - echo -e " $B_YLW⚡$RS building for arch $BLD$1$RS" - export TARGET_CC="$TOOLCHAIN/bin/$1-linux-android$API-clang" - if [ -a "$jni_dir/$abi/$lib" ]; then - # remove any previously built library - rm "$jni_dir/$abi/$lib" - fi - if [ "$RELEASE" = true ]; then - cargo build --lib --target "$1-linux-android" --release - mv "$output_dir/$1-linux-android/release/$lib" "$jni_dir/$abi/" - else - cargo build --lib --target "$1-linux-android" - mv "$output_dir/$1-linux-android/debug/$lib" "$jni_dir/$abi/" - fi - echo -e " $B_GRN✓$RS lib built successfully for $BLD$1$RS, moved 
under app's dir$BLD jniLibs/$abi/$RS" -} - -for arch in "$@"; do - if [ "${arch_map[$arch]}" ]; then - build "$arch" - else - echo -e " $B_RED✗$RS unknown arch $BLD$arch$RS" - exit 1 - fi -done diff --git a/sdk/lib/socks5-listener/socks5_c.h b/sdk/lib/socks5-listener/socks5_c.h deleted file mode 100644 index e5a570cba4..0000000000 --- a/sdk/lib/socks5-listener/socks5_c.h +++ /dev/null @@ -1,108 +0,0 @@ -/*! \file */ -/******************************************* - * * - * File auto-generated by `::safer_ffi`. * - * * - * Do not manually edit this file. * - * * - *******************************************/ - -#ifndef __RUST_NYM_SOCKS5_LISTENER__ -#define __RUST_NYM_SOCKS5_LISTENER__ -#ifdef __cplusplus -extern "C" { -#endif - - -#include -#include - -/** */ -/** \remark Has the same ABI as `uint8_t` **/ -#ifdef DOXYGEN -typedef -#endif -enum ClientState { - /** */ - CLIENT_STATE_UNINITIALISED, - /** */ - CLIENT_STATE_CONNECTED, - /** */ - CLIENT_STATE_DISCONNECTED, -} -#ifndef DOXYGEN -; typedef uint8_t -#endif -ClientState_t; - -/** \brief - * `&'lt mut (dyn 'lt + Send + FnMut(A1) -> Ret)` - */ -typedef struct RefDynFnMut1_void_char_ptr { - /** */ - void * env_ptr; - - /** */ - void (*call)(void *, char *); -} RefDynFnMut1_void_char_ptr_t; - -/** \brief - * `&'lt mut (dyn 'lt + Send + FnMut() -> Ret)` - */ -typedef struct RefDynFnMut0_void { - /** */ - void * env_ptr; - - /** */ - void (*call)(void *); -} RefDynFnMut0_void_t; - -/** */ -void -blocking_run_client ( - char const * storage_directory, - char const * service_provider, - RefDynFnMut1_void_char_ptr_t on_start_callback, - RefDynFnMut0_void_t on_shutdown_callback); - -/** */ -char * -existing_service_provider ( - char const * storage_directory); - -/** */ -ClientState_t -get_client_state (void); - -/** */ -void -initialise_logger (void); - -/** */ -void -reset_client_data ( - char const * root_directory); - -/** */ -void -rust_free_string ( - char * string); - -/** */ -void -start_client ( - char const * 
storage_directory, - char const * service_provider, - RefDynFnMut1_void_char_ptr_t on_start_callback, - RefDynFnMut0_void_t on_shutdown_callback); - -/** */ -void -stop_client (void); - - -#ifdef __cplusplus -} /* extern \"C\" */ -#endif - -#endif /* __RUST_NYM_SOCKS5_LISTENER__ */ diff --git a/sdk/lib/socks5-listener/src/android.rs b/sdk/lib/socks5-listener/src/android.rs deleted file mode 100644 index b8c6032aef..0000000000 --- a/sdk/lib/socks5-listener/src/android.rs +++ /dev/null @@ -1,97 +0,0 @@ -use crate::ClientState; -use ::safer_ffi::prelude::*; -use jni::{ - objects::{JClass, JObject, JString}, - sys::jint, - JNIEnv, -}; -use std::sync::{Arc, Mutex}; - -fn init_jni_logger() { - use android_logger::{Config, FilterBuilder}; - use log::LevelFilter; - - android_logger::init_once( - Config::default() - .with_max_level(LevelFilter::Trace) - .with_tag("libnyms5") - .with_filter( - FilterBuilder::new() - .parse("debug,tungstenite=warn,mio=warn,tokio_tungstenite=warn") - .build(), - ), - ); - log::debug!("Logger initialized"); -} - -/// Blocking call that starts the socks5 listener -#[no_mangle] -pub unsafe extern "C" fn Java_net_nymtech_nyms5_NymProxy_startClient( - mut env: JNIEnv, - _class: JClass, - service_provider: JString, - start_cb: JObject, - stop_cb: JObject, -) { - init_jni_logger(); - - let sp_input: String = env - .get_string(&service_provider) - .expect("Couldn't get java string!") - .into(); - - log::debug!("using sp {}", sp_input); - - let service_provider = char_p::new(sp_input.as_str()); - - let arced = Arc::new(Mutex::new(env)); - let env_start = arced.clone(); - let env_stop = arced.clone(); - - crate::blocking_run_client( - None, - Some(service_provider.as_ref()), - move |_| { - log::debug!("client connected"); - env_start - .lock() - .unwrap() - .call_method(&start_cb, "onStart", "()V", &[]) - .expect("failed to call Java callbacks"); - }, - move || { - log::debug!("client disconnected"); - env_stop - .lock() - .unwrap() - 
.call_method(&stop_cb, "onStop", "()V", &[]) - .expect("failed to call Java callbacks"); - }, - ); -} - -#[no_mangle] -pub unsafe extern "C" fn Java_net_nymtech_nyms5_NymProxy_stopClient(_env: JNIEnv, _class: JClass) { - crate::stop_client(); -} - -#[no_mangle] -pub unsafe extern "C" fn Java_net_nymtech_nyms5_NymProxy_getClientState( - _env: JNIEnv, - _class: JClass, -) -> jint { - let state = crate::get_client_state(); - log::debug!("client state {:?}", state); - - match state { - ClientState::Uninitialised => 0, - ClientState::Connected => 1, - ClientState::Disconnected => 2, - } -} - -#[no_mangle] -pub unsafe extern "C" fn Java_net_nymtech_nyms5_NymProxy_pingClient(_env: JNIEnv, _class: JClass) { - log::debug!("pong"); - crate::ping_client(); -} diff --git a/sdk/lib/socks5-listener/src/bin/generate-headers.rs b/sdk/lib/socks5-listener/src/bin/generate-headers.rs deleted file mode 100644 index 70a7527b01..0000000000 --- a/sdk/lib/socks5-listener/src/bin/generate-headers.rs +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -fn main() -> ::std::io::Result<()> { - ::nym_socks5_listener::generate_headers() -} diff --git a/sdk/lib/socks5-listener/src/config/mod.rs b/sdk/lib/socks5-listener/src/config/mod.rs deleted file mode 100644 index 5ace48c88f..0000000000 --- a/sdk/lib/socks5-listener/src/config/mod.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use crate::config::persistence::MobileSocksClientPaths; -use crate::config::template::CONFIG_TEMPLATE; -use nym_bin_common::logging::LoggingSettings; -use nym_config_common::{ - read_config_from_toml_file, save_formatted_config_to_file, NymConfigTemplate, - DEFAULT_CONFIG_DIR, DEFAULT_CONFIG_FILENAME, DEFAULT_DATA_DIR, NYM_DIR, -}; -use nym_socks5_client_core::config::Config as CoreConfig; -use serde::{Deserialize, Serialize}; -use std::io; -use std::path::{Path, PathBuf}; - -mod 
persistence; -mod template; - -const DEFAULT_SOCKS5_CLIENTS_DIR: &str = "socks5-clients"; - -/// Derive default path to clients's config directory. -/// It should get resolved to `$HOME/.nym/socks5-clients//config` -pub fn config_directory_from_root, R: AsRef>(root: P, id: R) -> PathBuf { - root.as_ref() - .join(NYM_DIR) - .join(DEFAULT_SOCKS5_CLIENTS_DIR) - .join(id) - .join(DEFAULT_CONFIG_DIR) -} - -/// Derive default path to client's config file. -/// It should get resolved to `$HOME/.nym/socks5-clients//config/config.toml` -pub fn config_filepath_from_root, R: AsRef>(root: P, id: R) -> PathBuf { - config_directory_from_root(root, id).join(DEFAULT_CONFIG_FILENAME) -} - -/// Derive default path to client's data directory where files, such as keys, are stored. -/// It should get resolved to `$HOME/.nym/socks5-clients//data` -pub fn data_directory_from_root, R: AsRef>(root: P, id: R) -> PathBuf { - root.as_ref() - .join(NYM_DIR) - .join(DEFAULT_SOCKS5_CLIENTS_DIR) - .join(id) - .join(DEFAULT_DATA_DIR) -} - -#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] -#[serde(deny_unknown_fields)] -pub struct Config { - pub core: CoreConfig, - - pub storage_paths: Option, - - pub logging: LoggingSettings, -} - -impl NymConfigTemplate for Config { - fn template(&self) -> &'static str { - CONFIG_TEMPLATE - } -} - -impl Config { - pub fn new(storage_root: Option

, id: S, provider_mix_address: S) -> Self - where - P: AsRef, - S: AsRef, - { - Config { - core: CoreConfig::new( - id.as_ref(), - env!("CARGO_PKG_VERSION"), - provider_mix_address.as_ref(), - ), - storage_paths: storage_root.map(|storage_root| { - MobileSocksClientPaths::new_default(data_directory_from_root( - storage_root, - id.as_ref(), - )) - }), - logging: Default::default(), - } - } - - pub fn read_from_toml_file>(path: P) -> io::Result { - read_config_from_toml_file(path) - } - - pub fn read_from_default_path, R: AsRef>( - storage_root: P, - id: R, - ) -> io::Result { - Self::read_from_toml_file(config_filepath_from_root(storage_root, id)) - } - - pub fn save_to_default_location>(&self, storage_root: P) -> io::Result<()> { - let config_save_location: PathBuf = - config_filepath_from_root(storage_root, &self.core.base.client.id); - save_formatted_config_to_file(self, config_save_location) - } -} diff --git a/sdk/lib/socks5-listener/src/config/persistence.rs b/sdk/lib/socks5-listener/src/config/persistence.rs deleted file mode 100644 index 525ea7066f..0000000000 --- a/sdk/lib/socks5-listener/src/config/persistence.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use crate::config::data_directory_from_root; -use nym_client_core::config::disk_persistence::CommonClientPaths; -use serde::{Deserialize, Serialize}; -use std::path::Path; - -#[derive(Debug, Deserialize, PartialEq, Eq, Serialize, Clone)] -pub struct MobileSocksClientPaths { - #[serde(flatten)] - pub common_paths: CommonClientPaths, -} - -impl MobileSocksClientPaths { - pub fn new_default>(base_data_directory: P) -> Self { - MobileSocksClientPaths { - common_paths: CommonClientPaths::new_base(base_data_directory), - } - } - - pub fn change_root, R: AsRef>(&mut self, new_root: P, id: R) { - let new_data_dir = data_directory_from_root(new_root, id); - self.common_paths = CommonClientPaths::new_base(new_data_dir) - } -} diff --git 
a/sdk/lib/socks5-listener/src/config/template.rs b/sdk/lib/socks5-listener/src/config/template.rs deleted file mode 100644 index e2ad4c414d..0000000000 --- a/sdk/lib/socks5-listener/src/config/template.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -// While using normal toml marshalling would have been way simpler with less overhead, -// I think it's useful to have comments attached to the saved config file to explain behaviour of -// particular fields. -// Note: any changes to the template must be reflected in the appropriate structs. -pub(crate) const CONFIG_TEMPLATE: &str = r#" -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -##### main base client config options ##### - -[core.client] -# Version of the client for which this configuration was created. -version = '{{ core.client.version }}' - -# Human readable ID of this particular client. -id = '{{ core.client.id }}' - -# Indicates whether this client is running in a disabled credentials mode, thus attempting -# to claim bandwidth without presenting bandwidth credentials. -disabled_credentials_mode = {{ core.client.disabled_credentials_mode }} - -# Addresses to nyxd validators via which the client can communicate with the chain. -nyxd_urls = [ - {{#each core.client.nyxd_urls }} - '{{this}}', - {{/each}} -] - -# Addresses to APIs running on validator from which the client gets the view of the network. -nym_api_urls = [ - {{#each core.client.nym_api_urls }} - '{{this}}', - {{/each}} -] - -[storage_paths] - -# Path to file containing private identity key. -keys.private_identity_key_file = '{{ storage_paths.keys.private_identity_key_file }}' - -# Path to file containing public identity key. -keys.public_identity_key_file = '{{ storage_paths.keys.public_identity_key_file }}' - -# Path to file containing private encryption key. 
-keys.private_encryption_key_file = '{{ storage_paths.keys.private_encryption_key_file }}' - -# Path to file containing public encryption key. -keys.public_encryption_key_file = '{{ storage_paths.keys.public_encryption_key_file }}' - -# Path to file containing key used for encrypting and decrypting the content of an -# acknowledgement so that nobody besides the client knows which packet it refers to. -keys.ack_key_file = '{{ storage_paths.keys.ack_key_file }}' - -# Path to the database containing bandwidth credentials -credentials_database = '{{ storage_paths.credentials_database }}' - -# Path to the persistent store for received reply surbs, unused encryption keys and used sender tags. -reply_surb_database = '{{ storage_paths.reply_surb_database }}' - -# Path to the file containing information about gateways used by this client, -# i.e. details such as their public keys, owner addresses or the network information. -gateway_registrations = '{{ storage_paths.gateway_registrations }}' - -##### socket config options ##### - -[core.socks5] - -# The mix address of the provider to which all requests are going to be sent. -provider_mix_address = '{{ core.socks5.provider_mix_address }}' - -# The address on which the client will be listening for incoming requests -# (default: 127.0.0.1:1080) -bind_address = '{{ core.socks5.bind_address }}' - -# Specifies whether this client is going to use an anonymous sender tag for communication with the service provider. -# While this is going to hide its actual address information, it will make the actual communication -# slower and consume nearly double the bandwidth as it will require sending reply SURBs. -# -# Note that some service providers might not support this. 
-send_anonymously = {{ core.socks5.send_anonymously }} - -##### logging configuration options ##### - -[logging] - -# TODO - - -##### debug configuration options ##### -# The following options should not be modified unless you know EXACTLY what you are doing -# as if set incorrectly, they may impact your anonymity. - -# [core.socks5.socks5_debug] - - -[core.debug] - -[core.debug.traffic] -average_packet_delay = '{{ core.debug.traffic.average_packet_delay }}' -message_sending_average_delay = '{{ core.debug.traffic.message_sending_average_delay }}' - -[core.debug.acknowledgements] -average_ack_delay = '{{ core.debug.acknowledgements.average_ack_delay }}' - -[core.debug.cover_traffic] -loop_cover_traffic_average_delay = '{{ core.debug.cover_traffic.loop_cover_traffic_average_delay }}' - -"#; diff --git a/sdk/lib/socks5-listener/src/lib.rs b/sdk/lib/socks5-listener/src/lib.rs deleted file mode 100644 index 3019402182..0000000000 --- a/sdk/lib/socks5-listener/src/lib.rs +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use crate::config::{config_filepath_from_root, Config}; -use crate::persistence::MobileClientStorage; -use ::safer_ffi::prelude::*; -use anyhow::{anyhow, Result}; -use lazy_static::lazy_static; -use log::{debug, info, warn}; -use nym_bin_common::logging::setup_logging; -use nym_client_core::init::helpers::current_gateways; -use nym_client_core::init::types::{GatewaySelectionSpecification, GatewaySetup}; -use nym_config_common::defaults::setup_env; -use nym_socks5_client_core::NymClient as Socks5NymClient; -use rand::rngs::OsRng; -use safer_ffi::char_p::char_p_boxed; -use std::path::PathBuf; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; -use std::time::Duration; -use tokio::runtime::Runtime; -use tokio::sync::{Mutex, Notify}; -use tokio::time::{sleep, Instant}; - -#[cfg(target_os = "android")] -pub mod android; -mod config; -mod persistence; - -static SOCKS5_CONFIG_ID: 
&str = "mobile-socks5-test"; -const ANDROID_HEALTHCHECK_INTERVAL: Duration = Duration::from_secs(5); -const HEALTHCHECK_TIMEOUT: Duration = Duration::from_secs(10); - -// hehe, this is so disgusting : ) -lazy_static! { - static ref CLIENT_SHUTDOWN_HANDLE: Mutex>> = Mutex::new(None); - static ref RUNTIME: Runtime = Runtime::new().unwrap(); - static ref LAST_HEALTHCHECK_PING: Mutex> = Mutex::new(None); -} -static ENV_SET: AtomicBool = AtomicBool::new(false); - -async fn set_shutdown_handle(handle: Arc) { - let mut guard = CLIENT_SHUTDOWN_HANDLE.lock().await; - if guard.is_some() { - panic!("client wasn't properly stopped") - } - *guard = Some(handle) -} - -async fn stop_and_reset_shutdown_handle() { - let mut guard = CLIENT_SHUTDOWN_HANDLE.lock().await; - if let Some(sh) = &*guard { - sh.notify_waiters() - } else { - panic!("client wasn't properly started") - } - - *guard = None -} - -async fn is_shutdown_handle_set() -> bool { - CLIENT_SHUTDOWN_HANDLE.lock().await.is_some() -} - -fn set_default_env() { - if !ENV_SET.swap(true, Ordering::SeqCst) { - setup_env::<&str>(None); - } -} - -// to be used with the on startup callback which returns the address -#[ffi_export] -pub fn rust_free_string(string: char_p::Box) { - drop(string) -} - -#[ffi_export] -pub fn initialise_logger() { - setup_logging(); - info!("logger initialised"); -} - -#[derive_ReprC] -#[ffi_export] -#[repr(u8)] -#[derive(Eq, PartialEq, Debug)] -pub enum ClientState { - Uninitialised, - Connected, - Disconnected, -} - -#[ffi_export] -pub fn get_client_state() -> ClientState { - // if the environment is not set, we never called start before - // if the shutdown was never set, the client can't possibly be running - // and similarly if it's set, it's most likely running - if !ENV_SET.load(Ordering::Relaxed) { - ClientState::Uninitialised - } else if RUNTIME.block_on(is_shutdown_handle_set()) { - ClientState::Connected - } else { - ClientState::Disconnected - } -} - -pub fn start_client( - storage_directory: 
Option>, - service_provider: Option>, - on_start_callback: F, - on_shutdown_callback: S, -) where - F: FnMut(String) + Send + 'static, - S: FnMut() + Send + 'static, -{ - if get_client_state() == ClientState::Connected { - warn!("could not start the client as it's already running"); - return; - } - - let storage_dir = storage_directory.map(|s| s.to_string()); - let service_provider = service_provider.map(|s| s.to_string()); - RUNTIME.spawn(async move { - _async_run_client( - storage_dir, - SOCKS5_CONFIG_ID.to_string(), - service_provider, - on_start_callback, - on_shutdown_callback, - ) - .await - }); -} - -#[ffi_export] -pub fn stop_client() { - if get_client_state() == ClientState::Disconnected { - warn!("could not stop the client as it's not running "); - return; - } - - RUNTIME.block_on(async move { stop_and_reset_shutdown_handle().await }); -} - -#[ffi_export] -pub fn ping_client() { - RUNTIME.spawn(async { - let mut guard = LAST_HEALTHCHECK_PING.lock().await; - *guard = Some(Instant::now()); - }); -} - -// Continusouly poll that we are being pinged from the outside. If the pings stop that means -// that the higher layer somehow terminated without telling us. 
-pub async fn health_check() { - // init the ping to now - let mut guard = LAST_HEALTHCHECK_PING.lock().await; - *guard = Some(Instant::now()); - // release the mutex - drop(guard); - - loop { - sleep(ANDROID_HEALTHCHECK_INTERVAL).await; - - if !is_shutdown_handle_set().await { - debug!("client has been shutdown, cancelling healthcheck"); - break; - } - let mut guard = LAST_HEALTHCHECK_PING.lock().await; - let Some(last_ping) = *guard else { - warn!("client has not been pinged yet - shutting down"); - *guard = None; - stop_and_reset_shutdown_handle().await; - break; - }; - if last_ping.elapsed() > HEALTHCHECK_TIMEOUT { - warn!( - "client has not been pinged for more than {} seconds - shutting down", - HEALTHCHECK_TIMEOUT.as_secs() - ); - *guard = None; - stop_and_reset_shutdown_handle().await; - break; - } - debug!("✓ android app healthy"); - } -} - -pub fn blocking_run_client<'cb, F, S>( - storage_directory: Option>, - service_provider: Option>, - on_start_callback: F, - on_shutdown_callback: S, -) where - F: FnMut(String) + 'cb, - S: FnMut() + 'cb, -{ - if get_client_state() == ClientState::Connected { - warn!("could not start the client as it's already running"); - return; - } - - // Spawn a task that monitors that we are continuously receiving pings from the outside, - // to make sure we don't end up with a runaway process - RUNTIME.spawn(async { health_check().await }); - - let storage_dir = storage_directory.map(|s| s.to_string()); - let service_provider = service_provider.map(|s| s.to_string()); - RUNTIME - .block_on(async move { - _async_run_client( - storage_dir, - SOCKS5_CONFIG_ID.to_string(), - service_provider, - on_start_callback, - on_shutdown_callback, - ) - .await - }) - .map_err(|err| { - warn!("failed to run client: {}", err); - }) - .ok(); -} - -#[ffi_export] -pub fn reset_client_data(root_directory: char_p::Ref<'_>) { - if get_client_state() == ClientState::Connected { - return; - } - - let root_dir = root_directory.to_string(); - 
_reset_client_data(root_dir) -} - -#[ffi_export] -pub fn existing_service_provider(storage_directory: char_p::Ref<'_>) -> Option { - if let Ok(config) = Config::read_from_default_path(storage_directory.to_str(), SOCKS5_CONFIG_ID) - { - Some(config.core.socks5.provider_mix_address.try_into().unwrap()) - } else { - None - } -} - -fn _reset_client_data(root_directory: String) { - let client_storage_dir = PathBuf::new().join(root_directory).join(SOCKS5_CONFIG_ID); - std::fs::remove_dir_all(client_storage_dir).expect("failed to clear client data") -} - -async fn _async_run_client( - storage_dir: Option, - client_id: String, - service_provider: Option, - mut on_start_callback: F, - mut on_shutdown_callback: S, -) -> anyhow::Result<()> -where - F: FnMut(String), - S: FnMut(), -{ - let mut rng = OsRng; - - set_default_env(); - let stop_handle = Arc::new(Notify::new()); - set_shutdown_handle(stop_handle.clone()).await; - - let config = load_or_generate_base_config(storage_dir, client_id, service_provider).await?; - let nym_apis = config.core.base.client.nym_api_urls.clone(); - - let storage = MobileClientStorage::new(&config); - let user_agent = nym_bin_common::bin_info!().into(); - let socks5_client = Socks5NymClient::new(config.core, storage, user_agent, None) - .with_gateway_setup(GatewaySetup::New { - specification: GatewaySelectionSpecification::UniformRemote { - must_use_tls: false, - }, - available_gateways: current_gateways(&mut rng, &nym_apis, None, 50).await?, - }); - - eprintln!("starting the socks5 client"); - let started_client = socks5_client.start().await?; - eprintln!("the client has started!"); - - // invoke the callback since we've started! - on_start_callback(started_client.address.to_string()); - - // wait for notify to be set... 
- stop_handle.notified().await; - - // and then do graceful shutdown of all tasks - let mut task_manager = started_client - .shutdown_handle - .try_into_task_manager() - .unwrap(); - task_manager.signal_shutdown().ok(); - task_manager.wait_for_shutdown().await; - - // and the corresponding one for shutdown! - on_shutdown_callback(); - - Ok(()) -} - -// note: it does might not contain any gateway configuration and should not be persisted in that state! -async fn load_or_generate_base_config( - storage_dir: Option, - client_id: String, - service_provider: Option, -) -> Result { - let Some(storage_dir) = storage_dir else { - eprintln!("no storage path specified"); - return setup_new_client_config(None, client_id, service_provider).await; - }; - - let expected_store_path = config_filepath_from_root(&storage_dir, &client_id); - eprintln!( - "attempting to load socks5 config from {}", - expected_store_path.display() - ); - - // simulator workaround - if let Ok(mut config) = Config::read_from_toml_file(expected_store_path) { - eprintln!("loaded config"); - if let Some(storage_paths) = &mut config.storage_paths { - if !storage_paths - .common_paths - .keys - .public_identity_key_file - .starts_with(&storage_dir) - { - eprintln!("... but it seems to have been made for different container - fixing it up... 
(ASSUMING DEFAULT PATHS)"); - storage_paths.change_root(storage_dir, &config.core.base.client.id); - } - } - - return Ok(config); - }; - - eprintln!("creating new config"); - setup_new_client_config(Some(storage_dir), client_id, service_provider).await -} - -async fn setup_new_client_config( - storage_dir: Option, - client_id: String, - service_provider: Option, -) -> Result { - let service_provider = service_provider.ok_or(anyhow!( - "service provider was not specified for fresh config" - ))?; - - let mut new_config = Config::new(storage_dir.as_ref(), client_id, service_provider); - - if let Ok(raw_validators) = std::env::var(nym_config_common::defaults::var_names::NYM_API) { - new_config - .core - .base - .set_custom_nym_apis(nym_config_common::parse_urls(&raw_validators)); - } - - if let Some(_storage_paths) = &new_config.storage_paths { - println!("persistent storage is not implemented"); - }; - - if let Some(storage_dir) = storage_dir { - new_config.save_to_default_location(storage_dir)?; - } - - Ok(new_config) -} - -#[cfg(feature = "headers")] // c.f. the `Cargo.toml` section -pub fn generate_headers() -> ::std::io::Result<()> { - ::safer_ffi::headers::builder() - .to_file("socks5_c.h")? 
- .generate() -} diff --git a/sdk/lib/socks5-listener/src/persistence.rs b/sdk/lib/socks5-listener/src/persistence.rs deleted file mode 100644 index 77a903144a..0000000000 --- a/sdk/lib/socks5-listener/src/persistence.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use crate::config::Config; -use nym_client_core::client::base_client::storage::{InMemGatewaysDetails, MixnetClientStorage}; -use nym_client_core::client::key_manager::persistence::InMemEphemeralKeys; -use nym_client_core::client::replies::reply_storage; -use nym_credential_storage::ephemeral_storage::EphemeralStorage as EphemeralCredentialStorage; - -pub struct MobileClientStorage { - // the key storage is now useless without gateway details store. so use ephemeral for everything. - key_store: InMemEphemeralKeys, - gateway_details_store: InMemGatewaysDetails, - - reply_store: reply_storage::Empty, - credential_store: EphemeralCredentialStorage, -} - -impl MixnetClientStorage for MobileClientStorage { - type KeyStore = InMemEphemeralKeys; - type ReplyStore = reply_storage::Empty; - type CredentialStore = EphemeralCredentialStorage; - type GatewaysDetailsStore = InMemGatewaysDetails; - - fn into_runtime_stores( - self, - ) -> ( - Self::ReplyStore, - Self::CredentialStore, - Self::GatewaysDetailsStore, - ) { - ( - self.reply_store, - self.credential_store, - self.gateway_details_store, - ) - } - - fn key_store(&self) -> &Self::KeyStore { - &self.key_store - } - - fn reply_store(&self) -> &Self::ReplyStore { - &self.reply_store - } - - fn credential_store(&self) -> &Self::CredentialStore { - &self.credential_store - } - - fn gateway_details_store(&self) -> &Self::GatewaysDetailsStore { - &self.gateway_details_store - } -} - -impl MobileClientStorage { - pub fn new(_config: &Config) -> Self { - MobileClientStorage { - key_store: Default::default(), - gateway_details_store: Default::default(), - reply_store: Default::default(), - 
credential_store: Default::default(), - } - } -} From 07435ce3b25f1d3e0dc7d5f5212b2fd24e8dd561 Mon Sep 17 00:00:00 2001 From: windy-ux <75579979+windy-ux@users.noreply.github.com> Date: Fri, 13 Dec 2024 14:09:49 +0100 Subject: [PATCH 05/64] Fix/web 615 seo setup (#5257) * + add header into Packet Mixing docs * + add head changes for testing * / updated version of metatags in theme.config * + add env file * / theme.config to use NEXT_PUBLIC_SITE_URL from env file * @ Fix broken link in theme.config * - remove favicon code * + add desription for intro pages --- documentation/docs/.env.example | 1 + documentation/docs/.gitignore | 4 ++ .../docs/pages/apis/introduction.mdx | 4 ++ documentation/docs/pages/developers/index.mdx | 4 ++ .../docs/pages/network/concepts/mixing.mdx | 5 +++ documentation/docs/pages/network/index.md | 4 ++ .../docs/pages/operators/introduction.mdx | 4 ++ documentation/docs/theme.config.tsx | 42 ++++++++++++++++--- 8 files changed, 63 insertions(+), 5 deletions(-) create mode 100644 documentation/docs/.env.example diff --git a/documentation/docs/.env.example b/documentation/docs/.env.example new file mode 100644 index 0000000000..2ed3461d68 --- /dev/null +++ b/documentation/docs/.env.example @@ -0,0 +1 @@ +NEXT_PUBLIC_SITE_URL=http://localhost:3000 \ No newline at end of file diff --git a/documentation/docs/.gitignore b/documentation/docs/.gitignore index 46f9bd0984..515d7fce8d 100644 --- a/documentation/docs/.gitignore +++ b/documentation/docs/.gitignore @@ -4,3 +4,7 @@ out # the lock file will break Vercel because it may get committed from a machine with a different build architecture package-lock.json + +# local env files +.env*.local +.env \ No newline at end of file diff --git a/documentation/docs/pages/apis/introduction.mdx b/documentation/docs/pages/apis/introduction.mdx index c2c521ba8c..03d4998584 100644 --- a/documentation/docs/pages/apis/introduction.mdx +++ b/documentation/docs/pages/apis/introduction.mdx @@ -1,3 +1,7 @@ +--- 
+description: Interactive APIs generated from the OpenAPI specs of various API endpoints offered by bits of Nym infrastructure run both by Nym and community operators for both Mainnet and the Sandbox testnet. +--- + # Introduction This site contains interactive APIs generated from the OpenAPI specs of various API endpoints offered by bits of Nym infrastructure run both by Nym and community operators for both Mainnet and the Sandbox testnet. diff --git a/documentation/docs/pages/developers/index.mdx b/documentation/docs/pages/developers/index.mdx index 9f27706c0d..a05d61780a 100644 --- a/documentation/docs/pages/developers/index.mdx +++ b/documentation/docs/pages/developers/index.mdx @@ -1,3 +1,7 @@ +--- +description: Nym's developer documentation covering core concepts of integrating with the Mixnet, interacting with the Nyx blockchain, an overview of the avaliable tools, and our SDK docs. +--- + # Introduction Nym's developer documentation covering core concepts of integrating with the Mixnet, interacting with the Nyx blockchain, an overview of the avaliable tools, and our SDK docs. diff --git a/documentation/docs/pages/network/concepts/mixing.mdx b/documentation/docs/pages/network/concepts/mixing.mdx index 387480a8b7..7027182d14 100644 --- a/documentation/docs/pages/network/concepts/mixing.mdx +++ b/documentation/docs/pages/network/concepts/mixing.mdx @@ -1,3 +1,8 @@ +--- +title: Packet Mixing +description: Mixnets are networks of nodes that route traffic in a way that makes it untraceable +--- + # Packet Mixing > Continuous-time mixing strategies ... delay each message independently, forwarding it to its next destination once a specified delay has timed out. The aggregate effect of independently delaying each message is an output sequence of messages that is randomly reordered with respect to the input sequence. 
diff --git a/documentation/docs/pages/network/index.md b/documentation/docs/pages/network/index.md index 2fc3a1a583..8ee579d0dc 100644 --- a/documentation/docs/pages/network/index.md +++ b/documentation/docs/pages/network/index.md @@ -1,3 +1,7 @@ +--- +description: Nym's network documentation covering network architecture, node types, tokenomics, and cryptography. +--- + # Introduction Nym's network documentation covering network architecture, node types, tokenomics, and crypto systems. diff --git a/documentation/docs/pages/operators/introduction.mdx b/documentation/docs/pages/operators/introduction.mdx index 82afe0998f..0261f8fa07 100644 --- a/documentation/docs/pages/operators/introduction.mdx +++ b/documentation/docs/pages/operators/introduction.mdx @@ -1,3 +1,7 @@ +--- +description: Nym's Operators guide containing information and setup guides for the various components of Nym network and Nyx blockchain validators. +--- + # Introduction This is **Nym's Operators guide**, containing information and setup guides for the various components of Nym network and Nyx blockchain validators. diff --git a/documentation/docs/theme.config.tsx b/documentation/docs/theme.config.tsx index 2b85bd88bd..acb9e39c4c 100644 --- a/documentation/docs/theme.config.tsx +++ b/documentation/docs/theme.config.tsx @@ -1,14 +1,46 @@ import React from "react"; -import { DocsThemeConfig } from "nextra-theme-docs"; +import { DocsThemeConfig, useConfig } from "nextra-theme-docs"; import { Footer } from "./components/footer"; import { Matrix } from "./components/matrix-link"; import { useRouter } from "next/router"; const config: DocsThemeConfig = { - useNextSeoProps() { - return { - titleTemplate: "%s – Nym Docs", - }; + head: function useHead() { + const config = useConfig() + const { route } = useRouter() + const url = process.env.NEXT_PUBLIC_SITE_URL + const image = url + '/nym_logo.jpg' + + const description = + config.frontMatter.description || + 'Nym is a privacy platform. 
It provides strong network-level privacy against sophisticated end-to-end attackers, and anonymous access control using blinded, re-randomizable, decentralized credentials.' + const title = config.title + (route === '/' ? '' : ' - Nym docs') + + return ( + <> + {title} + + + + + + + + + + + + + + + + + + + + + + ) }, logo: Nym Docs, project: { From e7702a1e7a4aac4339ae8156f4f40a0bdddf45e4 Mon Sep 17 00:00:00 2001 From: Fran Arbanas Date: Fri, 13 Dec 2024 14:44:36 +0100 Subject: [PATCH 06/64] fix: remove documentation from dockerignore since it's refernced in Cargo.toml (#5264) --- .dockerignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.dockerignore b/.dockerignore index 09ed129139..a11022eb2b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,4 +4,3 @@ **/node_modules **/target dist -documentation From c0b4e8dd70598e36d999baaacea4ae9a9fb97ff0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20H=C3=A4ggblad?= Date: Mon, 16 Dec 2024 09:15:46 +0100 Subject: [PATCH 07/64] Remove unneeded async function annotation (#5246) --- common/http-api-client/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/http-api-client/src/lib.rs b/common/http-api-client/src/lib.rs index 824f4fa278..19826bd9cc 100644 --- a/common/http-api-client/src/lib.rs +++ b/common/http-api-client/src/lib.rs @@ -282,7 +282,7 @@ impl Client { } } - pub async fn create_delete_request( + pub fn create_delete_request( &self, path: PathSegments<'_>, params: Params<'_, K, V>, From 04c2045d944d718bc31672ebb43bf3fcb3ebfa74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20H=C3=A4ggblad?= Date: Mon, 16 Dec 2024 12:28:44 +0100 Subject: [PATCH 08/64] Add PATCH support to nym-http-api-client (#5260) --- common/http-api-client/src/lib.rs | 103 ++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) diff --git a/common/http-api-client/src/lib.rs b/common/http-api-client/src/lib.rs index 19826bd9cc..c826fe295b 100644 --- a/common/http-api-client/src/lib.rs +++ 
b/common/http-api-client/src/lib.rs @@ -324,6 +324,56 @@ impl Client { } } + pub fn create_patch_request( + &self, + path: PathSegments<'_>, + params: Params<'_, K, V>, + json_body: &B, + ) -> RequestBuilder + where + B: Serialize + ?Sized, + K: AsRef, + V: AsRef, + { + let url = sanitize_url(&self.base_url, path, params); + self.reqwest_client.patch(url).json(json_body) + } + + pub async fn send_patch_request( + &self, + path: PathSegments<'_>, + params: Params<'_, K, V>, + json_body: &B, + ) -> Result> + where + B: Serialize + ?Sized, + K: AsRef, + V: AsRef, + E: Display, + { + let url = sanitize_url(&self.base_url, path, params); + + #[cfg(target_arch = "wasm32")] + { + Ok(wasmtimer::tokio::timeout( + self.request_timeout, + self.reqwest_client.patch(url).json(json_body).send(), + ) + .await + .map_err(|_timeout| HttpClientError::RequestTimeout)??) + } + + #[cfg(not(target_arch = "wasm32"))] + { + Ok(self + .reqwest_client + .patch(url) + .json(json_body) + .send() + .await?) + } + } + #[instrument(level = "debug", skip_all)] pub async fn get_json( &self, @@ -372,6 +422,23 @@ impl Client { parse_response(res, false).await } + pub async fn patch_json( + &self, + path: PathSegments<'_>, + params: Params<'_, K, V>, + json_body: &B, + ) -> Result> + where + B: Serialize + ?Sized, + for<'a> T: Deserialize<'a>, + K: AsRef, + V: AsRef, + E: Display + DeserializeOwned, + { + let res = self.send_patch_request(path, params, json_body).await?; + parse_response(res, true).await + } + #[instrument(level = "debug", skip_all)] pub async fn get_json_endpoint(&self, endpoint: S) -> Result> where @@ -466,6 +533,42 @@ impl Client { parse_response(res, false).await } + + pub async fn patch_json_endpoint( + &self, + endpoint: S, + json_body: &B, + ) -> Result> + where + B: Serialize + ?Sized, + for<'a> T: Deserialize<'a>, + E: Display + DeserializeOwned, + S: AsRef, + { + #[cfg(target_arch = "wasm32")] + let res = { + wasmtimer::tokio::timeout( + self.request_timeout, + 
self.reqwest_client + .patch(self.base_url.join(endpoint.as_ref())?) + .json(json_body) + .send(), + ) + .await + .map_err(|_timeout| HttpClientError::RequestTimeout)?? + }; + + #[cfg(not(target_arch = "wasm32"))] + let res = { + self.reqwest_client + .patch(self.base_url.join(endpoint.as_ref())?) + .json(json_body) + .send() + .await? + }; + + parse_response(res, true).await + } } // define those methods on the trait for nicer extensions (and not having to type the thing twice) From 88d6fb4e22a378c27bbe673cbe2819bde0f32958 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bogdan-=C8=98tefan=20Neac=C5=9Fu?= Date: Mon, 16 Dec 2024 13:57:34 +0200 Subject: [PATCH 09/64] Add fd callback to client core (#5230) * Add fd callback to client core * Include in sdk * Fix clippy many args * Method in builder * Replace Box with Arc --- .../client-core/src/client/base_client/mod.rs | 23 ++++++++++++++++ .../gateway-client/src/client/mod.rs | 17 ++++++++++++ nym-api/src/network_monitor/monitor/sender.rs | 1 + sdk/rust/nym-sdk/src/mixnet/client.rs | 27 +++++++++++++++++++ 4 files changed, 68 insertions(+) diff --git a/common/client-core/src/client/base_client/mod.rs b/common/client-core/src/client/base_client/mod.rs index 856ddd9497..839df3ec90 100644 --- a/common/client-core/src/client/base_client/mod.rs +++ b/common/client-core/src/client/base_client/mod.rs @@ -188,6 +188,9 @@ pub struct BaseClientBuilder<'a, C, S: MixnetClientStorage> { user_agent: Option, setup_method: GatewaySetup, + + #[cfg(unix)] + connection_fd_callback: Option>, } impl<'a, C, S> BaseClientBuilder<'a, C, S> @@ -210,6 +213,8 @@ where shutdown: None, user_agent: None, setup_method: GatewaySetup::MustLoad { gateway_id: None }, + #[cfg(unix)] + connection_fd_callback: None, } } @@ -261,6 +266,15 @@ where Ok(self) } + #[cfg(unix)] + pub fn with_connection_fd_callback( + mut self, + callback: Arc, + ) -> Self { + self.connection_fd_callback = Some(callback); + self + } + // note: do **NOT** make this method public as its 
only valid usage is from within `start_base` // because it relies on the crypto keys being already loaded fn mix_address(details: &InitialisationResult) -> Recipient { @@ -352,6 +366,7 @@ where controller.start_with_shutdown(shutdown) } + #[allow(clippy::too_many_arguments)] async fn start_gateway_client( config: &Config, initialisation_result: InitialisationResult, @@ -359,6 +374,7 @@ where details_store: &S::GatewaysDetailsStore, packet_router: PacketRouter, stats_reporter: ClientStatsSender, + #[cfg(unix)] connection_fd_callback: Option>, shutdown: TaskClient, ) -> Result, ClientCoreError> where @@ -401,6 +417,8 @@ where packet_router, bandwidth_controller, stats_reporter, + #[cfg(unix)] + connection_fd_callback, shutdown, ) }; @@ -462,6 +480,7 @@ where details_store: &S::GatewaysDetailsStore, packet_router: PacketRouter, stats_reporter: ClientStatsSender, + #[cfg(unix)] connection_fd_callback: Option>, mut shutdown: TaskClient, ) -> Result, ClientCoreError> where @@ -493,6 +512,8 @@ where details_store, packet_router, stats_reporter, + #[cfg(unix)] + connection_fd_callback, shutdown, ) .await?; @@ -772,6 +793,8 @@ where &details_store, gateway_packet_router, stats_reporter.clone(), + #[cfg(unix)] + self.connection_fd_callback, shutdown.fork("gateway_transceiver"), ) .await?; diff --git a/common/client-libs/gateway-client/src/client/mod.rs b/common/client-libs/gateway-client/src/client/mod.rs index 6cb7b83f02..481e57dd6f 100644 --- a/common/client-libs/gateway-client/src/client/mod.rs +++ b/common/client-libs/gateway-client/src/client/mod.rs @@ -101,6 +101,10 @@ pub struct GatewayClient { // currently unused (but populated) negotiated_protocol: Option, + // Callback on the fd as soon as the connection has been established + #[cfg(unix)] + connection_fd_callback: Option>, + /// Listen to shutdown messages and send notifications back to the task manager task_client: TaskClient, } @@ -116,6 +120,7 @@ impl GatewayClient { packet_router: PacketRouter, 
bandwidth_controller: Option>, stats_reporter: ClientStatsSender, + #[cfg(unix)] connection_fd_callback: Option>, task_client: TaskClient, ) -> Self { GatewayClient { @@ -131,6 +136,8 @@ impl GatewayClient { bandwidth_controller, stats_reporter, negotiated_protocol: None, + #[cfg(unix)] + connection_fd_callback, task_client, } } @@ -205,6 +212,12 @@ impl GatewayClient { }; self.connection = SocketState::Available(Box::new(ws_stream)); + + #[cfg(unix)] + if let (Some(callback), Some(fd)) = (self.connection_fd_callback.as_ref(), self.ws_fd()) { + callback.as_ref()(fd); + } + Ok(()) } @@ -1034,6 +1047,8 @@ impl GatewayClient { bandwidth_controller: None, stats_reporter: ClientStatsSender::new(None), negotiated_protocol: None, + #[cfg(unix)] + connection_fd_callback: None, task_client, } } @@ -1064,6 +1079,8 @@ impl GatewayClient { bandwidth_controller, stats_reporter, negotiated_protocol: self.negotiated_protocol, + #[cfg(unix)] + connection_fd_callback: self.connection_fd_callback, task_client, } } diff --git a/nym-api/src/network_monitor/monitor/sender.rs b/nym-api/src/network_monitor/monitor/sender.rs index c1140a55ac..172d6b3680 100644 --- a/nym-api/src/network_monitor/monitor/sender.rs +++ b/nym-api/src/network_monitor/monitor/sender.rs @@ -183,6 +183,7 @@ impl PacketSender { gateway_packet_router, Some(fresh_gateway_client_data.bandwidth_controller.clone()), nym_statistics_common::clients::ClientStatsSender::new(None), + None, task_client, ); diff --git a/sdk/rust/nym-sdk/src/mixnet/client.rs b/sdk/rust/nym-sdk/src/mixnet/client.rs index 118885a650..f223037572 100644 --- a/sdk/rust/nym-sdk/src/mixnet/client.rs +++ b/sdk/rust/nym-sdk/src/mixnet/client.rs @@ -36,6 +36,7 @@ use nym_validator_client::{nyxd, QueryHttpRpcNyxdClient, UserAgent}; use rand::rngs::OsRng; use std::path::Path; use std::path::PathBuf; +use std::sync::Arc; use url::Url; use zeroize::Zeroizing; @@ -54,6 +55,7 @@ pub struct MixnetClientBuilder { custom_shutdown: Option, force_tls: bool, 
user_agent: Option, + connection_fd_callback: Option>, // TODO: incorporate it properly into `MixnetClientStorage` (I will need it in wasm anyway) gateway_endpoint_config_path: Option, @@ -93,6 +95,8 @@ impl MixnetClientBuilder { custom_gateway_transceiver: None, force_tls: false, user_agent: None, + #[cfg(unix)] + connection_fd_callback: None, }) } } @@ -120,6 +124,8 @@ where custom_shutdown: None, force_tls: false, user_agent: None, + #[cfg(unix)] + connection_fd_callback: None, gateway_endpoint_config_path: None, storage, } @@ -138,6 +144,8 @@ where custom_shutdown: self.custom_shutdown, force_tls: self.force_tls, user_agent: self.user_agent, + #[cfg(unix)] + connection_fd_callback: self.connection_fd_callback, gateway_endpoint_config_path: self.gateway_endpoint_config_path, storage, } @@ -237,6 +245,15 @@ where self } + #[must_use] + pub fn with_connection_fd_callback( + mut self, + connection_fd_callback: Arc, + ) -> Self { + self.connection_fd_callback = Some(connection_fd_callback); + self + } + /// Use custom mixnet sender that might not be the default websocket gateway connection. 
/// only for advanced use #[must_use] @@ -265,6 +282,7 @@ where client.wait_for_gateway = self.wait_for_gateway; client.force_tls = self.force_tls; client.user_agent = self.user_agent; + client.connection_fd_callback = self.connection_fd_callback; Ok(client) } @@ -314,6 +332,9 @@ where custom_shutdown: Option, user_agent: Option, + + /// Callback on the websocket fd as soon as the connection has been established + connection_fd_callback: Option>, } impl DisconnectedMixnetClient @@ -363,6 +384,7 @@ where force_tls: false, custom_shutdown: None, user_agent: None, + connection_fd_callback: None, }) } @@ -604,6 +626,11 @@ where base_builder = base_builder.with_gateway_transceiver(gateway_transceiver); } + #[cfg(unix)] + if let Some(connection_fd_callback) = self.connection_fd_callback { + base_builder = base_builder.with_connection_fd_callback(connection_fd_callback); + } + let started_client = base_builder.start_base().await?; self.state = BuilderState::Registered {}; let nym_address = started_client.address; From 0a712b9fce75c5f4bf1c95f0f8ee49a6aa919bcc Mon Sep 17 00:00:00 2001 From: windy-ux <75579979+windy-ux@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:17:25 +0100 Subject: [PATCH 10/64] Fix/web 615 seo setup (#5265) * + add header into Packet Mixing docs * + add head changes for testing * / updated version of metatags in theme.config * + add env file * / theme.config to use NEXT_PUBLIC_SITE_URL from env file * @ Fix broken link in theme.config * - remove favicon code * + add desription for intro pages * + add default book's desriptions * Revert "+ add desription for intro pages" This reverts commit 98c78242d4e7eb51d91e7a178319dab16b1daab8. 
--- documentation/docs/pages/apis/introduction.mdx | 4 ---- documentation/docs/pages/developers/index.mdx | 4 ---- documentation/docs/pages/network/index.md | 4 ---- .../docs/pages/operators/introduction.mdx | 4 ---- documentation/docs/theme.config.tsx | 17 +++++++++++++++-- 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/documentation/docs/pages/apis/introduction.mdx b/documentation/docs/pages/apis/introduction.mdx index 03d4998584..c2c521ba8c 100644 --- a/documentation/docs/pages/apis/introduction.mdx +++ b/documentation/docs/pages/apis/introduction.mdx @@ -1,7 +1,3 @@ ---- -description: Interactive APIs generated from the OpenAPI specs of various API endpoints offered by bits of Nym infrastructure run both by Nym and community operators for both Mainnet and the Sandbox testnet. ---- - # Introduction This site contains interactive APIs generated from the OpenAPI specs of various API endpoints offered by bits of Nym infrastructure run both by Nym and community operators for both Mainnet and the Sandbox testnet. diff --git a/documentation/docs/pages/developers/index.mdx b/documentation/docs/pages/developers/index.mdx index a05d61780a..9f27706c0d 100644 --- a/documentation/docs/pages/developers/index.mdx +++ b/documentation/docs/pages/developers/index.mdx @@ -1,7 +1,3 @@ ---- -description: Nym's developer documentation covering core concepts of integrating with the Mixnet, interacting with the Nyx blockchain, an overview of the avaliable tools, and our SDK docs. ---- - # Introduction Nym's developer documentation covering core concepts of integrating with the Mixnet, interacting with the Nyx blockchain, an overview of the avaliable tools, and our SDK docs. 
diff --git a/documentation/docs/pages/network/index.md b/documentation/docs/pages/network/index.md index 8ee579d0dc..2fc3a1a583 100644 --- a/documentation/docs/pages/network/index.md +++ b/documentation/docs/pages/network/index.md @@ -1,7 +1,3 @@ ---- -description: Nym's network documentation covering network architecture, node types, tokenomics, and cryptography. ---- - # Introduction Nym's network documentation covering network architecture, node types, tokenomics, and crypto systems. diff --git a/documentation/docs/pages/operators/introduction.mdx b/documentation/docs/pages/operators/introduction.mdx index 0261f8fa07..82afe0998f 100644 --- a/documentation/docs/pages/operators/introduction.mdx +++ b/documentation/docs/pages/operators/introduction.mdx @@ -1,7 +1,3 @@ ---- -description: Nym's Operators guide containing information and setup guides for the various components of Nym network and Nyx blockchain validators. ---- - # Introduction This is **Nym's Operators guide**, containing information and setup guides for the various components of Nym network and Nyx blockchain validators. 
diff --git a/documentation/docs/theme.config.tsx b/documentation/docs/theme.config.tsx index acb9e39c4c..1ad62048a3 100644 --- a/documentation/docs/theme.config.tsx +++ b/documentation/docs/theme.config.tsx @@ -11,10 +11,23 @@ const config: DocsThemeConfig = { const url = process.env.NEXT_PUBLIC_SITE_URL const image = url + '/nym_logo.jpg' + // Define descriptions for different "books" + const bookDescriptions: Record = { + '/developers': "Nym's developer documentation covering core concepts of integrating with the Mixnet, interacting with the Nyx blockchain, an overview of the avaliable tools, and our SDK docs.", + '/network': "Nym's network documentation covering network architecture, node types, tokenomics, and cryptography.", + '/operators': "Nym's Operators guide containing information and setup guides for the various components of Nym network and Nyx blockchain validators.", + '/apis': "Interactive APIs generated from the OpenAPI specs of various API endpoints offered by bits of Nym infrastructure run both by Nym and community operators for both Mainnet and the Sandbox testnet." + } + + const defaultDescription = 'Nym is a privacy platform. It provides strong network-level privacy against sophisticated end-to-end attackers, and anonymous access control using blinded, re-randomizable, decentralized credentials.' + + const topLevel = '/' + route.split('/')[1] const description = config.frontMatter.description || - 'Nym is a privacy platform. It provides strong network-level privacy against sophisticated end-to-end attackers, and anonymous access control using blinded, re-randomizable, decentralized credentials.' - const title = config.title + (route === '/' ? '' : ' - Nym docs') + bookDescriptions[topLevel] || + defaultDescription + + const title = config.title + (route === '/' ? 
'' : ' - Nym docs') return ( <> From be063a36eb02f4eb50a6a3abdc57b62850e75553 Mon Sep 17 00:00:00 2001 From: import this <97586125+serinko@users.noreply.github.com> Date: Mon, 16 Dec 2024 13:17:38 +0000 Subject: [PATCH 11/64] syntax hotfix (#5266) --- .../docs/pages/operators/tokenomics/mixnet-rewards.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation/docs/pages/operators/tokenomics/mixnet-rewards.mdx b/documentation/docs/pages/operators/tokenomics/mixnet-rewards.mdx index fb1225af15..b329a227c0 100644 --- a/documentation/docs/pages/operators/tokenomics/mixnet-rewards.mdx +++ b/documentation/docs/pages/operators/tokenomics/mixnet-rewards.mdx @@ -222,7 +222,7 @@ Good performance is much more essential than [total stake](../tokenomics.mdx#sta For a comparison we made an example with 5 nodes, where first number is node performance and second stake saturation (assuming all of them `config_score` = 1 for simplification):
- + > node_1 = 1.00 ^ 20 \* 1.0 = 1
> node_2 = 1.00 ^ 20 \* 0.5 = 0.5
> node_3 = 0.99 ^ 20 \* 1.0 = 0.818
From 84d7004cb2cba6ff5c83af8322061b3f30637664 Mon Sep 17 00:00:00 2001 From: Drazen Urch Date: Mon, 16 Dec 2024 15:18:04 +0100 Subject: [PATCH 12/64] Add control messages to GatewayTransciver (#5247) * Add control messages to GatewayTransciver * Add forget me flag to clients * CI gate IPIINFO test * Handle ForgetMe for client and stats db * fmt --- .gitignore | 3 +- Cargo.lock | 1 + .../client-core/src/client/base_client/mod.rs | 17 +++++- .../client-core/src/client/mix_traffic/mod.rs | 34 ++++++++++-- .../src/client/mix_traffic/transceiver.rs | 47 +++++++++++++++- common/client-core/src/lib.rs | 45 ++++++++++++++++ .../gateway-client/src/client/mod.rs | 2 +- .../src/types/text_request.rs | 4 ++ .../src/types/text_response.rs | 1 + common/gateway-stats-storage/src/lib.rs | 10 ++++ common/gateway-stats-storage/src/sessions.rs | 10 ++++ ...0c61aa0ddd30fee2565803b88c6086bd2a734.json | 12 +++++ ...5f9e8d251a3e764104d2a54153895dee1a118.json | 12 +++++ common/gateway-storage/src/bandwidth.rs | 10 ++++ common/gateway-storage/src/inboxes.rs | 13 +++++ common/gateway-storage/src/lib.rs | 42 +++++++++++++++ common/statistics/src/gateways.rs | 8 +++ .../client_handling/websocket/common_state.rs | 6 +++ .../connection_handler/authenticated.rs | 23 ++++++++ .../websocket/connection_handler/fresh.rs | 4 ++ nym-network-monitor/Cargo.toml | 1 + nym-network-monitor/src/main.rs | 8 ++- .../src/monitor/geodata.rs | 54 +++++++++++-------- .../node/metrics/handler/client_sessions.rs | 14 +++++ sdk/rust/nym-sdk/src/mixnet/client.rs | 19 ++++++- 25 files changed, 366 insertions(+), 34 deletions(-) create mode 100644 common/gateway-storage/.sqlx/query-3ea5542b21a41b14276a8fd6b870c61aa0ddd30fee2565803b88c6086bd2a734.json create mode 100644 common/gateway-storage/.sqlx/query-a3cc707995b8215fa77738cd1a55f9e8d251a3e764104d2a54153895dee1a118.json diff --git a/.gitignore b/.gitignore index 597794c0e3..8953d2a2da 100644 --- a/.gitignore +++ b/.gitignore @@ -51,4 +51,5 @@ ppa-private-key.b64 
ppa-private-key.asc nym-network-monitor/topology.json nym-network-monitor/__pycache__ -nym-network-monitor/*.key \ No newline at end of file +nym-network-monitor/*.key +nym-network-monitor/.envrc diff --git a/Cargo.lock b/Cargo.lock index c7c85e0d9e..2fd2fbf19c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5782,6 +5782,7 @@ dependencies = [ "nym-bin-common", "nym-client-core", "nym-crypto", + "nym-gateway-requests", "nym-network-defaults", "nym-sdk", "nym-sphinx", diff --git a/common/client-core/src/client/base_client/mod.rs b/common/client-core/src/client/base_client/mod.rs index 839df3ec90..bb0ffd5d03 100644 --- a/common/client-core/src/client/base_client/mod.rs +++ b/common/client-core/src/client/base_client/mod.rs @@ -32,7 +32,7 @@ use crate::init::{ setup_gateway, types::{GatewaySetup, InitialisationResult}, }; -use crate::{config, spawn_future}; +use crate::{config, spawn_future, ForgetMe}; use futures::channel::mpsc; use log::*; use nym_bandwidth_controller::BandwidthController; @@ -191,6 +191,8 @@ pub struct BaseClientBuilder<'a, C, S: MixnetClientStorage> { #[cfg(unix)] connection_fd_callback: Option>, + + forget_me: ForgetMe, } impl<'a, C, S> BaseClientBuilder<'a, C, S> @@ -215,9 +217,16 @@ where setup_method: GatewaySetup::MustLoad { gateway_id: None }, #[cfg(unix)] connection_fd_callback: None, + forget_me: Default::default(), } } + #[must_use] + pub fn with_forget_me(mut self, forget_me: &ForgetMe) -> Self { + self.forget_me = forget_me.clone(); + self + } + #[must_use] pub fn with_gateway_setup(mut self, setup: GatewaySetup) -> Self { self.setup_method = setup; @@ -636,9 +645,11 @@ where fn start_mix_traffic_controller( gateway_transceiver: Box, shutdown: TaskClient, + forget_me: ForgetMe, ) -> BatchMixMessageSender { info!("Starting mix traffic controller..."); - let (mix_traffic_controller, mix_tx) = MixTrafficController::new(gateway_transceiver); + let (mix_traffic_controller, mix_tx) = + MixTrafficController::new(gateway_transceiver, forget_me); 
mix_traffic_controller.start_with_shutdown(shutdown); mix_tx } @@ -820,9 +831,11 @@ where // that are to be sent to the mixnet. They are used by cover traffic stream and real // traffic stream. // The MixTrafficController then sends the actual traffic + let message_sender = Self::start_mix_traffic_controller( gateway_transceiver, shutdown.fork("mix_traffic_controller"), + self.forget_me, ); // Channels that the websocket listener can use to signal downstream to the real traffic diff --git a/common/client-core/src/client/mix_traffic/mod.rs b/common/client-core/src/client/mix_traffic/mod.rs index 91c652efba..fa73cc82a8 100644 --- a/common/client-core/src/client/mix_traffic/mod.rs +++ b/common/client-core/src/client/mix_traffic/mod.rs @@ -2,8 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use crate::client::mix_traffic::transceiver::GatewayTransceiver; -use crate::spawn_future; +use crate::{spawn_future, ForgetMe}; use log::*; +use nym_gateway_requests::ClientRequest; use nym_sphinx::forwarding::packet::MixPacket; pub type BatchMixMessageSender = tokio::sync::mpsc::Sender>; @@ -26,10 +27,14 @@ pub struct MixTrafficController { // TODO: this is temporary work-around. // in long run `gateway_client` will be moved away from `MixTrafficController` anyway. 
consecutive_gateway_failure_count: usize, + forget_me: ForgetMe, } impl MixTrafficController { - pub fn new(gateway_transceiver: T) -> (MixTrafficController, BatchMixMessageSender) + pub fn new( + gateway_transceiver: T, + forget_me: ForgetMe, + ) -> (MixTrafficController, BatchMixMessageSender) where T: GatewayTransceiver + Send + 'static, { @@ -40,6 +45,7 @@ impl MixTrafficController { gateway_transceiver: Box::new(gateway_transceiver), mix_rx: message_receiver, consecutive_gateway_failure_count: 0, + forget_me, }, message_sender, ) @@ -47,6 +53,7 @@ impl MixTrafficController { pub fn new_dynamic( gateway_transceiver: Box, + forget_me: ForgetMe, ) -> (MixTrafficController, BatchMixMessageSender) { let (message_sender, message_receiver) = tokio::sync::mpsc::channel(MIX_MESSAGE_RECEIVER_BUFFER_SIZE); @@ -55,6 +62,7 @@ impl MixTrafficController { gateway_transceiver, mix_rx: message_receiver, consecutive_gateway_failure_count: 0, + forget_me, }, message_sender, ) @@ -111,7 +119,27 @@ impl MixTrafficController { } } shutdown.recv_timeout().await; + + if self.forget_me.any() { + log::info!("Sending forget me request to the gateway"); + match self + .gateway_transceiver + .send_client_request(ClientRequest::ForgetMe { + client: self.forget_me.client(), + stats: self.forget_me.stats(), + }) + .await + { + Ok(_) => { + log::info!("Successfully sent forget me request to the gateway"); + } + Err(err) => { + log::error!("Failed to send forget me request to the gateway: {err}"); + } + } + } + log::debug!("MixTrafficController: Exiting"); - }) + }); } } diff --git a/common/client-core/src/client/mix_traffic/transceiver.rs b/common/client-core/src/client/mix_traffic/transceiver.rs index 6d9b4fa6de..77c226d245 100644 --- a/common/client-core/src/client/mix_traffic/transceiver.rs +++ b/common/client-core/src/client/mix_traffic/transceiver.rs @@ -5,8 +5,10 @@ use async_trait::async_trait; use log::{debug, error}; use nym_credential_storage::storage::Storage as CredentialStorage; 
use nym_crypto::asymmetric::identity; +use nym_gateway_client::error::GatewayClientError; use nym_gateway_client::GatewayClient; pub use nym_gateway_client::{GatewayPacketRouter, PacketRouter}; +use nym_gateway_requests::ClientRequest; use nym_sphinx::forwarding::packet::MixPacket; use nym_validator_client::nyxd::contract_traits::DkgQueryClient; use std::fmt::Debug; @@ -26,9 +28,14 @@ fn erase_err(err: E) -> ErasedGate } /// This combines combines the functionalities of being able to send and receive mix packets. +#[async_trait] pub trait GatewayTransceiver: GatewaySender + GatewayReceiver { fn gateway_identity(&self) -> identity::PublicKey; fn ws_fd(&self) -> Option; + async fn send_client_request( + &mut self, + message: ClientRequest, + ) -> Result<(), GatewayClientError>; } /// This trait defines the functionality of sending `MixPacket` into the mixnet, @@ -65,6 +72,7 @@ pub trait GatewayReceiver { } // to allow for dynamic dispatch +#[async_trait] impl GatewayTransceiver for Box { #[inline] fn gateway_identity(&self) -> identity::PublicKey { @@ -73,6 +81,13 @@ impl GatewayTransceiver for Box { fn ws_fd(&self) -> Option { (**self).ws_fd() } + + async fn send_client_request( + &mut self, + message: ClientRequest, + ) -> Result<(), GatewayClientError> { + (**self).send_client_request(message).await + } } #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] @@ -91,7 +106,6 @@ impl GatewaySender for Box { (**self).batch_send_mix_packets(packets).await } } - impl GatewayReceiver for Box { #[inline] fn set_packet_router(&mut self, packet_router: PacketRouter) -> Result<(), ErasedGatewayError> { @@ -111,6 +125,7 @@ impl RemoteGateway { } } +#[async_trait] impl GatewayTransceiver for RemoteGateway where C: DkgQueryClient + Send + Sync, @@ -123,6 +138,20 @@ where fn ws_fd(&self) -> Option { self.gateway_client.ws_fd() } + + async fn send_client_request( + &mut self, + message: ClientRequest, + ) -> Result<(), GatewayClientError> { + if let Some(shared_key) = 
self.gateway_client.shared_key() { + self.gateway_client + .send_websocket_message(message.encrypt(&*shared_key)?) + .await?; + Ok(()) + } else { + Err(GatewayClientError::ConnectionInInvalidState) + } + } } #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] @@ -195,6 +224,7 @@ impl LocalGateway { mod nonwasm_sealed { use super::*; + #[async_trait] impl GatewayTransceiver for LocalGateway { fn gateway_identity(&self) -> identity::PublicKey { self.local_identity @@ -202,6 +232,13 @@ mod nonwasm_sealed { fn ws_fd(&self) -> Option { None } + + async fn send_client_request( + &mut self, + _message: ClientRequest, + ) -> Result<(), GatewayClientError> { + Ok(()) + } } #[async_trait] @@ -269,6 +306,7 @@ impl GatewaySender for MockGateway { } } +#[async_trait] impl GatewayTransceiver for MockGateway { fn gateway_identity(&self) -> identity::PublicKey { self.dummy_identity @@ -276,4 +314,11 @@ impl GatewayTransceiver for MockGateway { fn ws_fd(&self) -> Option { None } + + async fn send_client_request( + &mut self, + _message: ClientRequest, + ) -> Result<(), GatewayClientError> { + Ok(()) + } } diff --git a/common/client-core/src/lib.rs b/common/client-core/src/lib.rs index ffa1402859..12ea3f7d5c 100644 --- a/common/client-core/src/lib.rs +++ b/common/client-core/src/lib.rs @@ -34,3 +34,48 @@ where { tokio::spawn(future); } + +#[derive(Clone, Default, Debug)] +pub struct ForgetMe { + client: bool, + stats: bool, +} + +impl ForgetMe { + pub fn new_all() -> Self { + Self { + client: true, + stats: true, + } + } + + pub fn new_client() -> Self { + Self { + client: true, + stats: false, + } + } + + pub fn new_stats() -> Self { + Self { + client: false, + stats: true, + } + } + + pub fn new(client: bool, stats: bool) -> Self { + Self { client, stats } + } + + pub fn any(&self) -> bool { + self.client || self.stats + } + + pub fn client(&self) -> bool { + self.client + } + + pub fn stats(&self) -> bool { + self.stats + } +} diff --git 
a/common/client-libs/gateway-client/src/client/mod.rs b/common/client-libs/gateway-client/src/client/mod.rs index 481e57dd6f..26986e3df8 100644 --- a/common/client-libs/gateway-client/src/client/mod.rs +++ b/common/client-libs/gateway-client/src/client/mod.rs @@ -324,7 +324,7 @@ impl GatewayClient { // If we want to send a message (with response), we need to have a full control over the socket, // as we need to be able to write the request and read the subsequent response - async fn send_websocket_message( + pub async fn send_websocket_message( &mut self, msg: impl Into, ) -> Result { diff --git a/common/gateway-requests/src/types/text_request.rs b/common/gateway-requests/src/types/text_request.rs index 8be56bcfb3..72eebc3e17 100644 --- a/common/gateway-requests/src/types/text_request.rs +++ b/common/gateway-requests/src/types/text_request.rs @@ -20,6 +20,10 @@ pub enum ClientRequest { hkdf_salt: Vec, derived_key_digest: Vec, }, + ForgetMe { + client: bool, + stats: bool, + }, } impl ClientRequest { diff --git a/common/gateway-requests/src/types/text_response.rs b/common/gateway-requests/src/types/text_response.rs index b0c8250f1e..5c6ce668b5 100644 --- a/common/gateway-requests/src/types/text_response.rs +++ b/common/gateway-requests/src/types/text_response.rs @@ -11,6 +11,7 @@ use tungstenite::Message; #[non_exhaustive] pub enum SensitiveServerResponse { KeyUpgradeAck {}, + ForgetMeAck {}, } impl SensitiveServerResponse { diff --git a/common/gateway-stats-storage/src/lib.rs b/common/gateway-stats-storage/src/lib.rs index 74b45e9e7a..e57f7452cd 100644 --- a/common/gateway-stats-storage/src/lib.rs +++ b/common/gateway-stats-storage/src/lib.rs @@ -116,6 +116,16 @@ impl PersistentStatsStorage { .await?) } + pub async fn delete_unique_user( + &self, + client_address: DestinationAddressBytes, + ) -> Result<(), StatsStorageError> { + Ok(self + .session_manager + .delete_unique_user(client_address.as_base58_string()) + .await?) 
+ } + pub async fn insert_active_session( &self, client_address: DestinationAddressBytes, diff --git a/common/gateway-stats-storage/src/sessions.rs b/common/gateway-stats-storage/src/sessions.rs index 920ee4d1d5..a919696967 100644 --- a/common/gateway-stats-storage/src/sessions.rs +++ b/common/gateway-stats-storage/src/sessions.rs @@ -71,6 +71,16 @@ impl SessionManager { Ok(()) } + pub(crate) async fn delete_unique_user(&self, client_address_b58: String) -> Result<()> { + sqlx::query!( + "DELETE FROM sessions_unique_users WHERE client_address = ?", + client_address_b58 + ) + .execute(&self.connection_pool) + .await?; + Ok(()) + } + pub(crate) async fn get_unique_users(&self, date: Date) -> Result> { sqlx::query_scalar!( "SELECT client_address as count FROM sessions_unique_users WHERE day = ?", diff --git a/common/gateway-storage/.sqlx/query-3ea5542b21a41b14276a8fd6b870c61aa0ddd30fee2565803b88c6086bd2a734.json b/common/gateway-storage/.sqlx/query-3ea5542b21a41b14276a8fd6b870c61aa0ddd30fee2565803b88c6086bd2a734.json new file mode 100644 index 0000000000..8c8994d4ca --- /dev/null +++ b/common/gateway-storage/.sqlx/query-3ea5542b21a41b14276a8fd6b870c61aa0ddd30fee2565803b88c6086bd2a734.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "DELETE FROM message_store WHERE client_address_bs58 = ?", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "3ea5542b21a41b14276a8fd6b870c61aa0ddd30fee2565803b88c6086bd2a734" +} diff --git a/common/gateway-storage/.sqlx/query-a3cc707995b8215fa77738cd1a55f9e8d251a3e764104d2a54153895dee1a118.json b/common/gateway-storage/.sqlx/query-a3cc707995b8215fa77738cd1a55f9e8d251a3e764104d2a54153895dee1a118.json new file mode 100644 index 0000000000..bc58f0aa1b --- /dev/null +++ b/common/gateway-storage/.sqlx/query-a3cc707995b8215fa77738cd1a55f9e8d251a3e764104d2a54153895dee1a118.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "DELETE FROM available_bandwidth WHERE client_id = ?", + 
"describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "a3cc707995b8215fa77738cd1a55f9e8d251a3e764104d2a54153895dee1a118" +} diff --git a/common/gateway-storage/src/bandwidth.rs b/common/gateway-storage/src/bandwidth.rs index 1f1cdce1c8..be64357350 100644 --- a/common/gateway-storage/src/bandwidth.rs +++ b/common/gateway-storage/src/bandwidth.rs @@ -49,6 +49,16 @@ impl BandwidthManager { Ok(()) } + pub(crate) async fn remove_client(&self, client_id: i64) -> Result<(), sqlx::Error> { + sqlx::query!( + "DELETE FROM available_bandwidth WHERE client_id = ?", + client_id + ) + .execute(&self.connection_pool) + .await?; + Ok(()) + } + /// Set the expiration date of the particular client to the provided date. pub(crate) async fn set_expiration( &self, diff --git a/common/gateway-storage/src/inboxes.rs b/common/gateway-storage/src/inboxes.rs index 6c1ac23c47..bdf798aa02 100644 --- a/common/gateway-storage/src/inboxes.rs +++ b/common/gateway-storage/src/inboxes.rs @@ -133,4 +133,17 @@ impl InboxManager { .await?; Ok(()) } + + pub(crate) async fn remove_messages_for_client( + &self, + client_address_bs58: &str, + ) -> Result<(), sqlx::Error> { + sqlx::query!( + "DELETE FROM message_store WHERE client_address_bs58 = ?", + client_address_bs58 + ) + .execute(&self.connection_pool) + .await?; + Ok(()) + } } diff --git a/common/gateway-storage/src/lib.rs b/common/gateway-storage/src/lib.rs index 7b0f70a5b3..9d574dbe91 100644 --- a/common/gateway-storage/src/lib.rs +++ b/common/gateway-storage/src/lib.rs @@ -41,6 +41,33 @@ pub struct GatewayStorage { } impl GatewayStorage { + #[allow(dead_code)] + pub(crate) fn client_manager(&self) -> &ClientManager { + &self.client_manager + } + + pub(crate) fn shared_key_manager(&self) -> &SharedKeysManager { + &self.shared_key_manager + } + + pub(crate) fn inbox_manager(&self) -> &InboxManager { + &self.inbox_manager + } + + pub(crate) fn bandwidth_manager(&self) -> &BandwidthManager { + 
&self.bandwidth_manager + } + + #[allow(dead_code)] + pub(crate) fn ticket_manager(&self) -> &TicketStorageManager { + &self.ticket_manager + } + + #[allow(dead_code)] + pub(crate) fn wireguard_peer_manager(&self) -> &wireguard_peers::WgPeerManager { + &self.wireguard_peer_manager + } + /// Initialises `PersistentStorage` using the provided path. /// /// # Arguments @@ -101,6 +128,21 @@ impl GatewayStorage { .await?) } + pub async fn handle_forget_me( + &self, + client_address: DestinationAddressBytes, + ) -> Result<(), GatewayStorageError> { + let client_id = self.get_mixnet_client_id(client_address).await?; + self.inbox_manager() + .remove_messages_for_client(&client_address.as_base58_string()) + .await?; + self.bandwidth_manager().remove_client(client_id).await?; + self.shared_key_manager() + .remove_shared_keys(&client_address.as_base58_string()) + .await?; + Ok(()) + } + pub async fn insert_shared_keys( &self, client_address: DestinationAddressBytes, diff --git a/common/statistics/src/gateways.rs b/common/statistics/src/gateways.rs index cc617ae027..4e8e701de1 100644 --- a/common/statistics/src/gateways.rs +++ b/common/statistics/src/gateways.rs @@ -58,6 +58,10 @@ pub enum GatewaySessionEvent { /// Address of the remote client opening the connection client: DestinationAddressBytes, }, + SessionDelete { + /// Address of the remote client opening the connection + client: DestinationAddressBytes, + }, } impl GatewaySessionEvent { @@ -87,4 +91,8 @@ impl GatewaySessionEvent { client, } } + + pub fn new_session_delete(client: DestinationAddressBytes) -> GatewaySessionEvent { + GatewaySessionEvent::SessionDelete { client } + } } diff --git a/gateway/src/node/client_handling/websocket/common_state.rs b/gateway/src/node/client_handling/websocket/common_state.rs index 4ff8b8991a..19a5943c53 100644 --- a/gateway/src/node/client_handling/websocket/common_state.rs +++ b/gateway/src/node/client_handling/websocket/common_state.rs @@ -21,3 +21,9 @@ pub(crate) struct 
CommonHandlerState { pub(crate) outbound_mix_sender: MixForwardingSender, pub(crate) active_clients_store: ActiveClientsStore, } + +impl CommonHandlerState { + pub(crate) fn storage(&self) -> &GatewayStorage { + &self.storage + } +} diff --git a/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs b/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs index d7bc9dd32e..6c39b613ce 100644 --- a/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs +++ b/gateway/src/node/client_handling/websocket/connection_handler/authenticated.rs @@ -157,6 +157,10 @@ impl Drop for AuthenticatedHandler { } impl AuthenticatedHandler { + pub(crate) fn inner(&self) -> &FreshHandler { + &self.inner + } + /// Upgrades `FreshHandler` into the Authenticated variant implying the client is now authenticated /// and thus allowed to perform more actions with the gateway, such as redeeming bandwidth or /// sending sphinx packets. @@ -327,6 +331,24 @@ impl AuthenticatedHandler { } } + async fn handle_forget_me( + &mut self, + client: bool, + stats: bool, + ) -> Result { + if client { + self.inner() + .shared_state() + .storage() + .handle_forget_me(self.client.address) + .await?; + } + if stats { + self.send_metrics(GatewaySessionEvent::new_session_delete(self.client.address)); + } + Ok(SensitiveServerResponse::ForgetMeAck {}.encrypt(&self.client.shared_keys)?) 
+ } + async fn handle_key_upgrade( &mut self, hkdf_salt: Vec, @@ -370,6 +392,7 @@ impl AuthenticatedHandler { hkdf_salt, derived_key_digest, } => self.handle_key_upgrade(hkdf_salt, derived_key_digest).await, + ClientRequest::ForgetMe { client, stats } => self.handle_forget_me(client, stats).await, _ => Err(RequestHandlingError::UnknownEncryptedTextRequest), } } diff --git a/gateway/src/node/client_handling/websocket/connection_handler/fresh.rs b/gateway/src/node/client_handling/websocket/connection_handler/fresh.rs index 188699c64c..818aea8fdb 100644 --- a/gateway/src/node/client_handling/websocket/connection_handler/fresh.rs +++ b/gateway/src/node/client_handling/websocket/connection_handler/fresh.rs @@ -114,6 +114,10 @@ pub(crate) struct FreshHandler { } impl FreshHandler { + pub(crate) fn shared_state(&self) -> &CommonHandlerState { + &self.shared_state + } + // for time being we assume handle is always constructed from raw socket. // if we decide we want to change it, that's not too difficult // also at this point I'm not entirely sure how to deal with this warning without diff --git a/nym-network-monitor/Cargo.toml b/nym-network-monitor/Cargo.toml index 3d80fed9a0..bd537d5483 100644 --- a/nym-network-monitor/Cargo.toml +++ b/nym-network-monitor/Cargo.toml @@ -33,6 +33,7 @@ nym-bin-common = { path = "../common/bin-common" } nym-client-core = { path = "../common/client-core" } nym-crypto = { path = "../common/crypto" } nym-network-defaults = { path = "../common/network-defaults" } +nym-gateway-requests = { path = "../common/gateway-requests" } nym-sdk = { path = "../sdk/rust/nym-sdk" } nym-sphinx = { path = "../common/nymsphinx" } nym-topology = { path = "../common/topology" } diff --git a/nym-network-monitor/src/main.rs b/nym-network-monitor/src/main.rs index 141944eb8a..76ec69440d 100644 --- a/nym-network-monitor/src/main.rs +++ b/nym-network-monitor/src/main.rs @@ -3,6 +3,7 @@ use accounting::submit_metrics; use anyhow::Result; use clap::Parser; use 
log::{info, warn}; +use nym_client_core::ForgetMe; use nym_crypto::asymmetric::ed25519::PrivateKey; use nym_network_defaults::setup_env; use nym_network_defaults::var_names::NYM_API; @@ -56,7 +57,11 @@ async fn make_clients( loop { if Arc::strong_count(&dropped_client) == 1 { if let Some(client) = Arc::into_inner(dropped_client) { - client.into_inner().disconnect().await; + // let forget_me = ClientRequest::ForgetMe { + // also_from_stats: true, + // }; + let client_handle = client.into_inner(); + client_handle.disconnect().await; } else { warn!("Failed to drop client, client had more then one strong ref") } @@ -89,6 +94,7 @@ async fn make_client(topology: NymTopology) -> Result { .network_details(net) .custom_topology_provider(topology_provider) .debug_config(mixnet_debug_config(0)) + .with_forget_me(ForgetMe::new_all()) // .enable_credentials_mode() .build()?; diff --git a/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs b/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs index 369acee1a7..8841d3f505 100644 --- a/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs +++ b/nym-node-status-api/nym-node-status-api/src/monitor/geodata.rs @@ -154,31 +154,39 @@ mod api_regression { use super::*; use std::{env::var, sync::LazyLock}; - static IPINFO_TOKEN: LazyLock = LazyLock::new(|| var("IPINFO_API_TOKEN").unwrap()); + static IPINFO_TOKEN: LazyLock> = LazyLock::new(|| var("IPINFO_API_TOKEN").ok()); + static CI: LazyLock> = LazyLock::new(|| var("CI").ok()); #[tokio::test] async fn should_parse_response() { - let client = IpInfoClient::new(&(*IPINFO_TOKEN)); - let my_ip = reqwest::get("https://api.ipify.org") - .await - .expect("Couldn't get own IP") - .text() - .await - .unwrap(); - - let location_result = client.locate_ip(my_ip).await; - assert!(location_result.is_ok(), "Did ipinfo response change?"); - - assert!( - client.check_remaining_bandwidth().await.is_ok(), - "Failed to check remaining bandwidth?" 
- ); - - // when serialized, these fields should be present because they're exposed over API - let location_result = location_result.unwrap(); - let json = serde_json::to_value(&location_result).unwrap(); - assert!(json.get("two_letter_iso_country_code").is_some()); - assert!(json.get("latitude").is_some()); - assert!(json.get("longitude").is_some()); + if CI.is_none() { + return; + } + if let Some(token) = &*IPINFO_TOKEN { + let client = IpInfoClient::new(token); + let my_ip = reqwest::get("https://api.ipify.org") + .await + .expect("Couldn't get own IP") + .text() + .await + .unwrap(); + + let location_result = client.locate_ip(my_ip).await; + assert!(location_result.is_ok(), "Did ipinfo response change?"); + + assert!( + client.check_remaining_bandwidth().await.is_ok(), + "Failed to check remaining bandwidth?" + ); + + // when serialized, these fields should be present because they're exposed over API + let location_result = location_result.unwrap(); + let json = serde_json::to_value(&location_result).unwrap(); + assert!(json.get("two_letter_iso_country_code").is_some()); + assert!(json.get("latitude").is_some()); + assert!(json.get("longitude").is_some()); + } else { + panic!("IPINFO_API_TOKEN not set"); + } } } diff --git a/nym-node/src/node/metrics/handler/client_sessions.rs b/nym-node/src/node/metrics/handler/client_sessions.rs index 5f9e870a2a..8394eb1d89 100644 --- a/nym-node/src/node/metrics/handler/client_sessions.rs +++ b/nym-node/src/node/metrics/handler/client_sessions.rs @@ -73,6 +73,15 @@ impl GatewaySessionStatsHandler { Ok(()) } + async fn handle_session_delete( + &mut self, + client: DestinationAddressBytes, + ) -> Result<(), StatsStorageError> { + self.storage.delete_active_session(client).await?; + self.storage.delete_unique_user(client).await?; + Ok(()) + } + async fn handle_session_event( &mut self, event: GatewaySessionEvent, @@ -90,6 +99,11 @@ impl GatewaySessionStatsHandler { ticket_type, client, } => self.handle_ecash_ticket(ticket_type, 
client).await, + + // As long as delete is sent before stop, everything should work as expected + GatewaySessionEvent::SessionDelete { client } => { + self.handle_session_delete(client).await + } } } diff --git a/sdk/rust/nym-sdk/src/mixnet/client.rs b/sdk/rust/nym-sdk/src/mixnet/client.rs index f223037572..598bbd4808 100644 --- a/sdk/rust/nym-sdk/src/mixnet/client.rs +++ b/sdk/rust/nym-sdk/src/mixnet/client.rs @@ -28,6 +28,7 @@ use nym_client_core::error::ClientCoreError; use nym_client_core::init::helpers::current_gateways; use nym_client_core::init::setup_gateway; use nym_client_core::init::types::{GatewaySelectionSpecification, GatewaySetup}; +use nym_client_core::ForgetMe; use nym_credentials_interface::TicketType; use nym_socks5_client_core::config::Socks5; use nym_task::{TaskClient, TaskHandle, TaskStatus}; @@ -61,6 +62,7 @@ pub struct MixnetClientBuilder { gateway_endpoint_config_path: Option, storage: S, + forget_me: ForgetMe, } impl MixnetClientBuilder { @@ -97,6 +99,7 @@ impl MixnetClientBuilder { user_agent: None, #[cfg(unix)] connection_fd_callback: None, + forget_me: Default::default(), }) } } @@ -128,6 +131,7 @@ where connection_fd_callback: None, gateway_endpoint_config_path: None, storage, + forget_me: Default::default(), } } @@ -148,6 +152,7 @@ where connection_fd_callback: self.connection_fd_callback, gateway_endpoint_config_path: self.gateway_endpoint_config_path, storage, + forget_me: self.forget_me, } } @@ -160,6 +165,12 @@ where self.set_storage(storage) } + #[must_use] + pub fn with_forget_me(mut self, forget_me: ForgetMe) -> Self { + self.forget_me = forget_me; + self + } + /// Request a specific gateway instead of a random one. 
#[must_use] pub fn request_gateway(mut self, user_chosen_gateway: String) -> Self { @@ -283,7 +294,7 @@ where client.force_tls = self.force_tls; client.user_agent = self.user_agent; client.connection_fd_callback = self.connection_fd_callback; - + client.forget_me = self.forget_me; Ok(client) } } @@ -335,6 +346,8 @@ where /// Callback on the websocket fd as soon as the connection has been established connection_fd_callback: Option>, + + forget_me: ForgetMe, } impl DisconnectedMixnetClient @@ -385,6 +398,7 @@ where custom_shutdown: None, user_agent: None, connection_fd_callback: None, + forget_me: Default::default(), }) } @@ -608,7 +622,8 @@ where let mut base_builder: BaseClientBuilder<_, _> = BaseClientBuilder::new(&base_config, self.storage, self.dkg_query_client) - .with_wait_for_gateway(self.wait_for_gateway); + .with_wait_for_gateway(self.wait_for_gateway) + .with_forget_me(&self.forget_me); if let Some(user_agent) = self.user_agent { base_builder = base_builder.with_user_agent(user_agent); From a3f3d83c1b43fba94514f4c145895d394c322a67 Mon Sep 17 00:00:00 2001 From: Drazen Urch Date: Mon, 16 Dec 2024 16:19:37 +0100 Subject: [PATCH 13/64] Shipping raw metrics to PG (#5216) * Shipping raw metrics to PG * Put cancel token back in its place * fmt --- .gitignore | 1 + Cargo.lock | 81 ++++++++++++ nym-network-monitor/Cargo.toml | 1 + nym-network-monitor/entrypoint.sh | 5 +- nym-network-monitor/src/accounting.rs | 169 +++++++++++++++++++------- nym-network-monitor/src/main.rs | 33 ++++- 6 files changed, 238 insertions(+), 52 deletions(-) diff --git a/.gitignore b/.gitignore index 8953d2a2da..817ddac4f2 100644 --- a/.gitignore +++ b/.gitignore @@ -53,3 +53,4 @@ nym-network-monitor/topology.json nym-network-monitor/__pycache__ nym-network-monitor/*.key nym-network-monitor/.envrc +nym-network-monitor/.envrc diff --git a/Cargo.lock b/Cargo.lock index 2fd2fbf19c..d84ab9916c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2475,6 +2475,12 @@ dependencies = [ "once_cell", ] 
+[[package]] +name = "fallible-iterator" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" + [[package]] name = "fancy_constructor" version = "1.2.2" @@ -5796,6 +5802,7 @@ dependencies = [ "serde", "serde_json", "tokio", + "tokio-postgres", "tokio-util", "utoipa", "utoipa-swagger-ui", @@ -7251,6 +7258,24 @@ dependencies = [ "indexmap 2.2.6", ] +[[package]] +name = "phf" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +dependencies = [ + "siphasher 0.3.11", +] + [[package]] name = "pin-project" version = "1.1.6" @@ -7389,6 +7414,35 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +[[package]] +name = "postgres-protocol" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acda0ebdebc28befa84bee35e651e4c5f09073d668c7aed4cf7e23c3cda84b23" +dependencies = [ + "base64 0.22.1", + "byteorder", + "bytes", + "fallible-iterator", + "hmac", + "md-5", + "memchr", + "rand", + "sha2 0.10.8", + "stringprep", +] + +[[package]] +name = "postgres-types" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f66ea23a2d0e5734297357705193335e0a957696f34bed2f2faefacb2fec336f" +dependencies = [ + "bytes", + "fallible-iterator", + "postgres-protocol", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -9723,6 +9777,32 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "tokio-postgres" +version = "0.7.12" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b5d3742945bc7d7f210693b0c58ae542c6fd47b17adbbda0885f3dcb34a6bdb" +dependencies = [ + "async-trait", + "byteorder", + "bytes", + "fallible-iterator", + "futures-channel", + "futures-util", + "log", + "parking_lot", + "percent-encoding", + "phf", + "pin-project-lite", + "postgres-protocol", + "postgres-types", + "rand", + "socket2", + "tokio", + "tokio-util", + "whoami", +] + [[package]] name = "tokio-rustls" version = "0.24.1" @@ -10913,6 +10993,7 @@ checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ "redox_syscall 0.5.1", "wasite", + "web-sys", ] [[package]] diff --git a/nym-network-monitor/Cargo.toml b/nym-network-monitor/Cargo.toml index bd537d5483..a6def79278 100644 --- a/nym-network-monitor/Cargo.toml +++ b/nym-network-monitor/Cargo.toml @@ -27,6 +27,7 @@ tokio = { workspace = true, features = ["macros", "time"] } tokio-util = { workspace = true } utoipa = { workspace = true, features = ["axum_extras"] } utoipa-swagger-ui = { workspace = true, features = ["axum"] } +tokio-postgres = "0.7" # internal nym-bin-common = { path = "../common/bin-common" } diff --git a/nym-network-monitor/entrypoint.sh b/nym-network-monitor/entrypoint.sh index 91a0830639..62b527944d 100755 --- a/nym-network-monitor/entrypoint.sh +++ b/nym-network-monitor/entrypoint.sh @@ -9,13 +9,14 @@ network=${NYM_NETWORK:-mainnet} timeout=${LOCUST_TIMEOUT:-600} users=${LOCUST_USERS:-10} processes=${LOCUST_PROCESSES:-4} +_database_url=${DATABASE_URL} -RUST_LOG=info nym-network-monitor --env envs/"${network}".env --private-key "${_private_key}" & +RUST_LOG=info nym-network-monitor --env envs/"${network}".env --private-key "${_private_key}" --database-url "${_database_url}" & nnm_pid=$! 
sleep 10 -python -m locust -H http://${NYM_NETWORK_MONITOR_HOST}:${NYM_NETWORK_MONITOR_PORT} --processes "${processes}" --autostart --autoquit 60 -u "${users}" -t "${timeout}"s & +python -m locust -H http://"${NYM_NETWORK_MONITOR_HOST}":"${NYM_NETWORK_MONITOR_PORT}" --processes "${processes}" --autostart --autoquit 60 -u "${users}" -t "${timeout}"s & locust_pid=$! wait $locust_pid diff --git a/nym-network-monitor/src/accounting.rs b/nym-network-monitor/src/accounting.rs index b87f795762..2fcacee75a 100644 --- a/nym-network-monitor/src/accounting.rs +++ b/nym-network-monitor/src/accounting.rs @@ -1,7 +1,10 @@ -use std::collections::{HashMap, HashSet}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use anyhow::Result; -use futures::{stream::FuturesUnordered, StreamExt}; +use futures::{pin_mut, stream::FuturesUnordered, StreamExt}; use log::{debug, info}; use nym_sphinx::chunking::{monitoring, SentFragment}; use nym_topology::{gateway, mix, NymTopology}; @@ -10,6 +13,7 @@ use nym_validator_client::nym_api::routes::{API_VERSION, STATUS, SUBMIT_GATEWAY, use rand::SeedableRng; use rand_chacha::ChaCha8Rng; use serde::{Deserialize, Serialize}; +use tokio_postgres::{binary_copy::BinaryCopyInWriter, types::Type, Client}; use utoipa::ToSchema; use crate::{NYM_API_URL, PRIVATE_KEY, TOPOLOGY}; @@ -23,20 +27,20 @@ struct HydratedRoute { struct GatewayStats(u32, u32); impl GatewayStats { - fn new(sent: u32, recv: u32) -> Self { - GatewayStats(sent, recv) + fn new(success: u32, failure: u32) -> Self { + GatewayStats(success, failure) } fn success(&self) -> u32 { self.0 } - fn failed(&self) -> u32 { + fn failure(&self) -> u32 { self.1 } fn reliability(&self) -> f64 { - self.success() as f64 / (self.success() + self.failed()) as f64 + self.success() as f64 / (self.success() + self.failure()) as f64 } fn incr_success(&mut self) { @@ -321,48 +325,125 @@ pub async fn monitor_mixnode_results() -> anyhow::Result> { .collect()) } -pub async fn submit_metrics() -> 
anyhow::Result<()> { - let node_stats = monitor_mixnode_results().await?; - let gateway_stats = monitor_gateway_results().await?; +async fn submit_node_stats_to_db(client: Arc) -> anyhow::Result<()> { + let client = Arc::clone(&client); + let node_stats = all_node_stats().await?; + + let sink = client + .copy_in("COPY node_stats (node_id, identity, reliability, complete_routes, incomplete_routes) FROM STDIN BINARY") + .await?; + + let writer = BinaryCopyInWriter::new( + sink, + &[Type::INT4, Type::TEXT, Type::FLOAT8, Type::INT8, Type::INT8], + ); + pin_mut!(writer); + + for stat in node_stats { + writer + .as_mut() + .write(&[ + &(stat.mix_id as i32), + &stat.identity, + &stat.reliability, + &(stat.complete_routes as i64), + &(stat.incomplete_routes as i64), + ]) + .await?; + } - info!("Submitting metrics to {}", *NYM_API_URL); - let client = reqwest::Client::new(); + writer.finish().await?; - let node_submit_url = format!("{}/{API_VERSION}/{STATUS}/{SUBMIT_NODE}", &*NYM_API_URL); - let gateway_submit_url = format!("{}/{API_VERSION}/{STATUS}/{SUBMIT_GATEWAY}", &*NYM_API_URL); + Ok(()) +} - info!("Submitting {} mixnode measurements", node_stats.len()); +async fn submit_gateway_stats_to_db(client: Arc) -> anyhow::Result<()> { + let client = Arc::clone(&client); + let network_account = NetworkAccount::finalize()?; + let gateway_stats = network_account.gateway_stats; + + let sink = client + .copy_in("COPY gateway_stats (identity, reliability, success, failure) FROM STDIN BINARY") + .await?; + + let writer = BinaryCopyInWriter::new(sink, &[Type::TEXT, Type::FLOAT8, Type::INT8, Type::INT8]); + pin_mut!(writer); + + for (key, stats) in gateway_stats { + writer + .as_mut() + .write(&[ + &key, + &stats.reliability(), + &(stats.success() as i64), + &(stats.failure() as i64), + ]) + .await?; + } - node_stats - .chunks(10) - .map(|chunk| { - let monitor_message = - MonitorMessage::new(chunk.to_vec(), PRIVATE_KEY.get().expect("We've set this!")); - 
client.post(&node_submit_url).json(&monitor_message).send() - }) - .collect::>() - .collect::>>() - .await - .into_iter() - .collect::, _>>()?; - - info!("Submitting {} gateway measurements", gateway_stats.len()); - - gateway_stats - .chunks(10) - .map(|chunk| { - let monitor_message = - MonitorMessage::new(chunk.to_vec(), PRIVATE_KEY.get().expect("We've set this!")); - client - .post(&gateway_submit_url) - .json(&monitor_message) - .send() - }) - .collect::>() - .collect::>>() - .await - .into_iter() - .collect::, _>>()?; + writer.finish().await?; + + Ok(()) +} + +pub async fn submit_metrics_to_db(client: Arc) -> anyhow::Result<()> { + let client = Arc::clone(&client); + let client2 = Arc::clone(&client); + submit_node_stats_to_db(client).await?; + submit_gateway_stats_to_db(client2).await?; + Ok(()) +} + +pub async fn submit_metrics(client: Option>) -> anyhow::Result<()> { + if let Some(client) = client { + submit_metrics_to_db(client).await?; + } + + if let Some(private_key) = PRIVATE_KEY.get() { + let node_stats = monitor_mixnode_results().await?; + let gateway_stats = monitor_gateway_results().await?; + + info!("Submitting metrics to {}", *NYM_API_URL); + let client = reqwest::Client::new(); + + let node_submit_url = format!("{}/{API_VERSION}/{STATUS}/{SUBMIT_NODE}", &*NYM_API_URL); + let gateway_submit_url = + format!("{}/{API_VERSION}/{STATUS}/{SUBMIT_GATEWAY}", &*NYM_API_URL); + + info!("Submitting {} mixnode measurements", node_stats.len()); + + node_stats + .chunks(10) + .map(|chunk| { + let monitor_message = MonitorMessage::new(chunk.to_vec(), private_key); + client.post(&node_submit_url).json(&monitor_message).send() + }) + .collect::>() + .collect::>>() + .await + .into_iter() + .collect::, _>>()?; + + info!("Submitting {} gateway measurements", gateway_stats.len()); + + gateway_stats + .chunks(10) + .map(|chunk| { + let monitor_message = MonitorMessage::new( + chunk.to_vec(), + PRIVATE_KEY.get().expect("We've set this!"), + ); + client + 
.post(&gateway_submit_url) + .json(&monitor_message) + .send() + }) + .collect::>() + .collect::>>() + .await + .into_iter() + .collect::, _>>()?; + } NetworkAccount::empty_buffers(); diff --git a/nym-network-monitor/src/main.rs b/nym-network-monitor/src/main.rs index 76ec69440d..89fee6df6f 100644 --- a/nym-network-monitor/src/main.rs +++ b/nym-network-monitor/src/main.rs @@ -2,7 +2,7 @@ use crate::http::HttpServer; use accounting::submit_metrics; use anyhow::Result; use clap::Parser; -use log::{info, warn}; +use log::{error, info, warn}; use nym_client_core::ForgetMe; use nym_crypto::asymmetric::ed25519::PrivateKey; use nym_network_defaults::setup_env; @@ -22,6 +22,7 @@ use std::{ }; use tokio::sync::OnceCell; use tokio::{signal::ctrl_c, sync::RwLock}; +use tokio_postgres::NoTls; use tokio_util::sync::CancellationToken; static NYM_API_URL: LazyLock = LazyLock::new(|| { @@ -136,7 +137,10 @@ struct Args { generate_key_pair: bool, #[arg(long)] - private_key: String, + private_key: Option, + + #[arg(long, env = "DATABASE_URL")] + database_url: Option, } fn generate_key_pair() -> Result<()> { @@ -174,8 +178,10 @@ async fn main() -> Result<()> { std::process::exit(0); } - let pk = PrivateKey::from_base58_string(&args.private_key)?; - PRIVATE_KEY.set(pk).ok(); + if let Some(private_key) = args.private_key { + let pk = PrivateKey::from_base58_string(&private_key)?; + PRIVATE_KEY.set(pk).ok(); + } TOPOLOGY .set(if let Some(topology_file) = args.topology { @@ -203,16 +209,31 @@ async fn main() -> Result<()> { info!("Waiting for message (ctrl-c to exit)"); + let client = if let Some(database_url) = args.database_url { + let (client, connection) = tokio_postgres::connect(&database_url, NoTls).await?; + + tokio::spawn(async move { + if let Err(e) = connection.await { + error!("Postgres connection error: {}", e); + } + }); + + Some(Arc::new(client)) + } else { + None + }; + loop { + let client = client.as_ref().map(Arc::clone); match 
tokio::time::timeout(Duration::from_secs(600), ctrl_c()).await { Ok(_) => { info!("Received kill signal, shutting down, submitting final batch of metrics"); - submit_metrics().await?; + submit_metrics(client).await?; break; } Err(_) => { info!("Submitting metrics, cleaning metric buffers"); - submit_metrics().await?; + submit_metrics(client).await?; } }; } From b4f51baf94a1b3df2834df1b1fdd364601ad4c80 Mon Sep 17 00:00:00 2001 From: dynco-nym <173912580+dynco-nym@users.noreply.github.com> Date: Mon, 16 Dec 2024 16:40:02 +0100 Subject: [PATCH 14/64] Change sqlite journal mode to WAL (#5213) * Change sqlite journal mode to WAL * Synchronous mode & auto vacuum * Bump probe git ref to 1.1.0 --- .../src/backend/fs_backend/manager.rs | 12 +++++++++--- .../surb-storage/src/backend/fs_backend/manager.rs | 8 +++++++- .../credential-storage/src/persistent_storage/mod.rs | 8 +++++++- common/gateway-stats-storage/src/lib.rs | 8 +++++++- common/gateway-storage/src/lib.rs | 8 +++++++- common/nyxd-scraper/src/storage/mod.rs | 9 ++++++++- nym-api/src/support/storage/mod.rs | 4 ++++ .../nym-credential-proxy/src/storage/mod.rs | 4 ++++ nym-node-status-api/nym-node-status-agent/run.sh | 4 ++-- .../nym-node-status-api/launch_node_status_api.sh | 4 +++- nym-validator-rewarder/src/rewarder/storage/mod.rs | 4 ++++ .../testnet-manager/src/manager/storage/mod.rs | 8 +++++++- 12 files changed, 69 insertions(+), 12 deletions(-) diff --git a/common/client-core/gateways-storage/src/backend/fs_backend/manager.rs b/common/client-core/gateways-storage/src/backend/fs_backend/manager.rs index 4b32c60936..d2d1943101 100644 --- a/common/client-core/gateways-storage/src/backend/fs_backend/manager.rs +++ b/common/client-core/gateways-storage/src/backend/fs_backend/manager.rs @@ -8,7 +8,10 @@ use crate::{ }, }; use log::{debug, error}; -use sqlx::ConnectOptions; +use sqlx::{ + sqlite::{SqliteAutoVacuum, SqliteSynchronous}, + ConnectOptions, +}; use std::path::Path; #[derive(Debug, Clone)] @@ -30,6 +33,9 
@@ impl StorageManager { } let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .disable_statement_logging(); @@ -110,7 +116,7 @@ impl StorageManager { ) -> Result<(), sqlx::Error> { sqlx::query!( r#" - INSERT INTO registered_gateway(gateway_id_bs58, registration_timestamp, gateway_type) + INSERT INTO registered_gateway(gateway_id_bs58, registration_timestamp, gateway_type) VALUES (?, ?, ?) "#, registered_gateway.gateway_id_bs58, @@ -224,7 +230,7 @@ impl StorageManager { ) -> Result<(), sqlx::Error> { sqlx::query!( r#" - INSERT INTO custom_gateway_details(gateway_id_bs58, data) + INSERT INTO custom_gateway_details(gateway_id_bs58, data) VALUES (?, ?) "#, custom.gateway_id_bs58, diff --git a/common/client-core/surb-storage/src/backend/fs_backend/manager.rs b/common/client-core/surb-storage/src/backend/fs_backend/manager.rs index b6adadd3d5..02316ddb7f 100644 --- a/common/client-core/surb-storage/src/backend/fs_backend/manager.rs +++ b/common/client-core/surb-storage/src/backend/fs_backend/manager.rs @@ -9,7 +9,10 @@ use crate::backend::fs_backend::{ }, }; use log::{error, info}; -use sqlx::ConnectOptions; +use sqlx::{ + sqlite::{SqliteAutoVacuum, SqliteSynchronous}, + ConnectOptions, +}; use std::path::Path; #[derive(Debug, Clone)] @@ -31,6 +34,9 @@ impl StorageManager { } let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(fresh) .disable_statement_logging(); diff --git a/common/credential-storage/src/persistent_storage/mod.rs b/common/credential-storage/src/persistent_storage/mod.rs index 80e746b482..e8c9eca5aa 100644 --- a/common/credential-storage/src/persistent_storage/mod.rs +++ 
b/common/credential-storage/src/persistent_storage/mod.rs @@ -33,7 +33,10 @@ use nym_credentials::{ IssuanceTicketBook, IssuedTicketBook, }; use nym_ecash_time::{ecash_today, Date, EcashTime}; -use sqlx::ConnectOptions; +use sqlx::{ + sqlite::{SqliteAutoVacuum, SqliteSynchronous}, + ConnectOptions, +}; use std::path::Path; use zeroize::Zeroizing; @@ -56,6 +59,9 @@ impl PersistentStorage { ); let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .disable_statement_logging(); diff --git a/common/gateway-stats-storage/src/lib.rs b/common/gateway-stats-storage/src/lib.rs index e57f7452cd..5bfc658a93 100644 --- a/common/gateway-stats-storage/src/lib.rs +++ b/common/gateway-stats-storage/src/lib.rs @@ -6,7 +6,10 @@ use models::StoredFinishedSession; use nym_node_metrics::entry::{ActiveSession, FinishedSession, SessionType}; use nym_sphinx::DestinationAddressBytes; use sessions::SessionManager; -use sqlx::ConnectOptions; +use sqlx::{ + sqlite::{SqliteAutoVacuum, SqliteSynchronous}, + ConnectOptions, +}; use std::path::Path; use time::Date; use tracing::{debug, error}; @@ -36,6 +39,9 @@ impl PersistentStatsStorage { // TODO: we can inject here more stuff based on our gateway global config // struct. Maybe different pool size or timeout intervals? 
let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .disable_statement_logging(); diff --git a/common/gateway-storage/src/lib.rs b/common/gateway-storage/src/lib.rs index 9d574dbe91..d95581f88a 100644 --- a/common/gateway-storage/src/lib.rs +++ b/common/gateway-storage/src/lib.rs @@ -12,7 +12,10 @@ use nym_credentials_interface::ClientTicket; use nym_gateway_requests::shared_key::SharedGatewayKey; use nym_sphinx::DestinationAddressBytes; use shared_keys::SharedKeysManager; -use sqlx::ConnectOptions; +use sqlx::{ + sqlite::{SqliteAutoVacuum, SqliteSynchronous}, + ConnectOptions, +}; use std::path::Path; use tickets::TicketStorageManager; use time::OffsetDateTime; @@ -86,6 +89,9 @@ impl GatewayStorage { // TODO: we can inject here more stuff based on our gateway global config // struct. Maybe different pool size or timeout intervals? 
let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .disable_statement_logging(); diff --git a/common/nyxd-scraper/src/storage/mod.rs b/common/nyxd-scraper/src/storage/mod.rs index 8ac0f07775..a3b0550677 100644 --- a/common/nyxd-scraper/src/storage/mod.rs +++ b/common/nyxd-scraper/src/storage/mod.rs @@ -13,7 +13,11 @@ use crate::{ models::{CommitSignature, Validator}, }, }; -use sqlx::{types::time::OffsetDateTime, ConnectOptions, Sqlite, Transaction}; +use sqlx::{ + sqlite::{SqliteAutoVacuum, SqliteSynchronous}, + types::time::OffsetDateTime, + ConnectOptions, Sqlite, Transaction, +}; use std::{fmt::Debug, path::Path}; use tendermint::{ block::{Commit, CommitSig}, @@ -51,6 +55,9 @@ impl ScraperStorage { #[instrument] pub async fn init + Debug>(database_path: P) -> Result { let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .disable_statement_logging(); diff --git a/nym-api/src/support/storage/mod.rs b/nym-api/src/support/storage/mod.rs index 72ff9aea1d..dc5e4c2560 100644 --- a/nym-api/src/support/storage/mod.rs +++ b/nym-api/src/support/storage/mod.rs @@ -18,6 +18,7 @@ use crate::support::storage::models::{ use dashmap::DashMap; use nym_mixnet_contract_common::NodeId; use nym_types::monitoring::NodeResult; +use sqlx::sqlite::{SqliteAutoVacuum, SqliteSynchronous}; use sqlx::ConnectOptions; use std::path::Path; use std::sync::Arc; @@ -67,6 +68,9 @@ impl NymApiStorage { // TODO: we can inject here more stuff based on our nym-api global config // struct. Maybe different pool size or timeout intervals? 
let connect_opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .log_statements(LevelFilter::Trace) diff --git a/nym-credential-proxy/nym-credential-proxy/src/storage/mod.rs b/nym-credential-proxy/nym-credential-proxy/src/storage/mod.rs index 6133d1acbc..5480f96d5b 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/storage/mod.rs +++ b/nym-credential-proxy/nym-credential-proxy/src/storage/mod.rs @@ -16,6 +16,7 @@ use nym_validator_client::ecash::BlindedSignatureResponse; use nym_validator_client::nym_api::EpochId; use nym_validator_client::nyxd::contract_traits::ecash_query_client::DepositId; use nym_validator_client::nyxd::Coin; +use sqlx::sqlite::{SqliteAutoVacuum, SqliteSynchronous}; use sqlx::ConnectOptions; use std::fmt::Debug; use std::path::Path; @@ -40,6 +41,9 @@ impl VpnApiStorage { debug!("Attempting to connect to database"); let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .log_statements(LevelFilter::Trace) diff --git a/nym-node-status-api/nym-node-status-agent/run.sh b/nym-node-status-api/nym-node-status-agent/run.sh index 680c87db8b..f3900ec227 100755 --- a/nym-node-status-api/nym-node-status-agent/run.sh +++ b/nym-node-status-api/nym-node-status-agent/run.sh @@ -3,7 +3,7 @@ set -eu export ENVIRONMENT=${ENVIRONMENT:-"sandbox"} -probe_git_ref="nym-vpn-core-v1.0.0-rc.14" +probe_git_ref="nym-vpn-core-v1.1.0" crate_root=$(dirname $(realpath "$0")) monorepo_root=$(realpath "${crate_root}/../..") @@ -54,7 +54,7 @@ function swarm() { echo "All agents completed" } -copy_gw_probe +# copy_gw_probe build_agent swarm $workers diff --git 
a/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh b/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh index 24b9a68ae8..9735f873d7 100755 --- a/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh +++ b/nym-node-status-api/nym-node-status-api/launch_node_status_api.sh @@ -2,6 +2,7 @@ set -e +user_rust_log_preference=$RUST_LOG export NYM_API_CLIENT_TIMEOUT=60 export EXPLORER_CLIENT_TIMEOUT=60 export NODE_STATUS_API_TESTRUN_REFRESH_INTERVAL=120 @@ -20,7 +21,8 @@ function run_bare() { set -a source "${monorepo_root}/envs/${ENVIRONMENT}.env" set +a - export RUST_LOG=debug + export RUST_LOG=${user_rust_log_preference:-debug} + echo "RUST_LOG=${RUST_LOG}" # --conection-url is provided in build.rs cargo run --package nym-node-status-api diff --git a/nym-validator-rewarder/src/rewarder/storage/mod.rs b/nym-validator-rewarder/src/rewarder/storage/mod.rs index 1d58da9b11..876cb6d98d 100644 --- a/nym-validator-rewarder/src/rewarder/storage/mod.rs +++ b/nym-validator-rewarder/src/rewarder/storage/mod.rs @@ -7,6 +7,7 @@ use crate::{ rewarder::{epoch::Epoch, storage::manager::StorageManager, RewardingResult}, }; use nym_contracts_common::types::NaiveFloat; +use sqlx::sqlite::{SqliteAutoVacuum, SqliteSynchronous}; use sqlx::ConnectOptions; use std::{fmt::Debug, path::Path}; use time::{Date, OffsetDateTime}; @@ -23,6 +24,9 @@ impl RewarderStorage { #[instrument] pub async fn init + Debug>(database_path: P) -> Result { let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .disable_statement_logging(); diff --git a/tools/internal/testnet-manager/src/manager/storage/mod.rs b/tools/internal/testnet-manager/src/manager/storage/mod.rs index c61f52070e..7eb3df7cb1 100644 --- a/tools/internal/testnet-manager/src/manager/storage/mod.rs +++ 
b/tools/internal/testnet-manager/src/manager/storage/mod.rs @@ -10,7 +10,10 @@ use crate::{ storage::manager::StorageManager, }, }; -use sqlx::ConnectOptions; +use sqlx::{ + sqlite::{SqliteAutoVacuum, SqliteSynchronous}, + ConnectOptions, +}; use std::path::Path; use tracing::{error, info}; use url::Url; @@ -39,6 +42,9 @@ impl NetworkManagerStorage { // TODO: we can inject here more stuff based on our nym-api global config // struct. Maybe different pool size or timeout intervals? let opts = sqlx::sqlite::SqliteConnectOptions::new() + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .synchronous(SqliteSynchronous::Normal) + .auto_vacuum(SqliteAutoVacuum::Incremental) .filename(database_path) .create_if_missing(true) .disable_statement_logging(); From 61e872f033a7960c850fa9e63e6f33a29f28ae48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bogdan-=C8=98tefan=20Neac=C5=9Fu?= Date: Tue, 17 Dec 2024 15:18:11 +0200 Subject: [PATCH 15/64] Add windows to CI builds (#5269) * Add windows to CI builds * Fix win build for node status api * Fix win build for sdk * Fix win build for cred proxy --- .github/workflows/ci-build.yml | 2 +- nym-api/src/network_monitor/monitor/sender.rs | 1 + .../nym-credential-proxy/src/helpers.rs | 101 ++++++++++++- .../nym-credential-proxy/src/main.rs | 138 ++++-------------- .../nym-node-status-api/build.rs | 7 +- sdk/rust/nym-sdk/src/mixnet/client.rs | 10 +- 6 files changed, 146 insertions(+), 113 deletions(-) diff --git a/.github/workflows/ci-build.yml b/.github/workflows/ci-build.yml index 56fd0d15eb..d07f7edcbd 100644 --- a/.github/workflows/ci-build.yml +++ b/.github/workflows/ci-build.yml @@ -30,7 +30,7 @@ jobs: strategy: fail-fast: false matrix: - os: [ arc-ubuntu-20.04, custom-runner-mac-m1 ] + os: [ arc-ubuntu-20.04, custom-windows-11, custom-runner-mac-m1 ] runs-on: ${{ matrix.os }} env: CARGO_TERM_COLOR: always diff --git a/nym-api/src/network_monitor/monitor/sender.rs b/nym-api/src/network_monitor/monitor/sender.rs index 
172d6b3680..c20feadac1 100644 --- a/nym-api/src/network_monitor/monitor/sender.rs +++ b/nym-api/src/network_monitor/monitor/sender.rs @@ -183,6 +183,7 @@ impl PacketSender { gateway_packet_router, Some(fresh_gateway_client_data.bandwidth_controller.clone()), nym_statistics_common::clients::ClientStatsSender::new(None), + #[cfg(unix)] None, task_client, ); diff --git a/nym-credential-proxy/nym-credential-proxy/src/helpers.rs b/nym-credential-proxy/nym-credential-proxy/src/helpers.rs index 12ba427a70..8a528ff575 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/helpers.rs +++ b/nym-credential-proxy/nym-credential-proxy/src/helpers.rs @@ -1,8 +1,22 @@ // Copyright 2024 Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only +use nym_bin_common::bin_info; use time::OffsetDateTime; -use tracing::{debug, info, warn}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, error, info, warn}; + +use crate::{ + cli::Cli, + deposit_maker::DepositMaker, + error::VpnApiError, + http::{ + state::{ApiState, ChainClient}, + HttpServer, + }, + storage::VpnApiStorage, + tasks::StoragePruner, +}; pub struct LockTimer { created: OffsetDateTime, @@ -40,3 +54,88 @@ impl Default for LockTimer { } } } + +pub async fn wait_for_signal() { + use tokio::signal::unix::{signal, SignalKind}; + + // if we fail to setup the signals, we should just blow up + #[allow(clippy::expect_used)] + let mut sigterm = signal(SignalKind::terminate()).expect("Failed to setup SIGTERM channel"); + #[allow(clippy::expect_used)] + let mut sigquit = signal(SignalKind::quit()).expect("Failed to setup SIGQUIT channel"); + + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + info!("Received SIGINT"); + }, + _ = sigterm.recv() => { + info!("Received SIGTERM"); + } + _ = sigquit.recv() => { + info!("Received SIGQUIT"); + } + } +} + +fn build_sha_short() -> &'static str { + let bin_info = bin_info!(); + if bin_info.commit_sha.len() < 7 { + panic!("unavailable build commit sha") + } + + if bin_info.commit_sha == "VERGEN_IDEMPOTENT_OUTPUT" { + error!("the binary hasn't been built correctly. it doesn't have a commit sha information"); + return "unknown"; + } + + &bin_info.commit_sha[..7] +} + +pub(crate) async fn run_api(cli: Cli) -> Result<(), VpnApiError> { + // create the tasks + let bind_address = cli.bind_address(); + + let storage = VpnApiStorage::init(cli.persistent_storage_path()).await?; + let mnemonic = cli.mnemonic; + let auth_token = cli.http_auth_token; + let webhook_cfg = cli.webhook; + let chain_client = ChainClient::new(mnemonic)?; + let cancellation_token = CancellationToken::new(); + + let deposit_maker = DepositMaker::new( + build_sha_short(), + chain_client.clone(), + cli.max_concurrent_deposits, + cancellation_token.clone(), + ); + + let deposit_request_sender = deposit_maker.deposit_request_sender(); + let api_state = ApiState::new( + storage.clone(), + webhook_cfg, + chain_client, + deposit_request_sender, + cancellation_token.clone(), + ) + .await?; + let http_server = HttpServer::new( + bind_address, + api_state.clone(), + auth_token, + cancellation_token.clone(), + ); + let storage_pruner = StoragePruner::new(cancellation_token, storage); + + // spawn all the tasks + api_state.try_spawn(http_server.run_forever()); + api_state.try_spawn(storage_pruner.run_forever()); + api_state.try_spawn(deposit_maker.run_forever()); + + // wait for cancel signal (SIGINT, SIGTERM or SIGQUIT) + wait_for_signal().await; + + // cancel all the tasks and wait for all task to terminate + api_state.cancel_and_wait().await; + + Ok(()) +} diff --git 
a/nym-credential-proxy/nym-credential-proxy/src/main.rs b/nym-credential-proxy/nym-credential-proxy/src/main.rs index 71dd082b10..1ca9ccf316 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/main.rs +++ b/nym-credential-proxy/nym-credential-proxy/src/main.rs @@ -6,117 +6,30 @@ #![warn(clippy::todo)] #![warn(clippy::dbg_macro)] -use crate::cli::Cli; -use crate::deposit_maker::DepositMaker; -use crate::error::VpnApiError; -use crate::http::state::{ApiState, ChainClient}; -use crate::http::HttpServer; -use crate::storage::VpnApiStorage; -use crate::tasks::StoragePruner; -use clap::Parser; -use nym_bin_common::logging::setup_tracing_logger; -use nym_bin_common::{bin_info, bin_info_owned}; -use nym_network_defaults::setup_env; -use tokio_util::sync::CancellationToken; -use tracing::{error, info, trace}; - -pub mod cli; -pub mod config; -pub mod credentials; -mod deposit_maker; -pub mod error; -pub mod helpers; -pub mod http; -pub mod nym_api_helpers; -pub mod storage; -pub mod tasks; -mod webhook; - -pub async fn wait_for_signal() { - use tokio::signal::unix::{signal, SignalKind}; - - // if we fail to setup the signals, we should just blow up - #[allow(clippy::expect_used)] - let mut sigterm = signal(SignalKind::terminate()).expect("Failed to setup SIGTERM channel"); - #[allow(clippy::expect_used)] - let mut sigquit = signal(SignalKind::quit()).expect("Failed to setup SIGQUIT channel"); - - tokio::select! { - _ = tokio::signal::ctrl_c() => { - info!("Received SIGINT"); - }, - _ = sigterm.recv() => { - info!("Received SIGTERM"); - } - _ = sigquit.recv() => { - info!("Received SIGQUIT"); - } - } -} - -fn build_sha_short() -> &'static str { - let bin_info = bin_info!(); - if bin_info.commit_sha.len() < 7 { - panic!("unavailable build commit sha") - } - - if bin_info.commit_sha == "VERGEN_IDEMPOTENT_OUTPUT" { - error!("the binary hasn't been built correctly. it doesn't have a commit sha information"); - return "unknown"; +cfg_if::cfg_if! 
{ + if #[cfg(unix)] { + use crate::cli::Cli; + use clap::Parser; + use nym_bin_common::bin_info_owned; + use nym_bin_common::logging::setup_tracing_logger; + use nym_network_defaults::setup_env; + use tracing::{info, trace}; + + pub mod cli; + pub mod config; + pub mod credentials; + mod deposit_maker; + pub mod error; + pub mod helpers; + pub mod http; + pub mod nym_api_helpers; + pub mod storage; + pub mod tasks; + mod webhook; } - - &bin_info.commit_sha[..7] -} - -async fn run_api(cli: Cli) -> Result<(), VpnApiError> { - // create the tasks - let bind_address = cli.bind_address(); - - let storage = VpnApiStorage::init(cli.persistent_storage_path()).await?; - let mnemonic = cli.mnemonic; - let auth_token = cli.http_auth_token; - let webhook_cfg = cli.webhook; - let chain_client = ChainClient::new(mnemonic)?; - let cancellation_token = CancellationToken::new(); - - let deposit_maker = DepositMaker::new( - build_sha_short(), - chain_client.clone(), - cli.max_concurrent_deposits, - cancellation_token.clone(), - ); - - let deposit_request_sender = deposit_maker.deposit_request_sender(); - let api_state = ApiState::new( - storage.clone(), - webhook_cfg, - chain_client, - deposit_request_sender, - cancellation_token.clone(), - ) - .await?; - let http_server = HttpServer::new( - bind_address, - api_state.clone(), - auth_token, - cancellation_token.clone(), - ); - let storage_pruner = StoragePruner::new(cancellation_token, storage); - - // spawn all the tasks - api_state.try_spawn(http_server.run_forever()); - api_state.try_spawn(storage_pruner.run_forever()); - api_state.try_spawn(deposit_maker.run_forever()); - - // wait for cancel signal (SIGINT, SIGTERM or SIGQUIT) - wait_for_signal().await; - - // cancel all the tasks and wait for all task to terminate - api_state.cancel_and_wait().await; - - Ok(()) } +#[cfg(unix)] #[tokio::main] async fn main() -> anyhow::Result<()> { // std::env::set_var( @@ -134,6 +47,13 @@ async fn main() -> anyhow::Result<()> { let bin_info = 
bin_info_owned!(); info!("using the following version: {bin_info}"); - run_api(cli).await?; + helpers::run_api(cli).await?; Ok(()) } + +#[cfg(not(unix))] +#[tokio::main] +async fn main() -> anyhow::Result<()> { + eprintln!("This tool is only supported on Unix systems"); + std::process::exit(1) +} diff --git a/nym-node-status-api/nym-node-status-api/build.rs b/nym-node-status-api/nym-node-status-api/build.rs index 025e755088..3a1b933bc4 100644 --- a/nym-node-status-api/nym-node-status-api/build.rs +++ b/nym-node-status-api/nym-node-status-api/build.rs @@ -1,6 +1,8 @@ use anyhow::{anyhow, Result}; use sqlx::{Connection, SqliteConnection}; +#[cfg(target_family = "unix")] use std::fs::Permissions; +#[cfg(target_family = "unix")] use std::os::unix::fs::PermissionsExt; use tokio::{fs::File, io::AsyncWriteExt}; @@ -39,7 +41,10 @@ async fn write_db_path_to_file(out_dir: &str, db_filename: &str) -> anyhow::Resu file.write_all(format!("sqlite3 {}/{}", out_dir, db_filename).as_bytes()) .await?; + #[cfg(target_family = "unix")] file.set_permissions(Permissions::from_mode(0o755)) .await - .map_err(From::from) + .map_err(anyhow::Error::from)?; + + Ok(()) } diff --git a/sdk/rust/nym-sdk/src/mixnet/client.rs b/sdk/rust/nym-sdk/src/mixnet/client.rs index 598bbd4808..249bbd182c 100644 --- a/sdk/rust/nym-sdk/src/mixnet/client.rs +++ b/sdk/rust/nym-sdk/src/mixnet/client.rs @@ -37,6 +37,7 @@ use nym_validator_client::{nyxd, QueryHttpRpcNyxdClient, UserAgent}; use rand::rngs::OsRng; use std::path::Path; use std::path::PathBuf; +#[cfg(unix)] use std::sync::Arc; use url::Url; use zeroize::Zeroizing; @@ -56,6 +57,7 @@ pub struct MixnetClientBuilder { custom_shutdown: Option, force_tls: bool, user_agent: Option, + #[cfg(unix)] connection_fd_callback: Option>, // TODO: incorporate it properly into `MixnetClientStorage` (I will need it in wasm anyway) @@ -256,6 +258,7 @@ where self } + #[cfg(unix)] #[must_use] pub fn with_connection_fd_callback( mut self, @@ -293,7 +296,10 @@ where 
client.wait_for_gateway = self.wait_for_gateway; client.force_tls = self.force_tls; client.user_agent = self.user_agent; - client.connection_fd_callback = self.connection_fd_callback; + #[cfg(unix)] + if self.connection_fd_callback.is_some() { + client.connection_fd_callback = self.connection_fd_callback; + } client.forget_me = self.forget_me; Ok(client) } @@ -345,6 +351,7 @@ where user_agent: Option, /// Callback on the websocket fd as soon as the connection has been established + #[cfg(unix)] connection_fd_callback: Option>, forget_me: ForgetMe, @@ -397,6 +404,7 @@ where force_tls: false, custom_shutdown: None, user_agent: None, + #[cfg(unix)] connection_fd_callback: None, forget_me: Default::default(), }) From ad0c135d4c63024164b04e1438db6d920647fab3 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Tue, 17 Dec 2024 20:35:42 +0000 Subject: [PATCH 16/64] Bump credential proxy version --- Cargo.lock | 2 +- nym-credential-proxy/nym-credential-proxy/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d84ab9916c..9a314eed31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5027,7 +5027,7 @@ dependencies = [ [[package]] name = "nym-credential-proxy" -version = "0.1.6" +version = "0.1.7" dependencies = [ "anyhow", "async-trait", diff --git a/nym-credential-proxy/nym-credential-proxy/Cargo.toml b/nym-credential-proxy/nym-credential-proxy/Cargo.toml index 8069d51493..0d322ad34e 100644 --- a/nym-credential-proxy/nym-credential-proxy/Cargo.toml +++ b/nym-credential-proxy/nym-credential-proxy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nym-credential-proxy" -version = "0.1.6" +version = "0.1.7" authors.workspace = true repository.workspace = true homepage.workspace = true From cd86110b2c07dc27fe7c72539ca86fe8697b768c Mon Sep 17 00:00:00 2001 From: mfahampshire Date: Wed, 18 Dec 2024 10:37:45 +0000 Subject: [PATCH 17/64] Max/crunch patch docs (#5284) * patch changelog done * auto commit generated command files --- 
.../command-outputs/nym-api-build-info.md | 2 +- .../command-outputs/nym-client-build-info.md | 2 +- .../command-outputs/nym-node-build-info.md | 2 +- .../nym-socks5-client-build-info.md | 2 +- .../command-outputs/nymvisor-build-info.md | 2 +- .../developers/clients/socks5/commands.mdx | 2 +- .../developers/clients/websocket/commands.mdx | 2 +- .../docs/pages/operators/changelog.mdx | 83 ++++++++++++------- 8 files changed, 58 insertions(+), 39 deletions(-) diff --git a/documentation/docs/components/outputs/command-outputs/nym-api-build-info.md b/documentation/docs/components/outputs/command-outputs/nym-api-build-info.md index c7ef59a43c..f8b3bc0089 100644 --- a/documentation/docs/components/outputs/command-outputs/nym-api-build-info.md +++ b/documentation/docs/components/outputs/command-outputs/nym-api-build-info.md @@ -1,7 +1,7 @@ ```sh Binary Name: nym-api -Build Timestamp: 2024-12-10T11:37:07.839071360Z +Build Timestamp: 2024-12-18T10:26:40.905460408Z Build Version: 1.1.46 Commit SHA: 62045d76b32265f6a5e6358ab7aebbc827d47dcd Commit Date: 2024-11-26T11:53:05.000000000+01:00 diff --git a/documentation/docs/components/outputs/command-outputs/nym-client-build-info.md b/documentation/docs/components/outputs/command-outputs/nym-client-build-info.md index 660d02d4a2..e8b6b4e861 100644 --- a/documentation/docs/components/outputs/command-outputs/nym-client-build-info.md +++ b/documentation/docs/components/outputs/command-outputs/nym-client-build-info.md @@ -1,7 +1,7 @@ ```sh Binary Name: nym-client -Build Timestamp: 2024-12-10T11:37:07.839071360Z +Build Timestamp: 2024-12-18T10:26:40.905460408Z Build Version: 1.1.44 Commit SHA: 62045d76b32265f6a5e6358ab7aebbc827d47dcd Commit Date: 2024-11-26T11:53:05.000000000+01:00 diff --git a/documentation/docs/components/outputs/command-outputs/nym-node-build-info.md b/documentation/docs/components/outputs/command-outputs/nym-node-build-info.md index 7ed28e4de5..201423087e 100644 --- 
a/documentation/docs/components/outputs/command-outputs/nym-node-build-info.md +++ b/documentation/docs/components/outputs/command-outputs/nym-node-build-info.md @@ -1,7 +1,7 @@ ```sh Binary Name: nym-node -Build Timestamp: 2024-12-10T11:37:07.839071360Z +Build Timestamp: 2024-12-18T10:26:40.905460408Z Build Version: 1.1.11 Commit SHA: 62045d76b32265f6a5e6358ab7aebbc827d47dcd Commit Date: 2024-11-26T11:53:05.000000000+01:00 diff --git a/documentation/docs/components/outputs/command-outputs/nym-socks5-client-build-info.md b/documentation/docs/components/outputs/command-outputs/nym-socks5-client-build-info.md index cb9ed56157..2727bfe476 100644 --- a/documentation/docs/components/outputs/command-outputs/nym-socks5-client-build-info.md +++ b/documentation/docs/components/outputs/command-outputs/nym-socks5-client-build-info.md @@ -1,7 +1,7 @@ ```sh Binary Name: nym-socks5-client -Build Timestamp: 2024-12-10T11:37:07.839071360Z +Build Timestamp: 2024-12-18T10:26:40.905460408Z Build Version: 1.1.44 Commit SHA: 62045d76b32265f6a5e6358ab7aebbc827d47dcd Commit Date: 2024-11-26T11:53:05.000000000+01:00 diff --git a/documentation/docs/components/outputs/command-outputs/nymvisor-build-info.md b/documentation/docs/components/outputs/command-outputs/nymvisor-build-info.md index a373c4b930..b5f79a2571 100644 --- a/documentation/docs/components/outputs/command-outputs/nymvisor-build-info.md +++ b/documentation/docs/components/outputs/command-outputs/nymvisor-build-info.md @@ -1,7 +1,7 @@ ```sh Binary Name: nymvisor -Build Timestamp: 2024-12-10T11:37:07.839071360Z +Build Timestamp: 2024-12-18T10:26:40.905460408Z Build Version: 0.1.9 Commit SHA: 62045d76b32265f6a5e6358ab7aebbc827d47dcd Commit Date: 2024-11-26T11:53:05.000000000+01:00 diff --git a/documentation/docs/pages/developers/clients/socks5/commands.mdx b/documentation/docs/pages/developers/clients/socks5/commands.mdx index 2ecb2e7e49..d48d0f9300 100644 --- a/documentation/docs/pages/developers/clients/socks5/commands.mdx +++ 
b/documentation/docs/pages/developers/clients/socks5/commands.mdx @@ -152,7 +152,7 @@ Example output: ```sh Binary Name: nym-socks5-client -Build Timestamp: 2024-12-10T11:37:07.839071360Z +Build Timestamp: 2024-12-18T10:26:40.905460408Z Build Version: 1.1.44 Commit SHA: 62045d76b32265f6a5e6358ab7aebbc827d47dcd Commit Date: 2024-11-26T11:53:05.000000000+01:00 diff --git a/documentation/docs/pages/developers/clients/websocket/commands.mdx b/documentation/docs/pages/developers/clients/websocket/commands.mdx index e7a30285cb..b7d67e26cd 100644 --- a/documentation/docs/pages/developers/clients/websocket/commands.mdx +++ b/documentation/docs/pages/developers/clients/websocket/commands.mdx @@ -117,7 +117,7 @@ Example output: ```sh Binary Name: nym-client -Build Timestamp: 2024-12-10T11:37:07.839071360Z +Build Timestamp: 2024-12-18T10:26:40.905460408Z Build Version: 1.1.44 Commit SHA: 62045d76b32265f6a5e6358ab7aebbc827d47dcd Commit Date: 2024-11-26T11:53:05.000000000+01:00 diff --git a/documentation/docs/pages/operators/changelog.mdx b/documentation/docs/pages/operators/changelog.mdx index a649d8de4f..7fd6aa2186 100644 --- a/documentation/docs/pages/operators/changelog.mdx +++ b/documentation/docs/pages/operators/changelog.mdx @@ -39,6 +39,25 @@ This page displays a full list of all the changes during our release cycle from +## `v2024.14-crunch-patched` +Patch for the `v2024.14-crunch` release. [Fixes an issue](https://github.com/nymtech/nym/commit/b656003306184061588f25df0b8b4555b41157f4) so that only one private IP pair is allowed, and resolves compatibility issues between nym-nodes and older clients. 
+ +- [Release binaries](https://github.com/nymtech/nym/releases/tag/nym-binaries-v2024.14-crunch-patched) +- [`nym-node`](nodes/nym-node.mdx) version `1.2.1` + +```sh +nym-node +Binary Name: nym-node +Build Timestamp: 2024-12-18T10:18:42.978852430Z +Build Version: 1.2.1 +Commit SHA: 8d5a41a790e96ae5e821964865affaa7d3343eab +Commit Date: 2024-12-18T11:07:49.000000000+01:00 +Commit Branch: HEAD +rustc Version: 1.83.0 +rustc Channel: stable +cargo Profile: release +``` + ## `v2024.14-crunch` - [Release binaries](https://github.com/nymtech/nym/releases/tag/nym-binaries-v2024.14-crunch) @@ -63,7 +82,7 @@ cargo Profile: release - [Bump elliptic from `6.5.4` to `6.5.7` in /testnet-faucet](https://github.com/nymtech/nym/pull/4768): Bumps [elliptic](https://github.com/indutny/elliptic) from `6.5.4` to `6.5.7`. -- [build(deps): bump micromatch from `4.0.4` to `4.0.8` in /nym-wallet/webdriver](https://github.com/nymtech/nym/pull/4789): Bumps [micromatch](https://github.com/micromatch/micromatch) from `4.0.4` to `4.0.8`. +- [build(deps): bump micromatch from `4.0.4` to `4.0.8` in /nym-wallet/webdriver](https://github.com/nymtech/nym/pull/4789): Bumps [micromatch](https://github.com/micromatch/micromatch) from `4.0.4` to `4.0.8`. - [build(deps): bump axios from 1.6.0 to 1.7.5 in /nym-api/tests](https://github.com/nymtech/nym/pull/4790) Bumps [axios](https://github.com/axios/axios) from 1.6.0 to 1.7.5. 
@@ -80,26 +99,26 @@ cargo Profile: release | Package | From | To | | --- | --- | --- | | [anyhow](https://github.com/dtolnay/anyhow) | `1.0.89` | `1.0.90` | -| [clap](https://github.com/clap-rs/clap) | `4.5.18` | `4.5.20` | -| [clap_complete](https://github.com/clap-rs/clap) | `4.5.29` | `4.5.33` | -| [pin-project](https://github.com/taiki-e/pin-project) | `1.1.5` | `1.1.6` | -| [serde](https://github.com/serde-rs/serde) | `1.0.210` | `1.0.211` | -| [serde_json](https://github.com/serde-rs/json) | `1.0.128` | `1.0.132` | -| [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) | `0.2.93` | `0.2.95` | -| [wasm-bindgen-futures](https://github.com/rustwasm/wasm-bindgen) | `0.4.43` | `0.4.45` | -| [web-sys](https://github.com/rustwasm/wasm-bindgen) | `0.3.70` | `0.3.72` | +| [clap](https://github.com/clap-rs/clap) | `4.5.18` | `4.5.20` | +| [clap_complete](https://github.com/clap-rs/clap) | `4.5.29` | `4.5.33` | +| [pin-project](https://github.com/taiki-e/pin-project) | `1.1.5` | `1.1.6` | +| [serde](https://github.com/serde-rs/serde) | `1.0.210` | `1.0.211` | +| [serde_json](https://github.com/serde-rs/json) | `1.0.128` | `1.0.132` | +| [wasm-bindgen](https://github.com/rustwasm/wasm-bindgen) | `0.2.93` | `0.2.95` | +| [wasm-bindgen-futures](https://github.com/rustwasm/wasm-bindgen) | `0.4.43` | `0.4.45` | +| [web-sys](https://github.com/rustwasm/wasm-bindgen) | `0.3.70` | `0.3.72` | | Updates `anyhow` | `1.0.89` | `1.0.90` | - [[Product Data] Introduce data persistence on gateways](https://github.com/nymtech/nym/pull/5022): This PR builds on top of [\#4974](https://github.com/nymtech/nym/pull/4974), not changing the behavior of the data collection, but persisting them in a sqlite database so they can be kept across restarts and crashes. It also leave the door open for other stats module to use that storage if needed. 
Here are some points of interest: - New [`gateway_stats_storage`](https://github.com/nymtech/nym/tree/simon/gateway_stats_persistence/common/gateway-stats-storage) crate - [Config migration](https://github.com/nymtech/nym/blob/simon/gateway_stats_persistence/nym-node/src/config/old_configs/old_config_v4.rs) resulting from the added database. - - Resulting changes in the [`statistics`](https://github.com/nymtech/nym/tree/simon/gateway_stats_persistence/gateway/src/node/statistics) module to account the new storage system - + - Resulting changes in the [`statistics`](https://github.com/nymtech/nym/tree/simon/gateway_stats_persistence/gateway/src/node/statistics) module to account the new storage system + - [Integrate nym-credential-proxy into workspace](https://github.com/nymtech/nym/pull/5027): Integrate `nym-credential-proxy` into the main workspace - [Authenticator CLI client mode](https://github.com/nymtech/nym/pull/5044) -- [Node Status API](https://github.com/nymtech/nym/pull/5050): merging a long-diverged feature branch - all commits here were their own merge requests +- [Node Status API](https://github.com/nymtech/nym/pull/5050): merging a long-diverged feature branch - all commits here were their own merge requests - [IPv6 support for wireguard](https://github.com/nymtech/nym/pull/5059) @@ -109,23 +128,23 @@ cargo Profile: release - [chore: ecash contract migration to remove unused 'redemption_gateway_share'](https://github.com/nymtech/nym/pull/5104) -- [[Product Data] Client-side stats collection ](https://github.com/nymtech/nym/pull/5107): The goal is to anonymously gather stats from nym-clients. These stats will be sent through the mixnet to a Nym run service provider that will gather them. This PR sets the scene to send stats in a mixnet message to an address. The address can be set when the client is created. Current stats include some infos on sent packets along with platform information. 
If a receiving address is set, the client will send a mixnet packet every 5min to this address. Otherwise, nothing happens and the client runs as usual. +- [[Product Data] Client-side stats collection ](https://github.com/nymtech/nym/pull/5107): The goal is to anonymously gather stats from nym-clients. These stats will be sent through the mixnet to a Nym run service provider that will gather them. This PR sets the scene to send stats in a mixnet message to an address. The address can be set when the client is created. Current stats include some infos on sent packets along with platform information. If a receiving address is set, the client will send a mixnet packet every 5min to this address. Otherwise, nothing happens and the client runs as usual. - [Send mixnet packet stats using task client](https://github.com/nymtech/nym/pull/5109) - + - [Add granular log on nym-node](https://github.com/nymtech/nym/pull/5111) and make use of it for `defguard_wireguard_rs` big info logs -- [Rewarding for ticketbook issuance](https://github.com/nymtech/nym/pull/5112): Revamps the current validator rewarder to allow for rewards for issuing the zk-nym ticketbooks. - -- [[Product Data] Add stats reporting configuration in client config ](https://github.com/nymtech/nym/pull/5115): Adds the stats reporting address to client configs. It can be set in the config file, as a CLI argument and as an env var in a `.env` file. As the stats reporting config in now in the `DebugConfig`, the `StatsReportingConfig` is no longer required, making the propagation of these changes more readable +- [Rewarding for ticketbook issuance](https://github.com/nymtech/nym/pull/5112): Revamps the current validator rewarder to allow for rewards for issuing the zk-nym ticketbooks. + +- [[Product Data] Add stats reporting configuration in client config ](https://github.com/nymtech/nym/pull/5115): Adds the stats reporting address to client configs. 
It can be set in the config file, as a CLI argument and as an env var in a `.env` file. As the stats reporting config in now in the `DebugConfig`, the `StatsReportingConfig` is no longer required, making the propagation of these changes more readable - [config score](https://github.com/nymtech/nym/pull/5117): introduces a concept of a `config_score` to a nym node which influences performance and thus rewarding amounts and chances of being in the rewarded set. Currently it's influenced by the following factors: - Accepting terms and conditions (not accepted: 0) - Exposing self-described API (not exposed: 0) - Running "nym-node" binary (legacy binary: 0) - Number of versions behind the core (`score = 0.995 ^ (X * versions_behind ^ 1.65)`) - - The old performance is now treated as `routing_score` - - the "new" performance = `routing_score * config_score` + - The old performance is now treated as `routing_score` + - the "new" performance = `routing_score * config_score` - [Add Dockerfile and add env vars for clap arguments](https://github.com/nymtech/nym/pull/5118) @@ -139,7 +158,7 @@ cargo Profile: release - Added flag to Agent to generate keypairs - Agent requests are signed by agent - Server-side requests are checked for authentication - + - [CI: reduce jobs running on cluster](https://github.com/nymtech/nym/pull/5132) - [Removed ci-nym-api-tests.yml which was running outdated (and broken) tests](https://github.com/nymtech/nym/pull/5133) @@ -181,7 +200,7 @@ cargo Profile: release - Adds new endpoints to get: - `nym-nodes` (list + by id) - account balance + delegations + rewarding + vesting - + - Explorer UI (NextJS) - List of nym-nodes - Remove service providers routes (Harbour Master shows these) @@ -208,7 +227,7 @@ cargo Profile: release - [`nym-api` NMv1 adjustments](https://github.com/nymtech/nym/pull/5209) -- [Nmv2 add debug config](https://github.com/nymtech/nym/pull/5212): Adds debug config to disable poisson process, cover traffic and min performance 
filtering +- [Nmv2 add debug config](https://github.com/nymtech/nym/pull/5212): Adds debug config to disable poisson process, cover traffic and min performance filtering - [introduce UNSTABLE endpoints for returning network monitor run details](https://github.com/nymtech/nym/pull/5214) @@ -226,14 +245,14 @@ cargo Profile: release - [Correct IPv6 address generation](https://github.com/nymtech/nym/pull/5113) -- [bugfix: don't send empty BankMsg in ecash contract](https://github.com/nymtech/nym/pull/5121): If ticketbook prices were to be set so low the resultant redemption would have created `BankMsg` with value of 0, that message is no longer going to be sent +- [bugfix: don't send empty BankMsg in ecash contract](https://github.com/nymtech/nym/pull/5121): If ticketbook prices were to be set so low the resultant redemption would have created `BankMsg` with value of 0, that message is no longer going to be sent - [fix: validator-rewarder GH job](https://github.com/nymtech/nym/pull/5151) -- [bugfix: correctly expose ecash-related data on nym-api](https://github.com/nymtech/nym/pull/5155): This PR makes fixes to ecash-related endpoints on `nym-api` +- [bugfix: correctly expose ecash-related data on nym-api](https://github.com/nymtech/nym/pull/5155): This PR makes fixes to ecash-related endpoints on `nym-api` - global data (such as aggregated signatures and keys) are actually always available by all apis - global data (such as aggregated signatures and keys) are actually always available by all apis - + - [bugfix: use default value for verloc config when deserialising missing values](https://github.com/nymtech/nym/pull/5177) - [bugfix: fixed nym-node config migrations (again)](https://github.com/nymtech/nym/pull/5179) @@ -263,7 +282,7 @@ cargo Profile: release - `nym-node` has now implemented [IPv6 support for wireguard](https://github.com/nymtech/nym/pull/5059) -- [`network_tunnel_manager.sh` updated](network): run the commands below to make sure +- 
[`network_tunnel_manager.sh` updated](network): run the commands below to make sure
@@ -287,7 +306,7 @@ $HOME/nym-binaries/network_tunnel_manager.sh configure_dns_and_icmp_wg ; \ $HOME/nym-binaries/network_tunnel_manager.sh adjust_ip_forwarding ; \ $HOME/nym-binaries/network_tunnel_manager.sh check_ipv6_ipv4_forwarding; \ -systemctl daemon-reload && service nym-node restart && journalctl -u nym-node -f +systemctl daemon-reload && service nym-node restart && journalctl -u nym-node -f ``` Then run the jokes in a new window for control @@ -307,7 +326,7 @@ $HOME/nym-binaries/network_tunnel_manager.sh joke_through_wg_tunnel ## `magura-drift` -Second patch to `v2024.13-magura` release version. +Second patch to `v2024.13-magura` release version. - [Release binaries](https://github.com/nymtech/nym/releases/tag/nym-binaries-v2024.13-magura-drift) - [`nym-node`](nodes/nym-node.mdx) version `1.1.12` @@ -325,7 +344,7 @@ rustc Channel: stable cargo Profile: release ``` -- This patch adds a peer storage manager to fix issues causing external clients to be blocked, ensuring they can successfully connect to different nodes. +- This patch adds a peer storage manager to fix issues causing external clients to be blocked, ensuring they can successfully connect to different nodes. ## `v2024.13-magura-patched` @@ -347,7 +366,7 @@ cargo Profile: release -After changes coming along with `v2024.13-magura` (`nym-node v1.1.10`), Nym Explorer is no longer picking all values correctly. Instead of fixing this outdated explorer, we are working on a new one, coming out soon. +After changes coming along with `v2024.13-magura` (`nym-node v1.1.10`), Nym Explorer is no longer picking all values correctly. Instead of fixing this outdated explorer, we are working on a new one, coming out soon. [Nym Harbourmaster](https://harbourmaster.nymtech.net) has cache of 90min, expect your values to be updated with delay. We are aware of some issues with Nym Harbourmaster and working hard to resolve them in the upcoming explorer v2. 
To check your routing values in real time, you can use [`nym-gateway-probe`](nodes/performance-and-testing/gateway-probe). @@ -428,7 +447,7 @@ Be careful when running commands within sqlite database. ```sh cd $HOME/.nym/nym-nodes//data ``` -- Enter the database: +- Enter the database: ```sh sqlite3 clients.sqlite ``` @@ -448,7 +467,7 @@ select * from wireguard_peer where public_key like "%%"; ###### 3. Exit and restart the service -Run `.quit` and: +Run `.quit` and: ```sh systemctl restart nym-node.service ``` From d03302c39141f7f3c3fb6ea867bb8f574ac6894f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20H=C3=A4ggblad?= Date: Wed, 18 Dec 2024 12:36:10 +0100 Subject: [PATCH 18/64] http-api-client: deduplicate code (#5267) * Deduplicate code * Remove unneeded async --- common/http-api-client/src/lib.rs | 316 +++++++++++++++--------------- 1 file changed, 158 insertions(+), 158 deletions(-) diff --git a/common/http-api-client/src/lib.rs b/common/http-api-client/src/lib.rs index c826fe295b..1792a85a22 100644 --- a/common/http-api-client/src/lib.rs +++ b/common/http-api-client/src/lib.rs @@ -192,49 +192,39 @@ impl Client { &self.base_url } - pub fn create_get_request( + pub fn create_request( &self, + method: reqwest::Method, path: PathSegments<'_>, params: Params<'_, K, V>, + json_body: Option<&B>, ) -> RequestBuilder where + B: Serialize + ?Sized, K: AsRef, V: AsRef, { let url = sanitize_url(&self.base_url, path, params); - self.reqwest_client.get(url) + let mut request = self.reqwest_client.request(method.clone(), url); + + if let Some(body) = json_body { + request = request.json(body); + } + + request } - #[instrument(level = "debug", skip_all, fields(path=?path))] - async fn send_get_request( + pub fn create_get_request( &self, path: PathSegments<'_>, params: Params<'_, K, V>, - ) -> Result> + ) -> RequestBuilder where K: AsRef, V: AsRef, - E: Display, { - tracing::trace!("Sending GET request"); let url = sanitize_url(&self.base_url, path, params); - - 
#[cfg(target_arch = "wasm32")] - { - Ok( - wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client.get(url).send(), - ) - .await - .map_err(|_timeout| HttpClientError::RequestTimeout)??, - ) - } - - #[cfg(not(target_arch = "wasm32"))] - { - Ok(self.reqwest_client.get(url).send().await?) - } + self.reqwest_client.get(url) } pub fn create_post_request( @@ -252,91 +242,113 @@ impl Client { self.reqwest_client.post(url).json(json_body) } - async fn send_post_request( + pub fn create_delete_request( &self, path: PathSegments<'_>, params: Params<'_, K, V>, - json_body: &B, - ) -> Result> + ) -> RequestBuilder where - B: Serialize + ?Sized, K: AsRef, V: AsRef, - E: Display, { let url = sanitize_url(&self.base_url, path, params); - - #[cfg(target_arch = "wasm32")] - { - Ok(wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client.post(url).json(json_body).send(), - ) - .await - .map_err(|_timeout| HttpClientError::RequestTimeout)??) - } - - #[cfg(not(target_arch = "wasm32"))] - { - Ok(self.reqwest_client.post(url).json(json_body).send().await?) 
- } + self.reqwest_client.delete(url) } - pub fn create_delete_request( + pub fn create_patch_request( &self, path: PathSegments<'_>, params: Params<'_, K, V>, + json_body: &B, ) -> RequestBuilder where + B: Serialize + ?Sized, K: AsRef, V: AsRef, { let url = sanitize_url(&self.base_url, path, params); - self.reqwest_client.delete(url) + self.reqwest_client.patch(url).json(json_body) } - pub async fn send_delete_request( + async fn send_request( &self, + method: reqwest::Method, path: PathSegments<'_>, params: Params<'_, K, V>, + json_body: Option<&B>, ) -> Result> where + B: Serialize + ?Sized, K: AsRef, V: AsRef, E: Display, { - tracing::trace!("Sending DELETE request"); let url = sanitize_url(&self.base_url, path, params); + let mut request = self.reqwest_client.request(method.clone(), url); + + if let Some(body) = json_body { + request = request.json(body); + } + #[cfg(target_arch = "wasm32")] { - Ok(wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client.delete(url).send(), + Ok( + wasmtimer::tokio::timeout(self.request_timeout, request.send()) + .await + .map_err(|_timeout| HttpClientError::RequestTimeout)??, ) - .await - .map_err(|_timeout| HttpClientError::RequestTimeout)??) } #[cfg(not(target_arch = "wasm32"))] { - Ok(self.reqwest_client.delete(url).send().await?) + Ok(request.send().await?) 
} } - pub fn create_patch_request( + #[instrument(level = "debug", skip_all, fields(path=?path))] + async fn send_get_request( + &self, + path: PathSegments<'_>, + params: Params<'_, K, V>, + ) -> Result> + where + K: AsRef, + V: AsRef, + E: Display, + { + self.send_request(reqwest::Method::GET, path, params, None::<&()>) + .await + } + + async fn send_post_request( &self, path: PathSegments<'_>, params: Params<'_, K, V>, json_body: &B, - ) -> RequestBuilder + ) -> Result> where B: Serialize + ?Sized, K: AsRef, V: AsRef, + E: Display, { - let url = sanitize_url(&self.base_url, path, params); - self.reqwest_client.patch(url).json(json_body) + self.send_request(reqwest::Method::POST, path, params, Some(json_body)) + .await + } + + pub async fn send_delete_request( + &self, + path: PathSegments<'_>, + params: Params<'_, K, V>, + ) -> Result> + where + K: AsRef, + V: AsRef, + E: Display, + { + self.send_request(reqwest::Method::DELETE, path, params, None::<&()>) + .await } pub async fn send_patch_request( @@ -351,27 +363,8 @@ impl Client { V: AsRef, E: Display, { - let url = sanitize_url(&self.base_url, path, params); - - #[cfg(target_arch = "wasm32")] - { - Ok(wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client.patch(url).json(json_body).send(), - ) + self.send_request(reqwest::Method::PATCH, path, params, Some(json_body)) .await - .map_err(|_timeout| HttpClientError::RequestTimeout)??) - } - - #[cfg(not(target_arch = "wasm32"))] - { - Ok(self - .reqwest_client - .patch(url) - .json(json_body) - .send() - .await?) 
- } } #[instrument(level = "debug", skip_all)] @@ -439,36 +432,50 @@ impl Client { parse_response(res, true).await } - #[instrument(level = "debug", skip_all)] - pub async fn get_json_endpoint(&self, endpoint: S) -> Result> + async fn call_json_endpoint( + &self, + method: reqwest::Method, + endpoint: S, + json_body: Option<&B>, + ) -> Result> where + B: Serialize + ?Sized, for<'a> T: Deserialize<'a>, E: Display + DeserializeOwned, S: AsRef, { + let mut request = self + .reqwest_client + .request(method.clone(), self.base_url.join(endpoint.as_ref())?); + + if let Some(body) = json_body { + request = request.json(body); + } + #[cfg(target_arch = "wasm32")] let res = { - wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client - .get(self.base_url.join(endpoint.as_ref())?) - .send(), - ) - .await - .map_err(|_timeout| HttpClientError::RequestTimeout)?? + wasmtimer::tokio::timeout(self.request_timeout, request.send()) + .await + .map_err(|_timeout| HttpClientError::RequestTimeout)?? }; #[cfg(not(target_arch = "wasm32"))] - let res = { - self.reqwest_client - .get(self.base_url.join(endpoint.as_ref())?) - .send() - .await? - }; + let res = { request.send().await? }; parse_response(res, false).await } + #[instrument(level = "debug", skip_all)] + pub async fn get_json_endpoint(&self, endpoint: S) -> Result> + where + for<'a> T: Deserialize<'a>, + E: Display + DeserializeOwned, + S: AsRef, + { + self.call_json_endpoint(reqwest::Method::GET, endpoint, None::<&()>) + .await + } + pub async fn post_json_endpoint( &self, endpoint: S, @@ -480,29 +487,8 @@ impl Client { E: Display + DeserializeOwned, S: AsRef, { - #[cfg(target_arch = "wasm32")] - let res = { - wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client - .post(self.base_url.join(endpoint.as_ref())?) - .json(json_body) - .send(), - ) + self.call_json_endpoint(reqwest::Method::POST, endpoint, Some(json_body)) .await - .map_err(|_timeout| HttpClientError::RequestTimeout)?? 
- }; - - #[cfg(not(target_arch = "wasm32"))] - let res = { - self.reqwest_client - .post(self.base_url.join(endpoint.as_ref())?) - .json(json_body) - .send() - .await? - }; - - parse_response(res, true).await } pub async fn delete_json_endpoint(&self, endpoint: S) -> Result> @@ -511,27 +497,8 @@ impl Client { E: Display + DeserializeOwned, S: AsRef, { - #[cfg(target_arch = "wasm32")] - let res = { - wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client - .delete(self.base_url.join(endpoint.as_ref())?) - .send(), - ) + self.call_json_endpoint(reqwest::Method::DELETE, endpoint, None::<&()>) .await - .map_err(|_timeout| HttpClientError::RequestTimeout)?? - }; - - #[cfg(not(target_arch = "wasm32"))] - let res = { - self.reqwest_client - .delete(self.base_url.join(endpoint.as_ref())?) - .send() - .await? - }; - - parse_response(res, false).await } pub async fn patch_json_endpoint( @@ -545,29 +512,8 @@ impl Client { E: Display + DeserializeOwned, S: AsRef, { - #[cfg(target_arch = "wasm32")] - let res = { - wasmtimer::tokio::timeout( - self.request_timeout, - self.reqwest_client - .patch(self.base_url.join(endpoint.as_ref())?) - .json(json_body) - .send(), - ) + self.call_json_endpoint(reqwest::Method::PATCH, endpoint, Some(json_body)) .await - .map_err(|_timeout| HttpClientError::RequestTimeout)?? - }; - - #[cfg(not(target_arch = "wasm32"))] - let res = { - self.reqwest_client - .patch(self.base_url.join(endpoint.as_ref())?) - .json(json_body) - .send() - .await? - }; - - parse_response(res, true).await } } @@ -612,6 +558,19 @@ pub trait ApiClient { V: AsRef + Sync, E: Display + DeserializeOwned; + async fn patch_json( + &self, + path: PathSegments<'_>, + params: Params<'_, K, V>, + json_body: &B, + ) -> Result> + where + B: Serialize + ?Sized + Sync, + for<'a> T: Deserialize<'a>, + K: AsRef + Sync, + V: AsRef + Sync, + E: Display + DeserializeOwned; + /// `get` json data from the provided absolute endpoint, i.e. 
for example `"/api/v1/mixnodes?since=12345"` async fn get_json_from(&self, endpoint: S) -> Result> where @@ -635,6 +594,17 @@ pub trait ApiClient { for<'a> T: Deserialize<'a>, E: Display + DeserializeOwned, S: AsRef + Sync + Send; + + async fn patch_json_data_at( + &self, + endpoint: S, + json_body: &B, + ) -> Result> + where + B: Serialize + ?Sized + Sync, + for<'a> T: Deserialize<'a>, + E: Display + DeserializeOwned, + S: AsRef + Sync + Send; } #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] @@ -684,6 +654,22 @@ impl ApiClient for Client { self.delete_json(path, params).await } + async fn patch_json( + &self, + path: PathSegments<'_>, + params: Params<'_, K, V>, + json_body: &B, + ) -> Result> + where + B: Serialize + ?Sized + Sync, + for<'a> T: Deserialize<'a>, + K: AsRef + Sync, + V: AsRef + Sync, + E: Display + DeserializeOwned, + { + self.patch_json(path, params, json_body).await + } + async fn get_json_from(&self, endpoint: S) -> Result> where for<'a> T: Deserialize<'a>, @@ -715,6 +701,20 @@ impl ApiClient for Client { { self.delete_json_endpoint(endpoint).await } + + async fn patch_json_data_at( + &self, + endpoint: S, + json_body: &B, + ) -> Result> + where + B: Serialize + ?Sized + Sync, + for<'a> T: Deserialize<'a>, + E: Display + DeserializeOwned, + S: AsRef + Sync + Send, + { + self.patch_json_endpoint(endpoint, json_body).await + } } // utility function that should solve the double slash problem in API urls forever. 
From 3695332036e35b99b04a14cd9ffe55717930515b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bogdan-=C8=98tefan=20Neac=C5=9Fu?= Date: Wed, 18 Dec 2024 15:03:21 +0200 Subject: [PATCH 19/64] Move tun constants to network defaults (#5286) --- common/network-defaults/src/constants.rs | 11 +++++++++++ service-providers/ip-packet-router/src/constants.rs | 9 --------- .../ip-packet-router/src/ip_packet_router.rs | 13 +++++++------ .../ip-packet-router/src/util/generate_new_ip.rs | 7 ++++--- 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/common/network-defaults/src/constants.rs b/common/network-defaults/src/constants.rs index 6723b45e41..e9b70f54e3 100644 --- a/common/network-defaults/src/constants.rs +++ b/common/network-defaults/src/constants.rs @@ -57,3 +57,14 @@ pub mod wireguard { pub const WG_TUN_DEVICE_IP_ADDRESS_V6: Ipv6Addr = Ipv6Addr::new(0xfc01, 0, 0, 0, 0, 0, 0, 0x1); // fc01::1 pub const WG_TUN_DEVICE_NETMASK_V6: u8 = 112; } + +pub mod mixnet_vpn { + use std::net::{Ipv4Addr, Ipv6Addr}; + + // The interface used to route traffic + pub const NYM_TUN_BASE_NAME: &str = "nymtun"; + pub const NYM_TUN_DEVICE_ADDRESS_V4: Ipv4Addr = Ipv4Addr::new(10, 0, 0, 1); + pub const NYM_TUN_DEVICE_NETMASK_V4: Ipv4Addr = Ipv4Addr::new(255, 255, 0, 0); + pub const NYM_TUN_DEVICE_ADDRESS_V6: Ipv6Addr = Ipv6Addr::new(0xfc00, 0, 0, 0, 0, 0, 0, 0x1); // fc00::1 + pub const NYM_TUN_DEVICE_NETMASK_V6: &str = "112"; +} diff --git a/service-providers/ip-packet-router/src/constants.rs b/service-providers/ip-packet-router/src/constants.rs index 73e744890d..bfee848df2 100644 --- a/service-providers/ip-packet-router/src/constants.rs +++ b/service-providers/ip-packet-router/src/constants.rs @@ -1,14 +1,5 @@ -use std::net::{Ipv4Addr, Ipv6Addr}; use std::time::Duration; -// The interface used to route traffic -pub const TUN_BASE_NAME: &str = "nymtun"; -pub const TUN_DEVICE_ADDRESS_V4: Ipv4Addr = Ipv4Addr::new(10, 0, 0, 1); -pub const TUN_DEVICE_NETMASK_V4: Ipv4Addr = Ipv4Addr::new(255, 
255, 0, 0); -pub const TUN_DEVICE_ADDRESS_V6: Ipv6Addr = Ipv6Addr::new(0xfc00, 0, 0, 0, 0, 0, 0, 0x1); // fc00::1 - -pub const TUN_DEVICE_NETMASK_V6: &str = "112"; - // We routinely check if any clients needs to be disconnected at this interval pub(crate) const DISCONNECT_TIMER_INTERVAL: Duration = Duration::from_secs(10); diff --git a/service-providers/ip-packet-router/src/ip_packet_router.rs b/service-providers/ip-packet-router/src/ip_packet_router.rs index cff86d6fb0..a0f185cc76 100644 --- a/service-providers/ip-packet-router/src/ip_packet_router.rs +++ b/service-providers/ip-packet-router/src/ip_packet_router.rs @@ -119,7 +119,7 @@ impl IpPacketRouter { log::error!("ip packet router service provider is not yet supported on this platform"); Ok(()) } else { - unimplemented!("service provider is not yet supported on this platform") + todo!("service provider is not yet supported on this platform") } } @@ -145,11 +145,12 @@ impl IpPacketRouter { // Create the TUN device that we interact with the rest of the world with let config = nym_tun::tun_device::TunDeviceConfig { - base_name: crate::constants::TUN_BASE_NAME.to_string(), - ipv4: crate::constants::TUN_DEVICE_ADDRESS_V4, - netmaskv4: crate::constants::TUN_DEVICE_NETMASK_V4, - ipv6: crate::constants::TUN_DEVICE_ADDRESS_V6, - netmaskv6: crate::constants::TUN_DEVICE_NETMASK_V6.to_string(), + base_name: nym_network_defaults::constants::mixnet_vpn::NYM_TUN_BASE_NAME.to_string(), + ipv4: nym_network_defaults::constants::mixnet_vpn::NYM_TUN_DEVICE_ADDRESS_V4, + netmaskv4: nym_network_defaults::constants::mixnet_vpn::NYM_TUN_DEVICE_NETMASK_V4, + ipv6: nym_network_defaults::constants::mixnet_vpn::NYM_TUN_DEVICE_ADDRESS_V6, + netmaskv6: nym_network_defaults::constants::mixnet_vpn::NYM_TUN_DEVICE_NETMASK_V6 + .to_string(), }; let (tun_reader, tun_writer) = tokio::io::split(nym_tun::tun_device::TunDevice::new_device_only(config)?); diff --git a/service-providers/ip-packet-router/src/util/generate_new_ip.rs 
b/service-providers/ip-packet-router/src/util/generate_new_ip.rs index ea5325f237..2743be3ef6 100644 --- a/service-providers/ip-packet-router/src/util/generate_new_ip.rs +++ b/service-providers/ip-packet-router/src/util/generate_new_ip.rs @@ -1,9 +1,10 @@ use nym_ip_packet_requests::IpPair; +use nym_network_defaults::constants::mixnet_vpn::{ + NYM_TUN_DEVICE_ADDRESS_V4, NYM_TUN_DEVICE_ADDRESS_V6, +}; use std::net::Ipv6Addr; use std::{collections::HashMap, net::Ipv4Addr}; -use crate::constants::{TUN_DEVICE_ADDRESS_V4, TUN_DEVICE_ADDRESS_V6}; - // Find an available IP address in self.connected_clients // TODO: make this nicer fn generate_random_ips_within_subnet(rng: &mut R) -> IpPair { @@ -36,7 +37,7 @@ pub(crate) fn find_new_ips( let mut rng = rand::thread_rng(); let mut new_ips = generate_random_ips_within_subnet(&mut rng); let mut tries = 0; - let tun_ips = IpPair::new(TUN_DEVICE_ADDRESS_V4, TUN_DEVICE_ADDRESS_V6); + let tun_ips = IpPair::new(NYM_TUN_DEVICE_ADDRESS_V4, NYM_TUN_DEVICE_ADDRESS_V6); while is_ip_taken( connected_clients_ipv4, From 3521f3637483b8999e3364dafa41468224f219ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bogdan-=C8=98tefan=20Neac=C5=9Fu?= Date: Wed, 18 Dec 2024 16:46:28 +0200 Subject: [PATCH 20/64] Include IPINFO_API_TOKEN in nightly CI (#5285) * Include IPINFO_API_TOKEN in nightly CI * Fix beta clippy --- .github/workflows/nightly-build.yml | 1 + Cargo.lock | 21 +++++++-------- Cargo.toml | 6 +++-- common/client-core/src/init/helpers.rs | 2 +- .../src/registration/handshake/client.rs | 2 +- .../src/registration/handshake/gateway.rs | 2 +- .../src/scheme/aggregation.rs | 5 +--- .../src/clients/packet_statistics.rs | 4 +-- explorer-api/src/gateways/models.rs | 4 +-- explorer-api/src/mix_nodes/models.rs | 4 +-- explorer-api/src/unstable/models.rs | 4 +-- .../src/http/state/mod.rs | 2 +- .../http/router/api/v1/metrics/prometheus.rs | 2 +- .../testnet-manager/src/manager/dkg_skip.rs | 26 +++++++++---------- 
.../testnet-manager/src/manager/local_apis.rs | 6 ++--- .../src/manager/local_client.rs | 17 +++++------- .../src/manager/local_nodes.rs | 26 +++++++++---------- 17 files changed, 62 insertions(+), 72 deletions(-) diff --git a/.github/workflows/nightly-build.yml b/.github/workflows/nightly-build.yml index d7b1c29563..c5c68a4614 100644 --- a/.github/workflows/nightly-build.yml +++ b/.github/workflows/nightly-build.yml @@ -15,6 +15,7 @@ jobs: runs-on: ${{ matrix.os }} env: CARGO_TERM_COLOR: always + IPINFO_API_TOKEN: ${{ secrets.IPINFO_API_TOKEN }} continue-on-error: true steps: - name: Check out repository code diff --git a/Cargo.lock b/Cargo.lock index 9a314eed31..e0b5847119 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10759,9 +10759,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -10770,13 +10770,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", "syn 2.0.90", @@ -10797,9 +10796,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", 
@@ -10807,9 +10806,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", @@ -10820,9 +10819,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-bindgen-test" diff --git a/Cargo.toml b/Cargo.toml index 52ef268311..a5b90d7dfd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -147,7 +147,9 @@ members = [ "tools/internal/contract-state-importer/importer-cli", "tools/internal/contract-state-importer/importer-contract", "tools/internal/testnet-manager", - "tools/internal/testnet-manager/dkg-bypass-contract", "common/verloc", "tools/internal/mixnet-connectivity-check", + "tools/internal/testnet-manager/dkg-bypass-contract", + "common/verloc", + "tools/internal/mixnet-connectivity-check", ] default-members = [ @@ -403,7 +405,7 @@ indexed_db_futures = { git = "https://github.com/TiemenSch/rust-indexed-db", bra js-sys = "0.3.70" serde-wasm-bindgen = "0.6.5" tsify = "0.4.5" -wasm-bindgen = "0.2.95" +wasm-bindgen = "0.2.99" wasm-bindgen-futures = "0.4.45" wasmtimer = "0.2.0" web-sys = "0.3.72" diff --git a/common/client-core/src/init/helpers.rs b/common/client-core/src/init/helpers.rs index 68b3b8d457..16aa042045 100644 --- a/common/client-core/src/init/helpers.rs +++ b/common/client-core/src/init/helpers.rs @@ -190,7 +190,7 @@ where Ok(GatewayWithLatency::new(gateway, avg)) } -pub async fn choose_gateway_by_latency<'a, R: Rng, G: ConnectableGateway + Clone>( +pub async fn 
choose_gateway_by_latency( rng: &mut R, gateways: &[G], must_use_tls: bool, diff --git a/common/gateway-requests/src/registration/handshake/client.rs b/common/gateway-requests/src/registration/handshake/client.rs index 5bdb239a66..549cddca39 100644 --- a/common/gateway-requests/src/registration/handshake/client.rs +++ b/common/gateway-requests/src/registration/handshake/client.rs @@ -9,7 +9,7 @@ use futures::{Sink, Stream}; use rand::{CryptoRng, RngCore}; use tungstenite::Message as WsMessage; -impl<'a, S, R> State<'a, S, R> { +impl State<'_, S, R> { async fn client_handshake_inner(&mut self) -> Result<(), HandshakeError> where S: Stream + Sink + Unpin, diff --git a/common/gateway-requests/src/registration/handshake/gateway.rs b/common/gateway-requests/src/registration/handshake/gateway.rs index fc439b53c0..5fec717c46 100644 --- a/common/gateway-requests/src/registration/handshake/gateway.rs +++ b/common/gateway-requests/src/registration/handshake/gateway.rs @@ -10,7 +10,7 @@ use crate::registration::handshake::{error::HandshakeError, WsItem}; use futures::{Sink, Stream}; use tungstenite::Message as WsMessage; -impl<'a, S, R> State<'a, S, R> { +impl State<'_, S, R> { async fn gateway_handshake_inner( &mut self, raw_init_message: Vec, diff --git a/common/nym_offline_compact_ecash/src/scheme/aggregation.rs b/common/nym_offline_compact_ecash/src/scheme/aggregation.rs index 9018cde7ea..148a619a41 100644 --- a/common/nym_offline_compact_ecash/src/scheme/aggregation.rs +++ b/common/nym_offline_compact_ecash/src/scheme/aggregation.rs @@ -115,10 +115,7 @@ pub fn aggregate_signatures( let params = ecash_group_parameters(); // aggregate the signature - let signature = match Aggregatable::aggregate(signatures, indices) { - Ok(res) => res, - Err(err) => return Err(err), - }; + let signature = Aggregatable::aggregate(signatures, indices)?; // Ensure the aggregated signature is not an infinity point if bool::from(signature.is_at_infinity()) { diff --git 
a/common/statistics/src/clients/packet_statistics.rs b/common/statistics/src/clients/packet_statistics.rs index 5d6d1f9c1b..ba335d3627 100644 --- a/common/statistics/src/clients/packet_statistics.rs +++ b/common/statistics/src/clients/packet_statistics.rs @@ -428,7 +428,7 @@ impl PacketStatisticsControl { while self .history .front() - .map_or(false, |&(t, _)| t < recording_window) + .is_some_and(|&(t, _)| t < recording_window) { self.history.pop_front(); } @@ -462,7 +462,7 @@ impl PacketStatisticsControl { while self .rates .front() - .map_or(false, |&(t, _)| t < recording_window) + .is_some_and(|&(t, _)| t < recording_window) { self.rates.pop_front(); } diff --git a/explorer-api/src/gateways/models.rs b/explorer-api/src/gateways/models.rs index d2298b2a0c..9b8a6dfaef 100644 --- a/explorer-api/src/gateways/models.rs +++ b/explorer-api/src/gateways/models.rs @@ -120,9 +120,7 @@ impl ThreadsafeGatewayCache { .read() .await .get(&identity_key) - .map_or(false, |cache_item| { - cache_item.valid_until > SystemTime::now() - }) + .is_some_and(|cache_item| cache_item.valid_until > SystemTime::now()) } pub(crate) async fn get_locations(&self) -> GatewayLocationCache { diff --git a/explorer-api/src/mix_nodes/models.rs b/explorer-api/src/mix_nodes/models.rs index ce5876c949..a9e85ab97e 100644 --- a/explorer-api/src/mix_nodes/models.rs +++ b/explorer-api/src/mix_nodes/models.rs @@ -106,9 +106,7 @@ impl ThreadsafeMixNodesCache { .read() .await .get(&mix_id) - .map_or(false, |cache_item| { - cache_item.valid_until > SystemTime::now() - }) + .is_some_and(|cache_item| cache_item.valid_until > SystemTime::now()) } pub(crate) async fn get_locations(&self) -> MixnodeLocationCache { diff --git a/explorer-api/src/unstable/models.rs b/explorer-api/src/unstable/models.rs index 39254d9c07..9abb81942d 100644 --- a/explorer-api/src/unstable/models.rs +++ b/explorer-api/src/unstable/models.rs @@ -60,9 +60,7 @@ impl ThreadSafeNymNodesCache { .read() .await .get(&node_id) - .map_or(false, 
|cache_item| { - cache_item.valid_until > SystemTime::now() - }) + .is_some_and(|cache_item| cache_item.valid_until > SystemTime::now()) } pub(crate) async fn get_bonded_nymnodes( diff --git a/nym-credential-proxy/nym-credential-proxy/src/http/state/mod.rs b/nym-credential-proxy/nym-credential-proxy/src/http/state/mod.rs index dc6309e8a7..392fa71992 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/http/state/mod.rs +++ b/nym-credential-proxy/nym-credential-proxy/src/http/state/mod.rs @@ -710,7 +710,7 @@ pub(crate) struct ChainWritePermit<'a> { inner: RwLockWriteGuard<'a, DirectSigningHttpRpcNyxdClient>, } -impl<'a> ChainWritePermit<'a> { +impl ChainWritePermit<'_> { pub(crate) async fn make_deposits( self, short_sha: &'static str, diff --git a/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs b/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs index 197cc5a1c8..bdb0050d79 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs @@ -25,7 +25,7 @@ use nym_metrics::metrics; ("prometheus_token" = []) ) )] -pub(crate) async fn prometheus_metrics<'a>( +pub(crate) async fn prometheus_metrics( TypedHeader(authorization): TypedHeader>, State(state): State, ) -> Result { diff --git a/tools/internal/testnet-manager/src/manager/dkg_skip.rs b/tools/internal/testnet-manager/src/manager/dkg_skip.rs index 7720202665..ec74a01af3 100644 --- a/tools/internal/testnet-manager/src/manager/dkg_skip.rs +++ b/tools/internal/testnet-manager/src/manager/dkg_skip.rs @@ -168,9 +168,9 @@ impl NetworkManager { Ok(()) } - async fn validate_existing_contracts<'a>( + async fn validate_existing_contracts( &self, - ctx: &DkgSkipCtx<'a>, + ctx: &DkgSkipCtx<'_>, ) -> Result { ctx.println(format!( "🔬 {}Validating the current DKG and group contracts...", @@ -215,9 +215,9 @@ impl NetworkManager { Ok(current_code) } - async fn persist_dkg_keys<'a, P: AsRef>( + async fn persist_dkg_keys>( &self, - 
ctx: &mut DkgSkipCtx<'a>, + ctx: &mut DkgSkipCtx<'_>, output_dir: P, ) -> Result<(), NetworkManagerError> { ctx.println(format!( @@ -272,9 +272,9 @@ impl NetworkManager { Ok(()) } - async fn upload_bypass_contract<'a, P: AsRef>( + async fn upload_bypass_contract>( &self, - ctx: &DkgSkipCtx<'a>, + ctx: &DkgSkipCtx<'_>, dkg_bypass_contract: P, ) -> Result { ctx.println(format!( @@ -297,9 +297,9 @@ impl NetworkManager { Ok(res.code_id) } - async fn migrate_to_bypass_contract<'a>( + async fn migrate_to_bypass_contract( &self, - ctx: &DkgSkipCtx<'a>, + ctx: &DkgSkipCtx<'_>, code_id: ContractCodeId, ) -> Result<(), NetworkManagerError> { ctx.println(format!( @@ -336,9 +336,9 @@ impl NetworkManager { Ok(()) } - async fn restore_dkg_contract<'a>( + async fn restore_dkg_contract( &self, - ctx: &DkgSkipCtx<'a>, + ctx: &DkgSkipCtx<'_>, code_id: ContractCodeId, ) -> Result<(), NetworkManagerError> { ctx.println(format!( @@ -363,7 +363,7 @@ impl NetworkManager { Ok(()) } - async fn add_group_members<'a>(&self, ctx: &DkgSkipCtx<'a>) -> Result<(), NetworkManagerError> { + async fn add_group_members(&self, ctx: &DkgSkipCtx<'_>) -> Result<(), NetworkManagerError> { ctx.println(format!( "👪 {}Adding all the cw4 group members...", style("[7/8]").bold().dim() @@ -387,9 +387,9 @@ impl NetworkManager { Ok(()) } - async fn transfer_signer_tokens<'a>( + async fn transfer_signer_tokens( &self, - ctx: &DkgSkipCtx<'a>, + ctx: &DkgSkipCtx<'_>, ) -> Result<(), NetworkManagerError> { ctx.println(format!( "💸 {}Transferring tokens to the new signers...", diff --git a/tools/internal/testnet-manager/src/manager/local_apis.rs b/tools/internal/testnet-manager/src/manager/local_apis.rs index 8ad28e8743..7da09928c3 100644 --- a/tools/internal/testnet-manager/src/manager/local_apis.rs +++ b/tools/internal/testnet-manager/src/manager/local_apis.rs @@ -67,9 +67,9 @@ impl NetworkManager { .join(DEFAULT_CONFIG_FILENAME) } - async fn initialise_api<'a>( + async fn initialise_api( &self, - ctx: 
&LocalApisCtx<'a>, + ctx: &LocalApisCtx<'_>, info: &EcashSignerWithPaths, ) -> Result<(), NetworkManagerError> { let address = &info.data.cosmos_account.address; @@ -139,7 +139,7 @@ impl NetworkManager { Ok(()) } - async fn initialise_apis<'a>(&self, ctx: &LocalApisCtx<'a>) -> Result<(), NetworkManagerError> { + async fn initialise_apis(&self, ctx: &LocalApisCtx<'_>) -> Result<(), NetworkManagerError> { ctx.println(format!( "🔏 {}Initialising local nym-apis...", style("[1/1]").bold().dim() diff --git a/tools/internal/testnet-manager/src/manager/local_client.rs b/tools/internal/testnet-manager/src/manager/local_client.rs index f5c8ba173a..c95c3062df 100644 --- a/tools/internal/testnet-manager/src/manager/local_client.rs +++ b/tools/internal/testnet-manager/src/manager/local_client.rs @@ -80,9 +80,9 @@ impl NetworkManager { .join(DEFAULT_CONFIG_FILENAME) } - async fn wait_for_api_gateway<'a>( + async fn wait_for_api_gateway( &self, - ctx: &LocalClientCtx<'a>, + ctx: &LocalClientCtx<'_>, ) -> Result { // create api client // hehe, that's disgusting, but it's not meant to be used by users @@ -145,9 +145,9 @@ impl NetworkManager { } } - async fn wait_for_gateway_endpoint<'a>( + async fn wait_for_gateway_endpoint( &self, - ctx: &LocalClientCtx<'a>, + ctx: &LocalClientCtx<'_>, gateway: SocketAddr, ) -> Result<(), NetworkManagerError> { ctx.set_pb_message(format!( @@ -177,17 +177,14 @@ impl NetworkManager { Ok(()) } - async fn wait_for_gateway<'a>( - &self, - ctx: &LocalClientCtx<'a>, - ) -> Result<(), NetworkManagerError> { + async fn wait_for_gateway(&self, ctx: &LocalClientCtx<'_>) -> Result<(), NetworkManagerError> { let endpoint = self.wait_for_api_gateway(ctx).await?; self.wait_for_gateway_endpoint(ctx, endpoint).await } - async fn prepare_nym_client<'a>( + async fn prepare_nym_client( &self, - ctx: &LocalClientCtx<'a>, + ctx: &LocalClientCtx<'_>, ) -> Result<(), NetworkManagerError> { ctx.println(format!( "🔏 {}Initialising local nym-client...", diff --git 
a/tools/internal/testnet-manager/src/manager/local_nodes.rs b/tools/internal/testnet-manager/src/manager/local_nodes.rs index 792c67e8fe..e140ccb20a 100644 --- a/tools/internal/testnet-manager/src/manager/local_nodes.rs +++ b/tools/internal/testnet-manager/src/manager/local_nodes.rs @@ -102,9 +102,9 @@ struct ReducedSignatureOut { } impl NetworkManager { - async fn initialise_nym_node<'a>( + async fn initialise_nym_node( &self, - ctx: &mut LocalNodesCtx<'a>, + ctx: &mut LocalNodesCtx<'_>, offset: u16, is_gateway: bool, ) -> Result<(), NetworkManagerError> { @@ -222,9 +222,9 @@ impl NetworkManager { Ok(()) } - async fn initialise_nym_nodes<'a>( + async fn initialise_nym_nodes( &self, - ctx: &mut LocalNodesCtx<'a>, + ctx: &mut LocalNodesCtx<'_>, mixnodes: u16, gateways: u16, ) -> Result<(), NetworkManagerError> { @@ -250,9 +250,9 @@ impl NetworkManager { Ok(()) } - async fn transfer_bonding_tokens<'a>( + async fn transfer_bonding_tokens( &self, - ctx: &LocalNodesCtx<'a>, + ctx: &LocalNodesCtx<'_>, ) -> Result<(), NetworkManagerError> { ctx.println(format!( "💸 {}Transferring tokens to the bond owners...", @@ -281,9 +281,9 @@ impl NetworkManager { Ok(()) } - async fn bond_node<'a>( + async fn bond_node( &self, - ctx: &LocalNodesCtx<'a>, + ctx: &LocalNodesCtx<'_>, node: &NymNode, is_gateway: bool, ) -> Result<(), NetworkManagerError> { @@ -318,7 +318,7 @@ impl NetworkManager { Ok(()) } - async fn bond_nym_nodes<'a>(&self, ctx: &LocalNodesCtx<'a>) -> Result<(), NetworkManagerError> { + async fn bond_nym_nodes(&self, ctx: &LocalNodesCtx<'_>) -> Result<(), NetworkManagerError> { ctx.println(format!( "⛓️ {}Bonding the local nym-nodes...", style("[3/5]").bold().dim() @@ -336,9 +336,9 @@ impl NetworkManager { Ok(()) } - async fn assign_to_active_set<'a>( + async fn assign_to_active_set( &self, - ctx: &LocalNodesCtx<'a>, + ctx: &LocalNodesCtx<'_>, ) -> Result<(), NetworkManagerError> { ctx.println(format!( "🔌 {}Assigning nodes to the active set...", @@ -460,9 +460,9 @@ impl 
NetworkManager { ctx.progress.output_run_commands(cmds) } - async fn persist_nodes_in_database<'a>( + async fn persist_nodes_in_database( &self, - ctx: &LocalNodesCtx<'a>, + ctx: &LocalNodesCtx<'_>, ) -> Result<(), NetworkManagerError> { ctx.println(format!( "📦 {}Storing the node information in the database", From 53c28af847e9db9df17c44b070b6917ef75c3972 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20H=C3=A4ggblad?= Date: Wed, 18 Dec 2024 21:51:00 +0100 Subject: [PATCH 21/64] Add close to credential storage (#5283) (#5293) * Add close method to credential storage * wip --- common/credential-storage/src/backends/sqlite.rs | 5 +++++ common/credential-storage/src/ephemeral_storage.rs | 4 ++++ common/credential-storage/src/persistent_storage/mod.rs | 4 ++++ common/credential-storage/src/storage.rs | 2 ++ 4 files changed, 15 insertions(+) diff --git a/common/credential-storage/src/backends/sqlite.rs b/common/credential-storage/src/backends/sqlite.rs index 9267bbddb3..dec0899064 100644 --- a/common/credential-storage/src/backends/sqlite.rs +++ b/common/credential-storage/src/backends/sqlite.rs @@ -23,6 +23,11 @@ impl SqliteEcashTicketbookManager { SqliteEcashTicketbookManager { connection_pool } } + /// Closes the connection pool. 
+ pub async fn close(&self) { + self.connection_pool.close().await + } + pub(crate) async fn cleanup_expired(&self, deadline: Date) -> Result<(), sqlx::Error> { sqlx::query!( "DELETE FROM ecash_ticketbook WHERE expiration_date <= ?", diff --git a/common/credential-storage/src/ephemeral_storage.rs b/common/credential-storage/src/ephemeral_storage.rs index 91436d4d8c..b6a113f414 100644 --- a/common/credential-storage/src/ephemeral_storage.rs +++ b/common/credential-storage/src/ephemeral_storage.rs @@ -43,6 +43,10 @@ impl Debug for EphemeralStorage { impl Storage for EphemeralStorage { type StorageError = StorageError; + async fn close(&self) { + // nothing to do here + } + async fn cleanup_expired(&self) -> Result<(), Self::StorageError> { self.storage_manager.cleanup_expired().await; Ok(()) diff --git a/common/credential-storage/src/persistent_storage/mod.rs b/common/credential-storage/src/persistent_storage/mod.rs index e8c9eca5aa..32b6f581de 100644 --- a/common/credential-storage/src/persistent_storage/mod.rs +++ b/common/credential-storage/src/persistent_storage/mod.rs @@ -89,6 +89,10 @@ impl PersistentStorage { impl Storage for PersistentStorage { type StorageError = StorageError; + async fn close(&self) { + self.storage_manager.close().await + } + /// remove all expired ticketbooks and expiration date signatures async fn cleanup_expired(&self) -> Result<(), Self::StorageError> { let ecash_yesterday = ecash_today().date().previous_day().unwrap(); diff --git a/common/credential-storage/src/storage.rs b/common/credential-storage/src/storage.rs index 19ddc44e86..4c0602ea85 100644 --- a/common/credential-storage/src/storage.rs +++ b/common/credential-storage/src/storage.rs @@ -22,6 +22,8 @@ use std::error::Error; pub trait Storage: Send + Sync { type StorageError: Error; + async fn close(&self); + /// remove all expired ticketbooks and expiration date signatures async fn cleanup_expired(&self) -> Result<(), Self::StorageError>; From 
ae346bb75b168aa8c968dc42958d1997b0c402a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Thu, 19 Dec 2024 10:42:52 +0000 Subject: [PATCH 22/64] bugfix: remove unnecessary arguments for nym-api swagger endpoints (#5272) * removed incorrect body argument for '/rewarded-set' endpoint * removed incorrect pagination parameters for monitor run results --- nym-api/src/node_status_api/handlers/unstable.rs | 6 ------ nym-api/src/nym_nodes/handlers/mod.rs | 1 - 2 files changed, 7 deletions(-) diff --git a/nym-api/src/node_status_api/handlers/unstable.rs b/nym-api/src/node_status_api/handlers/unstable.rs index 1378b6a055..76dcf912cd 100644 --- a/nym-api/src/node_status_api/handlers/unstable.rs +++ b/nym-api/src/node_status_api/handlers/unstable.rs @@ -347,9 +347,6 @@ async fn _latest_monitor_run_report( #[utoipa::path( tag = "UNSTABLE - DO **NOT** USE", get, - params( - PaginationRequest - ), path = "/v1/status/network-monitor/unstable/run/{monitor_run_id}/details", responses( (status = 200, body = NetworkMonitorRunDetailsResponse) @@ -370,9 +367,6 @@ pub async fn monitor_run_report( #[utoipa::path( tag = "UNSTABLE - DO **NOT** USE", get, - params( - PaginationRequest - ), path = "/v1/status/network-monitor/unstable/run/latest/details", responses( (status = 200, body = NetworkMonitorRunDetailsResponse) diff --git a/nym-api/src/nym_nodes/handlers/mod.rs b/nym-api/src/nym_nodes/handlers/mod.rs index a3646e8b50..bae793b15a 100644 --- a/nym-api/src/nym_nodes/handlers/mod.rs +++ b/nym-api/src/nym_nodes/handlers/mod.rs @@ -51,7 +51,6 @@ pub(crate) fn nym_node_routes() -> Router { #[utoipa::path( tag = "Nym Nodes", get, - request_body = NodeRefreshBody, path = "/rewarded-set", context_path = "/v1/nym-nodes", responses( From a2322d6cdf74b57b368abd4a506504791c118d51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Thu, 19 Dec 2024 10:44:34 +0000 Subject: [PATCH 23/64] feature: nym topology revamp (#5271) * revamped 
NymTopology * wip * working e2e client * updated nym-api * updated nym-node * updated rest of non-test code * updated the rest of the codebase * additional tweaks * linux clippy fixes + adding additional dummy ipr types for better linting on non-linux targets --- Cargo.lock | 5 +- common/client-core/Cargo.toml | 4 +- common/client-core/config-types/src/lib.rs | 11 + .../src/cli_helpers/client_add_gateway.rs | 4 +- .../src/cli_helpers/client_init.rs | 2 +- .../client-core/src/client/base_client/mod.rs | 24 +- .../src/client/cover_traffic_stream.rs | 1 + .../src/client/inbound_messages.rs | 28 - .../input_message_listener.rs | 43 +- .../acknowledgement_control/mod.rs | 6 - .../retransmission_request_listener.rs | 9 +- .../real_messages_control/message_handler.rs | 76 +- .../real_traffic_stream.rs | 1 + .../client/replies/reply_controller/mod.rs | 1 - .../src/client/topology_control/accessor.rs | 105 +-- .../topology_control/geo_aware_provider.rs | 33 +- .../src/client/topology_control/mod.rs | 19 +- .../topology_control/nym_api_provider.rs | 162 ++-- common/client-core/src/error.rs | 11 +- common/client-core/src/init/helpers.rs | 58 +- common/client-core/src/init/mod.rs | 4 +- common/client-core/src/init/types.rs | 17 +- common/client-core/src/lib.rs | 3 +- .../validator-client/src/client.rs | 6 +- .../validator-client/src/nym_api/mod.rs | 14 +- .../contract_traits/mixnet_query_client.rs | 27 +- .../mixnet-contract/src/mixnode.rs | 11 + .../mixnet-contract/src/types.rs | 41 + .../ip-packet-requests/src/v7/conversion.rs | 1 + common/ip-packet-requests/src/v7/request.rs | 4 + common/node-tester-utils/src/message.rs | 64 +- common/node-tester-utils/src/node.rs | 42 +- common/node-tester-utils/src/tester.rs | 132 +-- .../acknowledgements/src/surb_ack.rs | 9 +- common/nymsphinx/addressing/src/clients.rs | 4 +- .../anonymous-replies/src/reply_surb.rs | 18 +- .../anonymous-replies/src/requests.rs | 148 +--- common/nymsphinx/chunking/src/lib.rs | 23 +- 
common/nymsphinx/cover/src/lib.rs | 13 +- common/nymsphinx/params/src/lib.rs | 4 - common/nymsphinx/routing/src/lib.rs | 34 +- common/nymsphinx/src/message.rs | 22 +- common/nymsphinx/src/preparer/mod.rs | 53 +- common/nymsphinx/src/receiver.rs | 131 +-- common/topology/Cargo.toml | 11 +- common/topology/src/error.rs | 20 +- common/topology/src/gateway.rs | 174 ---- common/topology/src/lib.rs | 836 +++++++++--------- common/topology/src/mix.rs | 135 --- common/topology/src/node.rs | 143 +++ common/topology/src/provider_trait.rs | 2 +- common/topology/src/random_route_provider.rs | 30 - common/topology/src/rewarded_set.rs | 122 +++ common/topology/src/serde.rs | 262 ------ common/topology/src/wasm_helpers.rs | 123 +++ common/wasm/client-core/Cargo.toml | 4 +- common/wasm/client-core/src/config/mod.rs | 13 + .../wasm/client-core/src/config/override.rs | 17 + common/wasm/client-core/src/helpers.rs | 20 +- common/wasm/client-core/src/topology.rs | 41 +- nym-api/nym-api-requests/src/models.rs | 38 +- nym-api/nym-api-requests/src/nym_nodes.rs | 4 +- nym-api/src/epoch_operations/helpers.rs | 5 +- nym-api/src/network_monitor/monitor/mod.rs | 4 +- .../src/network_monitor/monitor/preparer.rs | 209 ++--- nym-api/src/network_monitor/monitor/sender.rs | 31 +- nym-api/src/network_monitor/test_packet.rs | 4 +- nym-api/src/network_monitor/test_route/mod.rs | 71 +- nym-api/src/node_describe_cache/mod.rs | 90 +- .../src/node_status_api/cache/node_sets.rs | 50 +- nym-api/src/nym_contract_cache/cache/data.rs | 122 +-- nym-api/src/nym_contract_cache/cache/mod.rs | 13 +- .../src/nym_contract_cache/cache/refresher.rs | 22 +- nym-api/src/nym_nodes/handlers/mod.rs | 9 +- .../nym_nodes/handlers/unstable/skimmed.rs | 6 +- nym-api/src/support/http/state.rs | 5 +- nym-api/src/support/legacy_helpers.rs | 19 + nym-api/src/support/nyxd/mod.rs | 7 +- nym-api/src/support/storage/mod.rs | 6 +- nym-network-monitor/src/accounting.rs | 30 +- nym-network-monitor/src/main.rs | 23 +- 
nym-node/src/node/mod.rs | 24 +- nym-node/src/node/shared_topology.rs | 14 +- .../examples/custom_topology_provider.rs | 14 +- .../nym-sdk/examples/geo_topology_provider.rs | 50 -- .../examples/manually_overwrite_topology.rs | 75 +- sdk/rust/nym-sdk/src/lib.rs | 1 + sdk/rust/nym-sdk/src/mixnet.rs | 1 + sdk/rust/nym-sdk/src/mixnet/client.rs | 12 + sdk/rust/nym-sdk/src/mixnet/native_client.rs | 10 +- sdk/rust/nym-sdk/src/mixnet/socks5_client.rs | 5 - .../src/connected_client_handler.rs | 7 +- service-providers/ip-packet-router/src/lib.rs | 1 + .../ip-packet-router/src/mixnet_listener.rs | 51 +- .../ip-packet-router/src/non_linux_dummy.rs | 43 + .../src/util/create_message.rs | 9 +- .../network-requester/src/reply.rs | 1 - .../mixnet-connectivity-check/src/main.rs | 2 + wasm/client/Cargo.toml | 8 +- wasm/client/src/client.rs | 6 +- wasm/client/src/helpers.rs | 33 +- wasm/node-tester/src/tester.rs | 6 +- 102 files changed, 1949 insertions(+), 2573 deletions(-) delete mode 100644 common/topology/src/gateway.rs delete mode 100644 common/topology/src/mix.rs create mode 100644 common/topology/src/node.rs delete mode 100644 common/topology/src/random_route_provider.rs create mode 100644 common/topology/src/rewarded_set.rs delete mode 100644 common/topology/src/serde.rs create mode 100644 common/topology/src/wasm_helpers.rs delete mode 100644 sdk/rust/nym-sdk/examples/geo_topology_provider.rs create mode 100644 service-providers/ip-packet-router/src/non_linux_dummy.rs diff --git a/Cargo.lock b/Cargo.lock index e0b5847119..2de09714d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6559,10 +6559,7 @@ name = "nym-topology" version = "0.1.0" dependencies = [ "async-trait", - "bs58", - "log", "nym-api-requests", - "nym-bin-common", "nym-config", "nym-crypto", "nym-mixnet-contract-common", @@ -6571,10 +6568,10 @@ dependencies = [ "nym-sphinx-types", "rand", "reqwest 0.12.4", - "semver 1.0.23", "serde", "serde_json", "thiserror", + "tracing", "tsify", "wasm-bindgen", "wasm-utils", 
diff --git a/common/client-core/Cargo.toml b/common/client-core/Cargo.toml index b5191eef80..f6536b9762 100644 --- a/common/client-core/Cargo.toml +++ b/common/client-core/Cargo.toml @@ -3,7 +3,7 @@ name = "nym-client-core" version = "1.1.15" authors = ["Dave Hrycyszyn "] edition = "2021" -rust-version = "1.70" +rust-version = "1.76" license.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -45,7 +45,7 @@ nym-nonexhaustive-delayqueue = { path = "../nonexhaustive-delayqueue" } nym-sphinx = { path = "../nymsphinx" } nym-statistics-common = { path = "../statistics" } nym-pemstore = { path = "../pemstore" } -nym-topology = { path = "../topology", features = ["serializable"] } +nym-topology = { path = "../topology", features = ["persistence"] } nym-mixnet-client = { path = "../client-libs/mixnet-client", default-features = false } nym-validator-client = { path = "../client-libs/validator-client", default-features = false } nym-task = { path = "../task" } diff --git a/common/client-core/config-types/src/lib.rs b/common/client-core/config-types/src/lib.rs index 992559bc92..2031c5c34e 100644 --- a/common/client-core/config-types/src/lib.rs +++ b/common/client-core/config-types/src/lib.rs @@ -550,6 +550,15 @@ pub struct Topology { /// Specifies a minimum performance of a gateway that is used on route construction. /// This setting is only applicable when `NymApi` topology is used. pub minimum_gateway_performance: u8, + + /// Specifies whether this client should attempt to retrieve all available network nodes + /// as opposed to just active mixnodes/gateways. + /// Useless without `ignore_epoch_roles = true` + pub use_extended_topology: bool, + + /// Specifies whether this client should ignore the current epoch role of the target egress node + /// when constructing the final hop packets. 
+ pub ignore_egress_epoch_role: bool, } #[allow(clippy::large_enum_variant)] @@ -586,6 +595,8 @@ impl Default for Topology { topology_structure: TopologyStructure::default(), minimum_mixnode_performance: DEFAULT_MIN_MIXNODE_PERFORMANCE, minimum_gateway_performance: DEFAULT_MIN_GATEWAY_PERFORMANCE, + use_extended_topology: false, + ignore_egress_epoch_role: false, } } } diff --git a/common/client-core/src/cli_helpers/client_add_gateway.rs b/common/client-core/src/cli_helpers/client_add_gateway.rs index 56b1c3ad8e..dd1064f135 100644 --- a/common/client-core/src/cli_helpers/client_add_gateway.rs +++ b/common/client-core/src/cli_helpers/client_add_gateway.rs @@ -112,7 +112,7 @@ where source, } })?; - hardcoded_topology.get_gateways() + hardcoded_topology.entry_capable_nodes().cloned().collect() } else { let mut rng = rand::thread_rng(); crate::init::helpers::current_gateways( @@ -128,7 +128,7 @@ where // make sure the list of available gateways doesn't overlap the list of known gateways let available_gateways = available_gateways .into_iter() - .filter(|g| !registered_gateways.contains(g.identity())) + .filter(|g| !registered_gateways.contains(&g.identity())) .collect::>(); if available_gateways.is_empty() { diff --git a/common/client-core/src/cli_helpers/client_init.rs b/common/client-core/src/cli_helpers/client_init.rs index 060c1192da..0599f3a20e 100644 --- a/common/client-core/src/cli_helpers/client_init.rs +++ b/common/client-core/src/cli_helpers/client_init.rs @@ -167,7 +167,7 @@ where source, } })?; - hardcoded_topology.get_gateways() + hardcoded_topology.entry_capable_nodes().cloned().collect() } else { let mut rng = rand::thread_rng(); crate::init::helpers::current_gateways( diff --git a/common/client-core/src/client/base_client/mod.rs b/common/client-core/src/client/base_client/mod.rs index bb0ffd5d03..3057128afd 100644 --- a/common/client-core/src/client/base_client/mod.rs +++ b/common/client-core/src/client/base_client/mod.rs @@ -3,7 +3,6 @@ use 
super::received_buffer::ReceivedBufferMessage; use super::statistics_control::StatisticsControl; -use super::topology_control::geo_aware_provider::GeoAwareTopologyProvider; use crate::client::base_client::storage::helpers::store_client_keys; use crate::client::base_client::storage::MixnetClientStorage; use crate::client::cover_traffic_stream::LoopCoverTrafficStream; @@ -24,7 +23,7 @@ use crate::client::replies::reply_storage::{ }; use crate::client::topology_control::nym_api_provider::NymApiTopologyProvider; use crate::client::topology_control::{ - nym_api_provider, TopologyAccessor, TopologyRefresher, TopologyRefresherConfig, + TopologyAccessor, TopologyRefresher, TopologyRefresherConfig, }; use crate::config::{Config, DebugConfig}; use crate::error::ClientCoreError; @@ -464,8 +463,8 @@ where details_store .upgrade_stored_remote_gateway_key(gateway_client.gateway_identity(), &updated_key) .await.map_err(|err| { - error!("failed to store upgraded gateway key! this connection might be forever broken now: {err}"); - ClientCoreError::GatewaysDetailsStoreError { source: Box::new(err) } + error!("failed to store upgraded gateway key! this connection might be forever broken now: {err}"); + ClientCoreError::GatewaysDetailsStoreError { source: Box::new(err) } })? } @@ -539,15 +538,15 @@ where // if no custom provider was ... 
provided ..., create one using nym-api custom_provider.unwrap_or_else(|| match config_topology.topology_structure { config::TopologyStructure::NymApi => Box::new(NymApiTopologyProvider::new( - nym_api_provider::Config { - min_mixnode_performance: config_topology.minimum_mixnode_performance, - min_gateway_performance: config_topology.minimum_gateway_performance, - }, + config_topology, nym_api_urls, user_agent, )), config::TopologyStructure::GeoAware(group_by) => { - Box::new(GeoAwareTopologyProvider::new(nym_api_urls, group_by)) + warn!("using deprecated 'GeoAware' topology provider - this option will be removed very soon"); + + #[allow(deprecated)] + Box::new(crate::client::topology_control::GeoAwareTopologyProvider::new(nym_api_urls, group_by)) } }) } @@ -558,7 +557,7 @@ where topology_provider: Box, topology_config: config::Topology, topology_accessor: TopologyAccessor, - local_gateway: &NodeIdentity, + local_gateway: NodeIdentity, wait_for_gateway: bool, mut shutdown: TaskClient, ) -> Result<(), ClientCoreError> { @@ -590,7 +589,7 @@ where }; if let Err(err) = topology_refresher - .ensure_contains_gateway(local_gateway) + .ensure_contains_routable_egress(local_gateway) .await { if let Some(waiting_timeout) = gateway_wait_timeout { @@ -740,7 +739,8 @@ where // channels responsible for controlling ack messages let (ack_sender, ack_receiver) = mpsc::unbounded(); - let shared_topology_accessor = TopologyAccessor::new(); + let shared_topology_accessor = + TopologyAccessor::new(self.config.debug.topology.ignore_egress_epoch_role); // Shutdown notifier for signalling tasks to stop let shutdown = self diff --git a/common/client-core/src/client/cover_traffic_stream.rs b/common/client-core/src/client/cover_traffic_stream.rs index 8efea56e5a..1059e22e0f 100644 --- a/common/client-core/src/client/cover_traffic_stream.rs +++ b/common/client-core/src/client/cover_traffic_stream.rs @@ -163,6 +163,7 @@ impl LoopCoverTrafficStream { // poisson delay, but is it really a problem? 
let topology_permit = self.topology_access.get_read_permit().await; // the ack is sent back to ourselves (and then ignored) + let topology_ref = match topology_permit.try_get_valid_topology_ref( &self.our_full_destination, Some(&self.our_full_destination), diff --git a/common/client-core/src/client/inbound_messages.rs b/common/client-core/src/client/inbound_messages.rs index baf163913f..b14a4d7ed5 100644 --- a/common/client-core/src/client/inbound_messages.rs +++ b/common/client-core/src/client/inbound_messages.rs @@ -28,7 +28,6 @@ pub enum InputMessage { recipient: Recipient, data: Vec, lane: TransmissionLane, - mix_hops: Option, }, /// Creates a message used for a duplex anonymous communication where the recipient @@ -44,7 +43,6 @@ pub enum InputMessage { data: Vec, reply_surbs: u32, lane: TransmissionLane, - mix_hops: Option, }, /// Attempt to use our internally received and stored `ReplySurb` to send the message back @@ -94,29 +92,6 @@ impl InputMessage { recipient, data, lane, - mix_hops: None, - }; - if let Some(packet_type) = packet_type { - InputMessage::new_wrapper(message, packet_type) - } else { - message - } - } - - // IMHO `new_regular` should take `mix_hops: Option` as an argument instead of creating - // this function, but that would potentially break backwards compatibility with the current API - pub fn new_regular_with_custom_hops( - recipient: Recipient, - data: Vec, - lane: TransmissionLane, - packet_type: Option, - mix_hops: Option, - ) -> Self { - let message = InputMessage::Regular { - recipient, - data, - lane, - mix_hops, }; if let Some(packet_type) = packet_type { InputMessage::new_wrapper(message, packet_type) @@ -137,7 +112,6 @@ impl InputMessage { data, reply_surbs, lane, - mix_hops: None, }; if let Some(packet_type) = packet_type { InputMessage::new_wrapper(message, packet_type) @@ -154,14 +128,12 @@ impl InputMessage { reply_surbs: u32, lane: TransmissionLane, packet_type: Option, - mix_hops: Option, ) -> Self { let message = 
InputMessage::Anonymous { recipient, data, reply_surbs, lane, - mix_hops, }; if let Some(packet_type) = packet_type { InputMessage::new_wrapper(message, packet_type) diff --git a/common/client-core/src/client/real_messages_control/acknowledgement_control/input_message_listener.rs b/common/client-core/src/client/real_messages_control/acknowledgement_control/input_message_listener.rs index 00a7abe4e7..19ba2d1cae 100644 --- a/common/client-core/src/client/real_messages_control/acknowledgement_control/input_message_listener.rs +++ b/common/client-core/src/client/real_messages_control/acknowledgement_control/input_message_listener.rs @@ -73,11 +73,10 @@ where content: Vec, lane: TransmissionLane, packet_type: PacketType, - mix_hops: Option, ) { if let Err(err) = self .message_handler - .try_send_plain_message(recipient, content, lane, packet_type, mix_hops) + .try_send_plain_message(recipient, content, lane, packet_type) .await { warn!("failed to send a plain message - {err}") @@ -91,18 +90,10 @@ where reply_surbs: u32, lane: TransmissionLane, packet_type: PacketType, - mix_hops: Option, ) { if let Err(err) = self .message_handler - .try_send_message_with_reply_surbs( - recipient, - content, - reply_surbs, - lane, - packet_type, - mix_hops, - ) + .try_send_message_with_reply_surbs(recipient, content, reply_surbs, lane, packet_type) .await { warn!("failed to send a repliable message - {err}") @@ -115,9 +106,8 @@ where recipient, data, lane, - mix_hops, } => { - self.handle_plain_message(recipient, data, lane, PacketType::Mix, mix_hops) + self.handle_plain_message(recipient, data, lane, PacketType::Mix) .await } InputMessage::Anonymous { @@ -125,17 +115,9 @@ where data, reply_surbs, lane, - mix_hops, } => { - self.handle_repliable_message( - recipient, - data, - reply_surbs, - lane, - PacketType::Mix, - mix_hops, - ) - .await + self.handle_repliable_message(recipient, data, reply_surbs, lane, PacketType::Mix) + .await } InputMessage::Reply { recipient_tag, @@ -153,9 
+135,8 @@ where recipient, data, lane, - mix_hops, } => { - self.handle_plain_message(recipient, data, lane, packet_type, mix_hops) + self.handle_plain_message(recipient, data, lane, packet_type) .await } InputMessage::Anonymous { @@ -163,17 +144,9 @@ where data, reply_surbs, lane, - mix_hops, } => { - self.handle_repliable_message( - recipient, - data, - reply_surbs, - lane, - packet_type, - mix_hops, - ) - .await + self.handle_repliable_message(recipient, data, reply_surbs, lane, packet_type) + .await } InputMessage::Reply { recipient_tag, diff --git a/common/client-core/src/client/real_messages_control/acknowledgement_control/mod.rs b/common/client-core/src/client/real_messages_control/acknowledgement_control/mod.rs index 2b65df9036..24efc7a560 100644 --- a/common/client-core/src/client/real_messages_control/acknowledgement_control/mod.rs +++ b/common/client-core/src/client/real_messages_control/acknowledgement_control/mod.rs @@ -70,7 +70,6 @@ pub(crate) struct PendingAcknowledgement { message_chunk: Fragment, delay: SphinxDelay, destination: PacketDestination, - mix_hops: Option, retransmissions: u32, } @@ -80,13 +79,11 @@ impl PendingAcknowledgement { message_chunk: Fragment, delay: SphinxDelay, recipient: Recipient, - mix_hops: Option, ) -> Self { PendingAcknowledgement { message_chunk, delay, destination: PacketDestination::KnownRecipient(recipient.into()), - mix_hops, retransmissions: 0, } } @@ -104,9 +101,6 @@ impl PendingAcknowledgement { recipient_tag, extra_surb_request, }, - // Messages sent using SURBs are using the number of mix hops set by the recipient when - // they provided the SURBs, so it doesn't make sense to include it here. 
- mix_hops: None, retransmissions: 0, } } diff --git a/common/client-core/src/client/real_messages_control/acknowledgement_control/retransmission_request_listener.rs b/common/client-core/src/client/real_messages_control/acknowledgement_control/retransmission_request_listener.rs index 6eda1c8adc..a55a9ac226 100644 --- a/common/client-core/src/client/real_messages_control/acknowledgement_control/retransmission_request_listener.rs +++ b/common/client-core/src/client/real_messages_control/acknowledgement_control/retransmission_request_listener.rs @@ -52,18 +52,12 @@ where packet_recipient: Recipient, chunk_data: Fragment, packet_type: PacketType, - mix_hops: Option, ) -> Result { debug!("retransmitting normal packet..."); // TODO: Figure out retransmission packet type signaling self.message_handler - .try_prepare_single_chunk_for_sending( - packet_recipient, - chunk_data, - packet_type, - mix_hops, - ) + .try_prepare_single_chunk_for_sending(packet_recipient, chunk_data, packet_type) .await } @@ -110,7 +104,6 @@ where **recipient, timed_out_ack.message_chunk.clone(), packet_type, - timed_out_ack.mix_hops, ) .await } diff --git a/common/client-core/src/client/real_messages_control/message_handler.rs b/common/client-core/src/client/real_messages_control/message_handler.rs index 75cca06f76..4dcce65494 100644 --- a/common/client-core/src/client/real_messages_control/message_handler.rs +++ b/common/client-core/src/client/real_messages_control/message_handler.rs @@ -15,11 +15,11 @@ use nym_sphinx::anonymous_replies::requests::{AnonymousSenderTag, RepliableMessa use nym_sphinx::anonymous_replies::{ReplySurb, SurbEncryptionKey}; use nym_sphinx::chunking::fragment::{Fragment, FragmentIdentifier}; use nym_sphinx::message::NymMessage; -use nym_sphinx::params::{PacketSize, PacketType, DEFAULT_NUM_MIX_HOPS}; +use nym_sphinx::params::{PacketSize, PacketType}; use nym_sphinx::preparer::{MessagePreparer, PreparedFragment}; use nym_sphinx::Delay; use 
nym_task::connections::TransmissionLane; -use nym_topology::{NymTopology, NymTopologyError}; +use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, Rng}; use std::collections::HashMap; use std::sync::Arc; @@ -100,10 +100,6 @@ pub(crate) struct Config { /// Average delay an acknowledgement packet is going to get delay at a single mixnode. average_ack_delay: Duration, - /// Number of mix hops each packet ('real' message, ack, reply) is expected to take. - /// Note that it does not include gateway hops. - num_mix_hops: u8, - /// Primary predefined packet size used for the encapsulated messages. primary_packet_size: PacketSize, @@ -125,19 +121,11 @@ impl Config { deterministic_route_selection, average_packet_delay, average_ack_delay, - num_mix_hops: DEFAULT_NUM_MIX_HOPS, primary_packet_size: PacketSize::default(), secondary_packet_size: None, } } - /// Allows setting non-default number of expected mix hops in the network. - #[allow(dead_code)] - pub fn with_mix_hops(mut self, hops: u8) -> Self { - self.num_mix_hops = hops; - self - } - /// Allows setting non-default size of the sphinx packets sent out. 
pub fn with_custom_primary_packet_size(mut self, packet_size: PacketSize) -> Self { self.primary_packet_size = packet_size; @@ -185,9 +173,7 @@ where config.sender_address, config.average_packet_delay, config.average_ack_delay, - ) - .with_mix_hops(config.num_mix_hops); - + ); MessageHandler { config, rng, @@ -216,7 +202,7 @@ where fn get_topology<'a>( &self, permit: &'a TopologyReadPermit<'a>, - ) -> Result<&'a NymTopology, PreparationError> { + ) -> Result<&'a NymRouteProvider, PreparationError> { match permit.try_get_valid_topology_ref(&self.config.sender_address, None) { Ok(topology_ref) => Ok(topology_ref), Err(err) => { @@ -233,9 +219,8 @@ where return self.config.primary_packet_size; }; - let primary_count = - msg.required_packets(self.config.primary_packet_size, self.config.num_mix_hops); - let secondary_count = msg.required_packets(secondary_packet, self.config.num_mix_hops); + let primary_count = msg.required_packets(self.config.primary_packet_size); + let secondary_count = msg.required_packets(secondary_packet); trace!("This message would require: {primary_count} primary packets or {secondary_count} secondary packets..."); // if there would be no benefit in using the secondary packet - use the primary (duh) @@ -424,10 +409,9 @@ where message: Vec, lane: TransmissionLane, packet_type: PacketType, - mix_hops: Option, ) -> Result<(), PreparationError> { let message = NymMessage::new_plain(message); - self.try_split_and_send_non_reply_message(message, recipient, lane, packet_type, mix_hops) + self.try_split_and_send_non_reply_message(message, recipient, lane, packet_type) .await } @@ -437,7 +421,6 @@ where recipient: Recipient, lane: TransmissionLane, packet_type: PacketType, - mix_hops: Option, ) -> Result<(), PreparationError> { debug!("Sending non-reply message with packet type {packet_type}"); // TODO: I really dislike existence of this assertion, it implies code has to be re-organised @@ -470,7 +453,6 @@ where &self.config.ack_key, &recipient, 
packet_type, - mix_hops, )?; let real_message = RealMessage::new( @@ -478,8 +460,7 @@ where Some(fragment.fragment_identifier()), ); let delay = prepared_fragment.total_delay; - let pending_ack = - PendingAcknowledgement::new_known(fragment, delay, recipient, mix_hops); + let pending_ack = PendingAcknowledgement::new_known(fragment, delay, recipient); real_messages.push(real_message); pending_acks.push(pending_ack); @@ -496,7 +477,6 @@ where recipient: Recipient, amount: u32, packet_type: PacketType, - mix_hops: Option, ) -> Result<(), PreparationError> { debug!("Sending additional reply SURBs with packet type {packet_type}"); let sender_tag = self.get_or_create_sender_tag(&recipient); @@ -513,7 +493,6 @@ where recipient, TransmissionLane::AdditionalReplySurbs, packet_type, - mix_hops, ) .await?; @@ -530,7 +509,6 @@ where num_reply_surbs: u32, lane: TransmissionLane, packet_type: PacketType, - mix_hops: Option, ) -> Result<(), SurbWrappedPreparationError> { debug!("Sending message with reply SURBs with packet type {packet_type}"); let sender_tag = self.get_or_create_sender_tag(&recipient); @@ -541,7 +519,7 @@ where let message = NymMessage::new_repliable(RepliableMessage::new_data(message, sender_tag, reply_surbs)); - self.try_split_and_send_non_reply_message(message, recipient, lane, packet_type, mix_hops) + self.try_split_and_send_non_reply_message(message, recipient, lane, packet_type) .await?; log::trace!("storing {} reply keys", reply_keys.len()); @@ -555,23 +533,18 @@ where recipient: Recipient, chunk: Fragment, packet_type: PacketType, - mix_hops: Option, ) -> Result { debug!("Sending single chunk with packet type {packet_type}"); let topology_permit = self.topology_access.get_read_permit().await; let topology = self.get_topology(&topology_permit)?; - let prepared_fragment = self - .message_preparer - .prepare_chunk_for_sending( - chunk, - topology, - &self.config.ack_key, - &recipient, - packet_type, - mix_hops, - ) - .unwrap(); + let prepared_fragment = 
self.message_preparer.prepare_chunk_for_sending( + chunk, + topology, + &self.config.ack_key, + &recipient, + packet_type, + )?; Ok(prepared_fragment) } @@ -624,16 +597,13 @@ where Err(err) => return Err(err.return_surbs(vec![reply_surb])), }; - let prepared_fragment = self - .message_preparer - .prepare_reply_chunk_for_sending( - chunk, - topology, - &self.config.ack_key, - reply_surb, - PacketType::Mix, - ) - .unwrap(); + let prepared_fragment = self.message_preparer.prepare_reply_chunk_for_sending( + chunk, + topology, + &self.config.ack_key, + reply_surb, + PacketType::Mix, + )?; Ok(prepared_fragment) } diff --git a/common/client-core/src/client/real_messages_control/real_traffic_stream.rs b/common/client-core/src/client/real_messages_control/real_traffic_stream.rs index d39c1731b6..09b55eb633 100644 --- a/common/client-core/src/client/real_messages_control/real_traffic_stream.rs +++ b/common/client-core/src/client/real_messages_control/real_traffic_stream.rs @@ -230,6 +230,7 @@ where // poisson delay, but is it really a problem? 
let topology_permit = self.topology_access.get_read_permit().await; // the ack is sent back to ourselves (and then ignored) + let topology_ref = match topology_permit.try_get_valid_topology_ref( &self.config.our_full_destination, Some(&self.config.our_full_destination), diff --git a/common/client-core/src/client/replies/reply_controller/mod.rs b/common/client-core/src/client/replies/reply_controller/mod.rs index da58f377c6..4ba9ce3ac9 100644 --- a/common/client-core/src/client/replies/reply_controller/mod.rs +++ b/common/client-core/src/client/replies/reply_controller/mod.rs @@ -516,7 +516,6 @@ where recipient, to_send, nym_sphinx::params::PacketType::Mix, - self.config.reply_surbs.surb_mix_hops, ) .await { diff --git a/common/client-core/src/client/topology_control/accessor.rs b/common/client-core/src/client/topology_control/accessor.rs index 6b12d64562..b1f74c4f24 100644 --- a/common/client-core/src/client/topology_control/accessor.rs +++ b/common/client-core/src/client/topology_control/accessor.rs @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use nym_sphinx::addressing::clients::Recipient; -use nym_sphinx::params::DEFAULT_NUM_MIX_HOPS; -use nym_topology::{NymTopology, NymTopologyError}; +use nym_topology::{NymRouteProvider, NymTopology, NymTopologyError}; use std::ops::Deref; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -17,29 +16,36 @@ pub struct TopologyAccessorInner { // few seconds, while reads are needed every single packet generated. 
// However, proper benchmarks will be needed to determine if `RwLock` is indeed a better // approach than a `Mutex` - topology: RwLock>, + topology: RwLock, } impl TopologyAccessorInner { - fn new() -> Self { + fn new(initial: NymRouteProvider) -> Self { TopologyAccessorInner { controlled_manually: AtomicBool::new(false), released_manual_control: Notify::new(), - topology: RwLock::new(None), + topology: RwLock::new(initial), } } async fn update(&self, new: Option) { - *self.topology.write().await = new; + let mut guard = self.topology.write().await; + + match new { + Some(updated) => { + guard.update(updated); + } + None => guard.clear_topology(), + } } } pub struct TopologyReadPermit<'a> { - permit: RwLockReadGuard<'a, Option>, + permit: RwLockReadGuard<'a, NymRouteProvider>, } impl Deref for TopologyReadPermit<'_> { - type Target = Option; + type Target = NymRouteProvider; fn deref(&self) -> &Self::Target { &self.permit @@ -53,43 +59,31 @@ impl<'a> TopologyReadPermit<'a> { &'a self, ack_recipient: &Recipient, packet_recipient: Option<&Recipient>, - ) -> Result<&'a NymTopology, NymTopologyError> { + ) -> Result<&'a NymRouteProvider, NymTopologyError> { + let route_provider = self.permit.deref(); + let topology = &route_provider.topology; + // 1. Have we managed to get anything from the refresher, i.e. have the nym-api queries gone through? - let topology = self - .permit - .as_ref() - .ok_or(NymTopologyError::EmptyNetworkTopology)?; - - // 2. does it have any mixnode at all? - // 3. does it have any gateways at all? - // 4. does it have a mixnode on each layer? - topology.ensure_can_construct_path_through(DEFAULT_NUM_MIX_HOPS)?; - - // 5. does it contain OUR gateway (so that we could create an ack packet)? - if !topology.gateway_exists(ack_recipient.gateway()) { - return Err(NymTopologyError::NonExistentGatewayError { - identity_key: ack_recipient.gateway().to_base58_string(), - }); - } + topology.ensure_not_empty()?; + + // 2. 
does the topology have a node on each mixing layer? + topology.ensure_minimally_routable()?; + + // 3. does it contain OUR gateway (so that we could create an ack packet)? + let _ = route_provider.egress_by_identity(ack_recipient.gateway())?; - // 6. for our target recipient, does it contain THEIR gateway (so that we could create + // 4. for our target recipient, does it contain THEIR gateway (so that we send anything over?) if let Some(recipient) = packet_recipient { - if !topology.gateway_exists(recipient.gateway()) { - return Err(NymTopologyError::NonExistentGatewayError { - identity_key: recipient.gateway().to_base58_string(), - }); - } + let _ = route_provider.egress_by_identity(recipient.gateway())?; } - Ok(topology) + Ok(route_provider) } } -impl<'a> From>> for TopologyReadPermit<'a> { - fn from(read_permit: RwLockReadGuard<'a, Option>) -> Self { - TopologyReadPermit { - permit: read_permit, - } +impl<'a> From> for TopologyReadPermit<'a> { + fn from(permit: RwLockReadGuard<'a, NymRouteProvider>) -> Self { + TopologyReadPermit { permit } } } @@ -99,9 +93,11 @@ pub struct TopologyAccessor { } impl TopologyAccessor { - pub fn new() -> Self { + pub fn new(ignore_egress_epoch_roles: bool) -> Self { TopologyAccessor { - inner: Arc::new(TopologyAccessorInner::new()), + inner: Arc::new(TopologyAccessorInner::new(NymRouteProvider::new_empty( + ignore_egress_epoch_roles, + ))), } } @@ -121,8 +117,21 @@ impl TopologyAccessor { self.inner.released_manual_control.notified().await } + #[deprecated(note = "use .current_route_provider instead")] pub async fn current_topology(&self) -> Option { - self.inner.topology.read().await.clone() + self.current_route_provider() + .await + .as_ref() + .map(|p| p.topology.clone()) + } + + pub async fn current_route_provider(&self) -> Option> { + let provider = self.inner.topology.read().await; + if provider.topology.is_empty() { + None + } else { + Some(provider) + } } pub async fn manually_change_topology(&self, new_topology: 
NymTopology) { @@ -140,15 +149,11 @@ impl TopologyAccessor { // only used by the client at startup to get a slightly more reasonable error message // (currently displays as unused because health checker is disabled due to required changes) pub async fn ensure_is_routable(&self) -> Result<(), NymTopologyError> { - match self.inner.topology.read().await.deref() { - None => Err(NymTopologyError::EmptyNetworkTopology), - Some(ref topology) => topology.ensure_can_construct_path_through(DEFAULT_NUM_MIX_HOPS), - } - } -} - -impl Default for TopologyAccessor { - fn default() -> Self { - TopologyAccessor::new() + self.inner + .topology + .read() + .await + .topology + .ensure_minimally_routable() } } diff --git a/common/client-core/src/client/topology_control/geo_aware_provider.rs b/common/client-core/src/client/topology_control/geo_aware_provider.rs index d3fabd9a93..459209a977 100644 --- a/common/client-core/src/client/topology_control/geo_aware_provider.rs +++ b/common/client-core/src/client/topology_control/geo_aware_provider.rs @@ -3,7 +3,6 @@ use log::{debug, error}; use nym_explorer_client::{ExplorerClient, PrettyDetailedMixNodeBond}; use nym_network_defaults::var_names::EXPLORER_API; use nym_topology::{ - nym_topology_from_basic_info, provider_trait::{async_trait, TopologyProvider}, NymTopology, }; @@ -15,8 +14,6 @@ use url::Url; pub use nym_country_group::CountryGroup; -const MIN_NODES_PER_LAYER: usize = 1; - fn create_explorer_client() -> Option { let Ok(explorer_api_url) = std::env::var(EXPLORER_API) else { error!("Missing EXPLORER_API"); @@ -63,30 +60,20 @@ fn log_mixnode_distribution(mixnodes: &HashMap>) { } fn check_layer_integrity(topology: NymTopology) -> Result<(), ()> { - let mixes = topology.mixes(); - if mixes.keys().len() < 3 { + if topology.ensure_minimally_routable().is_err() { error!("Layer is missing in topology!"); return Err(()); } - for (layer, mixnodes) in mixes { - debug!("Layer {:?} has {} mixnodes", layer, mixnodes.len()); - if mixnodes.len() 
< MIN_NODES_PER_LAYER { - error!( - "There are only {} mixnodes in layer {:?}", - mixnodes.len(), - layer - ); - return Err(()); - } - } Ok(()) } +#[deprecated(note = "use NymApiTopologyProvider instead as explorer API will soon be removed")] pub struct GeoAwareTopologyProvider { validator_client: nym_validator_client::client::NymApiClient, filter_on: GroupBy, } +#[allow(deprecated)] impl GeoAwareTopologyProvider { pub fn new(mut nym_api_urls: Vec, filter_on: GroupBy) -> GeoAwareTopologyProvider { log::info!( @@ -104,6 +91,15 @@ impl GeoAwareTopologyProvider { } async fn get_topology(&self) -> Option { + let rewarded_set = self + .validator_client + .get_current_rewarded_set() + .await + .inspect_err(|err| error!("failed to get current rewarded set: {err}")) + .ok()?; + + let mut topology = NymTopology::new_empty(rewarded_set); + let mixnodes = match self .validator_client .get_all_basic_active_mixing_assigned_nodes() @@ -187,7 +183,8 @@ impl GeoAwareTopologyProvider { .filter(|m| filtered_mixnode_ids.contains(&m.node_id)) .collect::>(); - let topology = nym_topology_from_basic_info(&mixnodes, &gateways); + topology.add_skimmed_nodes(&mixnodes); + topology.add_skimmed_nodes(&gateways); // TODO: return real error type check_layer_integrity(topology.clone()).ok()?; @@ -196,6 +193,7 @@ impl GeoAwareTopologyProvider { } } +#[allow(deprecated)] #[cfg(not(target_arch = "wasm32"))] #[async_trait] impl TopologyProvider for GeoAwareTopologyProvider { @@ -205,6 +203,7 @@ impl TopologyProvider for GeoAwareTopologyProvider { } } +#[allow(deprecated)] #[cfg(target_arch = "wasm32")] #[async_trait(?Send)] impl TopologyProvider for GeoAwareTopologyProvider { diff --git a/common/client-core/src/client/topology_control/mod.rs b/common/client-core/src/client/topology_control/mod.rs index 4e60278a22..a19497e197 100644 --- a/common/client-core/src/client/topology_control/mod.rs +++ b/common/client-core/src/client/topology_control/mod.rs @@ -19,6 +19,7 @@ mod accessor; pub mod 
geo_aware_provider; pub mod nym_api_provider; +#[allow(deprecated)] pub use geo_aware_provider::GeoAwareTopologyProvider; pub use nym_api_provider::{Config as NymApiTopologyProviderConfig, NymApiTopologyProvider}; pub use nym_topology::provider_trait::TopologyProvider; @@ -27,7 +28,7 @@ pub use nym_topology::provider_trait::TopologyProvider; const MAX_FAILURE_COUNT: usize = 10; pub struct TopologyRefresherConfig { - refresh_rate: Duration, + pub refresh_rate: Duration, } impl TopologyRefresherConfig { @@ -96,28 +97,24 @@ impl TopologyRefresher { self.topology_accessor.ensure_is_routable().await } - pub async fn ensure_contains_gateway( + pub async fn ensure_contains_routable_egress( &self, - gateway: &NodeIdentity, + egress: NodeIdentity, ) -> Result<(), NymTopologyError> { let topology = self .topology_accessor - .current_topology() + .current_route_provider() .await .ok_or(NymTopologyError::EmptyNetworkTopology)?; - if !topology.gateway_exists(gateway) { - return Err(NymTopologyError::NonExistentGatewayError { - identity_key: gateway.to_base58_string(), - }); - } + let _ = topology.egress_by_identity(egress)?; Ok(()) } pub async fn wait_for_gateway( &mut self, - gateway: &NodeIdentity, + gateway: NodeIdentity, timeout_duration: Duration, ) -> Result<(), NymTopologyError> { info!( @@ -135,7 +132,7 @@ impl TopologyRefresher { }) } _ = self.try_refresh() => { - if self.ensure_contains_gateway(gateway).await.is_ok() { + if self.ensure_contains_routable_egress(gateway).await.is_ok() { return Ok(()) } info!("gateway '{gateway}' is still not online..."); diff --git a/common/client-core/src/client/topology_control/nym_api_provider.rs b/common/client-core/src/client/topology_control/nym_api_provider.rs index 3b87086f59..30d2461abd 100644 --- a/common/client-core/src/client/topology_control/nym_api_provider.rs +++ b/common/client-core/src/client/topology_control/nym_api_provider.rs @@ -4,32 +4,39 @@ use async_trait::async_trait; use log::{debug, error, warn}; use 
nym_topology::provider_trait::TopologyProvider; -use nym_topology::{NymTopology, NymTopologyError}; +use nym_topology::NymTopology; use nym_validator_client::UserAgent; use rand::prelude::SliceRandom; use rand::thread_rng; +use std::cmp::min; use url::Url; -// the same values as our current (10.06.24) blacklist -pub const DEFAULT_MIN_MIXNODE_PERFORMANCE: u8 = 50; -pub const DEFAULT_MIN_GATEWAY_PERFORMANCE: u8 = 50; - #[derive(Debug)] pub struct Config { pub min_mixnode_performance: u8, pub min_gateway_performance: u8, + pub use_extended_topology: bool, + pub ignore_egress_epoch_role: bool, } -impl Default for Config { - fn default() -> Self { - // old values that decided on blacklist membership +impl From for Config { + fn from(value: nym_client_core_config_types::Topology) -> Self { Config { - min_mixnode_performance: DEFAULT_MIN_MIXNODE_PERFORMANCE, - min_gateway_performance: DEFAULT_MIN_GATEWAY_PERFORMANCE, + min_mixnode_performance: value.minimum_mixnode_performance, + min_gateway_performance: value.minimum_gateway_performance, + use_extended_topology: value.use_extended_topology, + ignore_egress_epoch_role: value.ignore_egress_epoch_role, } } } +impl Config { + // if we're using 'extended' topology, filter the nodes based on the lowest set performance + fn min_node_performance(&self) -> u8 { + min(self.min_mixnode_performance, self.min_gateway_performance) + } +} + pub struct NymApiTopologyProvider { config: Config, @@ -39,7 +46,11 @@ pub struct NymApiTopologyProvider { } impl NymApiTopologyProvider { - pub fn new(config: Config, mut nym_api_urls: Vec, user_agent: Option) -> Self { + pub fn new( + config: impl Into, + mut nym_api_urls: Vec, + user_agent: Option, + ) -> Self { nym_api_urls.shuffle(&mut thread_rng()); let validator_client = if let Some(user_agent) = user_agent { @@ -52,7 +63,7 @@ impl NymApiTopologyProvider { }; NymApiTopologyProvider { - config, + config: config.into(), validator_client, nym_api_urls, currently_used_api: 0, @@ -70,70 +81,69 @@ 
impl NymApiTopologyProvider { .change_nym_api(self.nym_api_urls[self.currently_used_api].clone()) } - /// Verifies whether nodes a reasonably distributed among all mix layers. - /// - /// In ideal world we would have 33% nodes on layer 1, 33% on layer 2 and 33% on layer 3. - /// However, this is a rather unrealistic expectation, instead we check whether there exists - /// a layer with more than 66% of nodes or with fewer than 15% and if so, we trigger a failure. - /// - /// # Arguments - /// - /// * `topology`: active topology constructed from validator api data - fn check_layer_distribution( - &self, - active_topology: &NymTopology, - ) -> Result<(), NymTopologyError> { - let lower_threshold = 0.15; - let upper_threshold = 0.66; - active_topology.ensure_even_layer_distribution(lower_threshold, upper_threshold) - } - async fn get_current_compatible_topology(&mut self) -> Option { - let mixnodes = match self + let rewarded_set = self .validator_client - .get_all_basic_active_mixing_assigned_nodes() + .get_current_rewarded_set() .await - { - Err(err) => { - error!("failed to get network mixnodes - {err}"); - return None; - } - Ok(mixes) => mixes, - }; - - let gateways = match self - .validator_client - .get_all_basic_entry_assigned_nodes() - .await - { - Err(err) => { - error!("failed to get network gateways - {err}"); - return None; - } - Ok(gateways) => gateways, + .inspect_err(|err| error!("failed to get current rewarded set: {err}")) + .ok()?; + + let mut topology = NymTopology::new_empty(rewarded_set); + + if self.config.use_extended_topology { + let all_nodes = self + .validator_client + .get_all_basic_nodes() + .await + .inspect_err(|err| error!("failed to get network nodes: {err}")) + .ok()?; + + debug!( + "there are {} nodes on the network (before filtering)", + all_nodes.len() + ); + topology.add_additional_nodes(all_nodes.iter().filter(|n| { + n.performance.round_to_integer() >= self.config.min_node_performance() + })); + } else { + // if we're not using 
extended topology, we're only getting active set mixnodes and gateways + + let mixnodes = self + .validator_client + .get_all_basic_active_mixing_assigned_nodes() + .await + .inspect_err(|err| error!("failed to get network mixnodes: {err}")) + .ok()?; + + // TODO: we really should be getting ACTIVE gateways only + let gateways = self + .validator_client + .get_all_basic_entry_assigned_nodes() + .await + .inspect_err(|err| error!("failed to get network gateways: {err}")) + .ok()?; + + debug!( + "there are {} mixnodes and {} gateways in total (before performance filtering)", + mixnodes.len(), + gateways.len() + ); + + topology.add_additional_nodes(mixnodes.iter().filter(|m| { + m.performance.round_to_integer() >= self.config.min_mixnode_performance + })); + topology.add_additional_nodes(gateways.iter().filter(|m| { + m.performance.round_to_integer() >= self.config.min_gateway_performance + })); }; - debug!( - "there are {} mixnodes and {} gateways in total (before performance filtering)", - mixnodes.len(), - gateways.len() - ); - - let topology = NymTopology::from_unordered( - mixnodes.iter().filter(|m| { - m.performance.round_to_integer() >= self.config.min_mixnode_performance - }), - gateways.iter().filter(|g| { - g.performance.round_to_integer() >= self.config.min_gateway_performance - }), - ); - if let Err(err) = self.check_layer_distribution(&topology) { - warn!("The current filtered active topology has extremely skewed layer distribution. 
It cannot be used: {err}"); - self.use_next_nym_api(); - None - } else { - Some(topology) + if !topology.is_minimally_routable() { + error!("the current filtered active topology can't be used to construct any packets"); + return None; } + + Some(topology) } } @@ -142,7 +152,11 @@ impl NymApiTopologyProvider { #[async_trait] impl TopologyProvider for NymApiTopologyProvider { async fn get_new_topology(&mut self) -> Option { - self.get_current_compatible_topology().await + let Some(topology) = self.get_current_compatible_topology().await else { + self.use_next_nym_api(); + return None; + }; + Some(topology) } } @@ -150,6 +164,10 @@ impl TopologyProvider for NymApiTopologyProvider { #[async_trait(?Send)] impl TopologyProvider for NymApiTopologyProvider { async fn get_new_topology(&mut self) -> Option { - self.get_current_compatible_topology().await + let Some(topology) = self.get_current_compatible_topology().await else { + self.use_next_nym_api(); + return None; + }; + Some(topology) } } diff --git a/common/client-core/src/error.rs b/common/client-core/src/error.rs index 0cc9e04d75..5aaedc84ae 100644 --- a/common/client-core/src/error.rs +++ b/common/client-core/src/error.rs @@ -4,8 +4,8 @@ use crate::client::mix_traffic::transceiver::ErasedGatewayError; use nym_crypto::asymmetric::identity::Ed25519RecoveryError; use nym_gateway_client::error::GatewayClientError; -use nym_topology::gateway::GatewayConversionError; -use nym_topology::NymTopologyError; +use nym_topology::node::RoutingNodeError; +use nym_topology::{NodeId, NymTopologyError}; use nym_validator_client::ValidatorClientError; use std::error::Error; use std::path::PathBuf; @@ -74,10 +74,10 @@ pub enum ClientCoreError { #[error("the gateway id is invalid - {0}")] UnableToCreatePublicKeyFromGatewayId(Ed25519RecoveryError), - #[error("The gateway is malformed: {source}")] + #[error("the node is malformed: {source}")] MalformedGateway { #[from] - source: GatewayConversionError, + source: Box, }, #[error("failed 
to establish connection to gateway: {source}")] @@ -159,6 +159,9 @@ pub enum ClientCoreError { #[error("the specified gateway '{gateway}' does not support the wss protocol")] UnsupportedWssProtocol { gateway: String }, + #[error("node {id} ({identity}) does not support mixnet entry mode")] + UnsupportedEntry { id: NodeId, identity: String }, + #[error( "failed to load custom topology using path '{}'. detailed message: {source}", file_path.display() )] diff --git a/common/client-core/src/init/helpers.rs b/common/client-core/src/init/helpers.rs index 16aa042045..ea0103b0e6 100644 --- a/common/client-core/src/init/helpers.rs +++ b/common/client-core/src/init/helpers.rs @@ -7,7 +7,7 @@ use futures::{SinkExt, StreamExt}; use log::{debug, info, trace, warn}; use nym_crypto::asymmetric::identity; use nym_gateway_client::GatewayClient; -use nym_topology::gateway; +use nym_topology::node::RoutingNode; use nym_validator_client::client::IdentityKeyRef; use nym_validator_client::UserAgent; use rand::{seq::SliceRandom, Rng}; @@ -15,6 +15,7 @@ use std::{sync::Arc, time::Duration}; use tungstenite::Message; use url::Url; +use nym_topology::NodeId; #[cfg(not(target_arch = "wasm32"))] use tokio::net::TcpStream; #[cfg(not(target_arch = "wasm32"))] @@ -25,7 +26,6 @@ use tokio::time::Instant; use tokio_tungstenite::connect_async; #[cfg(not(target_arch = "wasm32"))] use tokio_tungstenite::{MaybeTlsStream, WebSocketStream}; - #[cfg(target_arch = "wasm32")] use wasm_utils::websocket::JSWebsocket; #[cfg(target_arch = "wasm32")] @@ -48,22 +48,30 @@ const PING_TIMEOUT: Duration = Duration::from_millis(1000); // The abstraction that some of these helpers use pub trait ConnectableGateway { - fn identity(&self) -> &identity::PublicKey; - fn clients_address(&self) -> String; + fn node_id(&self) -> NodeId; + fn identity(&self) -> identity::PublicKey; + fn clients_address(&self, prefer_ipv6: bool) -> Option; fn is_wss(&self) -> bool; } -impl ConnectableGateway for gateway::LegacyNode { - fn 
identity(&self) -> &identity::PublicKey { - self.identity() +impl ConnectableGateway for RoutingNode { + fn node_id(&self) -> NodeId { + self.node_id + } + + fn identity(&self) -> identity::PublicKey { + self.identity_key } - fn clients_address(&self) -> String { - self.clients_address() + fn clients_address(&self, prefer_ipv6: bool) -> Option { + self.ws_entry_address(prefer_ipv6) } fn is_wss(&self) -> bool { - self.clients_wss_port.is_some() + self.entry + .as_ref() + .map(|e| e.clients_wss_port.is_some()) + .unwrap_or_default() } } @@ -83,7 +91,7 @@ pub async fn current_gateways( nym_apis: &[Url], user_agent: Option, minimum_performance: u8, -) -> Result, ClientCoreError> { +) -> Result, ClientCoreError> { let nym_api = nym_apis .choose(rng) .ok_or(ClientCoreError::ListOfNymApisIsEmpty)?; @@ -104,7 +112,7 @@ pub async fn current_gateways( .iter() .filter(|g| g.performance.round_to_integer() >= minimum_performance) .filter_map(|gateway| gateway.try_into().ok()) - .collect::>(); + .collect::>(); log::debug!("After checking validity: {}", valid_gateways.len()); log::trace!("Valid gateways: {:#?}", valid_gateways); @@ -134,7 +142,12 @@ async fn measure_latency(gateway: &G) -> Result, Client where G: ConnectableGateway, { - let addr = gateway.clients_address(); + let Some(addr) = gateway.clients_address(false) else { + return Err(ClientCoreError::UnsupportedEntry { + id: gateway.node_id(), + identity: gateway.identity().to_string(), + }); + }; trace!( "establishing connection to {} ({addr})...", gateway.identity(), @@ -205,7 +218,7 @@ pub async fn choose_gateway_by_latency( let gateways_with_latency = Arc::new(tokio::sync::Mutex::new(Vec::new())); futures::stream::iter(gateways) .for_each_concurrent(CONCURRENT_GATEWAYS_MEASURED, |gateway| async { - let id = *gateway.identity(); + let id = gateway.identity(); trace!("measuring latency to {id}..."); match measure_latency(gateway).await { Ok(with_latency) => { @@ -252,9 +265,9 @@ fn filter_by_tls( pub(super) fn 
uniformly_random_gateway( rng: &mut R, - gateways: &[gateway::LegacyNode], + gateways: &[RoutingNode], must_use_tls: bool, -) -> Result { +) -> Result { filter_by_tls(gateways, must_use_tls)? .choose(rng) .ok_or(ClientCoreError::NoGatewaysOnNetwork) @@ -263,9 +276,9 @@ pub(super) fn uniformly_random_gateway( pub(super) fn get_specified_gateway( gateway_identity: IdentityKeyRef, - gateways: &[gateway::LegacyNode], + gateways: &[RoutingNode], must_use_tls: bool, -) -> Result { +) -> Result { log::debug!("Requesting specified gateway: {}", gateway_identity); let user_gateway = identity::PublicKey::from_base58_string(gateway_identity) .map_err(ClientCoreError::UnableToCreatePublicKeyFromGatewayId)?; @@ -275,7 +288,14 @@ pub(super) fn get_specified_gateway( .find(|gateway| gateway.identity_key == user_gateway) .ok_or_else(|| ClientCoreError::NoGatewayWithId(gateway_identity.to_string()))?; - if must_use_tls && gateway.clients_wss_port.is_none() { + let Some(entry_details) = gateway.entry.as_ref() else { + return Err(ClientCoreError::UnsupportedEntry { + id: gateway.node_id, + identity: gateway.identity().to_string(), + }); + }; + + if must_use_tls && entry_details.clients_wss_port.is_none() { return Err(ClientCoreError::UnsupportedWssProtocol { gateway: gateway_identity.to_string(), }); diff --git a/common/client-core/src/init/mod.rs b/common/client-core/src/init/mod.rs index 8e93babbf2..4954efbb3d 100644 --- a/common/client-core/src/init/mod.rs +++ b/common/client-core/src/init/mod.rs @@ -19,7 +19,7 @@ use crate::init::types::{ use nym_client_core_gateways_storage::GatewaysDetailsStore; use nym_client_core_gateways_storage::{GatewayDetails, GatewayRegistration}; use nym_gateway_client::client::InitGatewayClient; -use nym_topology::gateway; +use nym_topology::node::RoutingNode; use rand::rngs::OsRng; use rand::{CryptoRng, RngCore}; use serde::Serialize; @@ -50,7 +50,7 @@ async fn setup_new_gateway( key_store: &K, details_store: &D, selection_specification: 
GatewaySelectionSpecification, - available_gateways: Vec, + available_gateways: Vec, ) -> Result where K: KeyStore, diff --git a/common/client-core/src/init/types.rs b/common/client-core/src/init/types.rs index 1aa4a0d24b..35a5a5a149 100644 --- a/common/client-core/src/init/types.rs +++ b/common/client-core/src/init/types.rs @@ -13,7 +13,7 @@ use nym_crypto::asymmetric::identity; use nym_gateway_client::client::InitGatewayClient; use nym_gateway_requests::shared_key::SharedGatewayKey; use nym_sphinx::addressing::clients::Recipient; -use nym_topology::gateway; +use nym_topology::node::RoutingNode; use nym_validator_client::client::IdentityKey; use nym_validator_client::nyxd::AccountId; use serde::Serialize; @@ -38,16 +38,23 @@ pub enum SelectedGateway { impl SelectedGateway { pub fn from_topology_node( - node: gateway::LegacyNode, + node: RoutingNode, must_use_tls: bool, ) -> Result { + // for now, let's use 'old' behaviour, if you want to change it, you can pass it up the enum stack yourself : ) + let prefer_ipv6 = false; + let gateway_listener = if must_use_tls { - node.clients_address_tls() + node.ws_entry_address_tls() .ok_or(ClientCoreError::UnsupportedWssProtocol { gateway: node.identity_key.to_base58_string(), })? } else { - node.clients_address() + node.ws_entry_address(prefer_ipv6) + .ok_or(ClientCoreError::UnsupportedEntry { + id: node.node_id, + identity: node.identity_key.to_base58_string(), + })? 
}; let gateway_listener = @@ -200,7 +207,7 @@ pub enum GatewaySetup { specification: GatewaySelectionSpecification, // TODO: seems to be a bit inefficient to pass them by value - available_gateways: Vec, + available_gateways: Vec, }, ReuseConnection { diff --git a/common/client-core/src/lib.rs b/common/client-core/src/lib.rs index 12ea3f7d5c..5154d192f1 100644 --- a/common/client-core/src/lib.rs +++ b/common/client-core/src/lib.rs @@ -14,8 +14,7 @@ pub mod error; pub mod init; pub use nym_topology::{ - HardcodedTopologyProvider, NymTopology, NymTopologyError, SerializableNymTopology, - SerializableTopologyError, TopologyProvider, + HardcodedTopologyProvider, NymRouteProvider, NymTopology, NymTopologyError, TopologyProvider, }; #[cfg(target_arch = "wasm32")] diff --git a/common/client-libs/validator-client/src/client.rs b/common/client-libs/validator-client/src/client.rs index cae61b0d07..8280701644 100644 --- a/common/client-libs/validator-client/src/client.rs +++ b/common/client-libs/validator-client/src/client.rs @@ -32,10 +32,10 @@ use time::Date; use url::Url; pub use crate::nym_api::NymApiClientExt; +use nym_mixnet_contract_common::EpochRewardedSet; pub use nym_mixnet_contract_common::{ mixnode::MixNodeDetails, GatewayBond, IdentityKey, IdentityKeyRef, NodeId, NymNodeDetails, }; - // re-export the type to not break existing imports pub use crate::coconut::EcashApiClient; @@ -367,6 +367,10 @@ impl NymApiClient { Ok(self.nym_api.get_basic_gateways().await?.nodes) } + pub async fn get_current_rewarded_set(&self) -> Result { + Ok(self.nym_api.get_rewarded_set().await?.into()) + } + /// retrieve basic information for nodes are capable of operating as an entry gateway /// this includes legacy gateways and nym-nodes pub async fn get_all_basic_entry_assigned_nodes( diff --git a/common/client-libs/validator-client/src/nym_api/mod.rs b/common/client-libs/validator-client/src/nym_api/mod.rs index ebfb85ba97..730c70c860 100644 --- 
a/common/client-libs/validator-client/src/nym_api/mod.rs +++ b/common/client-libs/validator-client/src/nym_api/mod.rs @@ -13,7 +13,7 @@ use nym_api_requests::ecash::models::{ use nym_api_requests::ecash::VerificationKeyResponse; use nym_api_requests::models::{ AnnotationResponse, ApiHealthResponse, LegacyDescribedMixNode, NodePerformanceResponse, - NodeRefreshBody, NymNodeDescription, + NodeRefreshBody, NymNodeDescription, RewardedSetResponse, }; use nym_api_requests::nym_nodes::PaginatedCachedNodesResponse; use nym_api_requests::pagination::PaginatedResponse; @@ -235,6 +235,15 @@ pub trait NymApiClientExt: ApiClient { .await } + #[instrument(level = "debug", skip(self))] + async fn get_rewarded_set(&self) -> Result { + self.get_json( + &[routes::API_VERSION, "nym-nodes", "rewarded-set"], + NO_PARAMS, + ) + .await + } + /// retrieve basic information for nodes are capable of operating as an entry gateway /// this includes legacy gateways and nym-nodes #[instrument(level = "debug", skip(self))] @@ -912,6 +921,7 @@ pub trait NymApiClientExt: ApiClient { .await } + #[instrument(level = "debug", skip(self))] async fn force_refresh_describe_cache( &self, request: &NodeRefreshBody, @@ -924,6 +934,7 @@ pub trait NymApiClientExt: ApiClient { .await } + #[instrument(level = "debug", skip(self))] async fn issued_ticketbooks_for( &self, expiration_date: Date, @@ -940,6 +951,7 @@ pub trait NymApiClientExt: ApiClient { .await } + #[instrument(level = "debug", skip(self))] async fn issued_ticketbooks_challenge( &self, expiration_date: Date, diff --git a/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs b/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs index 2e1095b3b8..5060fe3df4 100644 --- a/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs +++ b/common/client-libs/validator-client/src/nyxd/contract_traits/mixnet_query_client.rs @@ -26,10 +26,10 @@ use 
nym_mixnet_contract_common::{ reward_params::{Performance, RewardingParams}, rewarding::{EstimatedCurrentEpochRewardResponse, PendingRewardResponse}, ContractBuildInformation, ContractState, ContractStateParams, CurrentIntervalResponse, - CurrentNymNodeVersionResponse, Delegation, EpochEventId, EpochStatus, GatewayBond, - GatewayBondResponse, GatewayOwnershipResponse, HistoricalNymNodeVersionEntry, IdentityKey, - IdentityKeyRef, IntervalEventId, MixNodeBond, MixNodeDetails, MixOwnershipResponse, - MixnodeDetailsByIdentityResponse, MixnodeDetailsResponse, NodeId, + CurrentNymNodeVersionResponse, Delegation, EpochEventId, EpochRewardedSet, EpochStatus, + GatewayBond, GatewayBondResponse, GatewayOwnershipResponse, HistoricalNymNodeVersionEntry, + IdentityKey, IdentityKeyRef, IntervalEventId, MixNodeBond, MixNodeDetails, + MixOwnershipResponse, MixnodeDetailsByIdentityResponse, MixnodeDetailsResponse, NodeId, NumberOfPendingEventsResponse, NymNodeBond, NymNodeDetails, NymNodeVersionHistoryResponse, PagedAllDelegationsResponse, PagedDelegatorDelegationsResponse, PagedGatewayResponse, PagedMixnodeBondsResponse, PagedNodeDelegationsResponse, PendingEpochEvent, @@ -670,7 +670,7 @@ impl PagedMixnetQueryClient for T where T: MixnetQueryClient {} #[cfg_attr(target_arch = "wasm32", async_trait(?Send))] #[cfg_attr(not(target_arch = "wasm32"), async_trait)] pub trait MixnetQueryClientExt: MixnetQueryClient { - async fn get_rewarded_set(&self) -> Result { + async fn get_rewarded_set(&self) -> Result { let error_response = |message| Err(NyxdError::extension_query_failure("mixnet", message)); let metadata = self.get_rewarded_set_metadata().await?; @@ -711,13 +711,16 @@ pub trait MixnetQueryClientExt: MixnetQueryClient { return error_response("the nodes assigned for 'standby' returned unexpected epoch_id"); } - Ok(RewardedSet { - entry_gateways: entry.nodes, - exit_gateways: exit.nodes, - layer1: layer1.nodes, - layer2: layer2.nodes, - layer3: layer3.nodes, - standby: standby.nodes, 
+ Ok(EpochRewardedSet { + epoch_id: expected_epoch_id, + assignment: RewardedSet { + entry_gateways: entry.nodes, + exit_gateways: exit.nodes, + layer1: layer1.nodes, + layer2: layer2.nodes, + layer3: layer3.nodes, + standby: standby.nodes, + }, }) } } diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs index fb03a1034b..bbae58a935 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs @@ -7,6 +7,7 @@ use crate::constants::{TOKEN_SUPPLY, UNIT_DELEGATION_BASE}; use crate::error::MixnetContractError; use crate::helpers::IntoBaseDecimal; +use crate::nym_node::Role; use crate::reward_params::{NodeRewardingParameters, RewardingParams}; use crate::rewarding::helpers::truncate_reward; use crate::rewarding::RewardDistribution; @@ -611,6 +612,16 @@ pub enum LegacyMixLayer { Three = 3, } +impl From for Role { + fn from(layer: LegacyMixLayer) -> Self { + match layer { + LegacyMixLayer::One => Role::Layer1, + LegacyMixLayer::Two => Role::Layer2, + LegacyMixLayer::Three => Role::Layer3, + } + } +} + impl From for String { fn from(layer: LegacyMixLayer) -> Self { (layer as u8).to_string() diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs index eb5f972e4b..d722f5af93 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs @@ -3,6 +3,7 @@ use crate::config_score::{ConfigScoreParams, OutdatedVersionWeights, VersionScoreFormulaParams}; use crate::nym_node::Role; +use crate::EpochId; use contracts_common::Percent; use cosmwasm_schema::cw_serde; use cosmwasm_std::Coin; @@ -32,6 +33,23 @@ impl RoleAssignment { } } +#[cw_serde] +#[derive(Default)] +pub struct EpochRewardedSet { + pub epoch_id: EpochId, + + pub assignment: RewardedSet, +} + 
+impl From<(EpochId, RewardedSet)> for EpochRewardedSet { + fn from((epoch_id, assignment): (EpochId, RewardedSet)) -> Self { + EpochRewardedSet { + epoch_id, + assignment, + } + } +} + #[cw_serde] #[derive(Default)] pub struct RewardedSet { @@ -69,6 +87,29 @@ impl RewardedSet { pub fn rewarded_set_size(&self) -> usize { self.active_set_size() + self.standby.len() } + + pub fn get_role(&self, node_id: NodeId) -> Option { + // given each role has ~100 entries in them, doing linear lookup with vec should be fine + if self.entry_gateways.contains(&node_id) { + return Some(Role::EntryGateway); + } + if self.exit_gateways.contains(&node_id) { + return Some(Role::ExitGateway); + } + if self.layer1.contains(&node_id) { + return Some(Role::Layer1); + } + if self.layer2.contains(&node_id) { + return Some(Role::Layer2); + } + if self.layer3.contains(&node_id) { + return Some(Role::Layer3); + } + if self.standby.contains(&node_id) { + return Some(Role::Standby); + } + None + } } #[cw_serde] diff --git a/common/ip-packet-requests/src/v7/conversion.rs b/common/ip-packet-requests/src/v7/conversion.rs index 16923b7a3b..04b0244b05 100644 --- a/common/ip-packet-requests/src/v7/conversion.rs +++ b/common/ip-packet-requests/src/v7/conversion.rs @@ -63,6 +63,7 @@ impl From for v7::request::StaticConnectReque } } +#[allow(deprecated)] impl From for v7::request::DynamicConnectRequest { fn from(dynamic_connect_request: v6::request::DynamicConnectRequest) -> Self { Self { diff --git a/common/ip-packet-requests/src/v7/request.rs b/common/ip-packet-requests/src/v7/request.rs index dc125068b7..43b71bb362 100644 --- a/common/ip-packet-requests/src/v7/request.rs +++ b/common/ip-packet-requests/src/v7/request.rs @@ -51,6 +51,7 @@ impl IpPacketRequest { ) } + #[allow(deprecated)] pub fn new_dynamic_connect_request( reply_to: Recipient, reply_to_hops: Option, @@ -285,6 +286,9 @@ pub struct DynamicConnectRequest { // The number of mix node hops that responses should take, in addition to the entry 
and exit // node. Zero means only client -> entry -> exit -> client. + #[deprecated( + note = "clients can no longer control number of hops to use. this field is scheduled for removal in V8" + )] pub reply_to_hops: Option, // The average delay at each mix node, in milliseconds. Currently this is not supported by the diff --git a/common/node-tester-utils/src/message.rs b/common/node-tester-utils/src/message.rs index b17515d5b6..8f09095c22 100644 --- a/common/node-tester-utils/src/message.rs +++ b/common/node-tester-utils/src/message.rs @@ -2,10 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use crate::error::NetworkTestingError; -use crate::node::TestableNode; -use crate::NodeId; +use crate::node::{NodeType, TestableNode}; use nym_sphinx::message::NymMessage; -use nym_topology::{gateway, mix}; +use nym_topology::node::RoutingNode; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; @@ -26,73 +25,76 @@ pub struct TestMessage { } impl TestMessage { - pub fn new>(node: N, msg_id: u32, total_msgs: u32, ext: T) -> Self { + pub fn new(tested_node: TestableNode, msg_id: u32, total_msgs: u32, ext: T) -> Self { TestMessage { - tested_node: node.into(), + tested_node, msg_id, total_msgs, ext, } } - pub fn new_mix(node: &mix::LegacyNode, msg_id: u32, total_msgs: u32, ext: T) -> Self { - Self::new(node, msg_id, total_msgs, ext) + pub fn new_mix(node: &RoutingNode, msg_id: u32, total_msgs: u32, ext: T) -> Self { + Self::new( + TestableNode::new_routing(node, NodeType::Mixnode), + msg_id, + total_msgs, + ext, + ) } - // pub fn new_gateway(node: &gateway::Node, msg_id: u32, total_msgs: u32, ext: T) -> Self { - // Self::new(node, msg_id, total_msgs, ext) - // } - - pub fn new_serialized( - node: N, - msg_id: u32, - total_msgs: u32, - ext: T, - ) -> Result, NetworkTestingError> - where - N: Into, - T: Serialize, - { - Self::new(node, msg_id, total_msgs, ext).as_bytes() + pub fn new_gateway(node: &RoutingNode, msg_id: u32, total_msgs: u32, ext: T) -> Self { + 
Self::new( + TestableNode::new_routing(node, NodeType::Gateway), + msg_id, + total_msgs, + ext, + ) } - pub fn new_plaintexts( - node: &N, + pub fn new_plaintexts( + node: TestableNode, total_msgs: u32, ext: T, ) -> Result>, NetworkTestingError> where - for<'a> &'a N: Into, T: Serialize + Clone, { let mut msgs = Vec::with_capacity(total_msgs as usize); for msg_id in 1..=total_msgs { - msgs.push(Self::new(node, msg_id, total_msgs, ext.clone()).as_bytes()?) + msgs.push(Self::new(node.clone(), msg_id, total_msgs, ext.clone()).as_bytes()?) } Ok(msgs) } pub fn mix_plaintexts( - node: &mix::LegacyNode, + node: &RoutingNode, total_msgs: u32, ext: T, ) -> Result>, NetworkTestingError> where T: Serialize + Clone, { - Self::new_plaintexts(node, total_msgs, ext) + Self::new_plaintexts( + TestableNode::new_routing(node, NodeType::Mixnode), + total_msgs, + ext, + ) } pub fn legacy_gateway_plaintexts( - node: &gateway::LegacyNode, - node_id: NodeId, + node: &RoutingNode, total_msgs: u32, ext: T, ) -> Result>, NetworkTestingError> where T: Serialize + Clone, { - Self::new_plaintexts(&(node, node_id), total_msgs, ext) + Self::new_plaintexts( + TestableNode::new_routing(node, NodeType::Gateway), + total_msgs, + ext, + ) } pub fn as_json_string(&self) -> Result diff --git a/common/node-tester-utils/src/node.rs b/common/node-tester-utils/src/node.rs index d60623a6e2..3ec4d80670 100644 --- a/common/node-tester-utils/src/node.rs +++ b/common/node-tester-utils/src/node.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::NodeId; -use nym_topology::{gateway, mix}; +use nym_topology::node::RoutingNode; use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; @@ -24,6 +24,14 @@ impl TestableNode { } } + pub fn new_routing(routing_node: &RoutingNode, typ: NodeType) -> Self { + TestableNode::new( + routing_node.identity_key.to_base58_string(), + typ, + routing_node.node_id, + ) + } + pub fn new_mixnode(encoded_identity: String, node_id: NodeId) -> Self { 
TestableNode::new(encoded_identity, NodeType::Mixnode, node_id) } @@ -37,38 +45,6 @@ impl TestableNode { } } -impl<'a> From<&'a mix::LegacyNode> for TestableNode { - fn from(value: &'a mix::LegacyNode) -> Self { - TestableNode { - encoded_identity: value.identity_key.to_base58_string(), - typ: NodeType::Mixnode, - node_id: value.mix_id, - } - } -} - -impl<'a> From<(&'a gateway::LegacyNode, NodeId)> for TestableNode { - fn from((gateway, node_id): (&'a gateway::LegacyNode, NodeId)) -> Self { - (&(gateway, node_id)).into() - } -} - -impl<'a> From<&'a (gateway::LegacyNode, NodeId)> for TestableNode { - fn from((gateway, node_id): &'a (gateway::LegacyNode, NodeId)) -> Self { - (gateway, *node_id).into() - } -} - -impl<'a, 'b> From<&'a (&'b gateway::LegacyNode, NodeId)> for TestableNode { - fn from((gateway, node_id): &'a (&'b gateway::LegacyNode, NodeId)) -> Self { - TestableNode { - encoded_identity: gateway.identity_key.to_base58_string(), - typ: NodeType::Gateway, - node_id: *node_id, - } - } -} - impl Display for TestableNode { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( diff --git a/common/node-tester-utils/src/tester.rs b/common/node-tester-utils/src/tester.rs index d60ec55e67..211eb988db 100644 --- a/common/node-tester-utils/src/tester.rs +++ b/common/node-tester-utils/src/tester.rs @@ -2,21 +2,22 @@ // SPDX-License-Identifier: Apache-2.0 use crate::error::NetworkTestingError; -use crate::Empty; -use crate::NodeId; use crate::TestMessage; use nym_sphinx::acknowledgements::AckKey; use nym_sphinx::addressing::clients::Recipient; use nym_sphinx::message::NymMessage; -use nym_sphinx::params::{PacketSize, DEFAULT_NUM_MIX_HOPS}; +use nym_sphinx::params::PacketSize; use nym_sphinx::preparer::{FragmentPreparer, PreparedFragment}; use nym_sphinx_params::PacketType; -use nym_topology::{gateway, mix, NymTopology}; +use nym_topology::node::RoutingNode; +use nym_topology::{NymRouteProvider, NymTopology, Role}; use rand::{CryptoRng, Rng}; use 
serde::Serialize; use std::sync::Arc; use std::time::Duration; +pub use nym_topology::node::LegacyMixLayer; + pub struct NodeTester { rng: R, @@ -38,10 +39,6 @@ pub struct NodeTester { /// Average delay an acknowledgement packet is going to get delay at a single mixnode. average_ack_delay: Duration, - /// Number of mix hops each packet ('real' message, ack, reply) is expected to take. - /// Note that it does not include gateway hops. - num_mix_hops: u8, - // while acks are going to be ignored they still need to be constructed // so that the gateway would be able to correctly process and forward the message ack_key: Arc, @@ -70,41 +67,27 @@ where deterministic_route_selection, average_packet_delay, average_ack_delay, - num_mix_hops: DEFAULT_NUM_MIX_HOPS, ack_key, } } - /// Allows setting non-default number of expected mix hops in the network. - #[allow(dead_code)] - pub fn with_mix_hops(mut self, hops: u8) -> Self { - self.num_mix_hops = hops; - self - } - - pub fn testable_mix_topology(&self, node: &mix::LegacyNode) -> NymTopology { + pub fn testable_mix_topology(&self, layer: LegacyMixLayer, node: &RoutingNode) -> NymTopology { let mut topology = self.base_topology.clone(); - topology.set_mixes_in_layer(node.layer as u8, vec![node.clone()]); + topology.set_testable_node(layer.into(), node.clone()); topology } - pub fn testable_gateway_topology(&self, gateway: &gateway::LegacyNode) -> NymTopology { + pub fn testable_gateway_topology(&self, node: &RoutingNode) -> NymTopology { let mut topology = self.base_topology.clone(); - topology.set_gateways(vec![gateway.clone()]); + topology.set_testable_node(Role::EntryGateway, node.clone()); + topology.set_testable_node(Role::ExitGateway, node.clone()); topology } - pub fn simple_mixnode_test_packets( - &mut self, - mix: &mix::LegacyNode, - test_packets: u32, - ) -> Result, NetworkTestingError> { - self.mixnode_test_packets(mix, Empty, test_packets, None) - } - pub fn mixnode_test_packets( &mut self, - mix: &mix::LegacyNode, 
+ mix: &RoutingNode, + legacy_mix_layer: LegacyMixLayer, msg_ext: T, test_packets: u32, custom_recipient: Option, @@ -112,7 +95,9 @@ where where T: Serialize + Clone, { - let ephemeral_topology = self.testable_mix_topology(mix); + let ephemeral_topology = + NymRouteProvider::from(self.testable_mix_topology(legacy_mix_layer, mix)) + .with_ignore_egress_epoch_roles(true); let mut packets = Vec::with_capacity(test_packets as usize); for plaintext in TestMessage::mix_plaintexts(mix, test_packets, msg_ext)? { @@ -128,7 +113,7 @@ where pub fn mixnodes_test_packets( &mut self, - nodes: &[mix::LegacyNode], + nodes: &[(LegacyMixLayer, RoutingNode)], msg_ext: T, test_packets: u32, custom_recipient: Option, @@ -137,9 +122,10 @@ where T: Serialize + Clone, { let mut packets = Vec::new(); - for node in nodes { + for (layer, node) in nodes { packets.append(&mut self.mixnode_test_packets( node, + *layer, msg_ext.clone(), test_packets, custom_recipient, @@ -149,26 +135,10 @@ where Ok(packets) } - pub fn existing_mixnode_test_packets( - &mut self, - mix_id: NodeId, - msg_ext: T, - test_packets: u32, - custom_recipient: Option, - ) -> Result, NetworkTestingError> - where - T: Serialize + Clone, - { - let Some(node) = self.base_topology.find_mix(mix_id) else { - return Err(NetworkTestingError::NonExistentMixnode { mix_id }); - }; - - self.mixnode_test_packets(&node.clone(), msg_ext, test_packets, custom_recipient) - } - pub fn existing_identity_mixnode_test_packets( &mut self, encoded_mix_identity: String, + layer: LegacyMixLayer, msg_ext: T, test_packets: u32, custom_recipient: Option, @@ -176,22 +146,30 @@ where where T: Serialize + Clone, { - let Some(node) = self - .base_topology - .find_mix_by_identity(&encoded_mix_identity) - else { + let Ok(identity) = encoded_mix_identity.parse() else { + return Err(NetworkTestingError::NonExistentMixnodeIdentity { + mix_identity: encoded_mix_identity, + }); + }; + + let Some(node) = self.base_topology.find_node_by_identity(identity) else { 
return Err(NetworkTestingError::NonExistentMixnodeIdentity { mix_identity: encoded_mix_identity, }); }; - self.mixnode_test_packets(&node.clone(), msg_ext, test_packets, custom_recipient) + self.mixnode_test_packets( + &node.clone(), + layer, + msg_ext, + test_packets, + custom_recipient, + ) } pub fn legacy_gateway_test_packets( &mut self, - gateway: &gateway::LegacyNode, - node_id: NodeId, + gateway: &RoutingNode, msg_ext: T, test_packets: u32, custom_recipient: Option, @@ -199,12 +177,11 @@ where where T: Serialize + Clone, { - let ephemeral_topology = self.testable_gateway_topology(gateway); + let ephemeral_topology = NymRouteProvider::from(self.testable_gateway_topology(gateway)) + .with_ignore_egress_epoch_roles(true); let mut packets = Vec::with_capacity(test_packets as usize); - for plaintext in - TestMessage::legacy_gateway_plaintexts(gateway, node_id, test_packets, msg_ext)? - { + for plaintext in TestMessage::legacy_gateway_plaintexts(gateway, test_packets, msg_ext)? { packets.push(self.wrap_plaintext_data( plaintext, &ephemeral_topology, @@ -215,36 +192,10 @@ where Ok(packets) } - pub fn existing_gateway_test_packets( - &mut self, - node_id: NodeId, - encoded_gateway_identity: String, - msg_ext: T, - test_packets: u32, - custom_recipient: Option, - ) -> Result, NetworkTestingError> - where - T: Serialize + Clone, - { - let Some(node) = self.base_topology.find_gateway(&encoded_gateway_identity) else { - return Err(NetworkTestingError::NonExistentGateway { - gateway_identity: encoded_gateway_identity, - }); - }; - - self.legacy_gateway_test_packets( - &node.clone(), - node_id, - msg_ext, - test_packets, - custom_recipient, - ) - } - pub fn wrap_plaintext_data( &mut self, plaintext: Vec, - topology: &NymTopology, + topology: &NymRouteProvider, custom_recipient: Option, ) -> Result { let message = NymMessage::new_plain(plaintext); @@ -274,14 +225,13 @@ where &address, &address, PacketType::Mix, - None, )?) 
} pub fn create_test_packet( &mut self, message: &TestMessage, - topology: &NymTopology, + topology: &NymRouteProvider, custom_recipient: Option, ) -> Result where @@ -307,10 +257,6 @@ impl FragmentPreparer for NodeTester { 1 } - fn num_mix_hops(&self) -> u8 { - self.num_mix_hops - } - fn average_packet_delay(&self) -> Duration { self.average_packet_delay } diff --git a/common/nymsphinx/acknowledgements/src/surb_ack.rs b/common/nymsphinx/acknowledgements/src/surb_ack.rs index a9311be597..a11715268f 100644 --- a/common/nymsphinx/acknowledgements/src/surb_ack.rs +++ b/common/nymsphinx/acknowledgements/src/surb_ack.rs @@ -8,10 +8,10 @@ use nym_sphinx_addressing::nodes::{ NymNodeRoutingAddress, NymNodeRoutingAddressError, MAX_NODE_ADDRESS_UNPADDED_LEN, }; use nym_sphinx_params::packet_sizes::PacketSize; -use nym_sphinx_params::{PacketType, DEFAULT_NUM_MIX_HOPS}; +use nym_sphinx_params::PacketType; use nym_sphinx_types::delays::Delay; use nym_sphinx_types::{NymPacket, NymPacketError, MIN_PACKET_SIZE}; -use nym_topology::{NymTopology, NymTopologyError}; +use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, RngCore}; use std::time; @@ -43,14 +43,13 @@ impl SurbAck { ack_key: &AckKey, marshaled_fragment_id: [u8; 5], average_delay: time::Duration, - topology: &NymTopology, + topology: &NymRouteProvider, packet_type: PacketType, ) -> Result where R: RngCore + CryptoRng, { - let route = - topology.random_route_to_gateway(rng, DEFAULT_NUM_MIX_HOPS, recipient.gateway())?; + let route = topology.random_route_to_egress(rng, recipient.gateway())?; let delays = nym_sphinx_routing::generate_hop_delays(average_delay, route.len()); let destination = recipient.as_sphinx_destination(); diff --git a/common/nymsphinx/addressing/src/clients.rs b/common/nymsphinx/addressing/src/clients.rs index 743302edbe..d60581bf21 100644 --- a/common/nymsphinx/addressing/src/clients.rs +++ b/common/nymsphinx/addressing/src/clients.rs @@ -131,8 +131,8 @@ impl Recipient { 
&self.client_encryption_key } - pub fn gateway(&self) -> &NodeIdentity { - &self.gateway + pub fn gateway(&self) -> NodeIdentity { + self.gateway } pub fn to_bytes(self) -> RecipientBytes { diff --git a/common/nymsphinx/anonymous-replies/src/reply_surb.rs b/common/nymsphinx/anonymous-replies/src/reply_surb.rs index f25871ba0b..3ac7af8571 100644 --- a/common/nymsphinx/anonymous-replies/src/reply_surb.rs +++ b/common/nymsphinx/anonymous-replies/src/reply_surb.rs @@ -6,9 +6,9 @@ use nym_crypto::{generic_array::typenum::Unsigned, Digest}; use nym_sphinx_addressing::clients::Recipient; use nym_sphinx_addressing::nodes::{NymNodeRoutingAddress, MAX_NODE_ADDRESS_UNPADDED_LEN}; use nym_sphinx_params::packet_sizes::PacketSize; -use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm, DEFAULT_NUM_MIX_HOPS}; +use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm}; use nym_sphinx_types::{NymPacket, SURBMaterial, SphinxError, SURB}; -use nym_topology::{NymTopology, NymTopologyError}; +use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, RngCore}; use serde::de::{Error as SerdeError, Visitor}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -89,13 +89,12 @@ impl ReplySurb { rng: &mut R, recipient: &Recipient, average_delay: time::Duration, - topology: &NymTopology, + topology: &NymRouteProvider, ) -> Result where R: RngCore + CryptoRng, { - let route = - topology.random_route_to_gateway(rng, DEFAULT_NUM_MIX_HOPS, recipient.gateway())?; + let route = topology.random_route_to_egress(rng, recipient.gateway())?; let delays = nym_sphinx_routing::generate_hop_delays(average_delay, route.len()); let destination = recipient.as_sphinx_destination(); @@ -110,15 +109,12 @@ impl ReplySurb { /// Returns the expected number of bytes the [`ReplySURB`] will take after serialization. /// Useful for deserialization from a bytes stream. 
- pub fn serialized_len(mix_hops: u8) -> usize { + pub fn serialized_len() -> usize { use nym_sphinx_types::{HEADER_SIZE, NODE_ADDRESS_LENGTH, PAYLOAD_KEY_SIZE}; // the SURB itself consists of SURB_header, first hop address and set of payload keys - // (note extra 1 for the gateway) - SurbEncryptionKeySize::USIZE - + HEADER_SIZE - + NODE_ADDRESS_LENGTH - + (1 + mix_hops as usize) * PAYLOAD_KEY_SIZE + // for each hop (3x mix + egress) + SurbEncryptionKeySize::USIZE + HEADER_SIZE + NODE_ADDRESS_LENGTH + 4 * PAYLOAD_KEY_SIZE } pub fn encryption_key(&self) -> &SurbEncryptionKey { diff --git a/common/nymsphinx/anonymous-replies/src/requests.rs b/common/nymsphinx/anonymous-replies/src/requests.rs index 9dd4c84dc0..3945753495 100644 --- a/common/nymsphinx/anonymous-replies/src/requests.rs +++ b/common/nymsphinx/anonymous-replies/src/requests.rs @@ -169,10 +169,7 @@ impl RepliableMessage { .collect() } - pub fn try_from_bytes( - bytes: &[u8], - num_mix_hops: u8, - ) -> Result { + pub fn try_from_bytes(bytes: &[u8]) -> Result { if bytes.len() < SENDER_TAG_SIZE + 1 { return Err(InvalidReplyRequestError::RequestTooShortToDeserialize); } @@ -180,11 +177,8 @@ impl RepliableMessage { AnonymousSenderTag::from_bytes(bytes[..SENDER_TAG_SIZE].try_into().unwrap()); let content_tag = RepliableMessageContentTag::try_from(bytes[SENDER_TAG_SIZE])?; - let content = RepliableMessageContent::try_from_bytes( - &bytes[SENDER_TAG_SIZE + 1..], - num_mix_hops, - content_tag, - )?; + let content = + RepliableMessageContent::try_from_bytes(&bytes[SENDER_TAG_SIZE + 1..], content_tag)?; Ok(RepliableMessage { sender_tag, @@ -192,23 +186,20 @@ impl RepliableMessage { }) } - pub fn serialized_size(&self, num_mix_hops: u8) -> usize { + pub fn serialized_size(&self) -> usize { let content_type_size = 1; - SENDER_TAG_SIZE + content_type_size + self.content.serialized_size(num_mix_hops) + SENDER_TAG_SIZE + content_type_size + self.content.serialized_size() } } // this recovery code is shared between all 
variants containing reply surbs -fn recover_reply_surbs( - bytes: &[u8], - num_mix_hops: u8, -) -> Result<(Vec, usize), InvalidReplyRequestError> { +fn recover_reply_surbs(bytes: &[u8]) -> Result<(Vec, usize), InvalidReplyRequestError> { let mut consumed = mem::size_of::(); if bytes.len() < consumed { return Err(InvalidReplyRequestError::RequestTooShortToDeserialize); } let num_surbs = u32::from_be_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]); - let surb_size = ReplySurb::serialized_len(num_mix_hops); + let surb_size = ReplySurb::serialized_len(); if bytes[consumed..].len() < num_surbs as usize * surb_size { return Err(InvalidReplyRequestError::RequestTooShortToDeserialize); } @@ -307,14 +298,13 @@ impl RepliableMessageContent { fn try_from_bytes( bytes: &[u8], - num_mix_hops: u8, tag: RepliableMessageContentTag, ) -> Result { if bytes.is_empty() { return Err(InvalidReplyRequestError::RequestTooShortToDeserialize); } - let (reply_surbs, n) = recover_reply_surbs(bytes, num_mix_hops)?; + let (reply_surbs, n) = recover_reply_surbs(bytes)?; match tag { RepliableMessageContentTag::Data => Ok(RepliableMessageContent::Data { @@ -340,7 +330,7 @@ impl RepliableMessageContent { } } - fn serialized_size(&self, num_mix_hops: u8) -> usize { + fn serialized_size(&self) -> usize { match self { RepliableMessageContent::Data { message, @@ -348,19 +338,18 @@ impl RepliableMessageContent { } => { let num_reply_surbs_tag = mem::size_of::(); num_reply_surbs_tag - + reply_surbs.len() * ReplySurb::serialized_len(num_mix_hops) + + reply_surbs.len() * ReplySurb::serialized_len() + message.len() } RepliableMessageContent::AdditionalSurbs { reply_surbs } => { let num_reply_surbs_tag = mem::size_of::(); - num_reply_surbs_tag + reply_surbs.len() * ReplySurb::serialized_len(num_mix_hops) + num_reply_surbs_tag + reply_surbs.len() * ReplySurb::serialized_len() } RepliableMessageContent::Heartbeat { additional_reply_surbs, } => { let num_reply_surbs_tag = mem::size_of::(); - num_reply_surbs_tag 
- + additional_reply_surbs.len() * ReplySurb::serialized_len(num_mix_hops) + num_reply_surbs_tag + additional_reply_surbs.len() * ReplySurb::serialized_len() } } } @@ -578,11 +567,11 @@ mod tests { } } - pub(super) fn reply_surb(rng: &mut ChaCha20Rng, num_mix_hops: u8) -> ReplySurb { + pub(super) fn reply_surb(rng: &mut ChaCha20Rng) -> ReplySurb { // due to gateway - let num_hops = num_mix_hops + 1; - let route = (0..num_hops).map(|_| node(rng)).collect(); - let delays = (0..num_hops) + const HOPS: u8 = 4; + let route = (0..HOPS).map(|_| node(rng)).collect(); + let delays = (0..HOPS) .map(|_| Delay::new_from_nanos(rng.next_u64())) .collect(); let mut destination_bytes = [0u8; 32]; @@ -605,47 +594,40 @@ mod tests { } } - pub(super) fn reply_surbs( - rng: &mut ChaCha20Rng, - num_mix_hops: u8, - n: usize, - ) -> Vec { + pub(super) fn reply_surbs(rng: &mut ChaCha20Rng, n: usize) -> Vec { let mut surbs = Vec::with_capacity(n); for _ in 0..n { - surbs.push(reply_surb(rng, num_mix_hops)) + surbs.push(reply_surb(rng)) } surbs } pub(super) fn repliable_content_data( rng: &mut ChaCha20Rng, - num_mix_hops: u8, msg_len: usize, surbs: usize, ) -> RepliableMessageContent { RepliableMessageContent::Data { message: random_vec_u8(rng, msg_len), - reply_surbs: reply_surbs(rng, num_mix_hops, surbs), + reply_surbs: reply_surbs(rng, surbs), } } pub(super) fn repliable_content_surbs( rng: &mut ChaCha20Rng, - num_mix_hops: u8, surbs: usize, ) -> RepliableMessageContent { RepliableMessageContent::AdditionalSurbs { - reply_surbs: reply_surbs(rng, num_mix_hops, surbs), + reply_surbs: reply_surbs(rng, surbs), } } pub(super) fn repliable_content_heartbeat( rng: &mut ChaCha20Rng, - num_mix_hops: u8, surbs: usize, ) -> RepliableMessageContent { RepliableMessageContent::Heartbeat { - additional_reply_surbs: reply_surbs(rng, num_mix_hops, surbs), + additional_reply_surbs: reply_surbs(rng, surbs), } } @@ -676,70 +658,54 @@ mod tests { #[test] fn serialized_size_matches_actual_serialization() { let 
mut rng = fixtures::test_rng(); - let num_mix_hops = 3; let data1 = RepliableMessage { sender_tag: fixtures::sender_tag(&mut rng), - content: fixtures::repliable_content_data(&mut rng, num_mix_hops, 10000, 0), + content: fixtures::repliable_content_data(&mut rng, 10000, 0), }; - assert_eq!( - data1.serialized_size(num_mix_hops), - data1.into_bytes().len() - ); + assert_eq!(data1.serialized_size(), data1.into_bytes().len()); let data2 = RepliableMessage { sender_tag: fixtures::sender_tag(&mut rng), - content: fixtures::repliable_content_data(&mut rng, num_mix_hops, 10, 100), + content: fixtures::repliable_content_data(&mut rng, 10, 100), }; - assert_eq!( - data2.serialized_size(num_mix_hops), - data2.into_bytes().len() - ); + assert_eq!(data2.serialized_size(), data2.into_bytes().len()); let data3 = RepliableMessage { sender_tag: fixtures::sender_tag(&mut rng), - content: fixtures::repliable_content_data(&mut rng, num_mix_hops, 100000, 1000), + content: fixtures::repliable_content_data(&mut rng, 100000, 1000), }; - assert_eq!( - data3.serialized_size(num_mix_hops), - data3.into_bytes().len() - ); + assert_eq!(data3.serialized_size(), data3.into_bytes().len()); let additional_surbs1 = RepliableMessage { sender_tag: fixtures::sender_tag(&mut rng), - content: fixtures::repliable_content_surbs(&mut rng, num_mix_hops, 1), + content: fixtures::repliable_content_surbs(&mut rng, 1), }; assert_eq!( - additional_surbs1.serialized_size(num_mix_hops), + additional_surbs1.serialized_size(), additional_surbs1.into_bytes().len() ); let additional_surbs2 = RepliableMessage { sender_tag: fixtures::sender_tag(&mut rng), - content: fixtures::repliable_content_surbs(&mut rng, num_mix_hops, 1000), + content: fixtures::repliable_content_surbs(&mut rng, 1000), }; assert_eq!( - additional_surbs2.serialized_size(num_mix_hops), + additional_surbs2.serialized_size(), additional_surbs2.into_bytes().len() ); let heartbeat1 = RepliableMessage { sender_tag: fixtures::sender_tag(&mut rng), - 
content: fixtures::repliable_content_heartbeat(&mut rng, num_mix_hops, 1), + content: fixtures::repliable_content_heartbeat(&mut rng, 1), }; - assert_eq!( - heartbeat1.serialized_size(num_mix_hops), - heartbeat1.into_bytes().len() - ); + assert_eq!(heartbeat1.serialized_size(), heartbeat1.into_bytes().len()); let heartbeat2 = RepliableMessage { sender_tag: fixtures::sender_tag(&mut rng), - content: fixtures::repliable_content_heartbeat(&mut rng, num_mix_hops, 1000), + content: fixtures::repliable_content_heartbeat(&mut rng, 1000), }; - assert_eq!( - heartbeat2.serialized_size(num_mix_hops), - heartbeat2.into_bytes().len() - ); + assert_eq!(heartbeat2.serialized_size(), heartbeat2.into_bytes().len()); } } @@ -750,49 +716,33 @@ mod tests { #[test] fn serialized_size_matches_actual_serialization() { let mut rng = fixtures::test_rng(); - let num_mix_hops = 3; - let data1 = fixtures::repliable_content_data(&mut rng, num_mix_hops, 10000, 0); - assert_eq!( - data1.serialized_size(num_mix_hops), - data1.into_bytes().len() - ); + let data1 = fixtures::repliable_content_data(&mut rng, 10000, 0); + assert_eq!(data1.serialized_size(), data1.into_bytes().len()); - let data2 = fixtures::repliable_content_data(&mut rng, num_mix_hops, 10, 100); - assert_eq!( - data2.serialized_size(num_mix_hops), - data2.into_bytes().len() - ); + let data2 = fixtures::repliable_content_data(&mut rng, 10, 100); + assert_eq!(data2.serialized_size(), data2.into_bytes().len()); - let data3 = fixtures::repliable_content_data(&mut rng, num_mix_hops, 100000, 1000); - assert_eq!( - data3.serialized_size(num_mix_hops), - data3.into_bytes().len() - ); + let data3 = fixtures::repliable_content_data(&mut rng, 100000, 1000); + assert_eq!(data3.serialized_size(), data3.into_bytes().len()); - let additional_surbs1 = fixtures::repliable_content_surbs(&mut rng, num_mix_hops, 1); + let additional_surbs1 = fixtures::repliable_content_surbs(&mut rng, 1); assert_eq!( - additional_surbs1.serialized_size(num_mix_hops), 
+ additional_surbs1.serialized_size(), additional_surbs1.into_bytes().len() ); - let additional_surbs2 = fixtures::repliable_content_surbs(&mut rng, num_mix_hops, 1000); + let additional_surbs2 = fixtures::repliable_content_surbs(&mut rng, 1000); assert_eq!( - additional_surbs2.serialized_size(num_mix_hops), + additional_surbs2.serialized_size(), additional_surbs2.into_bytes().len() ); - let heartbeat1 = fixtures::repliable_content_heartbeat(&mut rng, num_mix_hops, 1); - assert_eq!( - heartbeat1.serialized_size(num_mix_hops), - heartbeat1.into_bytes().len() - ); + let heartbeat1 = fixtures::repliable_content_heartbeat(&mut rng, 1); + assert_eq!(heartbeat1.serialized_size(), heartbeat1.into_bytes().len()); - let heartbeat2 = fixtures::repliable_content_heartbeat(&mut rng, num_mix_hops, 1000); - assert_eq!( - heartbeat2.serialized_size(num_mix_hops), - heartbeat2.into_bytes().len() - ); + let heartbeat2 = fixtures::repliable_content_heartbeat(&mut rng, 1000); + assert_eq!(heartbeat2.serialized_size(), heartbeat2.into_bytes().len()); } } diff --git a/common/nymsphinx/chunking/src/lib.rs b/common/nymsphinx/chunking/src/lib.rs index 802d2325a6..5a6e24633c 100644 --- a/common/nymsphinx/chunking/src/lib.rs +++ b/common/nymsphinx/chunking/src/lib.rs @@ -69,11 +69,11 @@ pub mod monitoring { } } - pub fn fragment_sent(fragment: &Fragment, client_nonce: i32, destination: PublicKey, hops: u8) { + pub fn fragment_sent(fragment: &Fragment, client_nonce: i32, destination: PublicKey) { if enabled() { let id = fragment.fragment_identifier().set_id(); let mut entry = FRAGMENTS_SENT.entry(id).or_default(); - let s = SentFragment::new(fragment.header(), now!(), client_nonce, destination, hops); + let s = SentFragment::new(fragment.header(), now!(), client_nonce, destination); entry.push(s); } } @@ -82,16 +82,11 @@ pub mod monitoring { #[derive(Debug, Clone)] pub struct FragmentMixParams { destination: PublicKey, - hops: u8, } impl FragmentMixParams { - pub fn destination(&self) -> 
&PublicKey { - &self.destination - } - - pub fn hops(&self) -> u8 { - self.hops + pub fn destination(&self) -> PublicKey { + self.destination } } @@ -105,14 +100,8 @@ pub struct SentFragment { } impl SentFragment { - fn new( - header: FragmentHeader, - at: u64, - client_nonce: i32, - destination: PublicKey, - hops: u8, - ) -> Self { - let mixnet_params = FragmentMixParams { destination, hops }; + fn new(header: FragmentHeader, at: u64, client_nonce: i32, destination: PublicKey) -> Self { + let mixnet_params = FragmentMixParams { destination }; SentFragment { header, at, diff --git a/common/nymsphinx/cover/src/lib.rs b/common/nymsphinx/cover/src/lib.rs index 23c83c3ed1..41bb6150ed 100644 --- a/common/nymsphinx/cover/src/lib.rs +++ b/common/nymsphinx/cover/src/lib.rs @@ -10,11 +10,9 @@ use nym_sphinx_addressing::nodes::NymNodeRoutingAddress; use nym_sphinx_chunking::fragment::COVER_FRAG_ID; use nym_sphinx_forwarding::packet::MixPacket; use nym_sphinx_params::packet_sizes::PacketSize; -use nym_sphinx_params::{ - PacketEncryptionAlgorithm, PacketHkdfAlgorithm, PacketType, DEFAULT_NUM_MIX_HOPS, -}; +use nym_sphinx_params::{PacketEncryptionAlgorithm, PacketHkdfAlgorithm, PacketType}; use nym_sphinx_types::NymPacket; -use nym_topology::{NymTopology, NymTopologyError}; +use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, RngCore}; use std::time; @@ -36,7 +34,7 @@ pub enum CoverMessageError { pub fn generate_loop_cover_surb_ack( rng: &mut R, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, full_address: &Recipient, average_ack_delay: time::Duration, @@ -59,7 +57,7 @@ where #[allow(clippy::too_many_arguments)] pub fn generate_loop_cover_packet( rng: &mut R, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, full_address: &Recipient, average_ack_delay: time::Duration, @@ -118,8 +116,7 @@ where .chain(cover_content) .collect(); - let route = - topology.random_route_to_gateway(rng, DEFAULT_NUM_MIX_HOPS, 
full_address.gateway())?; + let route = topology.random_route_to_egress(rng, full_address.gateway())?; let delays = nym_sphinx_routing::generate_hop_delays(average_packet_delay, route.len()); let destination = full_address.as_sphinx_destination(); diff --git a/common/nymsphinx/params/src/lib.rs b/common/nymsphinx/params/src/lib.rs index 9d899a426b..f5d3fd7afb 100644 --- a/common/nymsphinx/params/src/lib.rs +++ b/common/nymsphinx/params/src/lib.rs @@ -16,10 +16,6 @@ pub mod packet_sizes; pub mod packet_types; pub mod packet_version; -// If somebody can provide an argument why it might be reasonable to have more than 255 mix hops, -// I will change this to [`usize`] -pub const DEFAULT_NUM_MIX_HOPS: u8 = 3; - // TODO: not entirely sure how to feel about those being defined here, ideally it'd be where [`Fragment`] // is defined, but that'd introduce circular dependencies as the acknowledgements crate also needs // access to that diff --git a/common/nymsphinx/routing/src/lib.rs b/common/nymsphinx/routing/src/lib.rs index e8881b409d..b468fd9754 100644 --- a/common/nymsphinx/routing/src/lib.rs +++ b/common/nymsphinx/routing/src/lib.rs @@ -1,19 +1,10 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 +use nym_sphinx_types::{delays, Delay}; use std::time::Duration; - -use nym_sphinx_addressing::clients::Recipient; -use nym_sphinx_types::{delays, Delay, Node}; use thiserror::Error; -pub trait SphinxRouteMaker { - type Error; - - fn sphinx_route(&mut self, hops: u8, destination: &Recipient) - -> Result, Self::Error>; -} - #[derive(Debug, Error, Clone, Copy)] #[error("the route vector contains {available} nodes while {requested} hops are required")] pub struct InvalidNumberOfHops { @@ -21,29 +12,6 @@ pub struct InvalidNumberOfHops { requested: u8, } -// if one wants to provide a hardcoded route, they can -impl SphinxRouteMaker for Vec { - type Error = InvalidNumberOfHops; - - fn sphinx_route( - &mut self, - hops: u8, - _destination: &Recipient, - 
) -> Result, InvalidNumberOfHops> { - // it's the responsibility of the caller to ensure the hardcoded route has correct number of hops - // and that it's final hop include the recipient's gateway. - - if self.len() != hops as usize { - Err(InvalidNumberOfHops { - available: self.len(), - requested: hops, - }) - } else { - Ok(self.clone()) - } - } -} - pub fn generate_hop_delays(average_packet_delay: Duration, num_hops: usize) -> Vec { if average_packet_delay.is_zero() { vec![nym_sphinx_types::Delay::new_from_millis(0); num_hops] diff --git a/common/nymsphinx/src/message.rs b/common/nymsphinx/src/message.rs index ed1aa25268..8124989339 100644 --- a/common/nymsphinx/src/message.rs +++ b/common/nymsphinx/src/message.rs @@ -149,7 +149,7 @@ impl NymMessage { .collect() } - fn try_from_bytes(bytes: &[u8], num_mix_hops: u8) -> Result { + fn try_from_bytes(bytes: &[u8]) -> Result { if bytes.is_empty() { return Err(NymMessageError::EmptyMessage); } @@ -158,7 +158,7 @@ impl NymMessage { match typ_tag { NymMessageType::Plain => Ok(NymMessage::Plain(bytes[1..].to_vec())), NymMessageType::Repliable => Ok(NymMessage::Repliable( - RepliableMessage::try_from_bytes(&bytes[1..], num_mix_hops)?, + RepliableMessage::try_from_bytes(&bytes[1..])?, )), NymMessageType::Reply => Ok(NymMessage::Reply(ReplyMessage::try_from_bytes( &bytes[1..], @@ -166,10 +166,10 @@ impl NymMessage { } } - fn serialized_size(&self, num_mix_hops: u8) -> usize { + fn serialized_size(&self) -> usize { let inner_size = match self { NymMessage::Plain(msg) => msg.len(), - NymMessage::Repliable(msg) => msg.serialized_size(num_mix_hops), + NymMessage::Repliable(msg) => msg.serialized_size(), NymMessage::Reply(msg) => msg.serialized_size(), }; let message_type_size = 1; @@ -207,9 +207,9 @@ impl NymMessage { } /// Determines the number of required packets of the provided size for the split message. 
- pub fn required_packets(&self, packet_size: PacketSize, num_mix_hops: u8) -> usize { + pub fn required_packets(&self, packet_size: PacketSize) -> usize { let plaintext_per_packet = self.true_available_plaintext_per_packet(packet_size); - let serialized_len = self.serialized_size(num_mix_hops); + let serialized_len = self.serialized_size(); let (num_fragments, _) = chunking::number_of_required_fragments(serialized_len, plaintext_per_packet); @@ -279,11 +279,11 @@ impl PaddedMessage { } // reverse of NymMessage::pad_to_full_packet_lengths - pub fn remove_padding(self, num_mix_hops: u8) -> Result { + pub fn remove_padding(self) -> Result { // we are looking for first occurrence of 1 in the tail and we get its index if let Some(padding_end) = self.0.iter().rposition(|b| *b == 1) { // and now we only take bytes until that point (but not including it) - NymMessage::try_from_bytes(&self.0[..padding_end], num_mix_hops) + NymMessage::try_from_bytes(&self.0[..padding_end]) } else { Err(NymMessageError::InvalidMessagePadding) } @@ -304,7 +304,7 @@ mod tests { fn serialized_size_matches_actual_serialization() { // plain let plain = NymMessage::new_plain(vec![1, 2, 3, 4, 5]); - assert_eq!(plain.serialized_size(3), plain.into_bytes().len()); + assert_eq!(plain.serialized_size(), plain.into_bytes().len()); // a single variant for each repliable and reply is enough as they are more thoroughly tested // internally @@ -313,9 +313,9 @@ mod tests { [42u8; 16].into(), vec![], )); - assert_eq!(repliable.serialized_size(3), repliable.into_bytes().len()); + assert_eq!(repliable.serialized_size(), repliable.into_bytes().len()); let reply = NymMessage::new_reply(ReplyMessage::new_data_message(vec![1, 2, 3, 4, 5])); - assert_eq!(reply.serialized_size(3), reply.into_bytes().len()); + assert_eq!(reply.serialized_size(), reply.into_bytes().len()); } } diff --git a/common/nymsphinx/src/preparer/mod.rs b/common/nymsphinx/src/preparer/mod.rs index 8578b655b0..e4e0e3e7ba 100644 --- 
a/common/nymsphinx/src/preparer/mod.rs +++ b/common/nymsphinx/src/preparer/mod.rs @@ -14,9 +14,9 @@ use nym_sphinx_anonymous_replies::reply_surb::ReplySurb; use nym_sphinx_chunking::fragment::{Fragment, FragmentIdentifier}; use nym_sphinx_forwarding::packet::MixPacket; use nym_sphinx_params::packet_sizes::PacketSize; -use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm, DEFAULT_NUM_MIX_HOPS}; +use nym_sphinx_params::{PacketType, ReplySurbKeyDigestAlgorithm}; use nym_sphinx_types::{Delay, NymPacket}; -use nym_topology::{NymTopology, NymTopologyError}; +use nym_topology::{NymRouteProvider, NymTopologyError}; use rand::{CryptoRng, Rng, SeedableRng}; use rand_chacha::ChaCha8Rng; @@ -54,14 +54,13 @@ pub trait FragmentPreparer { fn deterministic_route_selection(&self) -> bool; fn rng(&mut self) -> &mut Self::Rng; fn nonce(&self) -> i32; - fn num_mix_hops(&self) -> u8; fn average_packet_delay(&self) -> Duration; fn average_ack_delay(&self) -> Duration; fn generate_reply_surbs( &mut self, amount: usize, - topology: &NymTopology, + topology: &NymRouteProvider, reply_recipient: &Recipient, ) -> Result, NymTopologyError> { let mut reply_surbs = Vec::with_capacity(amount); @@ -79,7 +78,7 @@ pub trait FragmentPreparer { &mut self, recipient: &Recipient, fragment_id: FragmentIdentifier, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, packet_type: PacketType, ) -> Result { @@ -109,7 +108,7 @@ pub trait FragmentPreparer { fn prepare_reply_chunk_for_sending( &mut self, fragment: Fragment, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, reply_surb: ReplySurb, packet_sender: &Recipient, @@ -130,9 +129,8 @@ pub trait FragmentPreparer { .expect("the message has been incorrectly fragmented"); // this is not going to be accurate by any means. 
but that's the best estimation we can do - let expected_forward_delay = Delay::new_from_millis( - (self.average_packet_delay().as_millis() * self.num_mix_hops() as u128) as u64, - ); + let expected_forward_delay = + Delay::new_from_millis((self.average_packet_delay().as_millis() * 3) as u64); let fragment_identifier = fragment.fragment_identifier(); @@ -190,12 +188,11 @@ pub trait FragmentPreparer { fn prepare_chunk_for_sending( &mut self, fragment: Fragment, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, packet_sender: &Recipient, packet_recipient: &Recipient, packet_type: PacketType, - mix_hops: Option, ) -> Result { debug!("Preparing chunk for sending"); // each plain or repliable packet (i.e. not a reply) attaches an ephemeral public key so that the recipient @@ -204,8 +201,7 @@ pub trait FragmentPreparer { let fragment_header = fragment.header(); let destination = packet_recipient.gateway(); - let hops = mix_hops.unwrap_or(self.num_mix_hops()); - monitoring::fragment_sent(&fragment, self.nonce(), *destination, hops); + monitoring::fragment_sent(&fragment, self.nonce(), destination); let non_reply_overhead = encryption::PUBLIC_KEY_SIZE; let expected_plaintext = match packet_type { @@ -240,16 +236,16 @@ pub trait FragmentPreparer { }; // generate pseudorandom route for the packet - log::trace!("Preparing chunk for sending with {hops} mix hops"); + log::trace!("Preparing chunk for sending"); let route = if self.deterministic_route_selection() { log::trace!("using deterministic route selection"); let seed = fragment_header.seed().wrapping_mul(self.nonce()); let mut rng = ChaCha8Rng::seed_from_u64(seed as u64); - topology.random_route_to_gateway(&mut rng, hops, destination)? + topology.random_route_to_egress(&mut rng, destination)? } else { log::trace!("using pseudorandom route selection"); let mut rng = self.rng(); - topology.random_route_to_gateway(&mut rng, hops, destination)? + topology.random_route_to_egress(&mut rng, destination)? 
}; let destination = packet_recipient.as_sphinx_destination(); @@ -335,10 +331,6 @@ pub struct MessagePreparer { /// Average delay an acknowledgement packet is going to get delay at a single mixnode. average_ack_delay: Duration, - /// Number of mix hops each packet ('real' message, ack, reply) is expected to take. - /// Note that it does not include gateway hops. - num_mix_hops: u8, - nonce: i32, } @@ -361,17 +353,10 @@ where sender_address, average_packet_delay, average_ack_delay, - num_mix_hops: DEFAULT_NUM_MIX_HOPS, nonce, } } - /// Allows setting non-default number of expected mix hops in the network. - pub fn with_mix_hops(mut self, hops: u8) -> Self { - self.num_mix_hops = hops; - self - } - /// Overwrites existing sender address with the provided value. pub fn set_sender_address(&mut self, sender_address: Recipient) { self.sender_address = sender_address; @@ -380,7 +365,7 @@ where pub fn generate_reply_surbs( &mut self, amount: usize, - topology: &NymTopology, + topology: &NymRouteProvider, ) -> Result, NymTopologyError> { let mut reply_surbs = Vec::with_capacity(amount); for _ in 0..amount { @@ -399,7 +384,7 @@ where pub fn prepare_reply_chunk_for_sending( &mut self, fragment: Fragment, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, reply_surb: ReplySurb, packet_type: PacketType, @@ -420,11 +405,10 @@ where pub fn prepare_chunk_for_sending( &mut self, fragment: Fragment, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, packet_recipient: &Recipient, packet_type: PacketType, - mix_hops: Option, ) -> Result { let sender = self.sender_address; @@ -436,7 +420,6 @@ where &sender, packet_recipient, packet_type, - mix_hops, ) } @@ -444,7 +427,7 @@ where pub fn generate_surb_ack( &mut self, fragment_id: FragmentIdentifier, - topology: &NymTopology, + topology: &NymRouteProvider, ack_key: &AckKey, packet_type: PacketType, ) -> Result { @@ -483,10 +466,6 @@ impl FragmentPreparer for MessagePreparer { self.nonce } - 
fn num_mix_hops(&self) -> u8 { - self.num_mix_hops - } - fn average_packet_delay(&self) -> Duration { self.average_packet_delay } diff --git a/common/nymsphinx/src/receiver.rs b/common/nymsphinx/src/receiver.rs index 8def09db67..f2f3feadd9 100644 --- a/common/nymsphinx/src/receiver.rs +++ b/common/nymsphinx/src/receiver.rs @@ -14,7 +14,6 @@ use nym_sphinx_chunking::reconstruction::MessageReconstructor; use nym_sphinx_chunking::ChunkingError; use nym_sphinx_params::{ PacketEncryptionAlgorithm, PacketHkdfAlgorithm, ReplySurbEncryptionAlgorithm, - DEFAULT_NUM_MIX_HOPS, }; use thiserror::Error; @@ -79,7 +78,6 @@ pub enum MessageRecoveryError { pub trait MessageReceiver { fn new() -> Self; fn reconstructor(&mut self) -> &mut MessageReconstructor; - fn num_mix_hops(&self) -> u8; fn decrypt_raw_message( &self, @@ -143,7 +141,7 @@ pub trait MessageReceiver { fragment: Fragment, ) -> Result)>, MessageRecoveryError> { if let Some((message, used_sets)) = self.reconstructor().insert_new_fragment(fragment) { - match PaddedMessage::new_reconstructed(message).remove_padding(self.num_mix_hops()) { + match PaddedMessage::new_reconstructed(message).remove_padding() { Ok(message) => Ok(Some((message, used_sets))), Err(err) => Err(MessageRecoveryError::MalformedReconstructedMessage { source: err, @@ -156,28 +154,11 @@ pub trait MessageReceiver { } } -#[derive(Clone)] +#[derive(Clone, Default)] pub struct SphinxMessageReceiver { /// High level public structure used to buffer all received data [`Fragment`]s and eventually /// returning original messages that they encapsulate. reconstructor: MessageReconstructor, - - /// Number of mix hops each packet ('real' message, ack, reply) is expected to take. - /// Note that it does not include gateway hops. - num_mix_hops: u8, -} - -impl SphinxMessageReceiver { - /// Allows setting non-default number of expected mix hops in the network. - // IMPORTANT NOTE: this is among others used to deserialize SURBs. 
Meaning that this is a - // global setting and currently always set to the default value. The implication is that it is - // not currently possible to have different number of hops for different SURB messages. So, - // don't try to use <3 mix hops for SURBs until this is refactored. - #[must_use] - pub fn with_mix_hops(mut self, hops: u8) -> Self { - self.num_mix_hops = hops; - self - } } impl MessageReceiver for SphinxMessageReceiver { @@ -201,112 +182,4 @@ impl MessageReceiver for SphinxMessageReceiver { fn reconstructor(&mut self) -> &mut MessageReconstructor { &mut self.reconstructor } - - fn num_mix_hops(&self) -> u8 { - self.num_mix_hops - } -} - -impl Default for SphinxMessageReceiver { - fn default() -> Self { - SphinxMessageReceiver { - reconstructor: Default::default(), - num_mix_hops: DEFAULT_NUM_MIX_HOPS, - } - } -} - -#[cfg(test)] -mod message_receiver { - use super::*; - use nym_crypto::asymmetric::identity; - use nym_mixnet_contract_common::LegacyMixLayer; - use nym_topology::{gateway, mix, NymTopology}; - use std::collections::BTreeMap; - - // TODO: is it somehow maybe possible to move it to `topology` and have if conditionally - // available to other modules? - /// Returns a hardcoded, valid instance of [`NymTopology`] that is to be used in - /// tests requiring instance of topology. 
- #[allow(dead_code)] - fn topology_fixture() -> NymTopology { - let mut mixes = BTreeMap::new(); - mixes.insert( - 1, - vec![mix::LegacyNode { - mix_id: 123, - host: "10.20.30.40".parse().unwrap(), - mix_host: "10.20.30.40:1789".parse().unwrap(), - identity_key: identity::PublicKey::from_base58_string( - "3ebjp1Fb9hdcS1AR6AZihgeJiMHkB5jjJUsvqNnfQwU7", - ) - .unwrap(), - sphinx_key: encryption::PublicKey::from_base58_string( - "B3GzG62aXAZNg14RoMCp3BhELNBrySLr2JqrwyfYFzRc", - ) - .unwrap(), - layer: LegacyMixLayer::One, - version: "0.8.0-dev".into(), - }], - ); - - mixes.insert( - 2, - vec![mix::LegacyNode { - mix_id: 234, - host: "11.21.31.41".parse().unwrap(), - mix_host: "11.21.31.41:1789".parse().unwrap(), - identity_key: identity::PublicKey::from_base58_string( - "D6YaMzLSY7mANtSQRKXsmMZpqgqiVkeiagKM4V4oFPFr", - ) - .unwrap(), - sphinx_key: encryption::PublicKey::from_base58_string( - "5Z1VqYwM2xeKxd8H7fJpGWasNiDFijYBAee7MErkZ5QT", - ) - .unwrap(), - layer: LegacyMixLayer::Two, - version: "0.8.0-dev".into(), - }], - ); - - mixes.insert( - 3, - vec![mix::LegacyNode { - mix_id: 456, - host: "12.22.32.42".parse().unwrap(), - mix_host: "12.22.32.42:1789".parse().unwrap(), - identity_key: identity::PublicKey::from_base58_string( - "GkWDysw4AjESv1KiAiVn7JzzCMJeksxNSXVfr1PpX8wD", - ) - .unwrap(), - sphinx_key: encryption::PublicKey::from_base58_string( - "9EyjhCggr2QEA2nakR88YHmXgpy92DWxoe2draDRkYof", - ) - .unwrap(), - layer: LegacyMixLayer::Three, - version: "0.8.0-dev".into(), - }], - ); - - NymTopology::new( - // currently coco_nodes don't really exist so this is still to be determined - mixes, - vec![gateway::LegacyNode { - node_id: 789, - host: "1.2.3.4".parse().unwrap(), - mix_host: "1.2.3.4:1789".parse().unwrap(), - clients_ws_port: 9000, - clients_wss_port: None, - identity_key: identity::PublicKey::from_base58_string( - "FioFa8nMmPpQnYi7JyojoTuwGLeyNS8BF4ChPr29zUML", - ) - .unwrap(), - sphinx_key: encryption::PublicKey::from_base58_string( - 
"EB42xvMFMD5rUCstE2CDazgQQJ22zLv8SPm1Luxni44c", - ) - .unwrap(), - version: "0.8.0-dev".into(), - }], - ) - } } diff --git a/common/topology/Cargo.toml b/common/topology/Cargo.toml index 055175fd36..be031ed095 100644 --- a/common/topology/Cargo.toml +++ b/common/topology/Cargo.toml @@ -12,15 +12,13 @@ documentation = { workspace = true } [dependencies] async-trait = { workspace = true, optional = true } -bs58 = { workspace = true } -log = { workspace = true } +tracing = { workspace = true } rand = { workspace = true } reqwest = { workspace = true, features = ["json"] } -semver = { workspace = true } +serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } -# 'serializable' feature -serde = { workspace = true, features = ["derive"], optional = true } +# 'serde' feature serde_json = { workspace = true, optional = true } # 'wasm-serde-types' feature @@ -28,7 +26,6 @@ tsify = { workspace = true, features = ["js"], optional = true } wasm-bindgen = { workspace = true, optional = true } ## internal -nym-bin-common = { path = "../bin-common" } nym-config = { path = "../config" } nym-crypto = { path = "../crypto", features = ["sphinx", "outfox"] } nym-mixnet-contract-common = { path = "../cosmwasm-smart-contracts/mixnet-contract" } @@ -51,5 +48,5 @@ wasm-utils = { path = "../wasm/utils", default-features = false, optional = true default = ["provider-trait"] provider-trait = ["async-trait"] wasm-serde-types = ["tsify", "wasm-bindgen", "wasm-utils"] -serializable = ["serde", "serde_json"] +persistence = ["serde_json"] outfox = [] diff --git a/common/topology/src/error.rs b/common/topology/src/error.rs index 835ea37b1f..36896effa3 100644 --- a/common/topology/src/error.rs +++ b/common/topology/src/error.rs @@ -4,18 +4,28 @@ use std::array::TryFromSliceError; use crate::MixLayer; +use nym_sphinx_addressing::NodeIdentity; use nym_sphinx_types::NymPacketError; use thiserror::Error; #[derive(Debug, Error)] pub enum NymTopologyError { - #[error("The 
provided network topology is empty - there are no mixnodes and no gateways on it - the network request(s) probably failed")] + #[error("the provided network topology is empty - there are no valid nodes on it - the network request(s) probably failed")] EmptyNetworkTopology, + #[error("no node with identity {node_identity} is known")] + NonExistentNode { node_identity: Box }, + + #[error("could not use node with identity {node_identity} as egress since it didn't get assigned valid role in the current epoch")] + InvalidEgressRole { node_identity: Box }, + + #[error("one (or more) of mixing layers does not have any valid nodes available")] + InsufficientMixingNodes, + #[error("The provided network topology has no gateways available")] NoGatewaysAvailable, - #[error("The provided network topology has no mixnodes available")] + #[error("The provided network topology has no valid mixnodes available")] NoMixnodesAvailable, #[error("Gateway with identity key {identity_key} doesn't exist")] @@ -55,12 +65,6 @@ pub enum NymTopologyError { #[error("{0}")] ReqwestError(#[from] reqwest::Error), - #[error("{0}")] - MixnodeConversionError(#[from] crate::mix::MixnodeConversionError), - - #[error("{0}")] - GatewayConversionError(#[from] crate::gateway::GatewayConversionError), - #[error("{0}")] VarError(#[from] std::env::VarError), } diff --git a/common/topology/src/gateway.rs b/common/topology/src/gateway.rs deleted file mode 100644 index 545f47dd21..0000000000 --- a/common/topology/src/gateway.rs +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2021 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use crate::{NetworkAddress, NodeVersion}; -use nym_api_requests::nym_nodes::SkimmedNode; -use nym_crypto::asymmetric::{encryption, identity}; -use nym_mixnet_contract_common::NodeId; -use nym_sphinx_addressing::nodes::{NodeIdentity, NymNodeRoutingAddress}; -use nym_sphinx_types::Node as SphinxNode; -use rand::seq::SliceRandom; -use rand::thread_rng; -use std::fmt; -use 
std::fmt::Formatter; -use std::io; -use std::net::AddrParseError; -use std::net::SocketAddr; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum GatewayConversionError { - #[error("gateway identity key was malformed - {0}")] - InvalidIdentityKey(#[from] identity::Ed25519RecoveryError), - - #[error("gateway sphinx key was malformed - {0}")] - InvalidSphinxKey(#[from] encryption::KeyRecoveryError), - - #[error("'{value}' is not a valid gateway address - {source}")] - InvalidAddress { - value: String, - #[source] - source: io::Error, - }, - - #[error("'{gateway}' has not provided any valid ip addresses")] - NoIpAddressesProvided { gateway: String }, - - #[error("'{gateway}' has provided a malformed ip address: {err}")] - MalformedIpAddress { - gateway: String, - - #[source] - err: AddrParseError, - }, - - #[error("provided node is not an entry gateway in this epoch!")] - NotGateway, -} - -#[derive(Clone)] -pub struct LegacyNode { - pub node_id: NodeId, - - pub host: NetworkAddress, - // we're keeping this as separate resolved field since we do not want to be resolving the potential - // hostname every time we want to construct a path via this node - pub mix_host: SocketAddr, - - // #[serde(alias = "clients_port")] - pub clients_ws_port: u16, - - // #[serde(default)] - pub clients_wss_port: Option, - - pub identity_key: identity::PublicKey, - pub sphinx_key: encryption::PublicKey, // TODO: or nymsphinx::PublicKey? 
both are x25519 - - // to be removed: - pub version: NodeVersion, -} - -impl std::fmt::Debug for LegacyNode { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("gateway::Node") - .field("host", &self.host) - .field("mix_host", &self.mix_host) - .field("clients_ws_port", &self.clients_ws_port) - .field("clients_wss_port", &self.clients_wss_port) - .field("identity_key", &self.identity_key.to_base58_string()) - .field("sphinx_key", &self.sphinx_key.to_base58_string()) - .field("version", &self.version) - .finish() - } -} - -impl LegacyNode { - pub fn parse_host(raw: &str) -> Result { - // safety: this conversion is infallible - // (but we retain result return type for legacy reasons) - Ok(raw.parse().unwrap()) - } - - pub fn extract_mix_host( - host: &NetworkAddress, - mix_port: u16, - ) -> Result { - Ok(host.to_socket_addrs(mix_port).map_err(|err| { - GatewayConversionError::InvalidAddress { - value: host.to_string(), - source: err, - } - })?[0]) - } - - pub fn identity(&self) -> &NodeIdentity { - &self.identity_key - } - - pub fn clients_address(&self) -> String { - self.clients_address_tls() - .unwrap_or_else(|| self.clients_address_no_tls()) - } - - pub fn clients_address_no_tls(&self) -> String { - format!("ws://{}:{}", self.host, self.clients_ws_port) - } - - pub fn clients_address_tls(&self) -> Option { - self.clients_wss_port - .map(|p| format!("wss://{}:{p}", self.host)) - } -} - -impl fmt::Display for LegacyNode { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "legacy gateway {} @ {}", self.node_id, self.host) - } -} - -impl<'a> From<&'a LegacyNode> for SphinxNode { - fn from(node: &'a LegacyNode) -> Self { - let node_address_bytes = NymNodeRoutingAddress::from(node.mix_host) - .try_into() - .unwrap(); - - SphinxNode::new(node_address_bytes, (&node.sphinx_key).into()) - } -} - -impl<'a> TryFrom<&'a SkimmedNode> for LegacyNode { - type Error = GatewayConversionError; - - fn try_from(value: &'a SkimmedNode) -> 
Result { - let Some(entry_details) = &value.entry else { - return Err(GatewayConversionError::NotGateway); - }; - - if value.ip_addresses.is_empty() { - return Err(GatewayConversionError::NoIpAddressesProvided { - gateway: value.ed25519_identity_pubkey.to_base58_string(), - }); - } - - // safety: we just checked the slice is not empty - #[allow(clippy::unwrap_used)] - let ip = value.ip_addresses.choose(&mut thread_rng()).unwrap(); - - let host = if let Some(hostname) = &entry_details.hostname { - NetworkAddress::Hostname(hostname.to_string()) - } else { - NetworkAddress::IpAddr(*ip) - }; - - Ok(LegacyNode { - node_id: value.node_id, - host, - mix_host: SocketAddr::new(*ip, value.mix_port), - clients_ws_port: entry_details.ws_port, - clients_wss_port: entry_details.wss_port, - identity_key: value.ed25519_identity_pubkey, - sphinx_key: value.x25519_sphinx_pubkey, - version: NodeVersion::Unknown, - }) - } -} diff --git a/common/topology/src/lib.rs b/common/topology/src/lib.rs index 4133a8ae0e..c71ada3c2d 100644 --- a/common/topology/src/lib.rs +++ b/common/topology/src/lib.rs @@ -1,581 +1,543 @@ // Copyright 2021-2023 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -#![allow(unknown_lints)] -// clippy::to_string_trait_impl is not on stable as of 1.77 - -pub use error::NymTopologyError; -use log::{debug, info, warn}; -use nym_api_requests::nym_nodes::{CachedNodesResponse, SkimmedNode}; -use nym_config::defaults::var_names::NYM_API; -use nym_mixnet_contract_common::{IdentityKeyRef, NodeId}; +use ::serde::{Deserialize, Serialize}; +use nym_api_requests::nym_nodes::SkimmedNode; use nym_sphinx_addressing::nodes::NodeIdentity; use nym_sphinx_types::Node as SphinxNode; -use rand::prelude::SliceRandom; +use rand::prelude::IteratorRandom; use rand::{CryptoRng, Rng}; -use std::collections::BTreeMap; -use std::convert::Infallible; -use std::fmt::{self, Display, Formatter}; -use std::io; -use std::net::{IpAddr, SocketAddr, ToSocketAddrs}; -use std::str::FromStr; +use 
std::borrow::Borrow; +use std::collections::{HashMap, HashSet}; +use std::fmt::Display; +use std::net::IpAddr; +use tracing::{debug, warn}; -#[cfg(feature = "serializable")] -use ::serde::{Deserialize, Deserializer, Serialize, Serializer}; +pub use crate::node::{EntryDetails, RoutingNode, SupportedRoles}; +pub use error::NymTopologyError; +pub use nym_mixnet_contract_common::nym_node::Role; +pub use nym_mixnet_contract_common::{EpochRewardedSet, NodeId}; +pub use rewarded_set::CachedEpochRewardedSet; pub mod error; -pub mod gateway; -pub mod mix; -pub mod random_route_provider; +pub mod node; +pub mod rewarded_set; #[cfg(feature = "provider-trait")] pub mod provider_trait; - -#[cfg(feature = "serializable")] -pub(crate) mod serde; - -#[cfg(feature = "serializable")] -pub use crate::serde::{ - SerializableGateway, SerializableMixNode, SerializableNymTopology, SerializableTopologyError, -}; +#[cfg(feature = "wasm-serde-types")] +pub mod wasm_helpers; #[cfg(feature = "provider-trait")] pub use provider_trait::{HardcodedTopologyProvider, TopologyProvider}; -#[derive(Debug, Default, Clone)] -pub enum NodeVersion { - Explicit(semver::Version), - - #[default] - Unknown, +#[deprecated] +#[derive(Debug, Clone)] +pub enum NetworkAddress { + IpAddr(IpAddr), + Hostname(String), } -// this is only implemented for backwards compatibility so we wouldn't need to change everything at once -// (also I intentionally implemented `ToString` as opposed to `Display`) -#[allow(clippy::to_string_trait_impl)] -impl ToString for NodeVersion { - fn to_string(&self) -> String { - match self { - NodeVersion::Explicit(semver) => semver.to_string(), - NodeVersion::Unknown => String::new(), +#[allow(deprecated)] +mod deprecated_network_address_impls { + use crate::NetworkAddress; + use std::convert::Infallible; + use std::fmt::{Display, Formatter}; + use std::net::{SocketAddr, ToSocketAddrs}; + use std::str::FromStr; + use std::{fmt, io}; + + impl NetworkAddress { + pub fn as_hostname(self) -> 
Option { + match self { + NetworkAddress::IpAddr(_) => None, + NetworkAddress::Hostname(s) => Some(s), + } } } -} -// this is also for backwards compat. -impl<'a> From<&'a str> for NodeVersion { - fn from(value: &'a str) -> Self { - if let Ok(semver) = value.parse() { - NodeVersion::Explicit(semver) - } else { - NodeVersion::Unknown + impl NetworkAddress { + pub fn to_socket_addrs(&self, port: u16) -> io::Result> { + match self { + NetworkAddress::IpAddr(addr) => Ok(vec![SocketAddr::new(*addr, port)]), + NetworkAddress::Hostname(hostname) => { + Ok((hostname.as_str(), port).to_socket_addrs()?.collect()) + } + } } } -} -#[derive(Debug, Clone)] -pub enum NetworkAddress { - IpAddr(IpAddr), - Hostname(String), -} + impl FromStr for NetworkAddress { + type Err = Infallible; -impl NetworkAddress { - pub fn to_socket_addrs(&self, port: u16) -> io::Result> { - match self { - NetworkAddress::IpAddr(addr) => Ok(vec![SocketAddr::new(*addr, port)]), - NetworkAddress::Hostname(hostname) => { - Ok((hostname.as_str(), port).to_socket_addrs()?.collect()) + fn from_str(s: &str) -> Result { + if let Ok(ip_addr) = s.parse() { + Ok(NetworkAddress::IpAddr(ip_addr)) + } else { + Ok(NetworkAddress::Hostname(s.to_string())) + } + } + } + + impl Display for NetworkAddress { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + NetworkAddress::IpAddr(ip_addr) => ip_addr.fmt(f), + NetworkAddress::Hostname(hostname) => hostname.fmt(f), } } } } -impl FromStr for NetworkAddress { - type Err = Infallible; +pub type MixLayer = u8; + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct NymTopology { + // for the purposes of future VRF, everyone will need the same view of the network, regardless of performance filtering + // so we use the same 'master' rewarded set information for that + // + // how do we solve the problem of "we have to go through a node that we want to filter out?" 
+ // ¯\_(ツ)_/¯ that's a future problem + rewarded_set: CachedEpochRewardedSet, + + node_details: HashMap, +} - fn from_str(s: &str) -> Result { - if let Ok(ip_addr) = s.parse() { - Ok(NetworkAddress::IpAddr(ip_addr)) - } else { - Ok(NetworkAddress::Hostname(s.to_string())) +#[derive(Clone, Debug, Default)] +pub struct NymRouteProvider { + pub topology: NymTopology, + + /// Allow constructing routes with final hop at nodes that are not entry/exit gateways in the current epoch + pub ignore_egress_epoch_roles: bool, +} + +impl From for NymRouteProvider { + fn from(topology: NymTopology) -> Self { + NymRouteProvider { + topology, + ignore_egress_epoch_roles: false, } } } -impl Display for NetworkAddress { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - NetworkAddress::IpAddr(ip_addr) => ip_addr.fmt(f), - NetworkAddress::Hostname(hostname) => hostname.fmt(f), +impl NymRouteProvider { + pub fn new(topology: NymTopology, ignore_egress_epoch_roles: bool) -> Self { + NymRouteProvider { + topology, + ignore_egress_epoch_roles, } } -} -pub type MixLayer = u8; + pub fn new_empty(ignore_egress_epoch_roles: bool) -> NymRouteProvider { + let this: Self = NymTopology::default().into(); + this.with_ignore_egress_epoch_roles(ignore_egress_epoch_roles) + } -// the reason for those having `Legacy` prefix is that eventually they should be using -// exactly the same types -#[derive(Debug, Clone, Default)] -pub struct NymTopology { - mixes: BTreeMap>, - gateways: Vec, -} + pub fn update(&mut self, new_topology: NymTopology) { + self.topology = new_topology; + } -impl NymTopology { - pub async fn new_from_env() -> Result { - let api_url = std::env::var(NYM_API)?; - - info!("Generating topology from {api_url}"); - - let mixnodes = reqwest::get(&format!("{api_url}/v1/unstable/nym-nodes/mixnodes/skimmed",)) - .await? - .json::>() - .await? 
- .nodes - .iter() - .map(mix::LegacyNode::try_from) - .filter(Result::is_ok) - .collect::, _>>()?; - - let gateways = reqwest::get(&format!("{api_url}/v1/unstable/nym-nodes/gateways/skimmed",)) - .await? - .json::>() - .await? - .nodes - .iter() - .map(gateway::LegacyNode::try_from) - .filter(Result::is_ok) - .collect::, _>>()?; - let topology = NymTopology::new_unordered(mixnodes, gateways); - Ok(topology) + pub fn clear_topology(&mut self) { + self.topology = Default::default(); } - pub fn new( - mixes: BTreeMap>, - gateways: Vec, - ) -> Self { - NymTopology { mixes, gateways } + pub fn with_ignore_egress_epoch_roles(mut self, ignore_egress_epoch_roles: bool) -> Self { + self.ignore_egress_epoch_roles(ignore_egress_epoch_roles); + self } - pub fn new_unordered( - unordered_mixes: Vec, - gateways: Vec, - ) -> Self { - let mut mixes = BTreeMap::new(); - for node in unordered_mixes.into_iter() { - let layer = node.layer as MixLayer; - let layer_entry = mixes.entry(layer).or_insert_with(Vec::new); - layer_entry.push(node) - } + pub fn ignore_egress_epoch_roles(&mut self, ignore_egress_epoch_roles: bool) { + self.ignore_egress_epoch_roles = ignore_egress_epoch_roles; + } - NymTopology { mixes, gateways } + pub fn egress_by_identity( + &self, + node_identity: NodeIdentity, + ) -> Result<&RoutingNode, NymTopologyError> { + self.topology + .egress_by_identity(node_identity, self.ignore_egress_epoch_roles) + } + + pub fn node_by_identity(&self, node_identity: NodeIdentity) -> Option<&RoutingNode> { + self.topology.find_node_by_identity(node_identity) } - pub fn from_unordered(unordered_mixes: MI, unordered_gateways: GI) -> Self + /// Tries to create a route to the egress point, such that it goes through mixnode on layer 1, + /// mixnode on layer2, .... 
mixnode on layer n and finally the target egress, which can be any known node + pub fn random_route_to_egress( + &self, + rng: &mut R, + egress_identity: NodeIdentity, + ) -> Result, NymTopologyError> where - MI: Iterator, - GI: Iterator, - G: TryInto, - M: TryInto, - >::Error: Display, - >::Error: Display, + R: Rng + CryptoRng + ?Sized, { - let mut mixes = BTreeMap::new(); - let mut gateways = Vec::new(); + self.topology + .random_route_to_egress(rng, egress_identity, self.ignore_egress_epoch_roles) + } - for node in unordered_mixes.into_iter() { - match node.try_into() { - Ok(mixnode) => mixes - .entry(mixnode.layer as MixLayer) - .or_insert_with(Vec::new) - .push(mixnode), - Err(err) => debug!("malformed mixnode: {err}"), - } - } + pub fn random_path_to_egress( + &self, + rng: &mut R, + egress_identity: NodeIdentity, + ) -> Result<(Vec<&RoutingNode>, &RoutingNode), NymTopologyError> + where + R: Rng + CryptoRng + ?Sized, + { + self.topology + .random_path_to_egress(rng, egress_identity, self.ignore_egress_epoch_roles) + } +} - for node in unordered_gateways.into_iter() { - match node.try_into() { - Ok(gateway) => gateways.push(gateway), - Err(err) => debug!("malformed gateway: {err}"), - } +impl NymTopology { + pub fn new_empty(rewarded_set: impl Into) -> Self { + NymTopology { + rewarded_set: rewarded_set.into(), + node_details: Default::default(), } + } - NymTopology::new(mixes, gateways) + pub fn new( + rewarded_set: impl Into, + node_details: Vec, + ) -> Self { + NymTopology { + rewarded_set: rewarded_set.into(), + node_details: node_details.into_iter().map(|n| (n.node_id, n)).collect(), + } } - #[cfg(feature = "serializable")] + #[cfg(feature = "persistence")] pub fn new_from_file>(path: P) -> std::io::Result { let file = std::fs::File::open(path)?; serde_json::from_reader(file).map_err(Into::into) } - pub fn from_basic(basic_mixes: &[SkimmedNode], basic_gateways: &[SkimmedNode]) -> Self { - nym_topology_from_basic_info(basic_mixes, basic_gateways) + pub fn 
add_skimmed_nodes(&mut self, nodes: &[SkimmedNode]) { + self.add_additional_nodes(nodes.iter()) } - pub fn find_mix(&self, mix_id: NodeId) -> Option<&mix::LegacyNode> { - for nodes in self.mixes.values() { - for node in nodes { - if node.mix_id == mix_id { - return Some(node); - } + pub fn add_routing_nodes>( + &mut self, + nodes: impl IntoIterator, + ) { + for node_details in nodes { + let node_details = node_details.borrow(); + let node_id = node_details.node_id; + if self + .node_details + .insert(node_id, node_details.clone()) + .is_some() + { + debug!("overwriting node details for node {node_id}") } } - None } - pub fn find_mix_by_identity( - &self, - mixnode_identity: IdentityKeyRef, - ) -> Option<&mix::LegacyNode> { - for nodes in self.mixes.values() { - for node in nodes { - if node.identity_key.to_base58_string() == mixnode_identity { - return Some(node); + pub fn add_additional_nodes(&mut self, nodes: impl Iterator) + where + N: TryInto, + >::Error: Display, + { + for node in nodes { + match node.try_into() { + Ok(node_details) => { + let node_id = node_details.node_id; + if self.node_details.insert(node_id, node_details).is_some() { + debug!("overwriting node details for node {node_id}") + } + } + Err(err) => { + debug!("malformed node details: {err}") } } } - None } - pub fn find_gateway(&self, gateway_identity: IdentityKeyRef) -> Option<&gateway::LegacyNode> { - self.gateways - .iter() - .find(|&gateway| gateway.identity_key.to_base58_string() == gateway_identity) + pub fn has_node_details(&self, node_id: NodeId) -> bool { + self.node_details.contains_key(&node_id) } - pub fn mixes(&self) -> &BTreeMap> { - &self.mixes + pub fn insert_node_details(&mut self, node_details: RoutingNode) { + self.node_details.insert(node_details.node_id, node_details); } - pub fn num_mixnodes(&self) -> usize { - self.mixes.values().map(|m| m.len()).sum() + pub fn rewarded_set(&self) -> &CachedEpochRewardedSet { + &self.rewarded_set } - pub fn mixes_as_vec(&self) -> Vec { - 
let mut mixes: Vec = vec![]; - - for layer in self.mixes().values() { - mixes.extend(layer.to_owned()) - } - mixes - } - - pub fn mixes_in_layer(&self, layer: MixLayer) -> Vec { - assert!([1, 2, 3].contains(&layer)); - self.mixes.get(&layer).unwrap().to_owned() + pub fn force_set_active(&mut self, node_id: NodeId, role: Role) { + match role { + Role::EntryGateway => self.rewarded_set.entry_gateways.insert(node_id), + Role::Layer1 => self.rewarded_set.layer1.insert(node_id), + Role::Layer2 => self.rewarded_set.layer2.insert(node_id), + Role::Layer3 => self.rewarded_set.layer3.insert(node_id), + Role::ExitGateway => self.rewarded_set.exit_gateways.insert(node_id), + Role::Standby => self.rewarded_set.standby.insert(node_id), + }; } - pub fn gateways(&self) -> &[gateway::LegacyNode] { - &self.gateways + fn node_details_exists(&self, ids: &HashSet) -> bool { + for id in ids { + if self.node_details.contains_key(id) { + return true; + } + } + false } - pub fn get_gateways(&self) -> Vec { - self.gateways.clone() - } + pub fn is_minimally_routable(&self) -> bool { + let has_layer1 = self.node_details_exists(&self.rewarded_set.layer1); + let has_layer2 = self.node_details_exists(&self.rewarded_set.layer2); + let has_layer3 = self.node_details_exists(&self.rewarded_set.layer3); + let has_exit_gateways = !self.rewarded_set.exit_gateways.is_empty(); + let has_entry_gateways = !self.rewarded_set.entry_gateways.is_empty(); - pub fn get_gateway(&self, gateway_identity: &NodeIdentity) -> Option<&gateway::LegacyNode> { - self.gateways - .iter() - .find(|gateway| gateway.identity() == gateway_identity) - } + debug!( + has_layer1 = %has_layer1, + has_layer2 = %has_layer2, + has_layer3 = %has_layer3, + has_entry_gateways = %has_entry_gateways, + has_exit_gateways = %has_exit_gateways, + "network status" + ); - pub fn gateway_exists(&self, gateway_identity: &NodeIdentity) -> bool { - self.get_gateway(gateway_identity).is_some() + has_layer1 && has_layer2 && has_layer3 && 
(has_exit_gateways || has_entry_gateways) } - pub fn insert_gateway(&mut self, gateway: gateway::LegacyNode) { - self.gateways.push(gateway) + pub fn ensure_minimally_routable(&self) -> Result<(), NymTopologyError> { + if !self.is_minimally_routable() { + return Err(NymTopologyError::InsufficientMixingNodes); + } + Ok(()) } - pub fn set_gateways(&mut self, gateways: Vec) { - self.gateways = gateways + pub fn is_empty(&self) -> bool { + self.rewarded_set.is_empty() || self.node_details.is_empty() } - pub fn random_gateway(&self, rng: &mut R) -> Result<&gateway::LegacyNode, NymTopologyError> - where - R: Rng + CryptoRng, - { - self.gateways - .choose(rng) - .ok_or(NymTopologyError::NoGatewaysAvailable) + pub fn ensure_not_empty(&self) -> Result<(), NymTopologyError> { + if self.is_empty() { + return Err(NymTopologyError::EmptyNetworkTopology); + } + Ok(()) } - /// Returns a vec of size of `num_mix_hops` of mixnodes, such that each subsequent node is on - /// next layer, starting from layer 1 - pub fn random_mix_route( + fn find_valid_mix_hop( &self, rng: &mut R, - num_mix_hops: u8, - ) -> Result, NymTopologyError> + id_choices: Vec, + ) -> Result<&RoutingNode, NymTopologyError> where R: Rng + CryptoRng + ?Sized, { - if self.mixes.len() < num_mix_hops as usize { - return Err(NymTopologyError::InvalidNumberOfHopsError { - available: self.mixes.len(), - requested: num_mix_hops as usize, - }); - } - let mut route = Vec::with_capacity(num_mix_hops as usize); - - // there is no "layer 0" - for layer in 1..=num_mix_hops { - // get all mixes on particular layer - let layer_mixes = self - .mixes - .get(&layer) - .ok_or(NymTopologyError::EmptyMixLayer { layer })?; - - // choose a random mix from the above list - // this can return a 'None' only if slice is empty - let random_mix = layer_mixes - .choose(rng) - .ok_or(NymTopologyError::EmptyMixLayer { layer })?; - route.push(random_mix.clone()); + let mut id_choices = id_choices; + while !id_choices.is_empty() { + let index = 
rng.gen_range(0..id_choices.len()); + + // SAFETY: this is not run if the vector is empty + let candidate_id = id_choices[index]; + match self.node_details.get(&candidate_id) { + Some(node) => { + return Ok(node); + } + // this will mess with VRF, but that's a future problem + None => { + id_choices.remove(index); + continue; + } + } } - Ok(route) + Err(NymTopologyError::NoMixnodesAvailable) } - pub fn random_path_to_gateway( + fn choose_mixing_node( &self, rng: &mut R, - num_mix_hops: u8, - gateway_identity: &NodeIdentity, - ) -> Result<(Vec, gateway::LegacyNode), NymTopologyError> + assigned_nodes: &HashSet, + ) -> Result<&RoutingNode, NymTopologyError> where R: Rng + CryptoRng + ?Sized, { - let gateway = self.get_gateway(gateway_identity).ok_or( - NymTopologyError::NonExistentGatewayError { - identity_key: gateway_identity.to_base58_string(), - }, - )?; - - let path = self.random_mix_route(rng, num_mix_hops)?; + // try first choice without cloning the ids (because I reckon, more often than not, it will actually work) + // HashSet's iterator implements `ExactSizeIterator` so choosing **one** random element + // is actually not that expensive + let Some(candidate) = assigned_nodes.iter().choose(rng) else { + return Err(NymTopologyError::NoMixnodesAvailable); + }; - Ok((path, gateway.clone())) + match self.node_details.get(candidate) { + Some(node) => Ok(node), + None => { + let remaining_choices = assigned_nodes + .iter() + .filter(|&n| n != candidate) + .copied() + .collect(); + self.find_valid_mix_hop(rng, remaining_choices) + } + } } - /// Tries to create a route to the specified gateway, such that it goes through mixnode on layer 1, - /// mixnode on layer2, .... 
mixnode on layer n and finally the target gateway - pub fn random_route_to_gateway( - &self, - rng: &mut R, - num_mix_hops: u8, - gateway_identity: &NodeIdentity, - ) -> Result, NymTopologyError> - where - R: Rng + CryptoRng + ?Sized, - { - let gateway = self.get_gateway(gateway_identity).ok_or( - NymTopologyError::NonExistentGatewayError { - identity_key: gateway_identity.to_base58_string(), - }, - )?; - - Ok(self - .random_mix_route(rng, num_mix_hops)? - .into_iter() - .map(|node| SphinxNode::from(&node)) - .chain(std::iter::once(gateway.into())) - .collect()) + pub fn find_node_by_identity(&self, node_identity: NodeIdentity) -> Option<&RoutingNode> { + self.node_details + .values() + .find(|n| n.identity_key == node_identity) } - /// Overwrites the existing nodes in the specified layer - pub fn set_mixes_in_layer(&mut self, layer: u8, mixes: Vec) { - self.mixes.insert(layer, mixes); + pub fn find_node(&self, node_id: NodeId) -> Option<&RoutingNode> { + self.node_details.get(&node_id) } - /// Checks if a mixnet path can be constructed using the specified number of hops - pub fn ensure_can_construct_path_through( + pub fn egress_by_identity( &self, - num_mix_hops: u8, - ) -> Result<(), NymTopologyError> { - let mixnodes = self.mixes(); - // 1. is it completely empty? - if mixnodes.is_empty() && self.gateways().is_empty() { - return Err(NymTopologyError::EmptyNetworkTopology); - } - - // 2. does it have any mixnode at all? - if mixnodes.is_empty() { - return Err(NymTopologyError::NoMixnodesAvailable); - } - - // 3. does it have any gateways at all? - if self.gateways().is_empty() { - return Err(NymTopologyError::NoGatewaysAvailable); - } + node_identity: NodeIdentity, + ignore_epoch_roles: bool, + ) -> Result<&RoutingNode, NymTopologyError> { + let Some(node) = self.find_node_by_identity(node_identity) else { + return Err(NymTopologyError::NonExistentNode { + node_identity: Box::new(node_identity), + }); + }; - // 4. does it have a mixnode on each layer? 
- for layer in 1..=num_mix_hops { - match mixnodes.get(&layer) { - None => return Err(NymTopologyError::EmptyMixLayer { layer }), - Some(layer_nodes) => { - if layer_nodes.is_empty() { - return Err(NymTopologyError::EmptyMixLayer { layer }); - } - } + // a 'valid' egress is one assigned to either entry role (i.e. entry for another client) + // or exit role (as a service provider) + if !ignore_epoch_roles { + let Some(role) = self.rewarded_set.role(node.node_id) else { + return Err(NymTopologyError::InvalidEgressRole { + node_identity: Box::new(node_identity), + }); + }; + if !matches!(role, Role::EntryGateway | Role::ExitGateway) { + return Err(NymTopologyError::InvalidEgressRole { + node_identity: Box::new(node_identity), + }); } } - - Ok(()) + Ok(node) } - pub fn ensure_even_layer_distribution( + fn egress_node_by_identity( &self, - lower_threshold: f32, - upper_threshold: f32, - ) -> Result<(), NymTopologyError> { - let mixnodes_count = self.num_mixnodes(); - - let layers = self - .mixes - .iter() - .map(|(k, v)| (*k, v.len())) - .collect::>(); - - if self.gateways.is_empty() { - return Err(NymTopologyError::NoGatewaysAvailable); - } + node_identity: NodeIdentity, + ignore_epoch_roles: bool, + ) -> Result { + self.egress_by_identity(node_identity, ignore_epoch_roles) + .map(Into::into) + } - if layers.is_empty() { - return Err(NymTopologyError::NoMixnodesAvailable); + fn random_mix_path_nodes(&self, rng: &mut R) -> Result, NymTopologyError> + where + R: Rng + CryptoRng + ?Sized, + { + if self.rewarded_set.is_empty() || self.node_details.is_empty() { + return Err(NymTopologyError::EmptyNetworkTopology); } - let upper_bound = (mixnodes_count as f32 * upper_threshold) as usize; - let lower_bound = (mixnodes_count as f32 * lower_threshold) as usize; - - for (layer, nodes) in &layers { - if nodes < &lower_bound || nodes > &upper_bound { - return Err(NymTopologyError::UnevenLayerDistribution { - layer: *layer, - nodes: *nodes, - lower_bound, - upper_bound, - 
total_nodes: mixnodes_count, - layer_distribution: layers, - }); - } - } + // we reserve an additional item in the route because we'll have to push an egress + let mut mix_route = Vec::with_capacity(4); - Ok(()) + mix_route.push(self.choose_mixing_node(rng, &self.rewarded_set.layer1)?); + mix_route.push(self.choose_mixing_node(rng, &self.rewarded_set.layer2)?); + mix_route.push(self.choose_mixing_node(rng, &self.rewarded_set.layer3)?); + + Ok(mix_route) } -} -#[cfg(feature = "serializable")] -impl Serialize for NymTopology { - fn serialize(&self, serializer: S) -> Result + pub fn random_mix_route(&self, rng: &mut R) -> Result, NymTopologyError> where - S: Serializer, + R: Rng + CryptoRng + ?Sized, { - crate::serde::SerializableNymTopology::from(self.clone()).serialize(serializer) + Ok(self + .random_mix_path_nodes(rng)? + .into_iter() + .map(Into::into) + .collect()) } -} -#[cfg(feature = "serializable")] -impl<'de> Deserialize<'de> for NymTopology { - fn deserialize(deserializer: D) -> Result + /// Tries to create a route to the egress point, such that it goes through mixnode on layer 1, + /// mixnode on layer2, .... 
mixnode on layer n and finally the target egress, which can be any known node + pub fn random_route_to_egress( + &self, + rng: &mut R, + egress_identity: NodeIdentity, + ignore_epoch_roles: bool, + ) -> Result, NymTopologyError> where - D: Deserializer<'de>, + R: Rng + CryptoRng + ?Sized, { - let serializable = crate::serde::SerializableNymTopology::deserialize(deserializer)?; - serializable.try_into().map_err(::serde::de::Error::custom) + let egress = self.egress_node_by_identity(egress_identity, ignore_epoch_roles)?; + let mut mix_route = self.random_mix_route(rng)?; + mix_route.push(egress); + Ok(mix_route) } -} - -pub fn nym_topology_from_basic_info( - basic_mixes: &[SkimmedNode], - basic_gateways: &[SkimmedNode], -) -> NymTopology { - let mut mixes = BTreeMap::new(); - for mix in basic_mixes { - let Some(layer) = mix.get_mix_layer() else { - warn!("node {} doesn't have any assigned mix layer!", mix.node_id); - continue; - }; - let layer_entry = mixes.entry(layer).or_insert_with(Vec::new); - match mix.try_into() { - Ok(mix) => layer_entry.push(mix), - Err(err) => { - warn!("node (mixnode) {} is malformed: {err}", mix.node_id); - continue; - } + pub fn random_path_to_egress( + &self, + rng: &mut R, + egress_identity: NodeIdentity, + ignore_epoch_roles: bool, + ) -> Result<(Vec<&RoutingNode>, &RoutingNode), NymTopologyError> + where + R: Rng + CryptoRng + ?Sized, + { + let egress = self.egress_by_identity(egress_identity, ignore_epoch_roles)?; + let mix_route = self.random_mix_path_nodes(rng)?; + Ok((mix_route, egress)) + } + + pub fn nodes_with_role(&self, role: Role) -> impl Iterator { + self.node_details.values().filter(move |node| match role { + Role::EntryGateway => self.rewarded_set.entry_gateways.contains(&node.node_id), + Role::Layer1 => self.rewarded_set.layer1.contains(&node.node_id), + Role::Layer2 => self.rewarded_set.layer2.contains(&node.node_id), + Role::Layer3 => self.rewarded_set.layer3.contains(&node.node_id), + Role::ExitGateway => 
self.rewarded_set.exit_gateways.contains(&node.node_id), + Role::Standby => self.rewarded_set.standby.contains(&node.node_id), + }) + } + + pub fn set_testable_node(&mut self, role: Role, node: impl Into) { + fn init_set(node: NodeId) -> HashSet { + let mut set = HashSet::new(); + set.insert(node); + set } - } - let mut gateways = Vec::with_capacity(basic_gateways.len()); - for gateway in basic_gateways { - match gateway.try_into() { - Ok(gate) => gateways.push(gate), - Err(err) => { - warn!("node (gateway) {} is malformed: {err}", gateway.node_id); - continue; + let node = node.into(); + let node_id = node.node_id; + self.node_details.insert(node.node_id, node); + + match role { + Role::EntryGateway => self.rewarded_set.entry_gateways = init_set(node_id), + Role::Layer1 => self.rewarded_set.layer1 = init_set(node_id), + Role::Layer2 => self.rewarded_set.layer2 = init_set(node_id), + Role::Layer3 => self.rewarded_set.layer3 = init_set(node_id), + Role::ExitGateway => self.rewarded_set.exit_gateways = init_set(node_id), + Role::Standby => { + warn!("attempting to test node in 'standby' mode - are you sure that's what you meant to do?"); + self.rewarded_set.standby = init_set(node_id) } } } - NymTopology::new(mixes, gateways) -} - -#[cfg(test)] -mod converting_mixes_to_vec { - use super::*; - - #[cfg(test)] - mod when_nodes_exist { - use nym_crypto::asymmetric::{encryption, identity}; - - use super::*; - use nym_mixnet_contract_common::LegacyMixLayer; - - #[test] - fn returns_a_vec_with_hashmap_values() { - let node1 = mix::LegacyNode { - mix_id: 42, - host: "3.3.3.3".parse().unwrap(), - mix_host: "3.3.3.3:1789".parse().unwrap(), - identity_key: identity::PublicKey::from_base58_string( - "3ebjp1Fb9hdcS1AR6AZihgeJiMHkB5jjJUsvqNnfQwU7", - ) - .unwrap(), - sphinx_key: encryption::PublicKey::from_base58_string( - "C7cown6dYCLZpLiMFC1PaBmhvLvmJmLDJGeRTbPD45bX", - ) - .unwrap(), - layer: LegacyMixLayer::One, - version: "0.2.0".into(), - }; - - let node2 = mix::LegacyNode { 
..node1.clone() }; - - let node3 = mix::LegacyNode { ..node1.clone() }; - - let mut mixes = BTreeMap::new(); - mixes.insert(1, vec![node1, node2]); - mixes.insert(2, vec![node3]); - - let topology = NymTopology::new(mixes, vec![]); - let mixvec = topology.mixes_as_vec(); - assert!(mixvec - .iter() - .any(|node| &node.identity_key.to_base58_string() - == "3ebjp1Fb9hdcS1AR6AZihgeJiMHkB5jjJUsvqNnfQwU7")); - } + pub fn entry_gateways(&self) -> impl Iterator { + self.node_details + .values() + .filter(|n| self.rewarded_set.entry_gateways.contains(&n.node_id)) } - #[cfg(test)] - mod when_no_nodes_exist { - use super::*; + // ideally this shouldn't exist... + pub fn entry_capable_nodes(&self) -> impl Iterator { + self.node_details + .values() + .filter(|n| n.supported_roles.mixnet_entry) + } - #[test] - fn returns_an_empty_vec() { - let topology = NymTopology::new(BTreeMap::new(), vec![]); - let mixvec = topology.mixes_as_vec(); - assert!(mixvec.is_empty()); - } + pub fn mixnodes(&self) -> impl Iterator { + self.node_details + .values() + .filter(|n| self.rewarded_set.is_active_mixnode(&n.node_id)) } } diff --git a/common/topology/src/mix.rs b/common/topology/src/mix.rs deleted file mode 100644 index 40c61cff4b..0000000000 --- a/common/topology/src/mix.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2021 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use crate::{NetworkAddress, NodeVersion}; -use nym_api_requests::nym_nodes::{NodeRole, SkimmedNode}; -use nym_crypto::asymmetric::{encryption, identity}; -pub use nym_mixnet_contract_common::LegacyMixLayer; -use nym_mixnet_contract_common::NodeId; -use nym_sphinx_addressing::nodes::NymNodeRoutingAddress; -use nym_sphinx_types::Node as SphinxNode; -use rand::seq::SliceRandom; -use rand::thread_rng; -use std::fmt::Formatter; -use std::io; -use std::net::SocketAddr; -use thiserror::Error; - -#[derive(Error, Debug)] -pub enum MixnodeConversionError { - #[error("mixnode identity key was malformed - {0}")] - 
InvalidIdentityKey(#[from] identity::Ed25519RecoveryError), - - #[error("mixnode sphinx key was malformed - {0}")] - InvalidSphinxKey(#[from] encryption::KeyRecoveryError), - - #[error("'{value}' is not a valid mixnode address - {source}")] - InvalidAddress { - value: String, - #[source] - source: io::Error, - }, - - #[error("invalid mix layer")] - InvalidLayer, - - #[error("'{mixnode}' has not provided any valid ip addresses")] - NoIpAddressesProvided { mixnode: String }, - - #[error("provided node is not a mixnode in this epoch!")] - NotMixnode, -} - -#[derive(Clone)] -pub struct LegacyNode { - pub mix_id: NodeId, - pub host: NetworkAddress, - // we're keeping this as separate resolved field since we do not want to be resolving the potential - // hostname every time we want to construct a path via this node - pub mix_host: SocketAddr, - pub identity_key: identity::PublicKey, - pub sphinx_key: encryption::PublicKey, // TODO: or nymsphinx::PublicKey? both are x25519 - pub layer: LegacyMixLayer, - - // to be removed: - pub version: NodeVersion, -} - -impl std::fmt::Debug for LegacyNode { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("mix::Node") - .field("mix_id", &self.mix_id) - .field("host", &self.host) - .field("mix_host", &self.mix_host) - .field("identity_key", &self.identity_key.to_base58_string()) - .field("sphinx_key", &self.sphinx_key.to_base58_string()) - .field("layer", &self.layer) - .field("version", &self.version) - .finish() - } -} - -impl LegacyNode { - pub fn parse_host(raw: &str) -> Result { - // safety: this conversion is infallible - // (but we retain result return type for legacy reasons) - Ok(raw.parse().unwrap()) - } - - pub fn extract_mix_host( - host: &NetworkAddress, - mix_port: u16, - ) -> Result { - Ok(host.to_socket_addrs(mix_port).map_err(|err| { - MixnodeConversionError::InvalidAddress { - value: host.to_string(), - source: err, - } - })?[0]) - } -} - -impl<'a> From<&'a LegacyNode> for SphinxNode { - fn 
from(node: &'a LegacyNode) -> Self { - let node_address_bytes = NymNodeRoutingAddress::from(node.mix_host) - .try_into() - .unwrap(); - - SphinxNode::new(node_address_bytes, (&node.sphinx_key).into()) - } -} - -impl<'a> TryFrom<&'a SkimmedNode> for LegacyNode { - type Error = MixnodeConversionError; - - fn try_from(value: &'a SkimmedNode) -> Result { - if value.ip_addresses.is_empty() { - return Err(MixnodeConversionError::NoIpAddressesProvided { - mixnode: value.ed25519_identity_pubkey.to_base58_string(), - }); - } - - let layer = match value.role { - NodeRole::Mixnode { layer } => layer - .try_into() - .map_err(|_| MixnodeConversionError::InvalidLayer)?, - _ => return Err(MixnodeConversionError::NotMixnode), - }; - - // safety: we just checked the slice is not empty - #[allow(clippy::unwrap_used)] - let ip = value.ip_addresses.choose(&mut thread_rng()).unwrap(); - - let host = NetworkAddress::IpAddr(*ip); - - Ok(LegacyNode { - mix_id: value.node_id, - host, - mix_host: SocketAddr::new(*ip, value.mix_port), - identity_key: value.ed25519_identity_pubkey, - sphinx_key: value.x25519_sphinx_pubkey, - layer, - version: NodeVersion::Unknown, - }) - } -} diff --git a/common/topology/src/node.rs b/common/topology/src/node.rs new file mode 100644 index 0000000000..81ab236f76 --- /dev/null +++ b/common/topology/src/node.rs @@ -0,0 +1,143 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use nym_api_requests::models::DeclaredRoles; +use nym_api_requests::nym_nodes::SkimmedNode; +use nym_crypto::asymmetric::{ed25519, x25519}; +use nym_mixnet_contract_common::NodeId; +use nym_sphinx_addressing::nodes::NymNodeRoutingAddress; +use nym_sphinx_types::Node as SphinxNode; +use serde::{Deserialize, Serialize}; +use std::net::{IpAddr, SocketAddr}; +use thiserror::Error; + +pub use nym_mixnet_contract_common::LegacyMixLayer; + +#[derive(Error, Debug)] +pub enum RoutingNodeError { + #[error("node {node_id} ('{identity}') has not provided any valid ip 
addresses")] + NoIpAddressesProvided { node_id: NodeId, identity: String }, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct EntryDetails { + // to allow client to choose ipv6 preference, if available + pub ip_addresses: Vec, + pub clients_ws_port: u16, + pub hostname: Option, + pub clients_wss_port: Option, +} + +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +pub struct SupportedRoles { + pub mixnode: bool, + pub mixnet_entry: bool, + pub mixnet_exit: bool, +} + +impl From for SupportedRoles { + fn from(value: DeclaredRoles) -> Self { + SupportedRoles { + mixnode: value.mixnode, + mixnet_entry: value.entry, + mixnet_exit: value.exit_nr && value.exit_ipr, + } + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct RoutingNode { + pub node_id: NodeId, + + pub mix_host: SocketAddr, + + pub entry: Option, + pub identity_key: ed25519::PublicKey, + pub sphinx_key: x25519::PublicKey, + + pub supported_roles: SupportedRoles, +} + +impl RoutingNode { + pub fn ws_entry_address_tls(&self) -> Option { + let entry = self.entry.as_ref()?; + let hostname = entry.hostname.as_ref()?; + let wss_port = entry.clients_wss_port?; + + Some(format!("wss://{hostname}:{wss_port}")) + } + + pub fn ws_entry_address_no_tls(&self, prefer_ipv6: bool) -> Option { + let entry = self.entry.as_ref()?; + + if let Some(hostname) = entry.hostname.as_ref() { + return Some(format!("ws://{hostname}:{}", entry.clients_ws_port)); + } + + if prefer_ipv6 { + if let Some(ipv6) = entry.ip_addresses.iter().find(|ip| ip.is_ipv6()) { + return Some(format!("ws://{ipv6}:{}", entry.clients_ws_port)); + } + } + + let any_ip = entry.ip_addresses.first()?; + Some(format!("ws://{any_ip}:{}", entry.clients_ws_port)) + } + + pub fn ws_entry_address(&self, prefer_ipv6: bool) -> Option { + if let Some(tls) = self.ws_entry_address_tls() { + return Some(tls); + } + self.ws_entry_address_no_tls(prefer_ipv6) + } + + pub fn identity(&self) -> ed25519::PublicKey { + self.identity_key + } +} 
+ +impl<'a> From<&'a RoutingNode> for SphinxNode { + fn from(node: &'a RoutingNode) -> Self { + // SAFETY: this conversion is infallible as all versions of socket addresses have + // sufficiently small bytes representation to fit inside `NodeAddressBytes` + #[allow(clippy::unwrap_used)] + let node_address_bytes = NymNodeRoutingAddress::from(node.mix_host) + .try_into() + .unwrap(); + + SphinxNode::new(node_address_bytes, (&node.sphinx_key).into()) + } +} + +impl<'a> TryFrom<&'a SkimmedNode> for RoutingNode { + type Error = RoutingNodeError; + + fn try_from(value: &'a SkimmedNode) -> Result { + // IF YOU EVER ADD "performance" TO RoutingNode, + // MAKE SURE TO UPDATE THE LAZY IMPLEMENTATION OF + // `impl NodeDescriptionTopologyExt for NymNodeDescription`!!! + + let Some(first_ip) = value.ip_addresses.first() else { + return Err(RoutingNodeError::NoIpAddressesProvided { + node_id: value.node_id, + identity: value.ed25519_identity_pubkey.to_string(), + }); + }; + + let entry = value.entry.as_ref().map(|entry| EntryDetails { + ip_addresses: value.ip_addresses.clone(), + clients_ws_port: entry.ws_port, + hostname: entry.hostname.clone(), + clients_wss_port: entry.wss_port, + }); + + Ok(RoutingNode { + node_id: value.node_id, + mix_host: SocketAddr::new(*first_ip, value.mix_port), + entry, + identity_key: value.ed25519_identity_pubkey, + sphinx_key: value.x25519_sphinx_pubkey, + supported_roles: value.supported_roles.into(), + }) + } +} diff --git a/common/topology/src/provider_trait.rs b/common/topology/src/provider_trait.rs index 0dddecf2cb..ad8381fa7d 100644 --- a/common/topology/src/provider_trait.rs +++ b/common/topology/src/provider_trait.rs @@ -22,7 +22,7 @@ pub struct HardcodedTopologyProvider { } impl HardcodedTopologyProvider { - #[cfg(feature = "serializable")] + #[cfg(feature = "persistence")] pub fn new_from_file>(path: P) -> std::io::Result { NymTopology::new_from_file(path).map(Self::new) } diff --git a/common/topology/src/random_route_provider.rs 
b/common/topology/src/random_route_provider.rs deleted file mode 100644 index 1771c83eb7..0000000000 --- a/common/topology/src/random_route_provider.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use crate::{NymTopology, NymTopologyError}; -use nym_sphinx_addressing::clients::Recipient; -use nym_sphinx_routing::SphinxRouteMaker; -use nym_sphinx_types::Node; -use rand::{CryptoRng, Rng}; - -#[allow(dead_code)] -pub struct NymTopologyRouteProvider { - rng: R, - inner: NymTopology, -} - -impl SphinxRouteMaker for NymTopologyRouteProvider -where - R: Rng + CryptoRng, -{ - type Error = NymTopologyError; - - fn sphinx_route( - &mut self, - hops: u8, - destination: &Recipient, - ) -> Result, NymTopologyError> { - self.inner - .random_route_to_gateway(&mut self.rng, hops, destination.gateway()) - } -} diff --git a/common/topology/src/rewarded_set.rs b/common/topology/src/rewarded_set.rs new file mode 100644 index 0000000000..0d06239be6 --- /dev/null +++ b/common/topology/src/rewarded_set.rs @@ -0,0 +1,122 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use nym_mixnet_contract_common::nym_node::Role; +use nym_mixnet_contract_common::{EpochId, EpochRewardedSet, NodeId, RewardedSet}; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct CachedEpochRewardedSet { + pub epoch_id: EpochId, + + pub entry_gateways: HashSet, + + pub exit_gateways: HashSet, + + pub layer1: HashSet, + + pub layer2: HashSet, + + pub layer3: HashSet, + + pub standby: HashSet, +} + +impl From for CachedEpochRewardedSet { + fn from(value: EpochRewardedSet) -> Self { + CachedEpochRewardedSet { + epoch_id: value.epoch_id, + entry_gateways: value.assignment.entry_gateways.into_iter().collect(), + exit_gateways: value.assignment.exit_gateways.into_iter().collect(), + layer1: 
value.assignment.layer1.into_iter().collect(), + layer2: value.assignment.layer2.into_iter().collect(), + layer3: value.assignment.layer3.into_iter().collect(), + standby: value.assignment.standby.into_iter().collect(), + } + } +} + +impl From for EpochRewardedSet { + fn from(value: CachedEpochRewardedSet) -> Self { + EpochRewardedSet { + epoch_id: value.epoch_id, + assignment: RewardedSet { + entry_gateways: value.entry_gateways.into_iter().collect(), + exit_gateways: value.exit_gateways.into_iter().collect(), + layer1: value.layer1.into_iter().collect(), + layer2: value.layer2.into_iter().collect(), + layer3: value.layer3.into_iter().collect(), + standby: value.standby.into_iter().collect(), + }, + } + } +} + +impl CachedEpochRewardedSet { + pub fn is_empty(&self) -> bool { + self.entry_gateways.is_empty() + && self.exit_gateways.is_empty() + && self.layer1.is_empty() + && self.layer2.is_empty() + && self.layer3.is_empty() + && self.standby.is_empty() + } + + pub fn role(&self, node_id: NodeId) -> Option { + if self.entry_gateways.contains(&node_id) { + Some(Role::EntryGateway) + } else if self.exit_gateways.contains(&node_id) { + Some(Role::ExitGateway) + } else if self.layer1.contains(&node_id) { + Some(Role::Layer1) + } else if self.layer2.contains(&node_id) { + Some(Role::Layer2) + } else if self.layer3.contains(&node_id) { + Some(Role::Layer3) + } else if self.standby.contains(&node_id) { + Some(Role::Standby) + } else { + None + } + } + + pub fn legacy_mix_layer(&self, node_id: &NodeId) -> Option { + if self.layer1.contains(node_id) { + Some(1) + } else if self.layer2.contains(node_id) { + Some(2) + } else if self.layer3.contains(node_id) { + Some(3) + } else { + None + } + } + + pub fn is_standby(&self, node_id: &NodeId) -> bool { + self.standby.contains(node_id) + } + + pub fn is_active_mixnode(&self, node_id: &NodeId) -> bool { + self.layer1.contains(node_id) + || self.layer2.contains(node_id) + || self.layer3.contains(node_id) + } + + pub fn 
gateways(&self) -> HashSet { + let mut gateways = + HashSet::with_capacity(self.entry_gateways.len() + self.exit_gateways.len()); + gateways.extend(&self.entry_gateways); + gateways.extend(&self.exit_gateways); + gateways + } + + pub fn active_mixnodes(&self) -> HashSet { + let mut mixnodes = + HashSet::with_capacity(self.layer1.len() + self.layer2.len() + self.layer3.len()); + mixnodes.extend(&self.layer1); + mixnodes.extend(&self.layer2); + mixnodes.extend(&self.layer3); + mixnodes + } +} diff --git a/common/topology/src/serde.rs b/common/topology/src/serde.rs deleted file mode 100644 index 601b78dfd3..0000000000 --- a/common/topology/src/serde.rs +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -#![allow(unknown_lints)] -// clippy::empty_docs is not on stable as of 1.77 - -// due to the code generated by Tsify -#![allow(clippy::empty_docs)] - -use crate::gateway::GatewayConversionError; -use crate::mix::MixnodeConversionError; -use crate::{gateway, mix, MixLayer, NymTopology}; -use nym_config::defaults::{DEFAULT_CLIENT_LISTENING_PORT, DEFAULT_MIX_LISTENING_PORT}; -use nym_crypto::asymmetric::{encryption, identity}; -use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; -use std::net::{IpAddr, SocketAddr}; -use thiserror::Error; - -#[cfg(feature = "wasm-serde-types")] -use tsify::Tsify; - -use nym_mixnet_contract_common::NodeId; -#[cfg(feature = "wasm-serde-types")] -use wasm_bindgen::{prelude::wasm_bindgen, JsValue}; - -#[cfg(feature = "wasm-serde-types")] -use wasm_utils::error::simple_js_error; - -#[derive(Debug, Error)] -pub enum SerializableTopologyError { - #[error("got invalid mix layer {value}. 
Expected 1, 2 or 3.")] - InvalidMixLayer { value: u8 }, - - #[error(transparent)] - GatewayConversion(#[from] GatewayConversionError), - - #[error(transparent)] - MixnodeConversion(#[from] MixnodeConversionError), - - #[error("The provided mixnode map was malformed: {msg}")] - MalformedMixnodeMap { msg: String }, - - #[error("The provided gateway list was malformed: {msg}")] - MalformedGatewayList { msg: String }, -} - -#[cfg(feature = "wasm-serde-types")] -impl From for JsValue { - fn from(value: SerializableTopologyError) -> Self { - simple_js_error(value.to_string()) - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[cfg_attr(feature = "wasm-serde-types", derive(Tsify))] -#[cfg_attr(feature = "wasm-serde-types", tsify(into_wasm_abi, from_wasm_abi))] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct SerializableNymTopology { - pub mixnodes: BTreeMap>, - pub gateways: Vec, -} - -impl TryFrom for NymTopology { - type Error = SerializableTopologyError; - - fn try_from(value: SerializableNymTopology) -> Result { - let mut converted_mixes = BTreeMap::new(); - - for (layer, nodes) in value.mixnodes { - let layer_nodes = nodes - .into_iter() - .map(TryInto::try_into) - .collect::>()?; - - converted_mixes.insert(layer, layer_nodes); - } - - let gateways = value - .gateways - .into_iter() - .map(TryInto::try_into) - .collect::>()?; - - Ok(NymTopology::new(converted_mixes, gateways)) - } -} - -impl From for SerializableNymTopology { - fn from(value: NymTopology) -> Self { - SerializableNymTopology { - mixnodes: value - .mixes() - .iter() - .map(|(&l, nodes)| (l, nodes.iter().map(Into::into).collect())) - .collect(), - gateways: value.gateways().iter().map(Into::into).collect(), - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[cfg_attr(feature = "wasm-serde-types", derive(Tsify))] -#[cfg_attr(feature = "wasm-serde-types", tsify(into_wasm_abi, from_wasm_abi))] -#[serde(rename_all = "camelCase")] 
-#[serde(deny_unknown_fields)] -pub struct SerializableMixNode { - // this is a `MixId` but due to typescript issue, we're using u32 directly. - #[serde(alias = "mix_id")] - pub mix_id: u32, - - pub host: String, - - #[cfg_attr(feature = "wasm-serde-types", tsify(optional))] - #[serde(alias = "mix_port")] - pub mix_port: Option, - - #[serde(alias = "identity_key")] - pub identity_key: String, - - #[serde(alias = "sphinx_key")] - pub sphinx_key: String, - - // this is a `MixLayer` but due to typescript issue, we're using u8 directly. - pub layer: u8, - - #[cfg_attr(feature = "wasm-serde-types", tsify(optional))] - pub version: Option, -} - -impl TryFrom for mix::LegacyNode { - type Error = SerializableTopologyError; - - fn try_from(value: SerializableMixNode) -> Result { - let host = mix::LegacyNode::parse_host(&value.host)?; - - let mix_port = value.mix_port.unwrap_or(DEFAULT_MIX_LISTENING_PORT); - let version = value.version.map(|v| v.as_str().into()).unwrap_or_default(); - - // try to completely resolve the host in the mix situation to avoid doing it every - // single time we want to construct a path - let mix_host = mix::LegacyNode::extract_mix_host(&host, mix_port)?; - - Ok(mix::LegacyNode { - mix_id: value.mix_id, - host, - mix_host, - identity_key: identity::PublicKey::from_base58_string(&value.identity_key) - .map_err(MixnodeConversionError::from)?, - sphinx_key: encryption::PublicKey::from_base58_string(&value.sphinx_key) - .map_err(MixnodeConversionError::from)?, - layer: mix::LegacyMixLayer::try_from(value.layer) - .map_err(|_| SerializableTopologyError::InvalidMixLayer { value: value.layer })?, - version, - }) - } -} - -impl<'a> From<&'a mix::LegacyNode> for SerializableMixNode { - fn from(value: &'a mix::LegacyNode) -> Self { - SerializableMixNode { - mix_id: value.mix_id, - host: value.host.to_string(), - mix_port: Some(value.mix_host.port()), - identity_key: value.identity_key.to_base58_string(), - sphinx_key: value.sphinx_key.to_base58_string(), - 
layer: value.layer.into(), - version: Some(value.version.to_string()), - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -#[cfg_attr(feature = "wasm-serde-types", derive(Tsify))] -#[cfg_attr(feature = "wasm-serde-types", tsify(into_wasm_abi, from_wasm_abi))] -#[serde(rename_all = "camelCase")] -#[serde(deny_unknown_fields)] -pub struct SerializableGateway { - pub host: String, - - pub node_id: NodeId, - - // optional ip address in the case of host being a hostname that can't be resolved - // (thank you wasm) - #[cfg_attr(feature = "wasm-serde-types", tsify(optional))] - #[serde(alias = "explicit_ip")] - pub explicit_ip: Option, - - #[cfg_attr(feature = "wasm-serde-types", tsify(optional))] - #[serde(alias = "mix_port")] - pub mix_port: Option, - - #[cfg_attr(feature = "wasm-serde-types", tsify(optional))] - #[serde(alias = "clients_port")] - #[serde(alias = "clients_ws_port")] - pub clients_ws_port: Option, - - #[cfg_attr(feature = "wasm-serde-types", tsify(optional))] - #[serde(alias = "clients_wss_port")] - pub clients_wss_port: Option, - - #[serde(alias = "identity_key")] - pub identity_key: String, - - #[serde(alias = "sphinx_key")] - pub sphinx_key: String, - - #[cfg_attr(feature = "wasm-serde-types", tsify(optional))] - pub version: Option, -} - -impl TryFrom for gateway::LegacyNode { - type Error = SerializableTopologyError; - - fn try_from(value: SerializableGateway) -> Result { - let host = gateway::LegacyNode::parse_host(&value.host)?; - - let mix_port = value.mix_port.unwrap_or(DEFAULT_MIX_LISTENING_PORT); - let clients_ws_port = value - .clients_ws_port - .unwrap_or(DEFAULT_CLIENT_LISTENING_PORT); - let version = value.version.map(|v| v.as_str().into()).unwrap_or_default(); - - // try to completely resolve the host in the mix situation to avoid doing it every - // single time we want to construct a path - let mix_host = if let Some(explicit_ip) = value.explicit_ip { - SocketAddr::new(explicit_ip, mix_port) - } else { - 
gateway::LegacyNode::extract_mix_host(&host, mix_port)? - }; - - Ok(gateway::LegacyNode { - node_id: value.node_id, - host, - mix_host, - clients_ws_port, - clients_wss_port: value.clients_wss_port, - identity_key: identity::PublicKey::from_base58_string(&value.identity_key) - .map_err(GatewayConversionError::from)?, - sphinx_key: encryption::PublicKey::from_base58_string(&value.sphinx_key) - .map_err(GatewayConversionError::from)?, - version, - }) - } -} - -impl<'a> From<&'a gateway::LegacyNode> for SerializableGateway { - fn from(value: &'a gateway::LegacyNode) -> Self { - SerializableGateway { - host: value.host.to_string(), - node_id: value.node_id, - explicit_ip: Some(value.mix_host.ip()), - mix_port: Some(value.mix_host.port()), - clients_ws_port: Some(value.clients_ws_port), - clients_wss_port: value.clients_wss_port, - identity_key: value.identity_key.to_base58_string(), - sphinx_key: value.sphinx_key.to_base58_string(), - version: Some(value.version.to_string()), - } - } -} diff --git a/common/topology/src/wasm_helpers.rs b/common/topology/src/wasm_helpers.rs new file mode 100644 index 0000000000..ecb451e8be --- /dev/null +++ b/common/topology/src/wasm_helpers.rs @@ -0,0 +1,123 @@ +// Copyright 2023 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +// due to the code generated by Tsify +#![allow(clippy::empty_docs)] + +use crate::node::{EntryDetails, RoutingNode, RoutingNodeError, SupportedRoles}; +use crate::{CachedEpochRewardedSet, NymTopology}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::net::SocketAddr; +use thiserror::Error; +use tsify::Tsify; +use wasm_bindgen::{prelude::wasm_bindgen, JsValue}; +use wasm_utils::error::simple_js_error; + +#[derive(Debug, Error)] +pub enum SerializableTopologyError { + #[error(transparent)] + NodeConversion(#[from] RoutingNodeError), + + #[error("{provided} is not a valid ed25519 public key")] + MalformedIdentity { provided: String }, + + #[error("{provided} is not a 
valid x25519 public key")] + MalformedSphinxKey { provided: String }, +} + +#[cfg(feature = "wasm-serde-types")] +impl From for JsValue { + fn from(value: SerializableTopologyError) -> Self { + simple_js_error(value.to_string()) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Tsify)] +#[tsify(into_wasm_abi, from_wasm_abi)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct WasmFriendlyNymTopology { + pub rewarded_set: CachedEpochRewardedSet, + + pub node_details: HashMap, +} + +impl TryFrom for NymTopology { + type Error = SerializableTopologyError; + + fn try_from(value: WasmFriendlyNymTopology) -> Result { + let node_details = value + .node_details + .into_values() + .map(|details| details.try_into()) + .collect::>()?; + + Ok(NymTopology::new(value.rewarded_set, node_details)) + } +} + +impl From for WasmFriendlyNymTopology { + fn from(value: NymTopology) -> Self { + WasmFriendlyNymTopology { + rewarded_set: value.rewarded_set, + node_details: value + .node_details + .into_iter() + .map(|(k, v)| (k, v.into())) + .collect(), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, Tsify)] +#[tsify(into_wasm_abi, from_wasm_abi)] +#[serde(rename_all = "camelCase")] +#[serde(deny_unknown_fields)] +pub struct WasmFriendlyRoutingNode { + pub node_id: u32, + + pub mix_host: SocketAddr, + + pub entry: Option, + pub identity_key: String, + pub sphinx_key: String, + + pub supported_roles: SupportedRoles, +} + +impl TryFrom for RoutingNode { + type Error = SerializableTopologyError; + + fn try_from(value: WasmFriendlyRoutingNode) -> Result { + Ok(RoutingNode { + node_id: value.node_id, + mix_host: value.mix_host, + entry: value.entry, + identity_key: value.identity_key.as_str().parse().map_err(|_| { + SerializableTopologyError::MalformedIdentity { + provided: value.identity_key, + } + })?, + sphinx_key: value.sphinx_key.as_str().parse().map_err(|_| { + SerializableTopologyError::MalformedIdentity { + provided: value.sphinx_key, + } + 
})?, + supported_roles: value.supported_roles, + }) + } +} + +impl From for WasmFriendlyRoutingNode { + fn from(node: RoutingNode) -> Self { + WasmFriendlyRoutingNode { + node_id: node.node_id, + mix_host: node.mix_host, + entry: node.entry, + identity_key: node.identity_key.to_string(), + sphinx_key: node.sphinx_key.to_string(), + supported_roles: node.supported_roles, + } + } +} diff --git a/common/wasm/client-core/Cargo.toml b/common/wasm/client-core/Cargo.toml index 1b0b3df67f..d63e31e50a 100644 --- a/common/wasm/client-core/Cargo.toml +++ b/common/wasm/client-core/Cargo.toml @@ -29,10 +29,10 @@ nym-credential-storage = { path = "../../credential-storage" } nym-crypto = { path = "../../crypto", features = ["asymmetric", "serde"] } nym-gateway-client = { path = "../../client-libs/gateway-client", default-features = false, features = ["wasm"] } nym-sphinx = { path = "../../nymsphinx" } -nym-sphinx-acknowledgements = { path = "../../nymsphinx/acknowledgements", features = ["serde"]} +nym-sphinx-acknowledgements = { path = "../../nymsphinx/acknowledgements", features = ["serde"] } nym-statistics-common = { path = "../../statistics" } nym-task = { path = "../../task" } -nym-topology = { path = "../../topology", features = ["serializable", "wasm-serde-types"] } +nym-topology = { path = "../../topology", features = ["wasm-serde-types"] } nym-validator-client = { path = "../../client-libs/validator-client", default-features = false } wasm-utils = { path = "../utils" } wasm-storage = { path = "../storage" } diff --git a/common/wasm/client-core/src/config/mod.rs b/common/wasm/client-core/src/config/mod.rs index 486e8e830c..aab813f5da 100644 --- a/common/wasm/client-core/src/config/mod.rs +++ b/common/wasm/client-core/src/config/mod.rs @@ -387,6 +387,15 @@ pub struct TopologyWasm { /// Specifies a minimum performance of a gateway that is used on route construction. /// This setting is only applicable when `NymApi` topology is used. 
pub minimum_gateway_performance: u8, + + /// Specifies whether this client should attempt to retrieve all available network nodes + /// as opposed to just active mixnodes/gateways. + /// Useless without `ignore_epoch_roles = true` + pub use_extended_topology: bool, + + /// Specifies whether this client should ignore the current epoch role of the target egress node + /// when constructing the final hop packets. + pub ignore_egress_epoch_role: bool, } impl Default for TopologyWasm { @@ -409,6 +418,8 @@ impl From for ConfigTopology { topology_structure: Default::default(), minimum_mixnode_performance: topology.minimum_mixnode_performance, minimum_gateway_performance: topology.minimum_gateway_performance, + use_extended_topology: topology.use_extended_topology, + ignore_egress_epoch_role: topology.ignore_egress_epoch_role, } } } @@ -424,6 +435,8 @@ impl From for TopologyWasm { disable_refreshing: topology.disable_refreshing, minimum_mixnode_performance: topology.minimum_mixnode_performance, minimum_gateway_performance: topology.minimum_gateway_performance, + use_extended_topology: topology.use_extended_topology, + ignore_egress_epoch_role: topology.ignore_egress_epoch_role, } } } diff --git a/common/wasm/client-core/src/config/override.rs b/common/wasm/client-core/src/config/override.rs index 9a8f631215..f85ad9d241 100644 --- a/common/wasm/client-core/src/config/override.rs +++ b/common/wasm/client-core/src/config/override.rs @@ -271,6 +271,17 @@ pub struct TopologyWasmOverride { /// This setting is only applicable when `NymApi` topology is used. #[tsify(optional)] pub minimum_gateway_performance: Option, + + /// Specifies whether this client should attempt to retrieve all available network nodes + /// as opposed to just active mixnodes/gateways. 
+ /// Useless without `ignore_epoch_roles = true` + #[tsify(optional)] + pub use_extended_topology: Option, + + /// Specifies whether this client should ignore the current epoch role of the target egress node + /// when constructing the final hop packets. + #[tsify(optional)] + pub ignore_egress_epoch_role: Option, } impl From for TopologyWasm { @@ -294,6 +305,12 @@ impl From for TopologyWasm { minimum_gateway_performance: value .minimum_gateway_performance .unwrap_or(def.minimum_gateway_performance), + use_extended_topology: value + .use_extended_topology + .unwrap_or(def.use_extended_topology), + ignore_egress_epoch_role: value + .ignore_egress_epoch_role + .unwrap_or(def.ignore_egress_epoch_role), } } } diff --git a/common/wasm/client-core/src/helpers.rs b/common/wasm/client-core/src/helpers.rs index 04d6960377..eee589aead 100644 --- a/common/wasm/client-core/src/helpers.rs +++ b/common/wasm/client-core/src/helpers.rs @@ -15,7 +15,8 @@ use nym_client_core::init::{ }; use nym_sphinx::addressing::clients::Recipient; use nym_sphinx::anonymous_replies::requests::AnonymousSenderTag; -use nym_topology::{gateway, NymTopology, SerializableNymTopology}; +use nym_topology::wasm_helpers::WasmFriendlyNymTopology; +use nym_topology::{NymTopology, RoutingNode}; use nym_validator_client::client::IdentityKey; use nym_validator_client::NymApiClient; use rand::thread_rng; @@ -55,7 +56,7 @@ pub fn parse_sender_tag(tag: &str) -> Result pub async fn current_network_topology_async( nym_api_url: String, -) -> Result { +) -> Result { let url: Url = match nym_api_url.parse() { Ok(url) => url, Err(source) => { @@ -67,12 +68,17 @@ pub async fn current_network_topology_async( }; let api_client = NymApiClient::new(url); + let rewarded_set = api_client.get_current_rewarded_set().await?; let mixnodes = api_client .get_all_basic_active_mixing_assigned_nodes() .await?; let gateways = api_client.get_all_basic_entry_assigned_nodes().await?; - Ok(NymTopology::from_basic(&mixnodes, 
&gateways).into()) + let mut topology = NymTopology::new_empty(rewarded_set); + topology.add_skimmed_nodes(&mixnodes); + topology.add_skimmed_nodes(&gateways); + + Ok(topology.into()) } #[wasm_bindgen(js_name = "currentNetworkTopology")] @@ -90,7 +96,7 @@ pub async fn setup_gateway_wasm( client_store: &ClientStorage, force_tls: bool, chosen_gateway: Option, - gateways: &[gateway::LegacyNode], + gateways: Vec, ) -> Result { // TODO: so much optimization and extra features could be added here, but that's for the future @@ -107,7 +113,7 @@ pub async fn setup_gateway_wasm( GatewaySetup::New { specification: selection_spec, - available_gateways: gateways.to_vec(), + available_gateways: gateways, } }; @@ -125,7 +131,7 @@ pub async fn setup_gateway_from_api( ) -> Result { let mut rng = thread_rng(); let gateways = current_gateways(&mut rng, nym_apis, None, minimum_performance).await?; - setup_gateway_wasm(client_store, force_tls, chosen_gateway, &gateways).await + setup_gateway_wasm(client_store, force_tls, chosen_gateway, gateways).await } pub async fn setup_from_topology( @@ -134,6 +140,6 @@ pub async fn setup_from_topology( topology: &NymTopology, client_store: &ClientStorage, ) -> Result { - let gateways = topology.gateways(); + let gateways = topology.entry_capable_nodes().cloned().collect::>(); setup_gateway_wasm(client_store, force_tls, explicit_gateway, gateways).await } diff --git a/common/wasm/client-core/src/topology.rs b/common/wasm/client-core/src/topology.rs index 25300c41a4..bd849164e3 100644 --- a/common/wasm/client-core/src/topology.rs +++ b/common/wasm/client-core/src/topology.rs @@ -1,51 +1,26 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -use nym_topology::SerializableTopologyError; +use nym_topology::wasm_helpers::SerializableTopologyError; use nym_validator_client::client::IdentityKeyRef; -use wasm_utils::console_log; -pub use nym_topology::{ - gateway, mix, SerializableGateway, SerializableMixNode, 
SerializableNymTopology, -}; +pub use nym_topology::wasm_helpers::{WasmFriendlyNymTopology, WasmFriendlyRoutingNode}; +pub use nym_topology::{Role, RoutingNode}; // redeclare this as a type alias for easy of use pub type WasmTopologyError = SerializableTopologyError; // helper trait to define extra functionality on the external type that we used to have here before pub trait SerializableTopologyExt { - fn print(&self); + // fn print(&self); fn ensure_contains_gateway_id(&self, gateway_id: IdentityKeyRef) -> bool; } -impl SerializableTopologyExt for SerializableNymTopology { - fn print(&self) { - if !self.mixnodes.is_empty() { - console_log!("mixnodes:"); - for (layer, nodes) in &self.mixnodes { - console_log!("\tlayer {layer}:"); - for node in nodes { - // console_log!("\t\t{} - {}", node.mix_id, node.identity_key) - console_log!("\t\t{} - {:?}", node.mix_id, node) - } - } - } else { - console_log!("NO MIXNODES") - } - - if !self.gateways.is_empty() { - console_log!("gateways:"); - for gateway in &self.gateways { - // console_log!("\t{}", gateway.identity_key) - console_log!("\t{:?}", gateway) - } - } else { - console_log!("NO GATEWAYS") - } - } - +impl SerializableTopologyExt for WasmFriendlyNymTopology { fn ensure_contains_gateway_id(&self, gateway_id: IdentityKeyRef) -> bool { - self.gateways.iter().any(|g| g.identity_key == gateway_id) + self.node_details + .values() + .any(|node| node.identity_key == gateway_id) } } diff --git a/nym-api/nym-api-requests/src/models.rs b/nym-api/nym-api-requests/src/models.rs index 3b9c759620..720553ccf3 100644 --- a/nym-api/nym-api-requests/src/models.rs +++ b/nym-api/nym-api-requests/src/models.rs @@ -20,7 +20,9 @@ use nym_crypto::asymmetric::x25519::{ use nym_mixnet_contract_common::nym_node::Role; use nym_mixnet_contract_common::reward_params::{Performance, RewardingParams}; use nym_mixnet_contract_common::rewarding::RewardEstimate; -use nym_mixnet_contract_common::{GatewayBond, IdentityKey, Interval, MixNode, NodeId, 
Percent}; +use nym_mixnet_contract_common::{ + EpochId, GatewayBond, IdentityKey, Interval, MixNode, NodeId, Percent, +}; use nym_network_defaults::{DEFAULT_MIX_LISTENING_PORT, DEFAULT_VERLOC_LISTENING_PORT}; use nym_node_requests::api::v1::authenticator::models::Authenticator; use nym_node_requests::api::v1::gateway::models::Wireguard; @@ -1342,6 +1344,10 @@ impl NodeRefreshBody { #[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] pub struct RewardedSetResponse { + #[serde(default)] + #[schema(value_type = u32)] + pub epoch_id: EpochId, + pub entry_gateways: Vec, pub exit_gateways: Vec, @@ -1355,6 +1361,36 @@ pub struct RewardedSetResponse { pub standby: Vec, } +impl From for nym_mixnet_contract_common::EpochRewardedSet { + fn from(res: RewardedSetResponse) -> Self { + nym_mixnet_contract_common::EpochRewardedSet { + epoch_id: res.epoch_id, + assignment: nym_mixnet_contract_common::RewardedSet { + entry_gateways: res.entry_gateways, + exit_gateways: res.exit_gateways, + layer1: res.layer1, + layer2: res.layer2, + layer3: res.layer3, + standby: res.standby, + }, + } + } +} + +impl From for RewardedSetResponse { + fn from(r: nym_mixnet_contract_common::EpochRewardedSet) -> Self { + RewardedSetResponse { + epoch_id: r.epoch_id, + entry_gateways: r.assignment.entry_gateways, + exit_gateways: r.assignment.exit_gateways, + layer1: r.assignment.layer1, + layer2: r.assignment.layer2, + layer3: r.assignment.layer3, + standby: r.assignment.standby, + } + } +} + pub use config_score::*; pub mod config_score { use nym_contracts_common::NaiveFloat; diff --git a/nym-api/nym-api-requests/src/nym_nodes.rs b/nym-api/nym-api-requests/src/nym_nodes.rs index fcfd09c2be..d8574e36d5 100644 --- a/nym-api/nym-api-requests/src/nym_nodes.rs +++ b/nym-api/nym-api-requests/src/nym_nodes.rs @@ -72,7 +72,7 @@ pub enum NodeRoleQueryParam { ExitGateway, } -#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] +#[derive(Clone, Debug, 
Serialize, Deserialize, schemars::JsonSchema, ToSchema, Default)] pub enum NodeRole { // a properly active mixnode Mixnode { @@ -88,6 +88,7 @@ pub enum NodeRole { // equivalent of node that's in rewarded set but not in the inactive set Standby, + #[default] Inactive, } @@ -134,7 +135,6 @@ pub struct SkimmedNode { #[schema(value_type = Vec)] pub ip_addresses: Vec, - // TODO: to be deprecated in favour of well-known hardcoded port for everyone pub mix_port: u16, #[serde(with = "bs58_x25519_pubkey")] diff --git a/nym-api/src/epoch_operations/helpers.rs b/nym-api/src/epoch_operations/helpers.rs index fce29eda8d..ed9abf30ce 100644 --- a/nym-api/src/epoch_operations/helpers.rs +++ b/nym-api/src/epoch_operations/helpers.rs @@ -6,7 +6,7 @@ use crate::support::caching::Cache; use cosmwasm_std::{Decimal, Fraction}; use nym_api_requests::models::NodeAnnotation; use nym_mixnet_contract_common::reward_params::{NodeRewardingParameters, Performance, WorkFactor}; -use nym_mixnet_contract_common::{ExecuteMsg, NodeId, RewardedSet, RewardingParams}; +use nym_mixnet_contract_common::{EpochRewardedSet, ExecuteMsg, NodeId, RewardingParams}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use tokio::sync::RwLockReadGuard; @@ -93,11 +93,12 @@ impl EpochAdvancer { pub(crate) async fn load_nodes_for_rewarding( &self, - nodes: &RewardedSet, + nodes: &EpochRewardedSet, // we only need reward parameters for active set work factor and rewarded/active set sizes; // we do not need exact values of reward pool, staking supply, etc., so it's fine if it's slightly out of sync global_rewarding_params: RewardingParams, ) -> Vec { + let nodes = &nodes.assignment; // currently we are using constant omega for nodes, but that will change with tickets // or different reward split between entry, exit, etc. 
at that point this will have to be calculated elsewhere let active_node_work_factor = global_rewarding_params.active_node_work(); diff --git a/nym-api/src/network_monitor/monitor/mod.rs b/nym-api/src/network_monitor/monitor/mod.rs index 1f659b8646..0145245197 100644 --- a/nym-api/src/network_monitor/monitor/mod.rs +++ b/nym-api/src/network_monitor/monitor/mod.rs @@ -181,8 +181,8 @@ impl Monitor { } fn blacklist_route_nodes(&self, route: &TestRoute, blacklist: &mut HashSet) { - for mix in route.topology().mixes_as_vec() { - blacklist.insert(mix.mix_id); + for mix in route.topology().mixnodes() { + blacklist.insert(mix.node_id); } blacklist.insert(route.gateway().node_id); } diff --git a/nym-api/src/network_monitor/monitor/preparer.rs b/nym-api/src/network_monitor/monitor/preparer.rs index 50c3be80ba..7ae2547844 100644 --- a/nym-api/src/network_monitor/monitor/preparer.rs +++ b/nym-api/src/network_monitor/monitor/preparer.rs @@ -5,26 +5,26 @@ use crate::network_monitor::monitor::sender::GatewayPackets; use crate::network_monitor::test_route::TestRoute; use crate::node_describe_cache::{DescribedNodes, NodeDescriptionTopologyExt}; use crate::node_status_api::NodeStatusCache; -use crate::nym_contract_cache::cache::{CachedRewardedSet, NymContractCache}; +use crate::nym_contract_cache::cache::NymContractCache; use crate::support::caching::cache::SharedCache; +use crate::support::legacy_helpers::legacy_host_to_ips_and_hostname; use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeBondWithLayer}; use nym_api_requests::models::{NodeAnnotation, NymNodeDescription}; use nym_contracts_common::NaiveFloat; use nym_crypto::asymmetric::{encryption, identity}; use nym_mixnet_contract_common::{LegacyMixLayer, NodeId}; -use nym_node_tester_utils::node::TestableNode; +use nym_node_tester_utils::node::{NodeType, TestableNode}; use nym_node_tester_utils::NodeTester; use nym_sphinx::acknowledgements::AckKey; use nym_sphinx::addressing::clients::Recipient; use 
nym_sphinx::forwarding::packet::MixPacket; use nym_sphinx::params::{PacketSize, PacketType}; -use nym_topology::gateway::GatewayConversionError; -use nym_topology::mix::MixnodeConversionError; -use nym_topology::{gateway, mix}; +use nym_topology::node::{EntryDetails, RoutingNode, SupportedRoles}; use rand::prelude::SliceRandom; use rand::{rngs::ThreadRng, thread_rng, Rng}; use std::collections::HashMap; use std::fmt::{self, Display, Formatter}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, trace}; @@ -135,7 +135,7 @@ impl PacketPreparer { // when we're testing mixnodes, the recipient is going to stay constant, so we can specify it ahead of time fn ephemeral_mix_tester(&self, test_route: &TestRoute) -> NodeTester { - let self_address = self.create_packet_sender(test_route.gateway()); + let self_address = self.create_packet_sender(&test_route.gateway()); self.ephemeral_tester(test_route, Some(self_address)) } @@ -210,75 +210,87 @@ impl PacketPreparer { (mixnodes, gateways) } - pub(crate) fn try_parse_mix_bond( + pub(crate) fn try_parse_legacy_mix_bond( &self, bond: &LegacyMixNodeBondWithLayer, - ) -> Result { - fn parse_bond( - bond: &LegacyMixNodeBondWithLayer, - ) -> Result { - let host = mix::LegacyNode::parse_host(&bond.mix_node.host)?; - - // try to completely resolve the host in the mix situation to avoid doing it every - // single time we want to construct a path - let mix_host = mix::LegacyNode::extract_mix_host(&host, bond.mix_node.mix_port)?; - - Ok(mix::LegacyNode { - mix_id: bond.mix_id, - host, - mix_host, - identity_key: identity::PublicKey::from_base58_string(&bond.mix_node.identity_key)?, - sphinx_key: encryption::PublicKey::from_base58_string(&bond.mix_node.sphinx_key)?, - layer: bond.layer, - version: bond.mix_node.version.as_str().into(), + ) -> Result { + fn parse_bond(bond: &LegacyMixNodeBondWithLayer) -> Option { + let (ips, _) = legacy_host_to_ips_and_hostname(&bond.mix_node.host)?; + 
+ Some(RoutingNode { + node_id: bond.mix_id, + mix_host: SocketAddr::new(*ips.first()?, bond.mix_node.mix_port), + entry: None, + identity_key: identity::PublicKey::from_base58_string(&bond.mix_node.identity_key) + .ok()?, + sphinx_key: encryption::PublicKey::from_base58_string(&bond.mix_node.sphinx_key) + .ok()?, + supported_roles: SupportedRoles { + mixnode: true, + mixnet_entry: false, + mixnet_exit: false, + }, }) } let identity = bond.mix_node.identity_key.clone(); - parse_bond(bond).map_err(|_| identity) + parse_bond(bond).ok_or(identity) } - pub(crate) fn try_parse_gateway_bond( + pub(crate) fn try_parse_legacy_gateway_bond( &self, gateway: &LegacyGatewayBondWithId, - ) -> Result { - fn parse_bond( - bond: &LegacyGatewayBondWithId, - ) -> Result { - let host = gateway::LegacyNode::parse_host(&bond.gateway.host)?; + ) -> Result { + fn parse_bond(bond: &LegacyGatewayBondWithId) -> Option { + let (ips, hostname) = legacy_host_to_ips_and_hostname(&bond.gateway.host)?; - // try to completely resolve the host in the mix situation to avoid doing it every - // single time we want to construct a path - let mix_host = gateway::LegacyNode::extract_mix_host(&host, bond.gateway.mix_port)?; - - Ok(gateway::LegacyNode { + Some(RoutingNode { node_id: bond.node_id, - host, - mix_host, - clients_ws_port: bond.gateway.clients_port, - clients_wss_port: None, - identity_key: identity::PublicKey::from_base58_string(&bond.gateway.identity_key)?, - sphinx_key: encryption::PublicKey::from_base58_string(&bond.gateway.sphinx_key)?, - version: bond.gateway.version.as_str().into(), + mix_host: SocketAddr::new(*ips.first()?, bond.gateway.mix_port), + entry: Some(EntryDetails { + ip_addresses: ips, + clients_ws_port: bond.gateway.clients_port, + hostname, + clients_wss_port: None, + }), + identity_key: identity::PublicKey::from_base58_string(&bond.gateway.identity_key) + .ok()?, + sphinx_key: encryption::PublicKey::from_base58_string(&bond.gateway.sphinx_key) + .ok()?, + supported_roles: 
SupportedRoles { + mixnode: false, + mixnet_entry: true, + mixnet_exit: false, + }, }) } let identity = gateway.gateway.identity_key.clone(); - parse_bond(gateway).map_err(|_| identity) + parse_bond(gateway).ok_or(identity) + } + + fn random_legacy_layer(&self, rng: &mut R) -> LegacyMixLayer { + let layer_choices = [ + LegacyMixLayer::One, + LegacyMixLayer::Two, + LegacyMixLayer::Three, + ]; + + // SAFETY: the slice is not empty so the unwrap is fine + #[allow(clippy::unwrap_used)] + layer_choices.choose(rng).copied().unwrap() } fn to_legacy_layered_mixes<'a, R: Rng>( &self, rng: &mut R, - rewarded_set: &CachedRewardedSet, node_statuses: &HashMap, mixing_nym_nodes: impl Iterator + 'a, - ) -> HashMap> { + ) -> HashMap> { let mut layered_mixes = HashMap::new(); for mixing_nym_node in mixing_nym_nodes { - let Some(parsed_node) = self.nym_node_to_legacy_mix(rng, rewarded_set, mixing_nym_node) - else { + let Some(parsed_node) = self.nym_node_to_routing_node(mixing_nym_node) else { continue; }; // if the node is not present, default to 0.5 @@ -286,7 +298,7 @@ impl PacketPreparer { .get(&mixing_nym_node.node_id) .map(|node| node.last_24h_performance.naive_to_f64()) .unwrap_or(0.5); - let layer = parsed_node.layer; + let layer = self.random_legacy_layer(rng); let layer_mixes = layered_mixes.entry(layer).or_insert_with(Vec::new); layer_mixes.push((parsed_node, weight)) } @@ -298,11 +310,11 @@ impl PacketPreparer { &self, node_statuses: &HashMap, gateway_capable_nym_nodes: impl Iterator + 'a, - ) -> Vec<(gateway::LegacyNode, f64)> { + ) -> Vec<(RoutingNode, f64)> { let mut gateways = Vec::new(); for gateway_capable_node in gateway_capable_nym_nodes { - let Some(parsed_node) = self.nym_node_to_legacy_gateway(gateway_capable_node) else { + let Some(parsed_node) = self.nym_node_to_routing_node(gateway_capable_node) else { continue; }; // if the node is not present, default to 0.5 @@ -321,8 +333,6 @@ impl PacketPreparer { // if generated fewer than n, blacklist will be updated 
by external function with correctly generated // routes so that they wouldn't be reused pub(crate) async fn prepare_test_routes(&self, n: usize) -> Option> { - let rewarded_set = self.contract_cache.rewarded_set().await?; - let descriptions = self.described_cache.get().await.ok()?; let statuses = self.node_status_cache.node_annotations().await?; @@ -333,8 +343,7 @@ impl PacketPreparer { let mut rng = thread_rng(); // separate mixes into layers for easier selection alongside the selection weights - let layered_mixes = - self.to_legacy_layered_mixes(&mut rng, &rewarded_set, &statuses, mixing_nym_nodes); + let layered_mixes = self.to_legacy_layered_mixes(&mut rng, &statuses, mixing_nym_nodes); let gateways = self.to_legacy_gateway_nodes(&statuses, gateway_capable_nym_nodes); // get all nodes from each layer... @@ -394,7 +403,7 @@ impl PacketPreparer { Some(routes) } - fn create_packet_sender(&self, gateway: &gateway::LegacyNode) -> Recipient { + fn create_packet_sender(&self, gateway: &RoutingNode) -> Recipient { Recipient::new( self.self_public_identity, self.self_public_encryption, @@ -410,7 +419,8 @@ impl PacketPreparer { _packet_type: PacketType, ) -> GatewayPackets { let mut tester = self.ephemeral_mix_tester(route); - let topology = route.topology(); + let topology = route.testable_route_provider(); + let plaintexts = route.self_test_messages(num); // the unwrap here is fine as: @@ -419,7 +429,7 @@ impl PacketPreparer { // 3. the test message is not too long, i.e. 
when serialized it will fit in a single sphinx packet let mix_packets = plaintexts .into_iter() - .map(|p| tester.wrap_plaintext_data(p, topology, None).unwrap()) + .map(|p| tester.wrap_plaintext_data(p, &topology, None).unwrap()) .map(MixPacket::from) .collect(); @@ -433,11 +443,11 @@ impl PacketPreparer { fn filter_outdated_and_malformed_mixnodes( &self, nodes: Vec, - ) -> (Vec, Vec) { + ) -> (Vec, Vec) { let mut parsed_nodes = Vec::new(); let mut invalid_nodes = Vec::new(); for mixnode in nodes { - if let Ok(parsed_node) = self.try_parse_mix_bond(&mixnode) { + if let Ok(parsed_node) = self.try_parse_legacy_mix_bond(&mixnode) { parsed_nodes.push(parsed_node) } else { invalid_nodes.push(InvalidNode::Malformed { @@ -451,12 +461,12 @@ impl PacketPreparer { fn filter_outdated_and_malformed_gateways( &self, nodes: Vec, - ) -> (Vec<(gateway::LegacyNode, NodeId)>, Vec) { + ) -> (Vec, Vec) { let mut parsed_nodes = Vec::new(); let mut invalid_nodes = Vec::new(); for gateway in nodes { - if let Ok(parsed_node) = self.try_parse_gateway_bond(&gateway) { - parsed_nodes.push((parsed_node, gateway.node_id)) + if let Ok(parsed_node) = self.try_parse_legacy_gateway_bond(&gateway) { + parsed_nodes.push(parsed_node) } else { invalid_nodes.push(InvalidNode::Malformed { node: TestableNode::new_gateway( @@ -469,41 +479,8 @@ impl PacketPreparer { (parsed_nodes, invalid_nodes) } - fn nym_node_to_legacy_mix( - &self, - rng: &mut R, - rewarded_set: &CachedRewardedSet, - mixing_nym_node: &NymNodeDescription, - ) -> Option { - let maybe_explicit_layer = rewarded_set - .try_get_mix_layer(&mixing_nym_node.node_id) - .and_then(|layer| LegacyMixLayer::try_from(layer).ok()); - - let layer = match maybe_explicit_layer { - Some(layer) => layer, - None => { - let layer_choices = [ - LegacyMixLayer::One, - LegacyMixLayer::Two, - LegacyMixLayer::Three, - ]; - - // if nym-node doesn't have a layer assigned, since it's either standby or inactive, - // we have to choose one randomly for the testing 
purposes - // SAFETY: the slice is not empty so the unwrap is fine - #[allow(clippy::unwrap_used)] - layer_choices.choose(rng).copied().unwrap() - } - }; - - mixing_nym_node.try_to_topology_mix_node(layer).ok() - } - - fn nym_node_to_legacy_gateway( - &self, - gateway_capable_node: &NymNodeDescription, - ) -> Option { - gateway_capable_node.try_to_topology_gateway().ok() + fn nym_node_to_routing_node(&self, description: &NymNodeDescription) -> Option { + description.try_to_topology_node().ok() } pub(super) async fn prepare_test_packets( @@ -514,7 +491,6 @@ impl PacketPreparer { _packet_type: PacketType, ) -> PreparedPackets { let (mixnodes, gateways) = self.all_legacy_mixnodes_and_gateways().await; - let rewarded_set = self.contract_cache.rewarded_set().await; let descriptions = self .described_cache @@ -532,28 +508,32 @@ impl PacketPreparer { // summary of nodes that got tested let mut mixnodes_under_test = mixnodes_to_test_details .iter() - .map(|node| node.into()) + .map(|node| TestableNode::new_routing(node, NodeType::Mixnode)) .collect::>(); let mut gateways_under_test = gateways_to_test_details .iter() - .map(|node| node.into()) + .map(|node| TestableNode::new_routing(node, NodeType::Gateway)) .collect::>(); // try to add nym-nodes into the fold - if let Some(rewarded_set) = rewarded_set { - let mut rng = thread_rng(); - for mix in mixing_nym_nodes { - if let Some(parsed) = self.nym_node_to_legacy_mix(&mut rng, &rewarded_set, mix) { - mixnodes_under_test.push(TestableNode::from(&parsed)); - mixnodes_to_test_details.push(parsed); - } + for mix in mixing_nym_nodes { + if let Some(parsed) = self.nym_node_to_routing_node(mix) { + mixnodes_under_test.push(TestableNode::new_routing(&parsed, NodeType::Mixnode)); + mixnodes_to_test_details.push(parsed); } } + // assign random layer to each node + let mut rng = thread_rng(); + let mixnodes_to_test_details = mixnodes_to_test_details + .into_iter() + .map(|node| (self.random_legacy_layer(&mut rng), node)) + 
.collect::>(); + for gateway in gateway_capable_nym_nodes { - if let Some(parsed) = self.nym_node_to_legacy_gateway(gateway) { - gateways_under_test.push((&parsed, gateway.node_id).into()); - gateways_to_test_details.push((parsed, gateway.node_id)); + if let Some(parsed) = self.nym_node_to_routing_node(gateway) { + gateways_under_test.push(TestableNode::new_routing(&parsed, NodeType::Gateway)); + gateways_to_test_details.push(parsed); } } @@ -594,10 +574,10 @@ impl PacketPreparer { gateway_packets.push_packets(mix_packets); // and generate test packets for gateways (note the variable recipient) - for (gateway, node_id) in &gateways_to_test_details { + for gateway in &gateways_to_test_details { let recipient = self.create_packet_sender(gateway); let gateway_identity = gateway.identity_key; - let gateway_address = gateway.clients_address(); + let gateway_address = gateway.ws_entry_address(false); // the unwrap here is fine as: // 1. the topology is definitely valid (otherwise we wouldn't be here) @@ -607,7 +587,6 @@ impl PacketPreparer { let gateway_test_packets = mix_tester .legacy_gateway_test_packets( gateway, - *node_id, route_ext, self.per_node_test_packets as u32, Some(recipient), diff --git a/nym-api/src/network_monitor/monitor/sender.rs b/nym-api/src/network_monitor/monitor/sender.rs index c20feadac1..65d699d497 100644 --- a/nym-api/src/network_monitor/monitor/sender.rs +++ b/nym-api/src/network_monitor/monitor/sender.rs @@ -35,7 +35,7 @@ const TIME_CHUNK_SIZE: Duration = Duration::from_millis(50); pub(crate) struct GatewayPackets { /// Network address of the target gateway if wanted to be accessed by the client. /// It is a websocket address. - pub(crate) clients_address: String, + pub(crate) clients_address: Option, /// Public key of the target gateway. 
pub(crate) pub_key: ed25519::PublicKey, @@ -46,7 +46,7 @@ pub(crate) struct GatewayPackets { impl GatewayPackets { pub(crate) fn new( - clients_address: String, + clients_address: Option, pub_key: ed25519::PublicKey, packets: Vec, ) -> Self { @@ -57,15 +57,17 @@ impl GatewayPackets { } } - pub(crate) fn gateway_config(&self) -> GatewayConfig { - GatewayConfig { - gateway_identity: self.pub_key, - gateway_owner: None, - gateway_listener: self.clients_address.clone(), - } + pub(crate) fn gateway_config(&self) -> Option { + self.clients_address + .clone() + .map(|gateway_listener| GatewayConfig { + gateway_identity: self.pub_key, + gateway_owner: None, + gateway_listener, + }) } - pub(crate) fn empty(clients_address: String, pub_key: ed25519::PublicKey) -> Self { + pub(crate) fn empty(clients_address: Option, pub_key: ed25519::PublicKey) -> Self { GatewayPackets { clients_address, pub_key, @@ -356,17 +358,22 @@ impl PacketSender { fresh_gateway_client_data: Arc, max_sending_rate: usize, ) -> Option { + let identity = packets.pub_key; + + let Some(gateway_config) = packets.gateway_config() else { + warn!("gateway {identity} didn't provide valid entry information"); + return None; + }; + let (mut client, gateway_channels) = Self::create_new_gateway_client_handle_and_authenticate( - packets.gateway_config(), + gateway_config, &fresh_gateway_client_data, gateway_connection_timeout, gateway_bandwidth_claim_timeout, ) .await?; - let identity = client.gateway_identity(); - let estimated_time = Duration::from_secs_f64(packets.packets.len() as f64 / max_sending_rate as f64); // give some leeway diff --git a/nym-api/src/network_monitor/test_packet.rs b/nym-api/src/network_monitor/test_packet.rs index 1b1bfbbeda..73ace159ad 100644 --- a/nym-api/src/network_monitor/test_packet.rs +++ b/nym-api/src/network_monitor/test_packet.rs @@ -3,7 +3,7 @@ use nym_node_tester_utils::error::NetworkTestingError; use nym_node_tester_utils::TestMessage; -use nym_topology::mix; +use 
nym_topology::node::RoutingNode; use serde::{Deserialize, Serialize}; pub(crate) type NodeTestMessage = TestMessage; @@ -24,7 +24,7 @@ impl NymApiTestMessageExt { pub fn mix_plaintexts( &self, - node: &mix::LegacyNode, + node: &RoutingNode, test_packets: u32, ) -> Result>, NetworkTestingError> { NodeTestMessage::mix_plaintexts(node, test_packets, *self) diff --git a/nym-api/src/network_monitor/test_route/mod.rs b/nym-api/src/network_monitor/test_route/mod.rs index 224f751357..1032baf5db 100644 --- a/nym-api/src/network_monitor/test_route/mod.rs +++ b/nym-api/src/network_monitor/test_route/mod.rs @@ -4,7 +4,10 @@ use crate::network_monitor::test_packet::NymApiTestMessageExt; use crate::network_monitor::ROUTE_TESTING_TEST_NONCE; use nym_crypto::asymmetric::identity; -use nym_topology::{gateway, mix, NymTopology}; +use nym_mixnet_contract_common::nym_node::Role; +use nym_mixnet_contract_common::{EpochId, EpochRewardedSet, RewardedSet}; +use nym_topology::node::RoutingNode; +use nym_topology::{NymRouteProvider, NymTopology}; use std::fmt::{Debug, Formatter}; #[derive(Clone)] @@ -16,22 +19,28 @@ pub(crate) struct TestRoute { impl TestRoute { pub(crate) fn new( id: u64, - l1_mix: mix::LegacyNode, - l2_mix: mix::LegacyNode, - l3_mix: mix::LegacyNode, - gateway: gateway::LegacyNode, + l1_mix: RoutingNode, + l2_mix: RoutingNode, + l3_mix: RoutingNode, + gateway: RoutingNode, ) -> Self { - let layered_mixes = [ - (1u8, vec![l1_mix]), - (2u8, vec![l2_mix]), - (3u8, vec![l3_mix]), - ] - .into_iter() - .collect(); + let fake_rewarded_set = EpochRewardedSet { + epoch_id: EpochId::MAX, + assignment: RewardedSet { + entry_gateways: vec![gateway.node_id], + exit_gateways: vec![], + layer1: vec![l1_mix.node_id], + layer2: vec![l2_mix.node_id], + layer3: vec![l3_mix.node_id], + standby: vec![], + }, + }; + + let nodes = vec![l1_mix, l2_mix, l3_mix, gateway]; TestRoute { id, - nodes: NymTopology::new(layered_mixes, vec![gateway]), + nodes: NymTopology::new(fake_rewarded_set, nodes), } 
} @@ -39,24 +48,36 @@ impl TestRoute { self.id } - pub(crate) fn gateway(&self) -> &gateway::LegacyNode { - &self.nodes.gateways()[0] + pub(crate) fn gateway(&self) -> RoutingNode { + // SAFETY: we inserted entry gateway at construction + #[allow(clippy::unwrap_used)] + self.nodes + .nodes_with_role(Role::EntryGateway) + .next() + .unwrap() + .clone() } - pub(crate) fn layer_one_mix(&self) -> &mix::LegacyNode { - &self.nodes.mixes().get(&1).unwrap()[0] + pub(crate) fn layer_one_mix(&self) -> &RoutingNode { + // SAFETY: we inserted layer1 node at construction + #[allow(clippy::unwrap_used)] + self.nodes.nodes_with_role(Role::Layer1).next().unwrap() } - pub(crate) fn layer_two_mix(&self) -> &mix::LegacyNode { - &self.nodes.mixes().get(&2).unwrap()[0] + pub(crate) fn layer_two_mix(&self) -> &RoutingNode { + // SAFETY: we inserted layer2 node at construction + #[allow(clippy::unwrap_used)] + self.nodes.nodes_with_role(Role::Layer2).next().unwrap() } - pub(crate) fn layer_three_mix(&self) -> &mix::LegacyNode { - &self.nodes.mixes().get(&3).unwrap()[0] + pub(crate) fn layer_three_mix(&self) -> &RoutingNode { + // SAFETY: we inserted layer3 node at construction + #[allow(clippy::unwrap_used)] + self.nodes.nodes_with_role(Role::Layer3).next().unwrap() } - pub(crate) fn gateway_clients_address(&self) -> String { - self.gateway().clients_address() + pub(crate) fn gateway_clients_address(&self) -> Option { + self.gateway().ws_entry_address(false) } pub(crate) fn gateway_identity(&self) -> identity::PublicKey { @@ -67,6 +88,10 @@ impl TestRoute { &self.nodes } + pub(crate) fn testable_route_provider(&self) -> NymRouteProvider { + self.nodes.clone().into() + } + pub(crate) fn test_message_ext(&self, test_nonce: u64) -> NymApiTestMessageExt { NymApiTestMessageExt::new(self.id, test_nonce) } diff --git a/nym-api/src/node_describe_cache/mod.rs b/nym-api/src/node_describe_cache/mod.rs index adf25c29f1..0a0aeabc2f 100644 --- a/nym-api/src/node_describe_cache/mod.rs +++ 
b/nym-api/src/node_describe_cache/mod.rs @@ -12,13 +12,10 @@ use futures::{stream, StreamExt}; use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeDetailsWithLayer}; use nym_api_requests::models::{DescribedNodeType, NymNodeData, NymNodeDescription}; use nym_config::defaults::DEFAULT_NYM_NODE_HTTP_PORT; -use nym_mixnet_contract_common::{LegacyMixLayer, NodeId, NymNodeDetails}; +use nym_mixnet_contract_common::{NodeId, NymNodeDetails}; use nym_node_requests::api::client::{NymNodeApiClientError, NymNodeApiClientExt}; -use nym_topology::gateway::GatewayConversionError; -use nym_topology::mix::MixnodeConversionError; -use nym_topology::{gateway, mix, NetworkAddress}; +use nym_topology::node::{RoutingNode, RoutingNodeError}; use std::collections::HashMap; -use std::net::SocketAddr; use std::time::Duration; use thiserror::Error; use tracing::{debug, error, info}; @@ -65,87 +62,14 @@ pub enum NodeDescribeCacheError { // this exists because I've been moving things around quite a lot and now the place that holds the type // doesn't have relevant dependencies for proper impl pub(crate) trait NodeDescriptionTopologyExt { - fn try_to_topology_mix_node( - &self, - layer: LegacyMixLayer, - ) -> Result; - - fn try_to_topology_gateway(&self) -> Result; + fn try_to_topology_node(&self) -> Result; } impl NodeDescriptionTopologyExt for NymNodeDescription { - // TODO: this might have to be moved around - fn try_to_topology_mix_node( - &self, - layer: LegacyMixLayer, - ) -> Result { - let keys = &self.description.host_information.keys; - let ips = &self.description.host_information.ip_address; - if ips.is_empty() { - return Err(MixnodeConversionError::NoIpAddressesProvided { - mixnode: keys.ed25519.to_base58_string(), - }); - } - - let host = match &self.description.host_information.hostname { - None => NetworkAddress::IpAddr(ips[0]), - Some(hostname) => NetworkAddress::Hostname(hostname.clone()), - }; - - // get ip from the self-reported values so we wouldn't need to 
do any hostname resolution - // (which doesn't really work in wasm) - let mix_host = SocketAddr::new(ips[0], self.description.mix_port()); - - Ok(mix::LegacyNode { - mix_id: self.node_id, - host, - mix_host, - identity_key: keys.ed25519, - sphinx_key: keys.x25519, - layer, - version: self - .description - .build_information - .build_version - .as_str() - .into(), - }) - } - - fn try_to_topology_gateway(&self) -> Result { - let keys = &self.description.host_information.keys; - - let ips = &self.description.host_information.ip_address; - if ips.is_empty() { - return Err(GatewayConversionError::NoIpAddressesProvided { - gateway: keys.ed25519.to_base58_string(), - }); - } - - let host = match &self.description.host_information.hostname { - None => NetworkAddress::IpAddr(ips[0]), - Some(hostname) => NetworkAddress::Hostname(hostname.clone()), - }; - - // get ip from the self-reported values so we wouldn't need to do any hostname resolution - // (which doesn't really work in wasm) - let mix_host = SocketAddr::new(ips[0], self.description.mix_port()); - - Ok(gateway::LegacyNode { - node_id: self.node_id, - host, - mix_host, - clients_ws_port: self.description.mixnet_websockets.ws_port, - clients_wss_port: self.description.mixnet_websockets.wss_port, - identity_key: self.description.host_information.keys.ed25519, - sphinx_key: self.description.host_information.keys.x25519, - version: self - .description - .build_information - .build_version - .as_str() - .into(), - }) + fn try_to_topology_node(&self) -> Result { + // for the purposes of routing, performance is completely ignored, + // so add dummy value and piggyback on existing conversion + (&self.to_skimmed_node(Default::default(), Default::default())).try_into() } } diff --git a/nym-api/src/node_status_api/cache/node_sets.rs b/nym-api/src/node_status_api/cache/node_sets.rs index e03dd2807d..a0c4df4ee9 100644 --- a/nym-api/src/node_status_api/cache/node_sets.rs +++ b/nym-api/src/node_status_api/cache/node_sets.rs @@ -6,7 
+6,7 @@ use crate::node_status_api::helpers::RewardedSetStatus; use crate::node_status_api::models::Uptime; use crate::node_status_api::reward_estimate::{compute_apy_from_reward, compute_reward_estimate}; use crate::nym_contract_cache::cache::data::ConfigScoreData; -use crate::nym_contract_cache::cache::CachedRewardedSet; +use crate::support::legacy_helpers::legacy_host_to_ips_and_hostname; use crate::support::storage::NymApiStorage; use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeDetailsWithLayer}; use nym_api_requests::models::DescribedNodeType::{LegacyGateway, LegacyMixnode, NymNode}; @@ -17,10 +17,8 @@ use nym_api_requests::models::{ use nym_contracts_common::NaiveFloat; use nym_mixnet_contract_common::{Interval, NodeId, VersionScoreFormulaParams}; use nym_mixnet_contract_common::{NymNodeDetails, RewardingParams}; -use nym_topology::NetworkAddress; +use nym_topology::CachedEpochRewardedSet; use std::collections::{HashMap, HashSet}; -use std::net::ToSocketAddrs; -use std::str::FromStr; use tracing::trace; pub(super) async fn get_mixnode_reliability_from_storage( @@ -148,7 +146,10 @@ fn calculate_config_score( } // TODO: this might have to be moved to a different file if other places also rely on this functionality -fn get_rewarded_set_status(rewarded_set: &CachedRewardedSet, node_id: NodeId) -> RewardedSetStatus { +fn get_rewarded_set_status( + rewarded_set: &CachedEpochRewardedSet, + node_id: NodeId, +) -> RewardedSetStatus { if rewarded_set.is_standby(&node_id) { RewardedSetStatus::Standby } else if rewarded_set.is_active_mixnode(&node_id) { @@ -164,7 +165,7 @@ pub(super) async fn annotate_legacy_mixnodes_nodes_with_details( mixnodes: Vec, interval_reward_params: RewardingParams, current_interval: Interval, - rewarded_set: &CachedRewardedSet, + rewarded_set: &CachedEpochRewardedSet, blacklist: &HashSet, ) -> HashMap { let mut annotated = HashMap::new(); @@ -203,21 +204,11 @@ pub(super) async fn annotate_legacy_mixnodes_nodes_with_details( 
.ok() .unwrap_or_default(); - // safety: this conversion is infallible - let ip_addresses = - match NetworkAddress::from_str(&mixnode.bond_information.mix_node.host).unwrap() { - NetworkAddress::IpAddr(ip) => vec![ip], - NetworkAddress::Hostname(hostname) => { - // try to resolve it - ( - hostname.as_str(), - mixnode.bond_information.mix_node.mix_port, - ) - .to_socket_addrs() - .map(|iter| iter.map(|s| s.ip()).collect::>()) - .unwrap_or_default() - } - }; + let Some((ip_addresses, _)) = + legacy_host_to_ips_and_hostname(&mixnode.bond_information.mix_node.host) + else { + continue; + }; let (estimated_operator_apy, estimated_delegators_apy) = compute_apy_from_reward(&mixnode, reward_estimate, current_interval); @@ -263,17 +254,10 @@ pub(crate) async fn annotate_legacy_gateways_with_details( .ok() .unwrap_or_default(); - // safety: this conversion is infallible - let ip_addresses = match NetworkAddress::from_str(&gateway_bond.bond.gateway.host).unwrap() - { - NetworkAddress::IpAddr(ip) => vec![ip], - NetworkAddress::Hostname(hostname) => { - // try to resolve it - (hostname.as_str(), gateway_bond.bond.gateway.mix_port) - .to_socket_addrs() - .map(|iter| iter.map(|s| s.ip()).collect::>()) - .unwrap_or_default() - } + let Some((ip_addresses, _)) = + legacy_host_to_ips_and_hostname(&gateway_bond.bond.gateway.host) + else { + continue; }; annotated.insert( @@ -298,7 +282,7 @@ pub(crate) async fn produce_node_annotations( legacy_mixnodes: &[LegacyMixNodeDetailsWithLayer], legacy_gateways: &[LegacyGatewayBondWithId], nym_nodes: &[NymNodeDetails], - rewarded_set: &CachedRewardedSet, + rewarded_set: &CachedEpochRewardedSet, current_interval: Interval, described_nodes: &DescribedNodes, ) -> HashMap { diff --git a/nym-api/src/nym_contract_cache/cache/data.rs b/nym-api/src/nym_contract_cache/cache/data.rs index d259109f98..ba2c7d9115 100644 --- a/nym-api/src/nym_contract_cache/cache/data.rs +++ b/nym-api/src/nym_contract_cache/cache/data.rs @@ -3,130 +3,16 @@ use 
crate::support::caching::Cache; use nym_api_requests::legacy::{LegacyGatewayBondWithId, LegacyMixNodeDetailsWithLayer}; -use nym_api_requests::models::{ConfigScoreDataResponse, RewardedSetResponse}; +use nym_api_requests::models::ConfigScoreDataResponse; use nym_contracts_common::ContractBuildInformation; -use nym_mixnet_contract_common::nym_node::Role; use nym_mixnet_contract_common::{ ConfigScoreParams, HistoricalNymNodeVersionEntry, Interval, NodeId, NymNodeDetails, - RewardedSet, RewardingParams, + RewardingParams, }; +use nym_topology::CachedEpochRewardedSet; use nym_validator_client::nyxd::AccountId; use std::collections::{HashMap, HashSet}; -#[derive(Default, Clone)] -pub(crate) struct CachedRewardedSet { - pub(crate) entry_gateways: HashSet, - - pub(crate) exit_gateways: HashSet, - - pub(crate) layer1: HashSet, - - pub(crate) layer2: HashSet, - - pub(crate) layer3: HashSet, - - pub(crate) standby: HashSet, -} - -impl From for CachedRewardedSet { - fn from(value: RewardedSet) -> Self { - CachedRewardedSet { - entry_gateways: value.entry_gateways.into_iter().collect(), - exit_gateways: value.exit_gateways.into_iter().collect(), - layer1: value.layer1.into_iter().collect(), - layer2: value.layer2.into_iter().collect(), - layer3: value.layer3.into_iter().collect(), - standby: value.standby.into_iter().collect(), - } - } -} - -impl From for RewardedSet { - fn from(value: CachedRewardedSet) -> Self { - RewardedSet { - entry_gateways: value.entry_gateways.into_iter().collect(), - exit_gateways: value.exit_gateways.into_iter().collect(), - layer1: value.layer1.into_iter().collect(), - layer2: value.layer2.into_iter().collect(), - layer3: value.layer3.into_iter().collect(), - standby: value.standby.into_iter().collect(), - } - } -} - -impl From<&CachedRewardedSet> for RewardedSetResponse { - fn from(value: &CachedRewardedSet) -> Self { - RewardedSetResponse { - entry_gateways: value.entry_gateways.iter().copied().collect(), - exit_gateways: 
value.exit_gateways.iter().copied().collect(), - layer1: value.layer1.iter().copied().collect(), - layer2: value.layer2.iter().copied().collect(), - layer3: value.layer3.iter().copied().collect(), - standby: value.standby.iter().copied().collect(), - } - } -} - -impl CachedRewardedSet { - pub(crate) fn role(&self, node_id: NodeId) -> Option { - if self.entry_gateways.contains(&node_id) { - Some(Role::EntryGateway) - } else if self.exit_gateways.contains(&node_id) { - Some(Role::ExitGateway) - } else if self.layer1.contains(&node_id) { - Some(Role::Layer1) - } else if self.layer2.contains(&node_id) { - Some(Role::Layer2) - } else if self.layer3.contains(&node_id) { - Some(Role::Layer3) - } else if self.standby.contains(&node_id) { - Some(Role::Standby) - } else { - None - } - } - - pub fn try_get_mix_layer(&self, node_id: &NodeId) -> Option { - if self.layer1.contains(node_id) { - Some(1) - } else if self.layer2.contains(node_id) { - Some(2) - } else if self.layer3.contains(node_id) { - Some(3) - } else { - None - } - } - - pub fn is_standby(&self, node_id: &NodeId) -> bool { - self.standby.contains(node_id) - } - - pub fn is_active_mixnode(&self, node_id: &NodeId) -> bool { - self.layer1.contains(node_id) - || self.layer2.contains(node_id) - || self.layer3.contains(node_id) - } - - #[allow(dead_code)] - pub(crate) fn gateways(&self) -> HashSet { - let mut gateways = - HashSet::with_capacity(self.entry_gateways.len() + self.exit_gateways.len()); - gateways.extend(&self.entry_gateways); - gateways.extend(&self.exit_gateways); - gateways - } - - pub(crate) fn active_mixnodes(&self) -> HashSet { - let mut mixnodes = - HashSet::with_capacity(self.layer1.len() + self.layer2.len() + self.layer3.len()); - mixnodes.extend(&self.layer1); - mixnodes.extend(&self.layer2); - mixnodes.extend(&self.layer3); - mixnodes - } -} - #[derive(Clone)] pub(crate) struct ConfigScoreData { pub(crate) config_score_params: ConfigScoreParams, @@ -150,7 +36,7 @@ pub(crate) struct 
ContractCacheData { pub(crate) legacy_mixnodes: Cache>, pub(crate) legacy_gateways: Cache>, pub(crate) nym_nodes: Cache>, - pub(crate) rewarded_set: Cache, + pub(crate) rewarded_set: Cache, // this purposely does not deal with nym-nodes as they don't have a concept of a blacklist. // instead clients are meant to be filtering out them themselves based on the provided scores. diff --git a/nym-api/src/nym_contract_cache/cache/mod.rs b/nym-api/src/nym_contract_cache/cache/mod.rs index 1738901550..b4c184d4b6 100644 --- a/nym-api/src/nym_contract_cache/cache/mod.rs +++ b/nym-api/src/nym_contract_cache/cache/mod.rs @@ -11,9 +11,10 @@ use nym_api_requests::legacy::{ use nym_api_requests::models::MixnodeStatus; use nym_crypto::asymmetric::ed25519; use nym_mixnet_contract_common::{ - ConfigScoreParams, HistoricalNymNodeVersionEntry, Interval, NodeId, NymNodeDetails, - RewardedSet, RewardingParams, + ConfigScoreParams, EpochRewardedSet, HistoricalNymNodeVersionEntry, Interval, NodeId, + NymNodeDetails, RewardingParams, }; +use nym_topology::CachedEpochRewardedSet; use std::{ collections::HashSet, sync::{ @@ -29,8 +30,6 @@ use tracing::{debug, error}; pub(crate) mod data; pub(crate) mod refresher; -pub(crate) use self::data::CachedRewardedSet; - const CACHE_TIMEOUT_MS: u64 = 100; #[derive(Clone)] @@ -80,7 +79,7 @@ impl NymContractCache { mixnodes: Vec, gateways: Vec, nym_nodes: Vec, - rewarded_set: RewardedSet, + rewarded_set: EpochRewardedSet, config_score_params: ConfigScoreParams, nym_node_version_history: Vec, rewarding_params: RewardingParams, @@ -264,11 +263,11 @@ impl NymContractCache { .into_inner() } - pub async fn rewarded_set(&self) -> Option>> { + pub async fn rewarded_set(&self) -> Option>> { self.get(|cache| &cache.rewarded_set).await } - pub async fn rewarded_set_owned(&self) -> Cache { + pub async fn rewarded_set_owned(&self) -> Cache { self.get_owned(|cache| cache.rewarded_set.clone_cache()) .await .unwrap_or_default() diff --git 
a/nym-api/src/nym_contract_cache/cache/refresher.rs b/nym-api/src/nym_contract_cache/cache/refresher.rs index 6681ac669e..e4f57dae63 100644 --- a/nym-api/src/nym_contract_cache/cache/refresher.rs +++ b/nym-api/src/nym_contract_cache/cache/refresher.rs @@ -9,7 +9,7 @@ use anyhow::Result; use nym_api_requests::legacy::{ LegacyGatewayBondWithId, LegacyMixNodeBondWithLayer, LegacyMixNodeDetailsWithLayer, }; -use nym_mixnet_contract_common::{LegacyMixLayer, RewardedSet}; +use nym_mixnet_contract_common::{EpochRewardedSet, LegacyMixLayer}; use nym_task::TaskClient; use nym_validator_client::nyxd::contract_traits::{ MixnetQueryClient, NymContractsProvider, VestingQueryClient, @@ -141,9 +141,21 @@ impl NymContractCacheRefresher { } let rewarded_set = self.get_rewarded_set().await; - let layer1 = rewarded_set.layer1.iter().collect::>(); - let layer2 = rewarded_set.layer2.iter().collect::>(); - let layer3 = rewarded_set.layer3.iter().collect::>(); + let layer1 = rewarded_set + .assignment + .layer1 + .iter() + .collect::>(); + let layer2 = rewarded_set + .assignment + .layer2 + .iter() + .collect::>(); + let layer3 = rewarded_set + .assignment + .layer3 + .iter() + .collect::>(); let layer_choices = [ LegacyMixLayer::One, @@ -209,7 +221,7 @@ impl NymContractCacheRefresher { Ok(()) } - async fn get_rewarded_set(&self) -> RewardedSet { + async fn get_rewarded_set(&self) -> EpochRewardedSet { self.nyxd_client .get_rewarded_set_nodes() .await diff --git a/nym-api/src/nym_nodes/handlers/mod.rs b/nym-api/src/nym_nodes/handlers/mod.rs index bae793b15a..6474088b3b 100644 --- a/nym-api/src/nym_nodes/handlers/mod.rs +++ b/nym-api/src/nym_nodes/handlers/mod.rs @@ -19,7 +19,6 @@ use nym_mixnet_contract_common::reward_params::Performance; use nym_mixnet_contract_common::NymNodeDetails; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use std::ops::Deref; use std::time::Duration; use time::{Date, OffsetDateTime}; use utoipa::{IntoParams, ToSchema}; @@ -62,9 +61,13 @@ async 
fn rewarded_set(State(state): State) -> AxumResult( - rewarded_set: &CachedRewardedSet, + rewarded_set: &CachedEpochRewardedSet, nym_nodes_subset: NI, annotations: &HashMap, active_only: bool, @@ -55,7 +55,7 @@ where /// Given all relevant caches, add appropriate legacy nodes to the part of the response fn add_legacy( nodes: &mut Vec, - rewarded_set: &CachedRewardedSet, + rewarded_set: &CachedEpochRewardedSet, describe_cache: &DescribedNodes, annotated_legacy_nodes: &HashMap, active_only: bool, diff --git a/nym-api/src/support/http/state.rs b/nym-api/src/support/http/state.rs index 6c277441ea..9d55ab3bf1 100644 --- a/nym-api/src/support/http/state.rs +++ b/nym-api/src/support/http/state.rs @@ -8,7 +8,7 @@ use crate::node_describe_cache::DescribedNodes; use crate::node_status_api::handlers::unstable; use crate::node_status_api::models::AxumErrorResponse; use crate::node_status_api::NodeStatusCache; -use crate::nym_contract_cache::cache::{CachedRewardedSet, NymContractCache}; +use crate::nym_contract_cache::cache::NymContractCache; use crate::status::ApiStatusState; use crate::support::caching::cache::SharedCache; use crate::support::caching::Cache; @@ -17,6 +17,7 @@ use axum::extract::FromRef; use nym_api_requests::models::{GatewayBondAnnotated, MixNodeBondAnnotated, NodeAnnotation}; use nym_mixnet_contract_common::NodeId; use nym_task::TaskManager; +use nym_topology::CachedEpochRewardedSet; use std::collections::HashMap; use std::sync::Arc; use time::OffsetDateTime; @@ -166,7 +167,7 @@ impl AppState { pub(crate) async fn rewarded_set( &self, - ) -> Result>, AxumErrorResponse> { + ) -> Result>, AxumErrorResponse> { self.nym_contract_cache() .rewarded_set() .await diff --git a/nym-api/src/support/legacy_helpers.rs b/nym-api/src/support/legacy_helpers.rs index d5135c7643..abc95b1b6d 100644 --- a/nym-api/src/support/legacy_helpers.rs +++ b/nym-api/src/support/legacy_helpers.rs @@ -10,6 +10,25 @@ use nym_mixnet_contract_common::{ Gateway, GatewayBond, LegacyMixLayer, 
MixNode, MixNodeBond, NymNodeDetails, }; use rand::prelude::SliceRandom; +use std::net::{IpAddr, ToSocketAddrs}; +use std::str::FromStr; + +pub(crate) fn legacy_host_to_ips_and_hostname( + legacy: &str, +) -> Option<(Vec, Option)> { + if let Ok(ip) = IpAddr::from_str(legacy) { + return Some((vec![ip], None)); + } + + let resolved = (legacy, 1789u16) + .to_socket_addrs() + .ok()? + .collect::>(); + Some(( + resolved.into_iter().map(|s| s.ip()).collect(), + Some(legacy.to_string()), + )) +} pub(crate) fn to_legacy_mixnode( nym_node: &NymNodeDetails, diff --git a/nym-api/src/support/nyxd/mod.rs b/nym-api/src/support/nyxd/mod.rs index cb97f75834..180d1810e4 100644 --- a/nym-api/src/support/nyxd/mod.rs +++ b/nym-api/src/support/nyxd/mod.rs @@ -29,8 +29,9 @@ use nym_mixnet_contract_common::mixnode::MixNodeDetails; use nym_mixnet_contract_common::nym_node::Role; use nym_mixnet_contract_common::reward_params::RewardingParams; use nym_mixnet_contract_common::{ - ConfigScoreParams, CurrentIntervalResponse, EpochStatus, ExecuteMsg, GatewayBond, - HistoricalNymNodeVersionEntry, IdentityKey, NymNodeDetails, RewardedSet, RoleAssignment, + ConfigScoreParams, CurrentIntervalResponse, EpochRewardedSet, EpochStatus, ExecuteMsg, + GatewayBond, HistoricalNymNodeVersionEntry, IdentityKey, NymNodeDetails, RewardedSet, + RoleAssignment, }; use nym_validator_client::coconut::EcashApiError; use nym_validator_client::nyxd::contract_traits::mixnet_query_client::MixnetQueryClientExt; @@ -252,7 +253,7 @@ impl Client { nyxd_query!(self, get_rewarding_parameters().await) } - pub(crate) async fn get_rewarded_set_nodes(&self) -> Result { + pub(crate) async fn get_rewarded_set_nodes(&self) -> Result { nyxd_query!(self, get_rewarded_set().await) } diff --git a/nym-api/src/support/storage/mod.rs b/nym-api/src/support/storage/mod.rs index dc5e4c2560..e1d6b729c7 100644 --- a/nym-api/src/support/storage/mod.rs +++ b/nym-api/src/support/storage/mod.rs @@ -623,21 +623,21 @@ impl NymApiStorage { // we MUST 
have those entries in the database, otherwise the route wouldn't have been chosen // in the first place let layer1_mix_db_id = self - .get_mixnode_database_id(test_route.layer_one_mix().mix_id) + .get_mixnode_database_id(test_route.layer_one_mix().node_id) .await? .ok_or_else(|| NymApiStorageError::DatabaseInconsistency { reason: format!("could not get db id for layer1 mixnode from network monitor run {monitor_run_db_id}"), })?; let layer2_mix_db_id = self - .get_mixnode_database_id(test_route.layer_two_mix().mix_id) + .get_mixnode_database_id(test_route.layer_two_mix().node_id) .await? .ok_or_else(|| NymApiStorageError::DatabaseInconsistency { reason: format!("could not get db id for layer2 mixnode from network monitor run {monitor_run_db_id}"), })?; let layer3_mix_db_id = self - .get_mixnode_database_id(test_route.layer_three_mix().mix_id) + .get_mixnode_database_id(test_route.layer_three_mix().node_id) .await? .ok_or_else(|| NymApiStorageError::DatabaseInconsistency { reason: format!("could not get db id for layer3 mixnode from network monitor run {monitor_run_db_id}"), diff --git a/nym-network-monitor/src/accounting.rs b/nym-network-monitor/src/accounting.rs index 2fcacee75a..df049b4ab6 100644 --- a/nym-network-monitor/src/accounting.rs +++ b/nym-network-monitor/src/accounting.rs @@ -7,7 +7,7 @@ use anyhow::Result; use futures::{pin_mut, stream::FuturesUnordered, StreamExt}; use log::{debug, info}; use nym_sphinx::chunking::{monitoring, SentFragment}; -use nym_topology::{gateway, mix, NymTopology}; +use nym_topology::{NymRouteProvider, RoutingNode}; use nym_types::monitoring::{MonitorMessage, NodeResult}; use nym_validator_client::nym_api::routes::{API_VERSION, STATUS, SUBMIT_GATEWAY, SUBMIT_NODE}; use rand::SeedableRng; @@ -19,8 +19,8 @@ use utoipa::ToSchema; use crate::{NYM_API_URL, PRIVATE_KEY, TOPOLOGY}; struct HydratedRoute { - mix_nodes: Vec, - gateway_node: gateway::LegacyNode, + mix_nodes: Vec, + gateway_node: RoutingNode, } #[derive(Serialize, 
Deserialize, Debug, Default, ToSchema)] @@ -61,12 +61,12 @@ pub struct NetworkAccount { gateway_stats: HashMap, incomplete_routes: Vec>, #[serde(skip)] - topology: NymTopology, + topology: NymRouteProvider, tested_nodes: HashSet, #[serde(skip)] - mix_details: HashMap, + mix_details: HashMap, #[serde(skip)] - gateway_details: HashMap, + gateway_details: HashMap, } impl NetworkAccount { @@ -126,7 +126,7 @@ impl NetworkAccount { fn new() -> Self { let topology = TOPOLOGY.get().expect("Topology not set yet!").clone(); let mut account = NetworkAccount { - topology, + topology: NymRouteProvider::new(topology, true), ..Default::default() }; for fragment_set in monitoring::FRAGMENTS_SENT.iter() { @@ -162,14 +162,12 @@ impl NetworkAccount { fn hydrate_route(&self, fragment: SentFragment) -> anyhow::Result { let mut rng = ChaCha8Rng::seed_from_u64(fragment.seed() as u64); - let (nodes, gw) = self.topology.random_path_to_gateway( - &mut rng, - fragment.mixnet_params().hops(), - fragment.mixnet_params().destination(), - )?; + let (nodes, gw) = self + .topology + .random_path_to_egress(&mut rng, fragment.mixnet_params().destination())?; Ok(HydratedRoute { - mix_nodes: nodes, - gateway_node: gw, + mix_nodes: nodes.into_iter().cloned().collect(), + gateway_node: gw.clone(), }) } @@ -181,11 +179,11 @@ impl NetworkAccount { let mix_ids = route .mix_nodes .iter() - .map(|n| n.mix_id) + .map(|n| n.node_id) .collect::>(); self.tested_nodes.extend(&mix_ids); self.mix_details - .extend(route.mix_nodes.iter().map(|n| (n.mix_id, n.clone()))); + .extend(route.mix_nodes.iter().map(|n| (n.node_id, n.clone()))); let gateway_stats_entry = self .gateway_stats .entry(route.gateway_node.identity_key.to_base58_string()) diff --git a/nym-network-monitor/src/main.rs b/nym-network-monitor/src/main.rs index 89fee6df6f..26476813a4 100644 --- a/nym-network-monitor/src/main.rs +++ b/nym-network-monitor/src/main.rs @@ -3,6 +3,7 @@ use accounting::submit_metrics; use anyhow::Result; use clap::Parser; use 
log::{error, info, warn}; +use nym_bin_common::bin_info; use nym_client_core::ForgetMe; use nym_crypto::asymmetric::ed25519::PrivateKey; use nym_network_defaults::setup_env; @@ -158,6 +159,26 @@ fn generate_key_pair() -> Result<()> { Ok(()) } +async fn nym_topology_from_env() -> anyhow::Result { + let api_url = std::env::var(NYM_API)?; + + info!("Generating topology from {api_url}"); + let client = nym_validator_client::client::NymApiClient::new_with_user_agent( + api_url.parse()?, + bin_info!(), + ); + + let rewarded_set = client.get_current_rewarded_set().await?; + + // just get all nodes to make our lives easier because it's just one query for the whole duration of the monitor (?) + let nodes = client.get_all_basic_nodes().await?; + + let mut topology = NymTopology::new_empty(rewarded_set); + topology.add_skimmed_nodes(&nodes); + + Ok(topology) +} + #[tokio::main] async fn main() -> Result<()> { nym_bin_common::logging::setup_logging(); @@ -187,7 +208,7 @@ async fn main() -> Result<()> { .set(if let Some(topology_file) = args.topology { NymTopology::new_from_file(topology_file)? } else { - NymTopology::new_from_env().await? + nym_topology_from_env().await? 
}) .ok(); diff --git a/nym-node/src/node/mod.rs b/nym-node/src/node/mod.rs index 1311bb4f57..86087582e1 100644 --- a/nym-node/src/node/mod.rs +++ b/nym-node/src/node/mod.rs @@ -41,7 +41,6 @@ use nym_node_requests::api::v1::node::models::{AnnouncePorts, NodeDescription}; use nym_sphinx_acknowledgements::AckKey; use nym_sphinx_addressing::Recipient; use nym_task::{TaskClient, TaskManager}; -use nym_topology::NetworkAddress; use nym_validator_client::client::NymApiClientExt; use nym_validator_client::models::NodeRefreshBody; use nym_validator_client::{NymApiClient, UserAgent}; @@ -535,8 +534,10 @@ impl NymNode { )) } - fn as_gateway_topology_node(&self) -> Result { - let Some(ip) = self.config.host.public_ips.first() else { + fn as_gateway_topology_node(&self) -> Result { + let ip_addresses = self.config.host.public_ips.clone(); + + let Some(ip) = ip_addresses.first() else { return Err(NymNodeError::NoPublicIps); }; @@ -553,15 +554,22 @@ impl NymNode { .announce_ws_port .unwrap_or(self.config.gateway_tasks.bind_address.port()); - Ok(nym_topology::gateway::LegacyNode { + Ok(nym_topology::RoutingNode { node_id: u32::MAX, mix_host, - host: NetworkAddress::IpAddr(*ip), - clients_ws_port, - clients_wss_port: self.config.gateway_tasks.announce_wss_port, + entry: Some(nym_topology::EntryDetails { + ip_addresses, + clients_ws_port, + hostname: self.config.host.hostname.clone(), + clients_wss_port: self.config.gateway_tasks.announce_wss_port, + }), sphinx_key: *self.x25519_sphinx_key(), identity_key: *self.ed25519_identity_key(), - version: env!("CARGO_PKG_VERSION").into(), + supported_roles: nym_topology::SupportedRoles { + mixnode: false, + mixnet_entry: true, + mixnet_exit: true, + }, }) } diff --git a/nym-node/src/node/shared_topology.rs b/nym-node/src/node/shared_topology.rs index 594fb5f4fb..b65f1e1a5b 100644 --- a/nym-node/src/node/shared_topology.rs +++ b/nym-node/src/node/shared_topology.rs @@ -3,7 +3,8 @@ use async_trait::async_trait; use 
nym_gateway::node::{NymApiTopologyProvider, NymApiTopologyProviderConfig, UserAgent}; -use nym_topology::{gateway, NymTopology, TopologyProvider}; +use nym_topology::node::RoutingNode; +use nym_topology::{NymTopology, Role, TopologyProvider}; use std::sync::Arc; use std::time::Duration; use time::OffsetDateTime; @@ -20,7 +21,7 @@ pub struct NymNodeTopologyProvider { impl NymNodeTopologyProvider { pub fn new( - gateway_node: gateway::LegacyNode, + gateway_node: RoutingNode, cache_ttl: Duration, user_agent: UserAgent, nym_api_url: Vec, @@ -31,6 +32,8 @@ impl NymNodeTopologyProvider { NymApiTopologyProviderConfig { min_mixnode_performance: 50, min_gateway_performance: 0, + use_extended_topology: false, + ignore_egress_epoch_role: true, }, nym_api_url, Some(user_agent), @@ -49,7 +52,7 @@ struct NymNodeTopologyProviderInner { cache_ttl: Duration, cached_at: OffsetDateTime, cached: Option, - gateway_node: gateway::LegacyNode, + gateway_node: RoutingNode, } impl NymNodeTopologyProviderInner { @@ -67,13 +70,14 @@ impl NymNodeTopologyProviderInner { let updated_cache = match self.inner.get_new_topology().await { None => None, Some(mut base) => { - if !base.gateway_exists(&self.gateway_node.identity_key) { + if !base.has_node_details(self.gateway_node.node_id) { debug!( "{} didn't exist in topology. 
inserting it.", self.gateway_node.identity_key ); - base.insert_gateway(self.gateway_node.clone()); + base.insert_node_details(self.gateway_node.clone()); } + base.force_set_active(self.gateway_node.node_id, Role::EntryGateway); Some(base) } }; diff --git a/sdk/rust/nym-sdk/examples/custom_topology_provider.rs b/sdk/rust/nym-sdk/examples/custom_topology_provider.rs index df5f4ef5a6..7cd5a6f50c 100644 --- a/sdk/rust/nym-sdk/examples/custom_topology_provider.rs +++ b/sdk/rust/nym-sdk/examples/custom_topology_provider.rs @@ -4,7 +4,7 @@ use nym_sdk::mixnet; use nym_sdk::mixnet::MixnetMessageSender; use nym_topology::provider_trait::{async_trait, TopologyProvider}; -use nym_topology::{nym_topology_from_basic_info, NymTopology}; +use nym_topology::NymTopology; use url::Url; struct MyTopologyProvider { @@ -19,6 +19,14 @@ impl MyTopologyProvider { } async fn get_topology(&self) -> NymTopology { + let rewarded_set = self + .validator_client + .get_current_rewarded_set() + .await + .unwrap(); + + let mut base_topology = NymTopology::new_empty(rewarded_set); + let mixnodes = self .validator_client .get_all_basic_active_mixing_assigned_nodes() @@ -39,7 +47,9 @@ impl MyTopologyProvider { .await .unwrap(); - nym_topology_from_basic_info(&filtered_mixnodes, &gateways) + base_topology.add_skimmed_nodes(&filtered_mixnodes); + base_topology.add_skimmed_nodes(&gateways); + base_topology } } diff --git a/sdk/rust/nym-sdk/examples/geo_topology_provider.rs b/sdk/rust/nym-sdk/examples/geo_topology_provider.rs deleted file mode 100644 index 3b327d7377..0000000000 --- a/sdk/rust/nym-sdk/examples/geo_topology_provider.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2023 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 - -use nym_sdk::mixnet; -use nym_sdk::mixnet::MixnetMessageSender; - -#[tokio::main] -async fn main() { - nym_bin_common::logging::setup_logging(); - - let nym_api = "https://validator.nymtech.net/api/".parse().unwrap(); - - // We can group on something which is 
to a first approximation a continent. - let group_by = mixnet::GroupBy::CountryGroup(mixnet::CountryGroup::Europe); - - // ... or on a nym-address. This means we use the geo location of the gateway that the - // nym-address is connected to. - //let group_by = GroupBy::NymAddress("id.enc@gateway".parse().unwrap()); - - let geo_topology_provider = mixnet::GeoAwareTopologyProvider::new( - vec![nym_api], - // We filter on the version of the mixnodes. Be prepared to manually update - // this to keep this example working, as we can't (currently) fetch to current - // latest version. - group_by, - ); - - // Passing no config makes the client fire up an ephemeral session and figure things out on its own - let mut client = mixnet::MixnetClientBuilder::new_ephemeral() - .custom_topology_provider(Box::new(geo_topology_provider)) - .build() - .unwrap() - .connect_to_mixnet() - .await - .unwrap(); - - let our_address = client.nym_address(); - println!("Our client nym address is: {our_address}"); - - // Send a message through the mixnet to ourselves - client - .send_plain_message(*our_address, "hello there") - .await - .unwrap(); - - println!("Waiting for message (ctrl-c to exit)"); - client - .on_messages(|msg| println!("Received: {}", String::from_utf8_lossy(&msg.message))) - .await; -} diff --git a/sdk/rust/nym-sdk/examples/manually_overwrite_topology.rs b/sdk/rust/nym-sdk/examples/manually_overwrite_topology.rs index 67f4a773a7..38c2b3d7f3 100644 --- a/sdk/rust/nym-sdk/examples/manually_overwrite_topology.rs +++ b/sdk/rust/nym-sdk/examples/manually_overwrite_topology.rs @@ -3,9 +3,7 @@ use nym_sdk::mixnet; use nym_sdk::mixnet::MixnetMessageSender; -use nym_topology::mix::LegacyMixLayer; -use nym_topology::{mix, NymTopology}; -use std::collections::BTreeMap; +use nym_topology::{NymTopology, RoutingNode, SupportedRoles}; #[tokio::main] async fn main() { @@ -13,63 +11,74 @@ async fn main() { // Passing no config makes the client fire up an ephemeral session and figure shit out 
on its own let mut client = mixnet::MixnetClient::connect_new().await.unwrap(); - let starting_topology = client.read_current_topology().await.unwrap(); + let starting_topology = client.read_current_route_provider().await.unwrap().clone(); // but we don't like our default topology, we want to use only those very specific, hardcoded, nodes: - let mut mixnodes = BTreeMap::new(); - mixnodes.insert( - 1, - vec![mix::LegacyNode { - mix_id: 63, - host: "172.105.92.48".parse().unwrap(), + let nodes = vec![ + RoutingNode { + node_id: 63, mix_host: "172.105.92.48:1789".parse().unwrap(), + entry: None, identity_key: "GLdR2NRVZBiCoCbv4fNqt9wUJZAnNjGXHkx3TjVAUzrK" .parse() .unwrap(), sphinx_key: "CBmYewWf43iarBq349KhbfYMc9ys2ebXWd4Vp4CLQ5Rq" .parse() .unwrap(), - layer: LegacyMixLayer::One, - version: "1.1.0".into(), - }], - ); - mixnodes.insert( - 2, - vec![mix::LegacyNode { - mix_id: 23, - host: "178.79.143.65".parse().unwrap(), + supported_roles: SupportedRoles { + mixnode: true, + mixnet_entry: false, + mixnet_exit: false, + }, + }, + RoutingNode { + node_id: 23, mix_host: "178.79.143.65:1789".parse().unwrap(), + entry: None, identity_key: "4Yr4qmEHd9sgsuQ83191FR2hD88RfsbMmB4tzhhZWriz" .parse() .unwrap(), sphinx_key: "8ndjk5oZ6HxUZNScLJJ7hk39XtUqGexdKgW7hSX6kpWG" .parse() .unwrap(), - layer: LegacyMixLayer::Two, - version: "1.1.0".into(), - }], - ); - mixnodes.insert( - 3, - vec![mix::LegacyNode { - mix_id: 66, - host: "139.162.247.97".parse().unwrap(), + supported_roles: SupportedRoles { + mixnode: true, + mixnet_entry: false, + mixnet_exit: false, + }, + }, + RoutingNode { + node_id: 66, mix_host: "139.162.247.97:1789".parse().unwrap(), + entry: None, identity_key: "66UngapebhJRni3Nj52EW1qcNsWYiuonjkWJzHFsmyYY" .parse() .unwrap(), sphinx_key: "7KyZh8Z8KxuVunqytAJ2eXFuZkCS7BLTZSzujHJZsGa2" .parse() .unwrap(), - layer: LegacyMixLayer::Three, - version: "1.1.0".into(), - }], - ); + supported_roles: SupportedRoles { + mixnode: true, + mixnet_entry: false, + mixnet_exit: 
false, + }, + }, + ]; + + // make sure our custom nodes are in the fake rewarded set (so they'd be used by default by the client) + let mut rewarded_set = starting_topology.topology.rewarded_set().clone(); + rewarded_set.layer1.insert(nodes[0].node_id); + rewarded_set.layer2.insert(nodes[1].node_id); + rewarded_set.layer3.insert(nodes[2].node_id); // but we like the available gateways, so keep using them! // (we like them because the author of this example is too lazy to use the same hardcoded gateway // during client initialisation to make sure we are able to send to ourselves : ) ) - let custom_topology = NymTopology::new(mixnodes, starting_topology.gateways().to_vec()); + let gateways = starting_topology.topology.entry_capable_nodes(); + + let mut custom_topology = NymTopology::new_empty(rewarded_set); + custom_topology.add_routing_nodes(nodes); + custom_topology.add_routing_nodes(gateways); client.manually_overwrite_topology(custom_topology).await; diff --git a/sdk/rust/nym-sdk/src/lib.rs b/sdk/rust/nym-sdk/src/lib.rs index 5b8afd246c..22e954e926 100644 --- a/sdk/rust/nym-sdk/src/lib.rs +++ b/sdk/rust/nym-sdk/src/lib.rs @@ -10,6 +10,7 @@ pub mod mixnet; pub mod tcp_proxy; pub use error::{Error, Result}; +#[allow(deprecated)] pub use nym_client_core::{ client::{ mix_traffic::transceiver::*, diff --git a/sdk/rust/nym-sdk/src/mixnet.rs b/sdk/rust/nym-sdk/src/mixnet.rs index f8c226a7e7..6fc8cae2dc 100644 --- a/sdk/rust/nym-sdk/src/mixnet.rs +++ b/sdk/rust/nym-sdk/src/mixnet.rs @@ -42,6 +42,7 @@ pub use client::{DisconnectedMixnetClient, IncludedSurbs, MixnetClientBuilder}; pub use config::Config; pub use native_client::MixnetClient; pub use native_client::MixnetClientSender; +#[allow(deprecated)] pub use nym_client_core::{ client::{ base_client::storage::{ diff --git a/sdk/rust/nym-sdk/src/mixnet/client.rs b/sdk/rust/nym-sdk/src/mixnet/client.rs index 249bbd182c..b5eff8eba3 100644 --- a/sdk/rust/nym-sdk/src/mixnet/client.rs +++ 
b/sdk/rust/nym-sdk/src/mixnet/client.rs @@ -180,6 +180,18 @@ where self } + #[must_use] + pub fn with_extended_topology(mut self, use_extended_topology: bool) -> Self { + self.config.debug_config.topology.use_extended_topology = use_extended_topology; + self + } + + #[must_use] + pub fn with_ignore_epoch_roles(mut self, ignore_epoch_roles: bool) -> Self { + self.config.debug_config.topology.ignore_egress_epoch_role = ignore_epoch_roles; + self + } + /// Use a specific network instead of the default (mainnet) one. #[must_use] pub fn network_details(mut self, network_details: NymNetworkDetails) -> Self { diff --git a/sdk/rust/nym-sdk/src/mixnet/native_client.rs b/sdk/rust/nym-sdk/src/mixnet/native_client.rs index 3da173a605..14b537753e 100644 --- a/sdk/rust/nym-sdk/src/mixnet/native_client.rs +++ b/sdk/rust/nym-sdk/src/mixnet/native_client.rs @@ -18,10 +18,11 @@ use nym_task::{ connections::{ConnectionCommandSender, LaneQueueLengths}, TaskHandle, }; -use nym_topology::NymTopology; +use nym_topology::{NymRouteProvider, NymTopology}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; +use tokio::sync::RwLockReadGuard; /// Client connected to the Nym mixnet. pub struct MixnetClient { @@ -159,8 +160,11 @@ impl MixnetClient { } /// Gets the value of the currently used network topology. - pub async fn read_current_topology(&self) -> Option { - self.client_state.topology_accessor.current_topology().await + pub async fn read_current_route_provider(&self) -> Option> { + self.client_state + .topology_accessor + .current_route_provider() + .await } /// Restore default topology refreshing behaviour of this client. 
diff --git a/sdk/rust/nym-sdk/src/mixnet/socks5_client.rs b/sdk/rust/nym-sdk/src/mixnet/socks5_client.rs index 6bc2a46a06..6cbf837c29 100644 --- a/sdk/rust/nym-sdk/src/mixnet/socks5_client.rs +++ b/sdk/rust/nym-sdk/src/mixnet/socks5_client.rs @@ -74,11 +74,6 @@ impl Socks5MixnetClient { .await } - /// Gets the value of the currently used network topology. - pub async fn read_current_topology(&self) -> Option { - self.client_state.topology_accessor.current_topology().await - } - /// Restore default topology refreshing behaviour of this client. pub fn restore_automatic_topology_refreshing(&self) { self.client_state.topology_accessor.release_manual_control() diff --git a/service-providers/ip-packet-router/src/connected_client_handler.rs b/service-providers/ip-packet-router/src/connected_client_handler.rs index 8754fb8394..a1904eba5f 100644 --- a/service-providers/ip-packet-router/src/connected_client_handler.rs +++ b/service-providers/ip-packet-router/src/connected_client_handler.rs @@ -22,9 +22,6 @@ pub(crate) struct ConnectedClientHandler { // The address of the client that this handler is connected to nym_address: Recipient, - // The number of hops the packet should take before reaching the client - mix_hops: Option, - // Channel to receive packets from the tun_listener forward_from_tun_rx: tokio::sync::mpsc::UnboundedReceiver>, @@ -47,7 +44,6 @@ pub(crate) struct ConnectedClientHandler { impl ConnectedClientHandler { pub(crate) fn start( reply_to: Recipient, - reply_to_hops: Option, buffer_timeout: std::time::Duration, client_version: SupportedClientVersion, mixnet_client_sender: nym_sdk::mixnet::MixnetClientSender, @@ -67,7 +63,6 @@ impl ConnectedClientHandler { let connected_client_handler = ConnectedClientHandler { nym_address: reply_to, - mix_hops: reply_to_hops, forward_from_tun_rx, mixnet_client_sender, close_rx, @@ -98,7 +93,7 @@ impl ConnectedClientHandler { } .map_err(|err| IpPacketRouterError::FailedToSerializeResponsePacket { source: err })?; - let 
input_message = create_input_message(self.nym_address, response_packet, self.mix_hops); + let input_message = create_input_message(self.nym_address, response_packet); self.mixnet_client_sender .send(input_message) diff --git a/service-providers/ip-packet-router/src/lib.rs b/service-providers/ip-packet-router/src/lib.rs index 3d464b05c3..cb4a84ec65 100644 --- a/service-providers/ip-packet-router/src/lib.rs +++ b/service-providers/ip-packet-router/src/lib.rs @@ -11,6 +11,7 @@ pub mod error; mod ip_packet_router; mod mixnet_client; mod mixnet_listener; +pub(crate) mod non_linux_dummy; pub mod request_filter; mod tun_listener; mod util; diff --git a/service-providers/ip-packet-router/src/mixnet_listener.rs b/service-providers/ip-packet-router/src/mixnet_listener.rs index 914cd03b36..6e6980ccd4 100644 --- a/service-providers/ip-packet-router/src/mixnet_listener.rs +++ b/service-providers/ip-packet-router/src/mixnet_listener.rs @@ -1,7 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -use std::sync::Arc; -use std::{collections::HashMap, net::SocketAddr}; - use bytes::{Bytes, BytesMut}; use futures::StreamExt; use nym_ip_packet_requests::v7::response::{ @@ -23,8 +19,10 @@ use nym_ip_packet_requests::{ use nym_sdk::mixnet::{MixnetMessageSender, Recipient}; use nym_sphinx::receiver::ReconstructedMessage; use nym_task::TaskHandle; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +use std::sync::Arc; +use std::{collections::HashMap, net::SocketAddr}; use tap::TapFallible; -#[cfg(target_os = "linux")] use tokio::io::AsyncWriteExt; use tokio::sync::RwLock; use tokio_util::codec::Decoder; @@ -95,6 +93,7 @@ impl ConnectedClients { }) } + #[allow(dead_code)] fn lookup_client_from_nym_address(&self, nym_address: &Recipient) -> Option<&ConnectedClient> { self.clients_ipv4_mapping .iter() @@ -111,7 +110,6 @@ impl ConnectedClients { &mut self, ips: IpPair, nym_address: Recipient, - mix_hops: Option, forward_from_tun_tx: tokio::sync::mpsc::UnboundedSender>, close_tx: 
tokio::sync::oneshot::Sender<()>, handle: tokio::task::JoinHandle<()>, @@ -121,7 +119,6 @@ impl ConnectedClients { let client = ConnectedClient { nym_address, ipv6: ips.ipv6, - mix_hops, last_activity: Arc::new(RwLock::new(std::time::Instant::now())), _close_tx: Arc::new(CloseTx { nym_address, @@ -236,9 +233,6 @@ pub(crate) struct ConnectedClient { // The assigned IPv6 address of this client pub(crate) ipv6: Ipv6Addr, - // Number of mix node hops that the client has requested to use - pub(crate) mix_hops: Option, - // Keep track of last activity so we can disconnect inactive clients pub(crate) last_activity: Arc>, @@ -390,7 +384,13 @@ impl Response { } } +#[cfg(not(target_os = "linux"))] +type TunDevice = crate::non_linux_dummy::DummyDevice; + #[cfg(target_os = "linux")] +type TunDevice = tokio_tun::Tun; + +// #[cfg(target_os = "linux")] pub(crate) struct MixnetListener { // The configuration for the mixnet listener pub(crate) _config: Config, @@ -399,7 +399,7 @@ pub(crate) struct MixnetListener { pub(crate) request_filter: request_filter::RequestFilter, // The TUN device that we use to send and receive packets from the internet - pub(crate) tun_writer: tokio::io::WriteHalf, + pub(crate) tun_writer: tokio::io::WriteHalf, // The mixnet client that we use to send and receive packets from the mixnet pub(crate) mixnet_client: nym_sdk::mixnet::MixnetClient, @@ -412,7 +412,7 @@ pub(crate) struct MixnetListener { pub(crate) connected_clients: ConnectedClients, } -#[cfg(target_os = "linux")] +// #[cfg(target_os = "linux")] impl MixnetListener { // Receving a static connect request from a client with an IP provided that we assign to them, // if it's available. If it's not available, we send a failure response. 
@@ -429,7 +429,6 @@ impl MixnetListener { let request_id = connect_request.request_id; let requested_ips = connect_request.ips; let reply_to = connect_request.reply_to; - let reply_to_hops = connect_request.reply_to_hops; // TODO: add to connect request let buffer_timeout = nym_ip_packet_requests::codec::BUFFER_TIMEOUT; // TODO: ignoring reply_to_avg_mix_delays for now @@ -464,7 +463,6 @@ impl MixnetListener { let (forward_from_tun_tx, close_tx, handle) = connected_client_handler::ConnectedClientHandler::start( reply_to, - reply_to_hops, buffer_timeout, client_version, self.mixnet_client.split_sender(), @@ -474,7 +472,6 @@ impl MixnetListener { self.connected_clients.connect( requested_ips, reply_to, - reply_to_hops, forward_from_tun_tx, close_tx, handle, @@ -518,7 +515,6 @@ impl MixnetListener { let request_id = connect_request.request_id; let reply_to = connect_request.reply_to; - let reply_to_hops = connect_request.reply_to_hops; // TODO: add to connect request let buffer_timeout = nym_ip_packet_requests::codec::BUFFER_TIMEOUT; // TODO: ignoring reply_to_avg_mix_delays for now @@ -559,21 +555,14 @@ impl MixnetListener { let (forward_from_tun_tx, close_tx, handle) = connected_client_handler::ConnectedClientHandler::start( reply_to, - reply_to_hops, buffer_timeout, client_version, self.mixnet_client.split_sender(), ); // Register the new client in the set of connected clients - self.connected_clients.connect( - new_ips, - reply_to, - reply_to_hops, - forward_from_tun_tx, - close_tx, - handle, - ); + self.connected_clients + .connect(new_ips, reply_to, forward_from_tun_tx, close_tx, handle); Ok(Some(Response::new_dynamic_connect_success( request_id, reply_to, @@ -757,17 +746,7 @@ impl MixnetListener { let response_packet = response.to_bytes()?; - // We could avoid this lookup if we check this when we create the response. 
- let mix_hops = if let Some(c) = self - .connected_clients - .lookup_client_from_nym_address(recipient) - { - c.mix_hops - } else { - None - }; - - let input_message = create_input_message(*recipient, response_packet, mix_hops); + let input_message = create_input_message(*recipient, response_packet); self.mixnet_client .send(input_message) .await diff --git a/service-providers/ip-packet-router/src/non_linux_dummy.rs b/service-providers/ip-packet-router/src/non_linux_dummy.rs new file mode 100644 index 0000000000..4369f72689 --- /dev/null +++ b/service-providers/ip-packet-router/src/non_linux_dummy.rs @@ -0,0 +1,43 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use std::io::Error; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; + +pub(crate) struct DummyDevice; + +impl AsyncRead for DummyDevice { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &mut ReadBuf<'_>, + ) -> Poll> { + unimplemented!("tunnel devices are not supported by non-linux targets") + } +} + +impl AsyncWrite for DummyDevice { + fn poll_write( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + _buf: &[u8], + ) -> Poll> { + unimplemented!("tunnel devices are not supported by non-linux targets") + } + + fn poll_flush( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + unimplemented!("tunnel devices are not supported by non-linux targets") + } + + fn poll_shutdown( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + ) -> Poll> { + unimplemented!("tunnel devices are not supported by non-linux targets") + } +} diff --git a/service-providers/ip-packet-router/src/util/create_message.rs b/service-providers/ip-packet-router/src/util/create_message.rs index d04d85bd22..dca63c21df 100644 --- a/service-providers/ip-packet-router/src/util/create_message.rs +++ b/service-providers/ip-packet-router/src/util/create_message.rs @@ -4,15 +4,8 @@ use 
nym_task::connections::TransmissionLane; pub(crate) fn create_input_message( nym_address: Recipient, response_packet: Vec, - mix_hops: Option, ) -> InputMessage { let lane = TransmissionLane::General; let packet_type = None; - InputMessage::new_regular_with_custom_hops( - nym_address, - response_packet, - lane, - packet_type, - mix_hops, - ) + InputMessage::new_regular(nym_address, response_packet, lane, packet_type) } diff --git a/service-providers/network-requester/src/reply.rs b/service-providers/network-requester/src/reply.rs index 6523383ad9..8bfd67e070 100644 --- a/service-providers/network-requester/src/reply.rs +++ b/service-providers/network-requester/src/reply.rs @@ -191,7 +191,6 @@ impl MixnetAddress { recipient: *recipient, data: message, lane: TransmissionLane::ConnectionId(connection_id), - mix_hops: None, }), packet_type, }, diff --git a/tools/internal/mixnet-connectivity-check/src/main.rs b/tools/internal/mixnet-connectivity-check/src/main.rs index be66b4f443..4dd68a61ab 100644 --- a/tools/internal/mixnet-connectivity-check/src/main.rs +++ b/tools/internal/mixnet-connectivity-check/src/main.rs @@ -88,6 +88,8 @@ async fn connectivity_test(args: ConnectivityArgs) -> anyhow::Result<()> { let mixnet_client = if let Some(gateway) = args.gateway { client_builder .request_gateway(gateway.to_string()) + .with_ignore_epoch_roles(true) + .with_extended_topology(true) .build()? } else { client_builder.build()? 
diff --git a/wasm/client/Cargo.toml b/wasm/client/Cargo.toml index 48589a20c2..dbd1e4b0c3 100644 --- a/wasm/client/Cargo.toml +++ b/wasm/client/Cargo.toml @@ -3,11 +3,11 @@ name = "nym-client-wasm" authors = ["Dave Hrycyszyn ", "Jedrzej Stuczynski "] version = "1.3.0-rc.0" edition = "2021" -keywords = ["nym", "sphinx", "wasm", "webassembly", "privacy", "client"] +keywords = ["nym", "sphinx", "wasm", "webassembly", "privacy"] license = "Apache-2.0" repository = "https://github.com/nymtech/nym" description = "A webassembly client which can be used to interact with the the Nym privacy platform. Wasm is used for Sphinx packet generation." -rust-version = "1.56" +rust-version = "1.76" [lib] crate-type = ["cdylib", "rlib"] @@ -22,7 +22,7 @@ serde_json = { workspace = true } serde-wasm-bindgen = { workspace = true } wasm-bindgen = { workspace = true } wasm-bindgen-futures = { workspace = true } -thiserror = { workspace = true } +thiserror = { workspace = true } tsify = { workspace = true, features = ["js"] } nym-bin-common = { path = "../../common/bin-common" } @@ -30,7 +30,7 @@ wasm-client-core = { path = "../../common/wasm/client-core" } wasm-utils = { path = "../../common/wasm/utils" } nym-node-tester-utils = { path = "../../common/node-tester-utils", optional = true } -nym-node-tester-wasm = { path = "../node-tester", optional = true} +nym-node-tester-wasm = { path = "../node-tester", optional = true } [dev-dependencies] wasm-bindgen-test = { workspace = true } diff --git a/wasm/client/src/client.rs b/wasm/client/src/client.rs index 4bb9f2c2f2..80abcdca30 100644 --- a/wasm/client/src/client.rs +++ b/wasm/client/src/client.rs @@ -30,7 +30,7 @@ use wasm_client_core::nym_task::connections::TransmissionLane; use wasm_client_core::nym_task::TaskManager; use wasm_client_core::storage::core_client_traits::FullWasmClientStorage; use wasm_client_core::storage::ClientStorage; -use wasm_client_core::topology::{SerializableNymTopology, SerializableTopologyExt}; +use 
wasm_client_core::topology::{SerializableTopologyExt, WasmFriendlyNymTopology}; use wasm_client_core::{ HardcodedTopologyProvider, IdentityKey, NymTopology, PacketType, QueryReqwestRpcNyxdClient, TopologyProvider, @@ -103,7 +103,7 @@ impl NymClientBuilder { // NOTE: you most likely want to use `[NymNodeTester]` instead. #[cfg(feature = "node-tester")] pub fn new_tester( - topology: SerializableNymTopology, + topology: WasmFriendlyNymTopology, on_message: js_sys::Function, gateway: Option, ) -> Result { @@ -340,7 +340,7 @@ impl NymClient { .mix_test_request(test_id, mixnode_identity, num_test_packets) } - pub fn change_hardcoded_topology(&self, topology: SerializableNymTopology) -> Promise { + pub fn change_hardcoded_topology(&self, topology: WasmFriendlyNymTopology) -> Promise { self.client_state.change_hardcoded_topology(topology) } diff --git a/wasm/client/src/helpers.rs b/wasm/client/src/helpers.rs index e8570d02b9..b6cdba3d33 100644 --- a/wasm/client/src/helpers.rs +++ b/wasm/client/src/helpers.rs @@ -8,7 +8,7 @@ use wasm_bindgen_futures::future_to_promise; use wasm_client_core::client::base_client::{ClientInput, ClientState}; use wasm_client_core::client::inbound_messages::InputMessage; use wasm_client_core::error::WasmCoreError; -use wasm_client_core::topology::SerializableNymTopology; +use wasm_client_core::topology::{Role, WasmFriendlyNymTopology}; use wasm_client_core::NymTopology; use wasm_utils::error::simple_js_error; use wasm_utils::{check_promise_result, console_log}; @@ -36,7 +36,7 @@ pub struct NymClientTestRequest { #[cfg(feature = "node-tester")] #[wasm_bindgen] impl NymClientTestRequest { - pub fn injectable_topology(&self) -> SerializableNymTopology { + pub fn injectable_topology(&self) -> WasmFriendlyNymTopology { self.testable_topology.clone().into() } } @@ -78,7 +78,7 @@ impl InputSender for Arc { pub(crate) trait WasmTopologyExt { /// Changes the current network topology to the provided value. 
- fn change_hardcoded_topology(&self, topology: SerializableNymTopology) -> Promise; + fn change_hardcoded_topology(&self, topology: WasmFriendlyNymTopology) -> Promise; /// Returns the current network topology. fn current_topology(&self) -> Promise; @@ -96,7 +96,7 @@ pub(crate) trait WasmTopologyTestExt { } impl WasmTopologyExt for Arc { - fn change_hardcoded_topology(&self, topology: SerializableNymTopology) -> Promise { + fn change_hardcoded_topology(&self, topology: WasmFriendlyNymTopology) -> Promise { let nym_topology: NymTopology = check_promise_result!(topology.try_into()); let this = Arc::clone(self); @@ -112,11 +112,11 @@ impl WasmTopologyExt for Arc { fn current_topology(&self) -> Promise { let this = Arc::clone(self); future_to_promise(async move { - match this.topology_accessor.current_topology().await { - Some(topology) => Ok(serde_wasm_bindgen::to_value(&SerializableNymTopology::from( - topology, - )) - .expect("SerializableNymTopology failed serialization")), + match this.topology_accessor.current_route_provider().await { + Some(route_provider) => Ok(serde_wasm_bindgen::to_value( + &WasmFriendlyNymTopology::from(route_provider.topology.clone()), + ) + .expect("WasmFriendlyNymTopology failed serialization")), None => Err(WasmCoreError::UnavailableNetworkTopology.into()), } }) @@ -135,21 +135,26 @@ impl WasmTopologyTestExt for Arc { let this = Arc::clone(self); future_to_promise(async move { - let Some(current_topology) = this.topology_accessor.current_topology().await else { + let Some(current_topology) = this.topology_accessor.current_route_provider().await + else { return Err(WasmCoreError::UnavailableNetworkTopology.into()); }; - let Some(mix) = current_topology.find_mix_by_identity(&mixnode_identity) else { + let Ok(node_identity) = mixnode_identity.parse() else { return Err(WasmCoreError::NonExistentMixnode { mixnode_identity }.into()); }; + let Some(mix) = current_topology.node_by_identity(node_identity) else { + return 
Err(WasmCoreError::NonExistentMixnode { mixnode_identity }.into()); + }; + + let mut updated = current_topology.topology.clone(); + updated.set_testable_node(Role::Layer2, mix.clone()); + let ext = WasmTestMessageExt::new(test_id); let test_msgs = NodeTestMessage::mix_plaintexts(mix, num_test_packets, ext) .map_err(crate::error::WasmClientError::from)?; - let mut updated = current_topology.clone(); - updated.set_mixes_in_layer(mix.layer.into(), vec![mix.to_owned()]); - Ok(JsValue::from(NymClientTestRequest { test_msgs, testable_topology: updated, diff --git a/wasm/node-tester/src/tester.rs b/wasm/node-tester/src/tester.rs index 65991a2955..de0badf5ca 100644 --- a/wasm/node-tester/src/tester.rs +++ b/wasm/node-tester/src/tester.rs @@ -13,6 +13,7 @@ use crate::types::{NodeTestResult, WasmTestMessageExt}; use futures::channel::mpsc; use js_sys::Promise; use nym_node_tester_utils::receiver::SimpleMessageReceiver; +use nym_node_tester_utils::tester::LegacyMixLayer; use nym_node_tester_utils::{NodeTester, PacketSize, PreparedFragment}; use nym_task::TaskManager; use rand::rngs::OsRng; @@ -32,7 +33,7 @@ use wasm_client_core::helpers::{ current_network_topology_async, setup_from_topology, EphemeralCredentialStorage, }; use wasm_client_core::storage::ClientStorage; -use wasm_client_core::topology::SerializableNymTopology; +use wasm_client_core::topology::WasmFriendlyNymTopology; use wasm_client_core::{ nym_task, BandwidthController, ClientKeys, ClientStatsSender, GatewayClient, GatewayClientConfig, GatewayConfig, IdentityKey, InitialisationResult, NodeIdentity, @@ -103,7 +104,7 @@ pub struct NymNodeTesterOpts { nym_api: Option, #[tsify(optional)] - topology: Option, + topology: Option, #[tsify(optional)] gateway: Option, @@ -332,6 +333,7 @@ impl NymNodeTester { tester_permit .existing_identity_mixnode_test_packets( mixnode_identity, + LegacyMixLayer::Two, test_ext, num_test_packets, None, From 67976b1b3023bc7f78b37dfc004ca1563c80f951 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Thu, 19 Dec 2024 10:49:56 +0000 Subject: [PATCH 24/64] feature: wireguard metrics (#5278) * experimental log * introduce wireguard metrics updates * add wireguard traffic rates to console logger * missing import * changed order of displayed values * expose bytes information via rest endpoint * clippy --- Cargo.lock | 1 + common/wireguard/Cargo.toml | 1 + common/wireguard/src/lib.rs | 2 + common/wireguard/src/peer_controller.rs | 52 +++++++++++++++++++ gateway/src/node/mod.rs | 8 +++ nym-node/nym-node-metrics/src/lib.rs | 3 ++ nym-node/nym-node-metrics/src/wireguard.rs | 44 ++++++++++++++++ .../src/api/v1/metrics/models.rs | 13 +++++ nym-node/nym-node-requests/src/lib.rs | 6 +++ .../node/http/router/api/v1/metrics/mod.rs | 3 ++ .../router/api/v1/metrics/packets_stats.rs | 2 +- .../http/router/api/v1/metrics/wireguard.rs | 40 ++++++++++++++ nym-node/src/node/metrics/console_logger.rs | 24 +++++++++ nym-node/src/node/mod.rs | 1 + 14 files changed, 199 insertions(+), 1 deletion(-) create mode 100644 nym-node/nym-node-metrics/src/wireguard.rs create mode 100644 nym-node/src/node/http/router/api/v1/metrics/wireguard.rs diff --git a/Cargo.lock b/Cargo.lock index 2de09714d9..d0caa221f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6804,6 +6804,7 @@ dependencies = [ "nym-crypto", "nym-gateway-storage", "nym-network-defaults", + "nym-node-metrics", "nym-task", "nym-wireguard-types", "thiserror", diff --git a/common/wireguard/Cargo.toml b/common/wireguard/Cargo.toml index df333cab82..c999861cd6 100644 --- a/common/wireguard/Cargo.toml +++ b/common/wireguard/Cargo.toml @@ -36,3 +36,4 @@ nym-gateway-storage = { path = "../gateway-storage" } nym-network-defaults = { path = "../network-defaults" } nym-task = { path = "../task" } nym-wireguard-types = { path = "../wireguard-types" } +nym-node-metrics = { path = "../../nym-node/nym-node-metrics" } diff --git a/common/wireguard/src/lib.rs b/common/wireguard/src/lib.rs index 
7b5f190193..f397a3bbea 100644 --- a/common/wireguard/src/lib.rs +++ b/common/wireguard/src/lib.rs @@ -85,6 +85,7 @@ pub struct WireguardData { #[cfg(target_os = "linux")] pub async fn start_wireguard( storage: nym_gateway_storage::GatewayStorage, + metrics: nym_node_metrics::NymNodeMetrics, all_peers: Vec, task_client: nym_task::TaskClient, wireguard_data: WireguardData, @@ -175,6 +176,7 @@ pub async fn start_wireguard( let wg_api = std::sync::Arc::new(WgApiWrapper::new(wg_api)); let mut controller = PeerController::new( storage, + metrics, wg_api.clone(), host, peer_bandwidth_managers, diff --git a/common/wireguard/src/peer_controller.rs b/common/wireguard/src/peer_controller.rs index 5f2cf6399f..b002db761b 100644 --- a/common/wireguard/src/peer_controller.rs +++ b/common/wireguard/src/peer_controller.rs @@ -16,7 +16,9 @@ use nym_credential_verification::{ ClientBandwidth, }; use nym_gateway_storage::GatewayStorage; +use nym_node_metrics::NymNodeMetrics; use nym_wireguard_types::DEFAULT_PEER_TIMEOUT_CHECK; +use std::time::{Duration, SystemTime}; use std::{collections::HashMap, sync::Arc}; use tokio::sync::{mpsc, RwLock}; use tokio_stream::{wrappers::IntervalStream, StreamExt}; @@ -65,6 +67,11 @@ pub struct QueryBandwidthControlResponse { pub struct PeerController { storage: GatewayStorage, + + // we have "all" metrics of a node, but they're behind a single Arc pointer, + // so the overhead is minimal + metrics: NymNodeMetrics, + // used to receive commands from individual handles too request_tx: mpsc::Sender, request_rx: mpsc::Receiver, @@ -76,8 +83,10 @@ pub struct PeerController { } impl PeerController { + #[allow(clippy::too_many_arguments)] pub fn new( storage: GatewayStorage, + metrics: NymNodeMetrics, wg_api: Arc, initial_host_information: Host, bw_storage_managers: HashMap, Peer)>, @@ -123,6 +132,7 @@ impl PeerController { request_rx, timeout_check_interval, task_client, + metrics, } } @@ -257,6 +267,46 @@ impl PeerController { })) } + fn 
update_metrics(&self, new_host: &Host) { + let now = SystemTime::now(); + const ACTIVITY_THRESHOLD: Duration = Duration::from_secs(60); + + let total_peers = new_host.peers.len(); + let mut active_peers = 0; + let mut total_rx = 0; + let mut total_tx = 0; + + for peer in new_host.peers.values() { + total_rx += peer.rx_bytes; + total_tx += peer.tx_bytes; + + // if a peer hasn't performed a handshake in last minute, + // I think it's reasonable to assume it's no longer active + let Some(last_handshake) = peer.last_handshake else { + continue; + }; + let Ok(elapsed) = now.duration_since(last_handshake) else { + continue; + }; + if elapsed < ACTIVITY_THRESHOLD { + active_peers += 1; + } + } + + self.metrics.wireguard.update( + // if the conversion fails it means we're running not running on a 64bit system + // and that's a reason enough for this failure. + total_rx.try_into().expect( + "failed to convert bytes from u64 to usize - are you running on non 64bit system?", + ), + total_tx.try_into().expect( + "failed to convert bytes from u64 to usize - are you running on non 64bit system?", + ), + total_peers, + active_peers, + ); + } + pub async fn run(&mut self) { info!("started wireguard peer controller"); loop { @@ -266,6 +316,8 @@ impl PeerController { log::error!("Can't read wireguard kernel data"); continue; }; + self.update_metrics(&host); + *self.host_information.write().await = host; } _ = self.task_client.recv() => { diff --git a/gateway/src/node/mod.rs b/gateway/src/node/mod.rs index 84b1990351..b3b41961a1 100644 --- a/gateway/src/node/mod.rs +++ b/gateway/src/node/mod.rs @@ -37,6 +37,7 @@ mod internal_service_providers; pub use client_handling::active_clients::ActiveClientsStore; pub use nym_gateway_stats_storage::PersistentStatsStorage; pub use nym_gateway_storage::{error::GatewayStorageError, GatewayStorage}; +use nym_node_metrics::NymNodeMetrics; pub use nym_sdk::{NymApiTopologyProvider, NymApiTopologyProviderConfig, UserAgent}; #[derive(Debug, Clone)] @@ 
-81,6 +82,8 @@ pub struct GatewayTasksBuilder { metrics_sender: MetricEventsSender, + metrics: NymNodeMetrics, + mnemonic: Arc>, shutdown: TaskClient, @@ -102,12 +105,14 @@ impl Drop for GatewayTasksBuilder { } impl GatewayTasksBuilder { + #[allow(clippy::too_many_arguments)] pub fn new( config: Config, identity: Arc, storage: GatewayStorage, mix_packet_sender: MixForwardingSender, metrics_sender: MetricEventsSender, + metrics: NymNodeMetrics, mnemonic: Arc>, shutdown: TaskClient, ) -> GatewayTasksBuilder { @@ -121,6 +126,7 @@ impl GatewayTasksBuilder { storage, mix_packet_sender, metrics_sender, + metrics, mnemonic, shutdown, ecash_manager: None, @@ -443,6 +449,7 @@ impl GatewayTasksBuilder { pub async fn try_start_wireguard( &mut self, ) -> Result, Box> { + let _ = self.metrics.clone(); unimplemented!("wireguard is not supported on this platform") } @@ -460,6 +467,7 @@ impl GatewayTasksBuilder { let wg_handle = nym_wireguard::start_wireguard( self.storage.clone(), + self.metrics.clone(), all_peers, self.shutdown.fork("wireguard"), wireguard_data, diff --git a/nym-node/nym-node-metrics/src/lib.rs b/nym-node/nym-node-metrics/src/lib.rs index 58ab0f77f7..57a8c74bb3 100644 --- a/nym-node/nym-node-metrics/src/lib.rs +++ b/nym-node/nym-node-metrics/src/lib.rs @@ -4,6 +4,7 @@ use crate::entry::EntryStats; use crate::mixnet::MixingStats; use crate::network::NetworkStats; +use crate::wireguard::WireguardStats; use std::ops::Deref; use std::sync::Arc; @@ -11,6 +12,7 @@ pub mod entry; pub mod events; pub mod mixnet; pub mod network; +pub mod wireguard; #[derive(Clone, Default)] pub struct NymNodeMetrics { @@ -34,6 +36,7 @@ impl Deref for NymNodeMetrics { pub struct NymNodeMetricsInner { pub mixnet: MixingStats, pub entry: EntryStats, + pub wireguard: WireguardStats, pub network: NetworkStats, } diff --git a/nym-node/nym-node-metrics/src/wireguard.rs b/nym-node/nym-node-metrics/src/wireguard.rs new file mode 100644 index 0000000000..8aee90d34b --- /dev/null +++ 
b/nym-node/nym-node-metrics/src/wireguard.rs @@ -0,0 +1,44 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[derive(Default)] +pub struct WireguardStats { + bytes_rx: AtomicUsize, + bytes_tx: AtomicUsize, + + total_peers: AtomicUsize, + active_peers: AtomicUsize, +} + +impl WireguardStats { + pub fn bytes_rx(&self) -> usize { + self.bytes_rx.load(Ordering::Relaxed) + } + + pub fn bytes_tx(&self) -> usize { + self.bytes_tx.load(Ordering::Relaxed) + } + + pub fn total_peers(&self) -> usize { + self.total_peers.load(Ordering::Relaxed) + } + + pub fn active_peers(&self) -> usize { + self.active_peers.load(Ordering::Relaxed) + } + + pub fn update( + &self, + bytes_rx: usize, + bytes_tx: usize, + total_peers: usize, + active_peers: usize, + ) { + self.bytes_rx.store(bytes_rx, Ordering::Relaxed); + self.bytes_tx.store(bytes_tx, Ordering::Relaxed); + self.total_peers.store(total_peers, Ordering::Relaxed); + self.active_peers.store(active_peers, Ordering::Relaxed); + } +} diff --git a/nym-node/nym-node-requests/src/api/v1/metrics/models.rs b/nym-node/nym-node-requests/src/api/v1/metrics/models.rs index 8e430534c5..0ee4170dfe 100644 --- a/nym-node/nym-node-requests/src/api/v1/metrics/models.rs +++ b/nym-node/nym-node-requests/src/api/v1/metrics/models.rs @@ -4,6 +4,19 @@ pub use mixing::*; pub use session::*; pub use verloc::*; +pub use wireguard::*; + +pub mod wireguard { + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize, Debug, Clone, Copy)] + #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] + pub struct WireguardStats { + pub bytes_tx: usize, + + pub bytes_rx: usize, + } +} pub mod packets { use serde::{Deserialize, Serialize}; diff --git a/nym-node/nym-node-requests/src/lib.rs b/nym-node/nym-node-requests/src/lib.rs index 661936797a..1d7cd01846 100644 --- a/nym-node/nym-node-requests/src/lib.rs +++ b/nym-node/nym-node-requests/src/lib.rs @@ -66,12 
+66,18 @@ pub mod routes { pub const LEGACY_MIXING: &str = "/mixing"; pub const PACKETS_STATS: &str = "/packets-stats"; + pub const WIREGUARD_STATS: &str = "/wireguard-stats"; pub const SESSIONS: &str = "/sessions"; pub const VERLOC: &str = "/verloc"; pub const PROMETHEUS: &str = "/prometheus"; absolute_route!(legacy_mixing_absolute, metrics_absolute(), LEGACY_MIXING); absolute_route!(packets_stats_absolute, metrics_absolute(), PACKETS_STATS); + absolute_route!( + wireguard_stats_absolute, + metrics_absolute(), + WIREGUARD_STATS + ); absolute_route!(sessions_absolute, metrics_absolute(), SESSIONS); absolute_route!(verloc_absolute, metrics_absolute(), VERLOC); absolute_route!(prometheus_absolute, metrics_absolute(), PROMETHEUS); diff --git a/nym-node/src/node/http/router/api/v1/metrics/mod.rs b/nym-node/src/node/http/router/api/v1/metrics/mod.rs index 201dbc7a26..71a9760273 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/mod.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/mod.rs @@ -5,6 +5,7 @@ use crate::node::http::api::v1::metrics::packets_stats::packets_stats; use crate::node::http::api::v1::metrics::prometheus::prometheus_metrics; use crate::node::http::api::v1::metrics::sessions::sessions_stats; use crate::node::http::api::v1::metrics::verloc::verloc_stats; +use crate::node::http::api::v1::metrics::wireguard::wireguard_stats; use crate::node::http::state::metrics::MetricsAppState; use axum::extract::FromRef; use axum::routing::get; @@ -16,6 +17,7 @@ pub mod packets_stats; pub mod prometheus; pub mod sessions; pub mod verloc; +pub mod wireguard; #[derive(Debug, Clone, Default)] pub struct Config { @@ -34,6 +36,7 @@ where get(legacy_mixing::legacy_mixing_stats), ) .route(metrics::PACKETS_STATS, get(packets_stats)) + .route(metrics::WIREGUARD_STATS, get(wireguard_stats)) .route(metrics::SESSIONS, get(sessions_stats)) .route(metrics::VERLOC, get(verloc_stats)) .route(metrics::PROMETHEUS, get(prometheus_metrics)) diff --git 
a/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs b/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs index d1a8e27f55..490bcffb57 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs @@ -1,5 +1,5 @@ // Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 +// SPDX-License-Identifier: GPL-3.0-only use crate::node::http::state::metrics::MetricsAppState; use axum::extract::{Query, State}; diff --git a/nym-node/src/node/http/router/api/v1/metrics/wireguard.rs b/nym-node/src/node/http/router/api/v1/metrics/wireguard.rs new file mode 100644 index 0000000000..3519ff30dd --- /dev/null +++ b/nym-node/src/node/http/router/api/v1/metrics/wireguard.rs @@ -0,0 +1,40 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node::http::state::metrics::MetricsAppState; +use axum::extract::{Query, State}; +use nym_http_api_common::{FormattedResponse, OutputParams}; +use nym_node_metrics::NymNodeMetrics; +use nym_node_requests::api::v1::metrics::models::WireguardStats; + +/// If applicable, returns wireguard statistics information of this node. +/// This information is **PURELY** self-reported and in no way validated. 
+#[utoipa::path( + get, + path = "/wireguard-stats", + context_path = "/api/v1/metrics", + tag = "Metrics", + responses( + (status = 200, content( + ("application/json" = WireguardStats), + ("application/yaml" = WireguardStats) + )) + ), + params(OutputParams), +)] +pub(crate) async fn wireguard_stats( + Query(output): Query, + State(metrics_state): State, +) -> WireguardStatsResponse { + let output = output.output.unwrap_or_default(); + output.to_response(build_response(&metrics_state.metrics)) +} + +fn build_response(metrics: &NymNodeMetrics) -> WireguardStats { + WireguardStats { + bytes_tx: metrics.wireguard.bytes_tx(), + bytes_rx: metrics.wireguard.bytes_rx(), + } +} + +pub type WireguardStatsResponse = FormattedResponse; diff --git a/nym-node/src/node/metrics/console_logger.rs b/nym-node/src/node/metrics/console_logger.rs index ea3d11e94c..37ad46b7f9 100644 --- a/nym-node/src/node/metrics/console_logger.rs +++ b/nym-node/src/node/metrics/console_logger.rs @@ -25,6 +25,9 @@ struct AtLastUpdate { // EGRESS ack_packets_sent: usize, + + wg_tx: usize, + wg_rx: usize, } impl AtLastUpdate { @@ -35,6 +38,8 @@ impl AtLastUpdate { final_hop_packets_received: 0, forward_hop_packets_sent: 0, ack_packets_sent: 0, + wg_tx: 0, + wg_rx: 0, } } } @@ -70,6 +75,9 @@ impl ConsoleLogger { let forward_sent = self.metrics.mixnet.egress.forward_hop_packets_sent(); let acks = self.metrics.mixnet.egress.ack_packets_sent(); + let wg_tx = self.metrics.wireguard.bytes_tx(); + let wg_rx = self.metrics.wireguard.bytes_rx(); + let forward_received_rate = (forward_received - self.at_last_update.forward_hop_packets_received) as f64 / delta_secs; @@ -79,6 +87,9 @@ impl ConsoleLogger { (forward_sent - self.at_last_update.forward_hop_packets_sent) as f64 / delta_secs; let acks_rate = (acks - self.at_last_update.ack_packets_sent) as f64 / delta_secs; + let wg_tx_rate = (wg_tx - self.at_last_update.wg_tx) as f64 / delta_secs; + let wg_rx_rate = (wg_rx - self.at_last_update.wg_rx) as f64 / 
delta_secs; + info!("↑↓ Packets sent [total] / sent [acks] / received [mix] / received [gw]: {} ({}) / {} ({}) / {} ({}) / {} ({})", forward_sent.human_count_bare(), forward_sent_rate.human_throughput_bare(), @@ -90,11 +101,24 @@ impl ConsoleLogger { final_rate.human_throughput_bare(), ); + // only log wireguard if we have received ANY bytes + if self.at_last_update.wg_rx != 0 { + info!( + "↑↓ Wireguard tx/rx: {} ({}) / {} ({})", + wg_tx.human_count_bytes(), + wg_tx_rate.human_throughput_bytes(), + wg_rx.human_count_bytes(), + wg_rx_rate.human_throughput_bytes() + ) + } + self.at_last_update.time = now; self.at_last_update.forward_hop_packets_received = forward_received; self.at_last_update.final_hop_packets_received = final_received; self.at_last_update.forward_hop_packets_sent = forward_sent; self.at_last_update.ack_packets_sent = acks; + self.at_last_update.wg_tx = wg_tx; + self.at_last_update.wg_rx = wg_rx; // TODO: add websocket-client traffic } diff --git a/nym-node/src/node/mod.rs b/nym-node/src/node/mod.rs index 86087582e1..0f2ac89fe0 100644 --- a/nym-node/src/node/mod.rs +++ b/nym-node/src/node/mod.rs @@ -589,6 +589,7 @@ impl NymNode { self.entry_gateway.client_storage.clone(), mix_packet_sender, metrics_sender, + self.metrics.clone(), self.entry_gateway.mnemonic.clone(), task_client, ); From c99a240ed4b994fbcee7aeb7cb994ae02662e93c Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Tue, 5 Nov 2024 15:47:49 +0530 Subject: [PATCH 25/64] nyxd-scraper: add config to make pre-commit storage optional --- common/nyxd-scraper/src/block_processor/mod.rs | 5 ++++- common/nyxd-scraper/src/scraper/mod.rs | 4 ++++ common/nyxd-scraper/src/storage/mod.rs | 12 +++++++----- nym-validator-rewarder/src/config/mod.rs | 5 +++++ 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/common/nyxd-scraper/src/block_processor/mod.rs b/common/nyxd-scraper/src/block_processor/mod.rs index acb0cbc375..9cb3473c5c 100644 --- a/common/nyxd-scraper/src/block_processor/mod.rs 
+++ b/common/nyxd-scraper/src/block_processor/mod.rs @@ -44,6 +44,7 @@ impl PendingSync { pub struct BlockProcessor { pruning_options: PruningOptions, + store_precommits: bool, cancel: CancellationToken, synced: Arc, last_processed_height: u32, @@ -68,6 +69,7 @@ pub struct BlockProcessor { impl BlockProcessor { pub async fn new( pruning_options: PruningOptions, + store_precommits: bool, cancel: CancellationToken, synced: Arc, incoming: UnboundedReceiver, @@ -83,6 +85,7 @@ impl BlockProcessor { Ok(BlockProcessor { pruning_options, + store_precommits, cancel, synced, last_processed_height, @@ -128,7 +131,7 @@ impl BlockProcessor { // we won't end up with a corrupted storage. let mut tx = self.storage.begin_processing_tx().await?; - persist_block(&full_info, &mut tx).await?; + persist_block(&full_info, &mut tx, self.store_precommits).await?; // let the modules do whatever they want // the ones wanting the full block: diff --git a/common/nyxd-scraper/src/scraper/mod.rs b/common/nyxd-scraper/src/scraper/mod.rs index 1b3294c914..919a6cae6d 100644 --- a/common/nyxd-scraper/src/scraper/mod.rs +++ b/common/nyxd-scraper/src/scraper/mod.rs @@ -34,6 +34,8 @@ pub struct Config { pub database_path: PathBuf, pub pruning_options: PruningOptions, + + pub store_precommits: bool, } pub struct NyxdScraperBuilder { @@ -62,6 +64,7 @@ impl NyxdScraperBuilder { ); let mut block_processor = BlockProcessor::new( scraper.config.pruning_options, + scraper.config.store_precommits, scraper.cancel_token.clone(), scraper.startup_sync.clone(), processing_rx, @@ -277,6 +280,7 @@ impl NyxdScraper { ) -> Result { BlockProcessor::new( self.config.pruning_options, + self.config.store_precommits, self.cancel_token.clone(), self.startup_sync.clone(), processing_rx, diff --git a/common/nyxd-scraper/src/storage/mod.rs b/common/nyxd-scraper/src/storage/mod.rs index a3b0550677..f2d0921ee5 100644 --- a/common/nyxd-scraper/src/storage/mod.rs +++ b/common/nyxd-scraper/src/storage/mod.rs @@ -219,6 +219,7 @@ impl 
ScraperStorage { pub async fn persist_block( block: &FullBlockInformation, tx: &mut StorageTransaction, + store_precommits: bool, ) -> Result<(), ScraperError> { let total_gas = crate::helpers::tx_gas_sum(&block.transactions); @@ -231,11 +232,12 @@ pub async fn persist_block( // persist block data persist_block_data(&block.block, total_gas, tx).await?; - // persist commits - if let Some(commit) = &block.block.last_commit { - persist_commits(commit, &block.validators, tx).await?; - } else { - warn!("no commits for block {}", block.block.header.height) + if store_precommits { + if let Some(commit) = &block.block.last_commit { + persist_commits(commit, &block.validators, tx).await?; + } else { + warn!("no commits for block {}", block.block.header.height) + } } // persist txs diff --git a/nym-validator-rewarder/src/config/mod.rs b/nym-validator-rewarder/src/config/mod.rs index 737682cf54..dcd3b61be6 100644 --- a/nym-validator-rewarder/src/config/mod.rs +++ b/nym-validator-rewarder/src/config/mod.rs @@ -112,6 +112,7 @@ impl Config { nyxd_scraper: NyxdScraper { websocket_url, pruning: Default::default(), + store_precommits: true, }, base: Base { upstream_nyxd: nyxd_url, @@ -127,6 +128,7 @@ impl Config { rpc_url: self.base.upstream_nyxd.clone(), database_path: self.storage_paths.nyxd_scraper.clone(), pruning_options: self.nyxd_scraper.pruning, + store_precommits: self.nyxd_scraper.store_precommits, } } @@ -314,6 +316,9 @@ pub struct NyxdScraper { // if the value is missing, use `nothing` pruning as this was the past behaviour #[serde(default = "PruningOptions::nothing")] pub pruning: PruningOptions, + + /// Specifies whether to store pre-commits within the database. 
+ pub store_precommits: bool, // TODO: debug with everything that's currently hardcoded in the scraper } From 80f965a104be2867f2a77f3e16b3fcb43f38e1be Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Tue, 5 Nov 2024 16:01:18 +0530 Subject: [PATCH 26/64] clippy --- common/nyxd-scraper/src/block_processor/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/common/nyxd-scraper/src/block_processor/mod.rs b/common/nyxd-scraper/src/block_processor/mod.rs index 9cb3473c5c..3654072a48 100644 --- a/common/nyxd-scraper/src/block_processor/mod.rs +++ b/common/nyxd-scraper/src/block_processor/mod.rs @@ -66,6 +66,7 @@ pub struct BlockProcessor { msg_modules: Vec>, } +#[allow(clippy::too_many_arguments)] impl BlockProcessor { pub async fn new( pruning_options: PruningOptions, From a884aee1e9b5912731c291c9bb76117d6e58f3c9 Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Wed, 6 Nov 2024 14:16:13 +0530 Subject: [PATCH 27/64] fix review comments --- .../nyxd-scraper/src/block_processor/mod.rs | 45 ++++++++++++++----- common/nyxd-scraper/src/scraper/mod.rs | 15 +++++-- nym-validator-rewarder/src/config/mod.rs | 6 ++- 3 files changed, 49 insertions(+), 17 deletions(-) diff --git a/common/nyxd-scraper/src/block_processor/mod.rs b/common/nyxd-scraper/src/block_processor/mod.rs index 3654072a48..c6dead9765 100644 --- a/common/nyxd-scraper/src/block_processor/mod.rs +++ b/common/nyxd-scraper/src/block_processor/mod.rs @@ -42,9 +42,32 @@ impl PendingSync { } } +#[derive(Debug, Clone)] +pub struct BlockProcessorConfig { + pub pruning_options: PruningOptions, + pub store_precommits: bool, +} + +impl Default for BlockProcessorConfig { + fn default() -> Self { + Self { + pruning_options: PruningOptions::nothing(), + store_precommits: true, + } + } +} + +impl BlockProcessorConfig { + pub fn new(pruning_options: PruningOptions, store_precommits: bool) -> Self { + Self { + pruning_options, + store_precommits, + } + } +} + pub struct BlockProcessor { - pruning_options: PruningOptions, - 
store_precommits: bool, + config: BlockProcessorConfig, cancel: CancellationToken, synced: Arc, last_processed_height: u32, @@ -69,8 +92,7 @@ pub struct BlockProcessor { #[allow(clippy::too_many_arguments)] impl BlockProcessor { pub async fn new( - pruning_options: PruningOptions, - store_precommits: bool, + config: BlockProcessorConfig, cancel: CancellationToken, synced: Arc, incoming: UnboundedReceiver, @@ -85,8 +107,7 @@ impl BlockProcessor { let last_pruned_height = last_pruned.try_into().unwrap_or_default(); Ok(BlockProcessor { - pruning_options, - store_precommits, + config, cancel, synced, last_processed_height, @@ -105,7 +126,7 @@ impl BlockProcessor { } pub fn with_pruning(mut self, pruning_options: PruningOptions) -> Self { - self.pruning_options = pruning_options; + self.config.pruning_options = pruning_options; self } @@ -132,7 +153,7 @@ impl BlockProcessor { // we won't end up with a corrupted storage. let mut tx = self.storage.begin_processing_tx().await?; - persist_block(&full_info, &mut tx, self.store_precommits).await?; + persist_block(&full_info, &mut tx, self.config.store_precommits).await?; // let the modules do whatever they want // the ones wanting the full block: @@ -245,7 +266,7 @@ impl BlockProcessor { #[instrument(skip(self))] async fn prune_storage(&mut self) -> Result<(), ScraperError> { - let keep_recent = self.pruning_options.strategy_keep_recent(); + let keep_recent = self.config.pruning_options.strategy_keep_recent(); let last_to_keep = self.last_processed_height - keep_recent; info!( @@ -286,12 +307,12 @@ impl BlockProcessor { async fn maybe_prune_storage(&mut self) -> Result<(), ScraperError> { debug!("checking for storage pruning"); - if self.pruning_options.strategy.is_nothing() { + if self.config.pruning_options.strategy.is_nothing() { trace!("the current pruning strategy is 'nothing'"); return Ok(()); } - let interval = self.pruning_options.strategy_interval(); + let interval = self.config.pruning_options.strategy_interval(); 
if self.last_pruned_height + interval <= self.last_processed_height { self.prune_storage().await?; } @@ -375,7 +396,7 @@ impl BlockProcessor { if latest_block > self.last_processed_height && self.last_processed_height != 0 { // in case we were offline for a while, // make sure we don't request blocks we'd have to prune anyway - let keep_recent = self.pruning_options.strategy_keep_recent(); + let keep_recent = self.config.pruning_options.strategy_keep_recent(); let last_to_keep = latest_block - keep_recent; self.last_processed_height = max(self.last_processed_height, last_to_keep); diff --git a/common/nyxd-scraper/src/scraper/mod.rs b/common/nyxd-scraper/src/scraper/mod.rs index 919a6cae6d..2b4e228d7c 100644 --- a/common/nyxd-scraper/src/scraper/mod.rs +++ b/common/nyxd-scraper/src/scraper/mod.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::block_processor::types::BlockToProcess; -use crate::block_processor::BlockProcessor; +use crate::block_processor::{BlockProcessor, BlockProcessorConfig}; use crate::block_requester::{BlockRequest, BlockRequester}; use crate::error::ScraperError; use crate::modules::{BlockModule, MsgModule, TxModule}; @@ -62,9 +62,14 @@ impl NyxdScraperBuilder { req_rx, processing_tx.clone(), ); - let mut block_processor = BlockProcessor::new( + + let block_processor_config = BlockProcessorConfig::new( scraper.config.pruning_options, scraper.config.store_precommits, + ); + + let mut block_processor = BlockProcessor::new( + block_processor_config, scraper.cancel_token.clone(), scraper.startup_sync.clone(), processing_rx, @@ -278,9 +283,11 @@ impl NyxdScraper { req_tx: Sender, processing_rx: UnboundedReceiver, ) -> Result { + let block_processor_config = + BlockProcessorConfig::new(self.config.pruning_options, self.config.store_precommits); + BlockProcessor::new( - self.config.pruning_options, - self.config.store_precommits, + block_processor_config, self.cancel_token.clone(), self.startup_sync.clone(), processing_rx, diff --git 
a/nym-validator-rewarder/src/config/mod.rs b/nym-validator-rewarder/src/config/mod.rs index dcd3b61be6..f0369adde7 100644 --- a/nym-validator-rewarder/src/config/mod.rs +++ b/nym-validator-rewarder/src/config/mod.rs @@ -318,8 +318,12 @@ pub struct NyxdScraper { pub pruning: PruningOptions, /// Specifies whether to store pre-commits within the database. + #[serde(default = "default_store_precommits")] pub store_precommits: bool, - // TODO: debug with everything that's currently hardcoded in the scraper +} + +fn default_store_precommits() -> bool { + true } impl NyxdScraper { From 868d7439ec371bb5c64422386613391d5a58ea25 Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Mon, 11 Nov 2024 20:34:52 +0700 Subject: [PATCH 28/64] observatory 0.1 --- common/nyxd-scraper/src/storage/manager.rs | 52 ++++++++ common/nyxd-scraper/src/storage/mod.rs | 12 ++ ...c41d20896db543eed74a6f320c041bcbb723d.json | 16 --- ...2f1482df00de2dc1d2b4debbb2e12553d997b.json | 34 ------ ...50ad69686cf6a109e37d6c3f0623c3e9f91d0.json | 32 ----- nym-data-observatory/Cargo.toml | 8 +- nym-data-observatory/Dockerfile | 27 ----- nym-data-observatory/build.rs | 65 +++++----- .../migrations/001_price_data.sql | 7 ++ .../migrations/002_payment_transactions.sql | 10 ++ nym-data-observatory/pg_up.sh | 13 -- .../src/background_task/mod.rs | 61 ---------- nym-data-observatory/src/chain_scraper/mod.rs | 22 ++++ nym-data-observatory/src/db/mod.rs | 16 ++- nym-data-observatory/src/db/models.rs | 47 +++++--- nym-data-observatory/src/db/queries/joke.rs | 39 ------ nym-data-observatory/src/db/queries/mod.rs | 7 +- .../src/db/queries/payments.rs | 41 +++++++ nym-data-observatory/src/db/queries/price.rs | 46 +++++++ nym-data-observatory/src/http/api/jokes.rs | 78 ------------ nym-data-observatory/src/http/api/mod.rs | 7 +- nym-data-observatory/src/http/api/price.rs | 27 +++++ nym-data-observatory/src/http/error.rs | 7 -- nym-data-observatory/src/main.rs | 78 +++++++----- .../src/payment_listener/mod.rs | 114 
++++++++++++++++++ nym-data-observatory/src/price_scraper/mod.rs | 55 +++++++++ 26 files changed, 518 insertions(+), 403 deletions(-) delete mode 100644 nym-data-observatory/.sqlx/query-249faa11b88b749f50342bb5c9cc41d20896db543eed74a6f320c041bcbb723d.json delete mode 100644 nym-data-observatory/.sqlx/query-aff7fbd06728004d2f2226d20c32f1482df00de2dc1d2b4debbb2e12553d997b.json delete mode 100644 nym-data-observatory/.sqlx/query-e53f479f8cead3dc8aa1875e5d450ad69686cf6a109e37d6c3f0623c3e9f91d0.json delete mode 100644 nym-data-observatory/Dockerfile create mode 100644 nym-data-observatory/migrations/001_price_data.sql create mode 100644 nym-data-observatory/migrations/002_payment_transactions.sql delete mode 100755 nym-data-observatory/pg_up.sh delete mode 100644 nym-data-observatory/src/background_task/mod.rs create mode 100644 nym-data-observatory/src/chain_scraper/mod.rs delete mode 100644 nym-data-observatory/src/db/queries/joke.rs create mode 100644 nym-data-observatory/src/db/queries/payments.rs create mode 100644 nym-data-observatory/src/db/queries/price.rs delete mode 100644 nym-data-observatory/src/http/api/jokes.rs create mode 100644 nym-data-observatory/src/http/api/price.rs create mode 100644 nym-data-observatory/src/payment_listener/mod.rs create mode 100644 nym-data-observatory/src/price_scraper/mod.rs diff --git a/common/nyxd-scraper/src/storage/manager.rs b/common/nyxd-scraper/src/storage/manager.rs index fb40a065b8..5475f5d355 100644 --- a/common/nyxd-scraper/src/storage/manager.rs +++ b/common/nyxd-scraper/src/storage/manager.rs @@ -237,6 +237,58 @@ impl StorageManager { Ok(-1) } } + + #[allow(dead_code)] + pub async fn get_transactions_after_height( + &self, + min_height: i64, + message_type: Option<&str>, + ) -> Result, sqlx::Error> { + match message_type { + Some(msg_type) => { + sqlx::query_as!( + TransactionWithBlock, + r#" + SELECT t.hash, t.height, t.memo, t.raw_log + FROM message m + JOIN "transaction" t ON m.transaction_hash = t.hash + JOIN 
block b ON t.height = b.height + WHERE t.height > ? + AND m.type = ? + ORDER BY t.height ASC + "#, + min_height, + msg_type + ) + .fetch_all(&self.connection_pool) + .await + } + None => { + sqlx::query_as!( + TransactionWithBlock, + r#" + SELECT t.hash, t.height, t.memo, t.raw_log + FROM message m + JOIN "transaction" t ON m.transaction_hash = t.hash + JOIN block b ON t.height = b.height + WHERE t.height > ? + ORDER BY t.height ASC + "#, + min_height + ) + .fetch_all(&self.connection_pool) + .await + } + } + } +} + +#[derive(Debug, sqlx::FromRow)] +pub struct TransactionWithBlock { + pub hash: String, + pub height: i64, + pub memo: Option, + pub raw_log: Option, } // make those generic over executor so that they could be performed over connection pool and a tx diff --git a/common/nyxd-scraper/src/storage/mod.rs b/common/nyxd-scraper/src/storage/mod.rs index f2d0921ee5..39a2842fa8 100644 --- a/common/nyxd-scraper/src/storage/mod.rs +++ b/common/nyxd-scraper/src/storage/mod.rs @@ -13,6 +13,7 @@ use crate::{ models::{CommitSignature, Validator}, }, }; +use manager::TransactionWithBlock; use sqlx::{ sqlite::{SqliteAutoVacuum, SqliteSynchronous}, types::time::OffsetDateTime, @@ -214,6 +215,17 @@ impl ScraperStorage { pub async fn get_pruned_height(&self) -> Result { Ok(self.manager.get_pruned_height().await?) } + + pub async fn get_transactions_after_height( + &self, + min_height: i64, + message_type: Option<&str>, + ) -> Result, ScraperError> { + Ok(self + .manager + .get_transactions_after_height(min_height, message_type) + .await?) 
+ } } pub async fn persist_block( diff --git a/nym-data-observatory/.sqlx/query-249faa11b88b749f50342bb5c9cc41d20896db543eed74a6f320c041bcbb723d.json b/nym-data-observatory/.sqlx/query-249faa11b88b749f50342bb5c9cc41d20896db543eed74a6f320c041bcbb723d.json deleted file mode 100644 index a42f7b7e32..0000000000 --- a/nym-data-observatory/.sqlx/query-249faa11b88b749f50342bb5c9cc41d20896db543eed74a6f320c041bcbb723d.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "INSERT INTO responses\n (joke_id, joke, date_created)\n VALUES\n ($1, $2, $3)\n ON CONFLICT(joke_id) DO UPDATE SET\n joke=excluded.joke,\n date_created=excluded.date_created;", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Varchar", - "Text", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "249faa11b88b749f50342bb5c9cc41d20896db543eed74a6f320c041bcbb723d" -} diff --git a/nym-data-observatory/.sqlx/query-aff7fbd06728004d2f2226d20c32f1482df00de2dc1d2b4debbb2e12553d997b.json b/nym-data-observatory/.sqlx/query-aff7fbd06728004d2f2226d20c32f1482df00de2dc1d2b4debbb2e12553d997b.json deleted file mode 100644 index 88e19ca1b9..0000000000 --- a/nym-data-observatory/.sqlx/query-aff7fbd06728004d2f2226d20c32f1482df00de2dc1d2b4debbb2e12553d997b.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "SELECT joke_id, joke, date_created FROM responses WHERE joke_id = $1", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "joke_id", - "type_info": "Varchar" - }, - { - "ordinal": 1, - "name": "joke", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "date_created", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Text" - ] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "aff7fbd06728004d2f2226d20c32f1482df00de2dc1d2b4debbb2e12553d997b" -} diff --git a/nym-data-observatory/.sqlx/query-e53f479f8cead3dc8aa1875e5d450ad69686cf6a109e37d6c3f0623c3e9f91d0.json 
b/nym-data-observatory/.sqlx/query-e53f479f8cead3dc8aa1875e5d450ad69686cf6a109e37d6c3f0623c3e9f91d0.json deleted file mode 100644 index e770f72289..0000000000 --- a/nym-data-observatory/.sqlx/query-e53f479f8cead3dc8aa1875e5d450ad69686cf6a109e37d6c3f0623c3e9f91d0.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "SELECT joke_id, joke, date_created FROM responses", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "joke_id", - "type_info": "Varchar" - }, - { - "ordinal": 1, - "name": "joke", - "type_info": "Text" - }, - { - "ordinal": 2, - "name": "date_created", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false, - false, - false - ] - }, - "hash": "e53f479f8cead3dc8aa1875e5d450ad69686cf6a109e37d6c3f0623c3e9f91d0" -} diff --git a/nym-data-observatory/Cargo.toml b/nym-data-observatory/Cargo.toml index 220449bfb1..0270057477 100644 --- a/nym-data-observatory/Cargo.toml +++ b/nym-data-observatory/Cargo.toml @@ -24,9 +24,13 @@ nym-task = { path = "../common/task" } nym-node-requests = { path = "../nym-node/nym-node-requests", features = [ "openapi", ] } +nyxd-scraper = {path = "../common/nyxd-scraper"} +reqwest = {workspace= true, features = ["rustls-tls"]} +rocket = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } -sqlx = { workspace = true, features = ["runtime-tokio-rustls", "postgres"] } +sqlx = { workspace = true, features = ["runtime-tokio-rustls", "sqlite", "time"] } +time = {version = "0.3.36"} tokio = { workspace = true, features = ["process", "rt-multi-thread"] } tokio-util = { workspace = true } tracing = { workspace = true } @@ -40,4 +44,4 @@ utoipauto = { workspace = true } [build-dependencies] anyhow = { workspace = true } tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -sqlx = { workspace = true, features = ["runtime-tokio-rustls", "postgres"] } +sqlx = { workspace = true, features = 
["runtime-tokio-rustls", "sqlite"] } diff --git a/nym-data-observatory/Dockerfile b/nym-data-observatory/Dockerfile deleted file mode 100644 index d36a910faf..0000000000 --- a/nym-data-observatory/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM rust:latest AS builder - -COPY ./ /usr/src/nym -WORKDIR /usr/src/nym/nym-data-observatory - -RUN cargo build --release - -#------------------------------------------------------------------- -# The following environment variables are required at runtime: -# -# NYM_DATA_OBSERVATORY_CONNECTION_URL -# -# And optionally: -# -# NYM_DATA_OBSERVATORY_HTTP_PORT -# -# see https://github.com/nymtech/nym/blob/develop/nym-data-observatory/src/main.rs for details -#------------------------------------------------------------------- - -FROM ubuntu:24.04 - -RUN apt update && apt install -yy curl ca-certificates - -WORKDIR /nym - -COPY --from=builder /usr/src/nym/target/release/nym-data-observatory ./ -ENTRYPOINT [ "/nym/nym-data-observatory" ] diff --git a/nym-data-observatory/build.rs b/nym-data-observatory/build.rs index ba795cf1d0..faedd8b4c7 100644 --- a/nym-data-observatory/build.rs +++ b/nym-data-observatory/build.rs @@ -1,58 +1,51 @@ use anyhow::Result; -use sqlx::{Connection, PgConnection}; +use sqlx::{sqlite::SqliteConnectOptions, Connection, SqliteConnection}; use std::io::Write; -use std::{collections::HashMap, fs::File}; +use std::{collections::HashMap, fs::File, path::PathBuf, str::FromStr}; -const POSTGRES_USER: &str = "nym"; -const POSTGRES_PASSWORD: &str = "password123"; -const POSTGRES_DB: &str = "data_obs_db"; - -/// if schema changes, rerun `cargo sqlx prepare` with a running DB -/// https://github.com/launchbadge/sqlx/blob/main/sqlx-cli/README.md#enable-building-in-offline-mode-with-query #[tokio::main] async fn main() -> Result<()> { - let db_url = - format!("postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@localhost:5432/{POSTGRES_DB}"); + let db_path = 
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("data_observatory.sqlite"); - export_db_variables(&db_url)?; - // if a live DB is reachable, use that - if PgConnection::connect(&db_url).await.is_ok() { - println!("cargo::rustc-env=SQLX_OFFLINE=false"); - run_migrations(&db_url).await?; - } else { - // by default, run in offline mode - println!("cargo::rustc-env=SQLX_OFFLINE=true"); + // Create the database directory if it doesn't exist + if let Some(parent) = db_path.parent() { + std::fs::create_dir_all(parent)?; } - rerun_if_changed(); + let db_url = format!("sqlite:{}", db_path.display()); + + // Ensure database file is created with proper permissions + let connect_options = SqliteConnectOptions::from_str(&db_url)? + .create_if_missing(true) + .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) + .foreign_keys(true); + + // Create initial connection to ensure database exists + let mut conn = SqliteConnection::connect_with(&connect_options).await?; + + export_db_variables(&db_url)?; + println!("cargo:rustc-env=SQLX_OFFLINE=false"); + + // Run migrations after ensuring database exists + sqlx::migrate!("./migrations").run(&mut conn).await?; + + // Add rerun-if-changed directives + println!("cargo:rerun-if-changed=migrations"); + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=src"); Ok(()) } fn export_db_variables(db_url: &str) -> Result<()> { let mut map = HashMap::new(); - map.insert("POSTGRES_USER", POSTGRES_USER); - map.insert("POSTGRES_PASSWORD", POSTGRES_PASSWORD); - map.insert("POSTGRES_DB", POSTGRES_DB); map.insert("DATABASE_URL", db_url); let mut file = File::create(".env")?; for (var, value) in map.iter() { - println!("cargo::rustc-env={}={}", var, value); - writeln!(file, "{}={}", var, value).expect("Failed to write to dotenv file"); + println!("cargo:rustc-env={}={}", var, value); + writeln!(file, "{}={}", var, value)?; } Ok(()) } - -async fn run_migrations(db_url: &str) -> Result<()> { - let mut conn = 
PgConnection::connect(db_url).await?; - sqlx::migrate!("./migrations").run(&mut conn).await?; - - Ok(()) -} - -fn rerun_if_changed() { - println!("cargo::rerun-if-changed=migrations"); - println!("cargo::rerun-if-changed=src/db/queries"); -} diff --git a/nym-data-observatory/migrations/001_price_data.sql b/nym-data-observatory/migrations/001_price_data.sql new file mode 100644 index 0000000000..f50f6299b0 --- /dev/null +++ b/nym-data-observatory/migrations/001_price_data.sql @@ -0,0 +1,7 @@ +CREATE TABLE price_history ( + timestamp INTEGER PRIMARY KEY, + chf REAL NOT NULL, + usd REAL NOT NULL, + eur REAL NOT NULL, + btc REAL NOT NULL +); \ No newline at end of file diff --git a/nym-data-observatory/migrations/002_payment_transactions.sql b/nym-data-observatory/migrations/002_payment_transactions.sql new file mode 100644 index 0000000000..7ba3b0c8e2 --- /dev/null +++ b/nym-data-observatory/migrations/002_payment_transactions.sql @@ -0,0 +1,10 @@ +CREATE TABLE transactions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + transaction_hash TEXT NOT NULL UNIQUE, + sender_address TEXT NOT NULL, + receiver_address TEXT NOT NULL, + amount REAL NOT NULL, + timestamp INTEGER NOT NULL, + height INTEGER NOT NULL, + memo TEXT +); diff --git a/nym-data-observatory/pg_up.sh b/nym-data-observatory/pg_up.sh deleted file mode 100755 index ca4e1f7e51..0000000000 --- a/nym-data-observatory/pg_up.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -# .env is generated in build.rs -source .env - -# Launching a container in such a way that it's destroyed after you detach from the terminal: -docker compose up - -# docker exec -it nym-data-observatory-pg /bin/bash -# psql -U youruser -d yourdb - -echo "Tearing down containers to have a clean slate" -docker compose down -v diff --git a/nym-data-observatory/src/background_task/mod.rs b/nym-data-observatory/src/background_task/mod.rs deleted file mode 100644 index 8799b818b4..0000000000 --- a/nym-data-observatory/src/background_task/mod.rs +++ 
/dev/null @@ -1,61 +0,0 @@ -use core::str; -use serde::Deserialize; -use tokio::process::Command; -use tokio::task::JoinHandle; -use tokio::time::Duration; - -use crate::db::{self, DbPool}; - -const REFRESH_DELAY: Duration = Duration::from_secs(15); -const FAILURE_RETRY_DELAY: Duration = Duration::from_secs(60 * 2); - -pub(crate) async fn spawn_in_background(db_pool: DbPool) -> JoinHandle<()> { - loop { - tracing::info!("Running in a loop 🏃"); - - if let Err(e) = some_network_action(&db_pool).await { - tracing::error!( - "❌ Run failed: {e}, retrying in {}s...", - FAILURE_RETRY_DELAY.as_secs() - ); - tokio::time::sleep(FAILURE_RETRY_DELAY).await; - } else { - tracing::info!( - "✅ Run successful, sleeping for {}s...", - REFRESH_DELAY.as_secs() - ); - tokio::time::sleep(REFRESH_DELAY).await; - } - } -} - -#[derive(Deserialize, Debug)] -pub(crate) struct Response { - #[serde(rename(deserialize = "id"))] - pub(crate) joke_id: String, - pub(crate) joke: String, - #[serde(rename(deserialize = "status"))] - pub(crate) _status: u16, -} - -async fn some_network_action(pool: &DbPool) -> anyhow::Result<()> { - // for demonstration purposes only. 
You should use reqwest if you need it - let output = Command::new("curl") - .arg("-H") - .arg("Accept: application/json") - .arg("https://icanhazdadjoke.com/") - .output() - .await?; - - if !output.status.success() { - anyhow::bail!("Curl command failed with status: {}", output.status); - } - - let response_str = str::from_utf8(&output.stdout)?; - let joke_response: Response = serde_json::from_str(response_str)?; - - tracing::info!("{:?}", joke_response.joke); - db::queries::insert_joke(pool, joke_response.into()).await?; - - Ok(()) -} diff --git a/nym-data-observatory/src/chain_scraper/mod.rs b/nym-data-observatory/src/chain_scraper/mod.rs new file mode 100644 index 0000000000..157dbd28bb --- /dev/null +++ b/nym-data-observatory/src/chain_scraper/mod.rs @@ -0,0 +1,22 @@ +use nyxd_scraper::{storage::ScraperStorage, Config, NyxdScraper, PruningOptions}; + +pub(crate) async fn run_chain_scraper() -> anyhow::Result { + let websocket_url = + std::env::var("NYXD_WEBSOCKET_URL").expect("NYXD_WEBSOCKET_URL not defined"); + + let rpc_url = std::env::var("NYXD_RPC_URL").expect("NYXD_RPC_URL not defined"); + let websocket_url = reqwest::Url::parse(&websocket_url)?; + let rpc_url = reqwest::Url::parse(&rpc_url)?; + + let scraper = NyxdScraper::builder(Config { + websocket_url, + rpc_url, + database_path: "chain_history.sqlite".into(), + pruning_options: PruningOptions::nothing(), + store_precommits: false, + }); + + let storage = scraper.build_and_start().await?; + + Ok(storage.storage) +} diff --git a/nym-data-observatory/src/db/mod.rs b/nym-data-observatory/src/db/mod.rs index 6e8938a578..74339ca575 100644 --- a/nym-data-observatory/src/db/mod.rs +++ b/nym-data-observatory/src/db/mod.rs @@ -1,13 +1,16 @@ use anyhow::{anyhow, Result}; -use sqlx::{migrate::Migrator, postgres::PgConnectOptions, ConnectOptions, PgPool}; +use sqlx::{migrate::Migrator, sqlite::SqliteConnectOptions, SqlitePool}; use std::str::FromStr; pub(crate) mod models; -pub(crate) mod queries; +pub(crate) mod 
queries { + pub mod payments; + pub mod price; +} static MIGRATOR: Migrator = sqlx::migrate!("./migrations"); -pub(crate) type DbPool = PgPool; +pub(crate) type DbPool = SqlitePool; pub(crate) struct Storage { pool: DbPool, @@ -16,13 +19,16 @@ pub(crate) struct Storage { impl Storage { pub async fn init(connection_url: String) -> Result { let connect_options = - PgConnectOptions::from_str(&connection_url)?.disable_statement_logging(); + SqliteConnectOptions::from_str(&connection_url)?.create_if_missing(true); let pool = DbPool::connect_with(connect_options) .await .map_err(|err| anyhow!("Failed to connect to {}: {}", &connection_url, err))?; - MIGRATOR.run(&pool).await?; + MIGRATOR + .run(&pool) + .await + .map_err(|err| anyhow!("Failed to run migrations: {}", err))?; Ok(Storage { pool }) } diff --git a/nym-data-observatory/src/db/models.rs b/nym-data-observatory/src/db/models.rs index e1e9682235..24d04de5f1 100644 --- a/nym-data-observatory/src/db/models.rs +++ b/nym-data-observatory/src/db/models.rs @@ -1,22 +1,41 @@ use serde::{Deserialize, Serialize}; use utoipa::ToSchema; -use crate::background_task::Response; +#[derive(Clone, Deserialize, Debug)] +pub(crate) struct CurrencyPrices { + pub(crate) chf: f32, + pub(crate) usd: f32, + pub(crate) eur: f32, + pub(crate) btc: f32, +} + +// Struct to hold Coingecko response +#[derive(Clone, Deserialize, Debug, ToSchema)] +pub(crate) struct CoingeckoPriceResponse { + pub(crate) nym: CurrencyPrices, +} + +#[derive(Clone, Deserialize, Debug, ToSchema)] +pub(crate) struct PriceRecord { + pub(crate) timestamp: i64, + pub(crate) nym: CurrencyPrices, +} #[derive(Serialize, Deserialize, Debug, ToSchema)] -pub(crate) struct JokeDto { - pub(crate) joke_id: String, - pub(crate) joke: String, - pub(crate) date_created: i32, +pub(crate) struct PriceHistory { + pub(crate) timestamp: i64, + pub(crate) chf: f32, + pub(crate) usd: f32, + pub(crate) eur: f32, + pub(crate) btc: f32, } -impl From for JokeDto { - fn from(value: Response) 
-> Self { - Self { - joke_id: value.joke_id, - joke: value.joke, - // casting not smart, can implicitly panic, don't do this in prod - date_created: chrono::offset::Utc::now().timestamp() as i32, - } - } +#[derive(Serialize, Deserialize, Debug, ToSchema)] +pub(crate) struct PaymentTransaction { + pub(crate) transaction_hash: String, + pub(crate) sender_address: String, + pub(crate) receiver_address: String, + pub(crate) amount: f64, + pub(crate) timestamp: i64, + pub(crate) height: i64, } diff --git a/nym-data-observatory/src/db/queries/joke.rs b/nym-data-observatory/src/db/queries/joke.rs deleted file mode 100644 index 649564a86c..0000000000 --- a/nym-data-observatory/src/db/queries/joke.rs +++ /dev/null @@ -1,39 +0,0 @@ -use crate::db::{models::JokeDto, DbPool}; - -pub(crate) async fn insert_joke(pool: &DbPool, joke: JokeDto) -> anyhow::Result<()> { - let mut conn = pool.acquire().await?; - sqlx::query!( - "INSERT INTO responses - (joke_id, joke, date_created) - VALUES - ($1, $2, $3) - ON CONFLICT(joke_id) DO UPDATE SET - joke=excluded.joke, - date_created=excluded.date_created;", - joke.joke_id, - joke.joke, - joke.date_created as i32, - ) - .execute(&mut *conn) - .await?; - - Ok(()) -} - -pub(crate) async fn select_joke_by_id(pool: &DbPool, joke_id: &str) -> anyhow::Result { - sqlx::query_as!( - JokeDto, - "SELECT joke_id, joke, date_created FROM responses WHERE joke_id = $1", - joke_id - ) - .fetch_one(pool) - .await - .map_err(anyhow::Error::from) -} - -pub(crate) async fn select_all(pool: &DbPool) -> anyhow::Result> { - sqlx::query_as!(JokeDto, "SELECT joke_id, joke, date_created FROM responses",) - .fetch_all(pool) - .await - .map_err(anyhow::Error::from) -} diff --git a/nym-data-observatory/src/db/queries/mod.rs b/nym-data-observatory/src/db/queries/mod.rs index b5723730b4..36867a6e7d 100644 --- a/nym-data-observatory/src/db/queries/mod.rs +++ b/nym-data-observatory/src/db/queries/mod.rs @@ -1,5 +1,6 @@ -// group queries in files by theme -mod joke; +mod 
payments; +mod price; // re-exporting allows us to access all queries via `queries::bla`` -pub(crate) use joke::{insert_joke, select_all, select_joke_by_id}; +pub(crate) use payments::{get_last_checked_height, insert_payment}; +pub(crate) use price::{get_latest_price, insert_nym_prices}; diff --git a/nym-data-observatory/src/db/queries/payments.rs b/nym-data-observatory/src/db/queries/payments.rs new file mode 100644 index 0000000000..cb9fae1f7f --- /dev/null +++ b/nym-data-observatory/src/db/queries/payments.rs @@ -0,0 +1,41 @@ +use crate::db::DbPool; +use anyhow::Result; + +pub async fn get_last_checked_height(pool: &DbPool) -> Result { + let result = sqlx::query_scalar!("SELECT MAX(height) FROM transactions") + .fetch_one(pool) + .await?; + Ok(result.unwrap_or(0)) +} + +pub async fn insert_payment( + pool: &DbPool, + transaction_hash: String, + sender_address: String, + receiver_address: String, + amount: f64, + height: i64, + memo: Option, +) -> Result<()> { + let timestamp = chrono::Utc::now().timestamp(); + + sqlx::query!( + r#" + INSERT INTO transactions ( + transaction_hash, sender_address, receiver_address, + amount, height, timestamp, memo + ) VALUES (?, ?, ?, ?, ?, ?, ?) 
+ "#, + transaction_hash, + sender_address, + receiver_address, + amount, + height, + timestamp, + memo, + ) + .execute(pool) + .await?; + + Ok(()) +} diff --git a/nym-data-observatory/src/db/queries/price.rs b/nym-data-observatory/src/db/queries/price.rs new file mode 100644 index 0000000000..7c69863744 --- /dev/null +++ b/nym-data-observatory/src/db/queries/price.rs @@ -0,0 +1,46 @@ +use crate::db::models::{PriceHistory, PriceRecord}; +use crate::db::DbPool; + +pub(crate) async fn insert_nym_prices( + pool: &DbPool, + price_data: PriceRecord, +) -> anyhow::Result<()> { + let mut conn = pool.acquire().await?; + let timestamp = price_data.timestamp; + sqlx::query!( + "INSERT INTO price_history + (timestamp, chf, usd, eur, btc) + VALUES + ($1, $2, $3, $4, $5) + ON CONFLICT(timestamp) DO UPDATE SET + chf=excluded.chf, + usd=excluded.usd, + eur=excluded.eur, + btc=excluded.btc;", + timestamp, + price_data.nym.chf, + price_data.nym.usd, + price_data.nym.eur, + price_data.nym.btc, + ) + .execute(&mut *conn) + .await?; + + Ok(()) +} + +pub(crate) async fn get_latest_price(pool: &DbPool) -> anyhow::Result { + let result = sqlx::query!( + "SELECT timestamp, chf, usd, eur, btc FROM price_history ORDER BY timestamp DESC LIMIT 1;" + ) + .fetch_one(pool) + .await?; + + Ok(PriceHistory { + timestamp: result.timestamp, + chf: result.chf as f32, + usd: result.usd as f32, + eur: result.eur as f32, + btc: result.btc as f32, + }) +} diff --git a/nym-data-observatory/src/http/api/jokes.rs b/nym-data-observatory/src/http/api/jokes.rs deleted file mode 100644 index 6d429798e6..0000000000 --- a/nym-data-observatory/src/http/api/jokes.rs +++ /dev/null @@ -1,78 +0,0 @@ -use axum::{ - extract::{Path, State}, - Json, Router, -}; -use serde::Deserialize; -use utoipa::IntoParams; - -use crate::{ - db::{ - models::JokeDto, - queries::{self, select_joke_by_id}, - }, - http::{ - error::{Error, HttpResult}, - state::AppState, - }, -}; - -pub(crate) fn routes() -> Router { - Router::new() - 
.route("/", axum::routing::get(jokes)) - .route("/:joke_id", axum::routing::get(joke_by_id)) - .route("/fetch_another", axum::routing::get(fetch_another)) -} - -#[utoipa::path( - tag = "Dad Jokes", - get, - path = "/v1/jokes", - responses( - (status = 200, body = Vec) - ) -)] -async fn jokes(State(state): State) -> HttpResult>> { - queries::select_all(state.db_pool()) - .await - .map(Json::from) - .map_err(|_| Error::internal()) -} - -#[derive(Deserialize, IntoParams)] -#[into_params(parameter_in = Path)] -struct JokeIdParam { - joke_id: String, -} - -#[utoipa::path( - tag = "Dad Jokes", - get, - params( - JokeIdParam - ), - path = "/v1/jokes/{joke_id}", - responses( - (status = 200, body = JokeDto) - ) -)] -async fn joke_by_id( - Path(JokeIdParam { joke_id }): Path, - State(state): State, -) -> HttpResult> { - select_joke_by_id(state.db_pool(), &joke_id) - .await - .map(Json::from) - .map_err(|_| Error::not_found(joke_id)) -} - -#[utoipa::path( - tag = "Dad Jokes", - get, - path = "/v1/jokes/fetch_another", - responses( - (status = 200, body = String) - ) -)] -async fn fetch_another(State(_state): State) -> HttpResult> { - Ok(Json(String::from("Done boss, check the DB"))) -} diff --git a/nym-data-observatory/src/http/api/mod.rs b/nym-data-observatory/src/http/api/mod.rs index eeac99445d..4ca198a729 100644 --- a/nym-data-observatory/src/http/api/mod.rs +++ b/nym-data-observatory/src/http/api/mod.rs @@ -7,8 +7,8 @@ use utoipa_swagger_ui::SwaggerUi; use crate::http::{api_docs, server::HttpServer, state::AppState}; -pub(crate) mod jokes; pub(crate) mod mixnodes; +pub(crate) mod price; pub(crate) struct RouterBuilder { unfinished_router: Router, @@ -28,8 +28,9 @@ impl RouterBuilder { .nest( "/v1", Router::new() - .nest("/jokes", jokes::routes()) - .nest("/mixnodes", mixnodes::routes()), + //.nest("/jokes", jokes::routes()) + .nest("/mixnodes", mixnodes::routes()) + .nest("/price", price::routes()), ); Self { diff --git a/nym-data-observatory/src/http/api/price.rs 
b/nym-data-observatory/src/http/api/price.rs new file mode 100644 index 0000000000..1a239fcf54 --- /dev/null +++ b/nym-data-observatory/src/http/api/price.rs @@ -0,0 +1,27 @@ +use crate::db::models::PriceHistory; +use crate::db::queries::price::get_latest_price; +use crate::http::error::Error; +use crate::http::error::HttpResult; +use crate::http::state::AppState; +use axum::{extract::State, Json, Router}; + +pub(crate) fn routes() -> Router { + Router::new().route("/", axum::routing::get(price)) +} + +#[utoipa::path( + tag = "Nym Price", + get, + path = "/v1/price", + responses( + (status = 200, body = String) + ) +)] + +/// Fetch the latest price cached by the data observatory +async fn price(State(state): State) -> HttpResult> { + get_latest_price(state.db_pool()) + .await + .map(Json::from) + .map_err(|_| Error::internal()) +} diff --git a/nym-data-observatory/src/http/error.rs b/nym-data-observatory/src/http/error.rs index 30ba8a3c9d..fa6b274b69 100644 --- a/nym-data-observatory/src/http/error.rs +++ b/nym-data-observatory/src/http/error.rs @@ -6,13 +6,6 @@ pub(crate) struct Error { } impl Error { - pub(crate) fn not_found(message: String) -> Self { - Self { - message, - status: axum::http::StatusCode::NOT_FOUND, - } - } - pub(crate) fn internal() -> Self { Self { message: String::from("Internal server error"), diff --git a/nym-data-observatory/src/main.rs b/nym-data-observatory/src/main.rs index da8f53d4f9..4373f8850f 100644 --- a/nym-data-observatory/src/main.rs +++ b/nym-data-observatory/src/main.rs @@ -1,11 +1,15 @@ +use chain_scraper::run_chain_scraper; use clap::Parser; use nym_network_defaults::setup_env; use nym_task::signal::wait_for_signal; +use tokio::join; -mod background_task; +mod chain_scraper; mod db; mod http; mod logging; +mod payment_listener; +mod price_scraper; #[derive(Parser, Debug)] #[command(version, about, long_about = None)] @@ -18,25 +22,13 @@ struct Args { #[arg(short, long, default_value = None, env = 
"NYM_DATA_OBSERVATORY_ENV_FILE")] env_file: Option, - /// DB connection username - #[arg(long, default_value = None, env = "NYM_DATA_OBSERVATORY_CONNECTION_USERNAME")] - connection_username: String, - - /// DB connection password - #[arg(long, default_value = None, env = "NYM_DATA_OBSERVATORY_CONNECTION_PASSWORD")] - connection_password: String, - - /// DB connection host - #[arg(long, default_value = None, env = "NYM_DATA_OBSERVATORY_CONNECTION_HOST")] - connection_host: String, - - /// DB connection port - #[arg(long, default_value = None, env = "NYM_DATA_OBSERVATORY_CONNECTION_PORT")] - connection_port: String, - - /// DB connection database name - #[arg(long, default_value = None, env = "NYM_DATA_OBSERVATORY_CONNECTION_DB")] - connection_db: String, + /// SQLite database file path + #[arg( + long, + default_value = "data_observatory.sqlite", + env = "NYM_DATA_OBSERVATORY_DB_PATH" + )] + db_path: String, } #[tokio::main] @@ -44,30 +36,50 @@ async fn main() -> anyhow::Result<()> { logging::setup_tracing_logger(); let args = Args::parse(); - setup_env(args.env_file); // Defaults to mainnet if empty - let connection_url = format!( - "postgres://{}:{}@{}:{}/{}", - args.connection_username, - args.connection_password, - args.connection_host, - args.connection_port, - args.connection_db - ); + let db_path = args.db_path; + // Ensure parent directory exists + if let Some(parent) = std::path::Path::new(&db_path).parent() { + std::fs::create_dir_all(parent)?; + } + let connection_url = format!("sqlite://{}?mode=rwc", db_path); let storage = db::Storage::init(connection_url).await?; - let db_pool = storage.pool_owned().await; - tokio::spawn(async move { - background_task::spawn_in_background(db_pool).await; - tracing::info!("Started task"); + let observatory_pool = storage.pool_owned().await; + + // Spawn the chain scraper and get its storage + + // Spawn the payment listener task + let payment_listener_handle = tokio::spawn({ + let obs_pool = observatory_pool.clone(); + 
let chain_storage = run_chain_scraper().await?; + + async move { + if let Err(e) = payment_listener::run_payment_listener(obs_pool, chain_storage).await { + tracing::error!("Payment listener error: {}", e); + } + Ok::<_, anyhow::Error>(()) + } + }); + + // Clone pool for each task that needs it + //let background_pool = db_pool.clone(); + + let price_scraper_handle = tokio::spawn(async move { + price_scraper::run_price_scraper(&observatory_pool).await; }); let shutdown_handles = http::server::start_http_api(storage.pool_owned().await, args.http_port) .await .expect("Failed to start server"); + tracing::info!("Started HTTP server on port {}", args.http_port); + // Wait for the short-lived tasks to complete + let _ = join!(price_scraper_handle, payment_listener_handle); + + // Wait for a signal to terminate the long-running task wait_for_signal().await; if let Err(err) = shutdown_handles.shutdown().await { diff --git a/nym-data-observatory/src/payment_listener/mod.rs b/nym-data-observatory/src/payment_listener/mod.rs new file mode 100644 index 0000000000..4158e65a0d --- /dev/null +++ b/nym-data-observatory/src/payment_listener/mod.rs @@ -0,0 +1,114 @@ +use crate::db::queries; +use nyxd_scraper::storage::ScraperStorage; +use reqwest::Client; +use serde_json::{json, Value}; +use sqlx::SqlitePool; +use std::env; +use tokio::time::{self, Duration}; + +#[derive(Debug)] +struct TransferEvent { + recipient: String, + sender: String, + amount: String, +} + +pub(crate) async fn run_payment_listener( + observatory_pool: SqlitePool, + chain_storage: ScraperStorage, +) -> anyhow::Result<()> { + let payment_receive_address = env::var("PAYMENT_RECEIVE_ADDRESS").map_err(|_| { + anyhow::anyhow!("Environment variable `PAYMENT_RECEIVE_ADDRESS` not defined") + })?; + let webhook_url = env::var("WEBHOOK_URL") + .map_err(|_| anyhow::anyhow!("Environment variable `WEBHOOK_URL` not defined"))?; + + let client = Client::new(); + loop { + let last_checked_height = + 
queries::payments::get_last_checked_height(&observatory_pool).await?; + tracing::info!("Last checked height: {}", last_checked_height); + + let transactions = chain_storage + .get_transactions_after_height( + last_checked_height, + Some("/cosmos.bank.v1beta1.MsgSend"), + ) + .await?; + + for tx in transactions { + println!("Processing transaction: {}", tx.hash); + if let Some(raw_log) = tx.raw_log.as_deref() { + if let Some(transfer) = parse_transfer_from_raw_log(raw_log)? { + if transfer.recipient == payment_receive_address { + let amount: f64 = parse_unym_amount(&transfer.amount)?; + + queries::payments::insert_payment( + &observatory_pool, + tx.hash.clone(), + transfer.sender.clone(), + transfer.recipient.clone(), + amount, + tx.height, + tx.memo.clone(), + ) + .await?; + + let webhook_data = json!({ + "transaction_hash": tx.hash, + "sender_address": transfer.sender, + "receiver_address": transfer.recipient, + "amount": amount, + "height": tx.height, + "memo": tx.memo, + }); + let _ = client.post(&webhook_url).json(&webhook_data).send().await; + } + } + } + } + + time::sleep(Duration::from_secs(10)).await; + } +} + +fn parse_transfer_from_raw_log(raw_log: &str) -> anyhow::Result> { + let log_value: Value = serde_json::from_str(raw_log)?; + + if let Some(events) = log_value[0]["events"].as_array() { + if let Some(transfer_event) = events.iter().find(|e| e["type"] == "transfer") { + if let Some(attrs) = transfer_event["attributes"].as_array() { + let mut transfer = TransferEvent { + recipient: String::new(), + sender: String::new(), + amount: String::new(), + }; + + for attr in attrs { + match attr["key"].as_str() { + Some("recipient") => { + transfer.recipient = attr["value"].as_str().unwrap_or("").to_string() + } + Some("sender") => { + transfer.sender = attr["value"].as_str().unwrap_or("").to_string() + } + Some("amount") => { + transfer.amount = attr["value"].as_str().unwrap_or("").to_string() + } + _ => continue, + } + } + + return Ok(Some(transfer)); + } + } 
+ } + + Ok(None) +} + +fn parse_unym_amount(amount: &str) -> anyhow::Result { + let amount = amount.trim_end_matches("unym"); + let parsed: f64 = amount.parse()?; + Ok(parsed / 1_000_000.0) +} diff --git a/nym-data-observatory/src/price_scraper/mod.rs b/nym-data-observatory/src/price_scraper/mod.rs new file mode 100644 index 0000000000..938d292b3e --- /dev/null +++ b/nym-data-observatory/src/price_scraper/mod.rs @@ -0,0 +1,55 @@ +use crate::db::{ + models::{CoingeckoPriceResponse, PriceRecord}, + queries::price::insert_nym_prices, +}; +use core::str; +use tokio::task::JoinHandle; +use tokio::time::Duration; + +use crate::db::DbPool; + +const REFRESH_DELAY: Duration = Duration::from_secs(300); +const FAILURE_RETRY_DELAY: Duration = Duration::from_secs(60 * 2); +const COINGECKO_API_URL: &str = + "https://api.coingecko.com/api/v3/simple/price?ids=nym&vs_currencies=chf,usd,eur,btc"; + +pub(crate) async fn run_price_scraper(db_pool: &DbPool) -> JoinHandle<()> { + loop { + tracing::info!("Running in a loop 🏃"); + if let Err(e) = get_coingecko_prices(db_pool).await { + tracing::error!("❌ Failed to get CoinGecko prices: {e}"); + tracing::info!("Retrying in {}s...", FAILURE_RETRY_DELAY.as_secs()); + tokio::time::sleep(FAILURE_RETRY_DELAY).await; + } else { + tracing::info!("✅ Successfully fetched CoinGecko prices"); + tokio::time::sleep(REFRESH_DELAY).await; + } + } +} + +async fn get_coingecko_prices(pool: &DbPool) -> anyhow::Result<()> { + tracing::info!("💰 Fetching CoinGecko prices from {}", COINGECKO_API_URL); + + let response = reqwest::get(COINGECKO_API_URL) + .await? 
+ .json::() + .await; + + tracing::info!("Got response {:?}", response); + match response { + Ok(resp) => { + let price_record = PriceRecord { + timestamp: time::OffsetDateTime::now_utc().unix_timestamp(), + nym: resp.nym, + }; + + insert_nym_prices(pool, price_record).await?; + } + Err(e) => { + //tracing::info!("💰 CoinGecko price response: {:?}", response); + tracing::error!("Error sending request: {}", e); + } + } + + Ok(()) +} From d951ea954831b706a71c4a4fe7e1a5ed9c6dde3e Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Wed, 13 Nov 2024 11:02:45 +0000 Subject: [PATCH 29/64] nyxd-scraper: add optional starting height parameter to scrape before listening for new blocks --- common/nyxd-scraper/src/scraper/mod.rs | 92 +++++++++++++------ nym-data-observatory/src/chain_scraper/mod.rs | 8 +- 2 files changed, 71 insertions(+), 29 deletions(-) diff --git a/common/nyxd-scraper/src/scraper/mod.rs b/common/nyxd-scraper/src/scraper/mod.rs index 2b4e228d7c..1b616f3247 100644 --- a/common/nyxd-scraper/src/scraper/mod.rs +++ b/common/nyxd-scraper/src/scraper/mod.rs @@ -47,7 +47,10 @@ pub struct NyxdScraperBuilder { } impl NyxdScraperBuilder { - pub async fn build_and_start(self) -> Result { + pub async fn build_and_start( + self, + start_block: Option, + ) -> Result { let scraper = NyxdScraper::new(self.config).await?; let (processing_tx, processing_rx) = unbounded_channel(); @@ -90,6 +93,10 @@ impl NyxdScraperBuilder { ) .await?; + if let Some(height) = start_block { + scraper.process_block_range(Some(height), None).await?; + } + scraper.start_tasks(block_requester, block_processor, chain_subscriber); Ok(scraper) @@ -202,10 +209,10 @@ impl NyxdScraper { .await? .with_pruning(PruningOptions::nothing()); - let current_height = self.rpc_client.current_block_height().await? as u32; + let mut current_height = self.rpc_client.current_block_height().await? 
as u32; let last_processed = block_processor.last_process_height(); - let starting_height = match starting_height { + let mut starting_height = match starting_height { // always attempt to use whatever the user has provided Some(explicit) => explicit, None => { @@ -219,7 +226,8 @@ impl NyxdScraper { } }; - let end_height = match end_height { + let must_catch_up = end_height.is_none(); + let mut end_height = match end_height { // always attempt to use whatever the user has provided Some(explicit) => explicit, None => { @@ -234,32 +242,62 @@ impl NyxdScraper { } }; - info!( - starting_height = starting_height, - end_height = end_height, - "attempting to process block range" - ); - - let range = (starting_height..=end_height).collect::>(); - - // the most likely bottleneck here are going to be the chain queries, - // so batch multiple requests - for batch in range.chunks(4) { - let batch_result = join_all( - batch - .iter() - .map(|height| self.rpc_client.get_basic_block_details(*height)), - ) - .await; - for result in batch_result { - match result { - Ok(block) => block_processor.process_block(block.into()).await?, - Err(err) => { - error!("failed to retrieve the block: {err}. stopping..."); - return Err(err); + let mut last_processed = starting_height; + + while last_processed < current_height { + info!( + starting_height = starting_height, + end_height = end_height, + "attempting to process block range" + ); + + let range = (starting_height..=end_height).collect::>(); + + // the most likely bottleneck here are going to be the chain queries, + // so batch multiple requests + for batch in range.chunks(4) { + let batch_result = join_all( + batch + .iter() + .map(|height| self.rpc_client.get_basic_block_details(*height)), + ) + .await; + for result in batch_result { + match result { + Ok(block) => block_processor.process_block(block.into()).await?, + Err(err) => { + error!("failed to retrieve the block: {err}. 
stopping..."); + return Err(err); + } } } } + + // if we don't need to catch up, return early + if !must_catch_up { + return Ok(()); + } + + // check if we have caught up to the current block height + last_processed = end_height; + current_height = self.rpc_client.current_block_height().await? as u32; + + info!( + last_processed = last_processed, + current_height = current_height, + "🏃 still need to catch up..." + ); + + starting_height = last_processed + 1; + end_height = current_height; + } + + if must_catch_up { + info!( + last_processed = last_processed, + current_height = current_height, + "✅ block processing has caught up!" + ); } Ok(()) diff --git a/nym-data-observatory/src/chain_scraper/mod.rs b/nym-data-observatory/src/chain_scraper/mod.rs index 157dbd28bb..650b3f0c34 100644 --- a/nym-data-observatory/src/chain_scraper/mod.rs +++ b/nym-data-observatory/src/chain_scraper/mod.rs @@ -8,6 +8,10 @@ pub(crate) async fn run_chain_scraper() -> anyhow::Result { let websocket_url = reqwest::Url::parse(&websocket_url)?; let rpc_url = reqwest::Url::parse(&rpc_url)?; + let start_block_height = std::env::var("NYXD_SCRAPER_START_HEIGHT") + .ok() + .and_then(|value| value.parse::().ok()); + let scraper = NyxdScraper::builder(Config { websocket_url, rpc_url, @@ -16,7 +20,7 @@ pub(crate) async fn run_chain_scraper() -> anyhow::Result { store_precommits: false, }); - let storage = scraper.build_and_start().await?; + let instance = scraper.build_and_start(start_block_height).await?; - Ok(storage.storage) + Ok(instance.storage) } From 6010de978d7c605fcbf9de3491584200a5288d82 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Wed, 13 Nov 2024 11:03:50 +0000 Subject: [PATCH 30/64] data-observatory: renamed `transactions` to `payments` because there is already `transaction` in the base scraper schema --- .../migrations/002_payment_transactions.sql | 2 +- nym-data-observatory/src/db/models.rs | 2 +- nym-data-observatory/src/db/queries/payments.rs | 4 ++-- 
nym-data-observatory/src/http/api_docs.rs | 6 +++++- nym-data-observatory/src/payment_listener/mod.rs | 2 +- 5 files changed, 10 insertions(+), 6 deletions(-) diff --git a/nym-data-observatory/migrations/002_payment_transactions.sql b/nym-data-observatory/migrations/002_payment_transactions.sql index 7ba3b0c8e2..77fa6bc83a 100644 --- a/nym-data-observatory/migrations/002_payment_transactions.sql +++ b/nym-data-observatory/migrations/002_payment_transactions.sql @@ -1,4 +1,4 @@ -CREATE TABLE transactions ( +CREATE TABLE payments ( id INTEGER PRIMARY KEY AUTOINCREMENT, transaction_hash TEXT NOT NULL UNIQUE, sender_address TEXT NOT NULL, diff --git a/nym-data-observatory/src/db/models.rs b/nym-data-observatory/src/db/models.rs index 24d04de5f1..8ad20e97c0 100644 --- a/nym-data-observatory/src/db/models.rs +++ b/nym-data-observatory/src/db/models.rs @@ -31,7 +31,7 @@ pub(crate) struct PriceHistory { } #[derive(Serialize, Deserialize, Debug, ToSchema)] -pub(crate) struct PaymentTransaction { +pub(crate) struct PaymentRecord { pub(crate) transaction_hash: String, pub(crate) sender_address: String, pub(crate) receiver_address: String, diff --git a/nym-data-observatory/src/db/queries/payments.rs b/nym-data-observatory/src/db/queries/payments.rs index cb9fae1f7f..c7757d6e67 100644 --- a/nym-data-observatory/src/db/queries/payments.rs +++ b/nym-data-observatory/src/db/queries/payments.rs @@ -2,7 +2,7 @@ use crate::db::DbPool; use anyhow::Result; pub async fn get_last_checked_height(pool: &DbPool) -> Result { - let result = sqlx::query_scalar!("SELECT MAX(height) FROM transactions") + let result = sqlx::query_scalar!("SELECT MAX(height) FROM payments") .fetch_one(pool) .await?; Ok(result.unwrap_or(0)) @@ -21,7 +21,7 @@ pub async fn insert_payment( sqlx::query!( r#" - INSERT INTO transactions ( + INSERT INTO payments ( transaction_hash, sender_address, receiver_address, amount, height, timestamp, memo ) VALUES (?, ?, ?, ?, ?, ?, ?) 
diff --git a/nym-data-observatory/src/http/api_docs.rs b/nym-data-observatory/src/http/api_docs.rs index ec77e3745c..c7dbe0118d 100644 --- a/nym-data-observatory/src/http/api_docs.rs +++ b/nym-data-observatory/src/http/api_docs.rs @@ -6,5 +6,9 @@ use utoipauto::utoipauto; // https://github.com/ProbablyClem/utoipauto/issues/13#issuecomment-1974911829 #[utoipauto(paths = "./nym-data-observatory/src")] #[derive(OpenApi)] -#[openapi(info(title = "Nym API"), tags(), components(schemas()))] +#[openapi( + info(title = "Nym Data Observatory API"), + tags(), + components(schemas()) +)] pub(super) struct ApiDoc; diff --git a/nym-data-observatory/src/payment_listener/mod.rs b/nym-data-observatory/src/payment_listener/mod.rs index 4158e65a0d..7006b413e6 100644 --- a/nym-data-observatory/src/payment_listener/mod.rs +++ b/nym-data-observatory/src/payment_listener/mod.rs @@ -37,7 +37,7 @@ pub(crate) async fn run_payment_listener( .await?; for tx in transactions { - println!("Processing transaction: {}", tx.hash); + tracing::info!("Processing transaction: {}", tx.hash); if let Some(raw_log) = tx.raw_log.as_deref() { if let Some(transfer) = parse_transfer_from_raw_log(raw_log)? 
{ if transfer.recipient == payment_receive_address { From cf6f437187b66aa11ae10e084ab87c0d6a0b3a75 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Thu, 5 Dec 2024 10:41:06 +0000 Subject: [PATCH 31/64] Move nym-data-observatory (v0) to nyx-chain-watcher --- .gitignore | 3 ++ Cargo.lock | 54 ++++++++++--------- Cargo.toml | 4 +- .../Cargo.toml | 2 +- nyx-chain-watcher/README.md | 15 ++++++ .../README_SQLX.md | 0 .../build.rs | 2 +- .../docker-compose.yml | 0 .../migrations/000_init.sql | 0 .../migrations/001_price_data.sql | 0 .../migrations/002_payment_transactions.sql | 0 .../src/chain_scraper/mod.rs | 0 .../src/db/mod.rs | 0 .../src/db/models.rs | 0 .../src/db/queries/mod.rs | 0 .../src/db/queries/payments.rs | 0 .../src/db/queries/price.rs | 0 .../src/http/api/mixnodes.rs | 0 .../src/http/api/mod.rs | 0 .../src/http/api/price.rs | 2 +- .../src/http/api/server.rs | 0 .../src/http/api_docs.rs | 8 +-- .../src/http/error.rs | 0 .../src/http/mod.rs | 0 .../src/http/server.rs | 0 .../src/http/state.rs | 0 .../src/logging.rs | 0 .../src/main.rs | 14 ++--- .../src/payment_listener/mod.rs | 6 +-- .../src/price_scraper/mod.rs | 0 30 files changed, 64 insertions(+), 46 deletions(-) rename {nym-data-observatory => nyx-chain-watcher}/Cargo.toml (98%) create mode 100644 nyx-chain-watcher/README.md rename {nym-data-observatory => nyx-chain-watcher}/README_SQLX.md (100%) rename {nym-data-observatory => nyx-chain-watcher}/build.rs (98%) rename {nym-data-observatory => nyx-chain-watcher}/docker-compose.yml (100%) rename {nym-data-observatory => nyx-chain-watcher}/migrations/000_init.sql (100%) rename {nym-data-observatory => nyx-chain-watcher}/migrations/001_price_data.sql (100%) rename {nym-data-observatory => nyx-chain-watcher}/migrations/002_payment_transactions.sql (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/chain_scraper/mod.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/db/mod.rs (100%) rename {nym-data-observatory => 
nyx-chain-watcher}/src/db/models.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/db/queries/mod.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/db/queries/payments.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/db/queries/price.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/api/mixnodes.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/api/mod.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/api/price.rs (92%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/api/server.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/api_docs.rs (65%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/error.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/mod.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/server.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/http/state.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/logging.rs (100%) rename {nym-data-observatory => nyx-chain-watcher}/src/main.rs (84%) rename {nym-data-observatory => nyx-chain-watcher}/src/payment_listener/mod.rs (96%) rename {nym-data-observatory => nyx-chain-watcher}/src/price_scraper/mod.rs (100%) diff --git a/.gitignore b/.gitignore index 817ddac4f2..a07a3567ba 100644 --- a/.gitignore +++ b/.gitignore @@ -54,3 +54,6 @@ nym-network-monitor/__pycache__ nym-network-monitor/*.key nym-network-monitor/.envrc nym-network-monitor/.envrc + + +*.sqlite \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index d0caa221f9..a8f3de6c1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5221,31 +5221,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "nym-data-observatory" -version = "0.1.0" -dependencies = [ - "anyhow", - "axum 0.7.7", - "chrono", - "clap 4.5.20", - "nym-bin-common", - "nym-network-defaults", - "nym-node-requests", - "nym-task", - "serde", - "serde_json", - "sqlx", 
- "tokio", - "tokio-util", - "tower-http", - "tracing", - "tracing-subscriber", - "utoipa", - "utoipa-swagger-ui", - "utoipauto", -] - [[package]] name = "nym-dkg" version = "0.1.0" @@ -6860,6 +6835,35 @@ dependencies = [ "url", ] +[[package]] +name = "nyx-chain-watcher" +version = "0.1.0" +dependencies = [ + "anyhow", + "axum 0.7.7", + "chrono", + "clap 4.5.20", + "nym-bin-common", + "nym-network-defaults", + "nym-node-requests", + "nym-task", + "nyxd-scraper", + "reqwest 0.12.4", + "rocket", + "serde", + "serde_json", + "sqlx", + "time", + "tokio", + "tokio-util", + "tower-http", + "tracing", + "tracing-subscriber", + "utoipa", + "utoipa-swagger-ui", + "utoipauto", +] + [[package]] name = "nyxd-scraper" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index a5b90d7dfd..cd44c15136 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -118,8 +118,8 @@ members = [ "nym-credential-proxy/nym-credential-proxy", "nym-credential-proxy/nym-credential-proxy-requests", "nym-credential-proxy/vpn-api-lib-wasm", - "nym-data-observatory", "nym-network-monitor", + "nyx-chain-watcher", "nym-node", "nym-node/nym-node-requests", "nym-node/nym-node-metrics", @@ -158,11 +158,11 @@ default-members = [ "explorer-api", "nym-api", "nym-credential-proxy/nym-credential-proxy", - "nym-data-observatory", "nym-node", "nym-node-status-api/nym-node-status-agent", "nym-node-status-api/nym-node-status-api", "nym-validator-rewarder", + "nyx-chain-watcher", "service-providers/authenticator", "service-providers/ip-packet-router", "service-providers/network-requester", diff --git a/nym-data-observatory/Cargo.toml b/nyx-chain-watcher/Cargo.toml similarity index 98% rename from nym-data-observatory/Cargo.toml rename to nyx-chain-watcher/Cargo.toml index 0270057477..58564b3d12 100644 --- a/nym-data-observatory/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 [package] -name = "nym-data-observatory" +name = "nyx-chain-watcher" version = "0.1.0" 
authors.workspace = true repository.workspace = true diff --git a/nyx-chain-watcher/README.md b/nyx-chain-watcher/README.md new file mode 100644 index 0000000000..4142c4ecee --- /dev/null +++ b/nyx-chain-watcher/README.md @@ -0,0 +1,15 @@ +# Nyx Chain Watcher + +A simple binary to watch addresses on the Nyx chain and to call webhooks when particular message types are in a block. + +## Running locally + +``` +DATABASE_URL=nyx_chain_watcher.sqlite \ +NYXD_WEBSOCKET_URL=wss://rpc.nymtech.net:443/websocket \ +NYXD_RPC_URL=https://rpc.nymtech.net \ +PAYMENT_RECEIVE_ADDRESS=n1... \ +WEBHOOK_URL=https://webhook.site/... \ +cargo run +``` + diff --git a/nym-data-observatory/README_SQLX.md b/nyx-chain-watcher/README_SQLX.md similarity index 100% rename from nym-data-observatory/README_SQLX.md rename to nyx-chain-watcher/README_SQLX.md diff --git a/nym-data-observatory/build.rs b/nyx-chain-watcher/build.rs similarity index 98% rename from nym-data-observatory/build.rs rename to nyx-chain-watcher/build.rs index faedd8b4c7..eb28ed3bf5 100644 --- a/nym-data-observatory/build.rs +++ b/nyx-chain-watcher/build.rs @@ -5,7 +5,7 @@ use std::{collections::HashMap, fs::File, path::PathBuf, str::FromStr}; #[tokio::main] async fn main() -> Result<()> { - let db_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("data_observatory.sqlite"); + let db_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("nyx_chain_watcher.sqlite"); // Create the database directory if it doesn't exist if let Some(parent) = db_path.parent() { diff --git a/nym-data-observatory/docker-compose.yml b/nyx-chain-watcher/docker-compose.yml similarity index 100% rename from nym-data-observatory/docker-compose.yml rename to nyx-chain-watcher/docker-compose.yml diff --git a/nym-data-observatory/migrations/000_init.sql b/nyx-chain-watcher/migrations/000_init.sql similarity index 100% rename from nym-data-observatory/migrations/000_init.sql rename to nyx-chain-watcher/migrations/000_init.sql diff --git 
a/nym-data-observatory/migrations/001_price_data.sql b/nyx-chain-watcher/migrations/001_price_data.sql similarity index 100% rename from nym-data-observatory/migrations/001_price_data.sql rename to nyx-chain-watcher/migrations/001_price_data.sql diff --git a/nym-data-observatory/migrations/002_payment_transactions.sql b/nyx-chain-watcher/migrations/002_payment_transactions.sql similarity index 100% rename from nym-data-observatory/migrations/002_payment_transactions.sql rename to nyx-chain-watcher/migrations/002_payment_transactions.sql diff --git a/nym-data-observatory/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs similarity index 100% rename from nym-data-observatory/src/chain_scraper/mod.rs rename to nyx-chain-watcher/src/chain_scraper/mod.rs diff --git a/nym-data-observatory/src/db/mod.rs b/nyx-chain-watcher/src/db/mod.rs similarity index 100% rename from nym-data-observatory/src/db/mod.rs rename to nyx-chain-watcher/src/db/mod.rs diff --git a/nym-data-observatory/src/db/models.rs b/nyx-chain-watcher/src/db/models.rs similarity index 100% rename from nym-data-observatory/src/db/models.rs rename to nyx-chain-watcher/src/db/models.rs diff --git a/nym-data-observatory/src/db/queries/mod.rs b/nyx-chain-watcher/src/db/queries/mod.rs similarity index 100% rename from nym-data-observatory/src/db/queries/mod.rs rename to nyx-chain-watcher/src/db/queries/mod.rs diff --git a/nym-data-observatory/src/db/queries/payments.rs b/nyx-chain-watcher/src/db/queries/payments.rs similarity index 100% rename from nym-data-observatory/src/db/queries/payments.rs rename to nyx-chain-watcher/src/db/queries/payments.rs diff --git a/nym-data-observatory/src/db/queries/price.rs b/nyx-chain-watcher/src/db/queries/price.rs similarity index 100% rename from nym-data-observatory/src/db/queries/price.rs rename to nyx-chain-watcher/src/db/queries/price.rs diff --git a/nym-data-observatory/src/http/api/mixnodes.rs b/nyx-chain-watcher/src/http/api/mixnodes.rs similarity 
index 100% rename from nym-data-observatory/src/http/api/mixnodes.rs rename to nyx-chain-watcher/src/http/api/mixnodes.rs diff --git a/nym-data-observatory/src/http/api/mod.rs b/nyx-chain-watcher/src/http/api/mod.rs similarity index 100% rename from nym-data-observatory/src/http/api/mod.rs rename to nyx-chain-watcher/src/http/api/mod.rs diff --git a/nym-data-observatory/src/http/api/price.rs b/nyx-chain-watcher/src/http/api/price.rs similarity index 92% rename from nym-data-observatory/src/http/api/price.rs rename to nyx-chain-watcher/src/http/api/price.rs index 1a239fcf54..354d3cbfa7 100644 --- a/nym-data-observatory/src/http/api/price.rs +++ b/nyx-chain-watcher/src/http/api/price.rs @@ -18,7 +18,7 @@ pub(crate) fn routes() -> Router { ) )] -/// Fetch the latest price cached by the data observatory +/// Fetch the latest price cached by this API async fn price(State(state): State) -> HttpResult> { get_latest_price(state.db_pool()) .await diff --git a/nym-data-observatory/src/http/api/server.rs b/nyx-chain-watcher/src/http/api/server.rs similarity index 100% rename from nym-data-observatory/src/http/api/server.rs rename to nyx-chain-watcher/src/http/api/server.rs diff --git a/nym-data-observatory/src/http/api_docs.rs b/nyx-chain-watcher/src/http/api_docs.rs similarity index 65% rename from nym-data-observatory/src/http/api_docs.rs rename to nyx-chain-watcher/src/http/api_docs.rs index c7dbe0118d..097bd3c4ba 100644 --- a/nym-data-observatory/src/http/api_docs.rs +++ b/nyx-chain-watcher/src/http/api_docs.rs @@ -4,11 +4,7 @@ use utoipauto::utoipauto; // manually import external structs which are behind feature flags because they // can't be automatically discovered // https://github.com/ProbablyClem/utoipauto/issues/13#issuecomment-1974911829 -#[utoipauto(paths = "./nym-data-observatory/src")] +#[utoipauto(paths = "./nyx-chain-watcher/src")] #[derive(OpenApi)] -#[openapi( - info(title = "Nym Data Observatory API"), - tags(), - components(schemas()) -)] 
+#[openapi(info(title = "Nyx Chain Watcher API"), tags(), components(schemas()))] pub(super) struct ApiDoc; diff --git a/nym-data-observatory/src/http/error.rs b/nyx-chain-watcher/src/http/error.rs similarity index 100% rename from nym-data-observatory/src/http/error.rs rename to nyx-chain-watcher/src/http/error.rs diff --git a/nym-data-observatory/src/http/mod.rs b/nyx-chain-watcher/src/http/mod.rs similarity index 100% rename from nym-data-observatory/src/http/mod.rs rename to nyx-chain-watcher/src/http/mod.rs diff --git a/nym-data-observatory/src/http/server.rs b/nyx-chain-watcher/src/http/server.rs similarity index 100% rename from nym-data-observatory/src/http/server.rs rename to nyx-chain-watcher/src/http/server.rs diff --git a/nym-data-observatory/src/http/state.rs b/nyx-chain-watcher/src/http/state.rs similarity index 100% rename from nym-data-observatory/src/http/state.rs rename to nyx-chain-watcher/src/http/state.rs diff --git a/nym-data-observatory/src/logging.rs b/nyx-chain-watcher/src/logging.rs similarity index 100% rename from nym-data-observatory/src/logging.rs rename to nyx-chain-watcher/src/logging.rs diff --git a/nym-data-observatory/src/main.rs b/nyx-chain-watcher/src/main.rs similarity index 84% rename from nym-data-observatory/src/main.rs rename to nyx-chain-watcher/src/main.rs index 4373f8850f..3eb513b889 100644 --- a/nym-data-observatory/src/main.rs +++ b/nyx-chain-watcher/src/main.rs @@ -15,18 +15,18 @@ mod price_scraper; #[command(version, about, long_about = None)] struct Args { /// Port to listen on - #[arg(long, default_value_t = 8000, env = "NYM_DATA_OBSERVATORY_HTTP_PORT")] + #[arg(long, default_value_t = 8000, env = "NYX_CHAIN_WATCHER_HTTP_PORT")] http_port: u16, /// Path to the environment variables file. If you don't provide one, variables for the mainnet will be used. 
- #[arg(short, long, default_value = None, env = "NYM_DATA_OBSERVATORY_ENV_FILE")] + #[arg(short, long, default_value = None, env = "NYX_CHAIN_WATCHER_ENV_FILE")] env_file: Option, /// SQLite database file path #[arg( long, - default_value = "data_observatory.sqlite", - env = "NYM_DATA_OBSERVATORY_DB_PATH" + default_value = "nyx_chain_watcher.sqlite", + env = "DATABASE_URL" )] db_path: String, } @@ -46,13 +46,13 @@ async fn main() -> anyhow::Result<()> { let connection_url = format!("sqlite://{}?mode=rwc", db_path); let storage = db::Storage::init(connection_url).await?; - let observatory_pool = storage.pool_owned().await; + let watcher_pool = storage.pool_owned().await; // Spawn the chain scraper and get its storage // Spawn the payment listener task let payment_listener_handle = tokio::spawn({ - let obs_pool = observatory_pool.clone(); + let obs_pool = watcher_pool.clone(); let chain_storage = run_chain_scraper().await?; async move { @@ -67,7 +67,7 @@ async fn main() -> anyhow::Result<()> { //let background_pool = db_pool.clone(); let price_scraper_handle = tokio::spawn(async move { - price_scraper::run_price_scraper(&observatory_pool).await; + price_scraper::run_price_scraper(&watcher_pool).await; }); let shutdown_handles = http::server::start_http_api(storage.pool_owned().await, args.http_port) diff --git a/nym-data-observatory/src/payment_listener/mod.rs b/nyx-chain-watcher/src/payment_listener/mod.rs similarity index 96% rename from nym-data-observatory/src/payment_listener/mod.rs rename to nyx-chain-watcher/src/payment_listener/mod.rs index 7006b413e6..bddfecbbda 100644 --- a/nym-data-observatory/src/payment_listener/mod.rs +++ b/nyx-chain-watcher/src/payment_listener/mod.rs @@ -14,7 +14,7 @@ struct TransferEvent { } pub(crate) async fn run_payment_listener( - observatory_pool: SqlitePool, + watcher_pool: SqlitePool, chain_storage: ScraperStorage, ) -> anyhow::Result<()> { let payment_receive_address = env::var("PAYMENT_RECEIVE_ADDRESS").map_err(|_| { @@ 
-26,7 +26,7 @@ pub(crate) async fn run_payment_listener( let client = Client::new(); loop { let last_checked_height = - queries::payments::get_last_checked_height(&observatory_pool).await?; + queries::payments::get_last_checked_height(&watcher_pool).await?; tracing::info!("Last checked height: {}", last_checked_height); let transactions = chain_storage @@ -44,7 +44,7 @@ pub(crate) async fn run_payment_listener( let amount: f64 = parse_unym_amount(&transfer.amount)?; queries::payments::insert_payment( - &observatory_pool, + &watcher_pool, tx.hash.clone(), transfer.sender.clone(), transfer.recipient.clone(), diff --git a/nym-data-observatory/src/price_scraper/mod.rs b/nyx-chain-watcher/src/price_scraper/mod.rs similarity index 100% rename from nym-data-observatory/src/price_scraper/mod.rs rename to nyx-chain-watcher/src/price_scraper/mod.rs From 572875058d5ab067d816d2f1d94f2eca3fbab60a Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Fri, 6 Dec 2024 17:31:25 +0000 Subject: [PATCH 32/64] Add config, overrides and CLI --- .gitignore | 3 +- Cargo.lock | 5 + common/config/Cargo.toml | 3 +- common/config/src/error.rs | 10 + common/config/src/lib.rs | 37 +++ common/nyxd-scraper/src/scraper/mod.rs | 12 +- nym-validator-rewarder/src/config/mod.rs | 1 + nym-wallet/Cargo.lock | 1 + nyx-chain-watcher/Cargo.toml | 10 +- nyx-chain-watcher/README.md | 15 +- nyx-chain-watcher/build.rs | 4 +- nyx-chain-watcher/docker-compose.yml | 31 --- nyx-chain-watcher/src/chain_scraper/mod.rs | 16 +- .../src/cli/commands/build_info.rs | 17 ++ nyx-chain-watcher/src/cli/commands/init.rs | 19 ++ nyx-chain-watcher/src/cli/commands/mod.rs | 3 + .../src/cli/commands/run/args.rs | 101 +++++++ .../src/cli/commands/run/config.rs | 84 ++++++ nyx-chain-watcher/src/cli/commands/run/mod.rs | 82 ++++++ nyx-chain-watcher/src/cli/mod.rs | 67 +++++ nyx-chain-watcher/src/config/mod.rs | 249 ++++++++++++++++++ .../src/config/payments_watcher.rs | 23 ++ nyx-chain-watcher/src/config/template.rs | 29 ++ 
nyx-chain-watcher/src/env.rs | 25 ++ nyx-chain-watcher/src/error.rs | 40 +++ nyx-chain-watcher/src/main.rs | 84 +----- nyx-chain-watcher/src/models.rs | 14 + nyx-chain-watcher/src/payment_listener/mod.rs | 199 +++++++++----- 28 files changed, 996 insertions(+), 188 deletions(-) create mode 100644 common/config/src/error.rs delete mode 100644 nyx-chain-watcher/docker-compose.yml create mode 100644 nyx-chain-watcher/src/cli/commands/build_info.rs create mode 100644 nyx-chain-watcher/src/cli/commands/init.rs create mode 100644 nyx-chain-watcher/src/cli/commands/mod.rs create mode 100644 nyx-chain-watcher/src/cli/commands/run/args.rs create mode 100644 nyx-chain-watcher/src/cli/commands/run/config.rs create mode 100644 nyx-chain-watcher/src/cli/commands/run/mod.rs create mode 100644 nyx-chain-watcher/src/cli/mod.rs create mode 100644 nyx-chain-watcher/src/config/mod.rs create mode 100644 nyx-chain-watcher/src/config/payments_watcher.rs create mode 100644 nyx-chain-watcher/src/config/template.rs create mode 100644 nyx-chain-watcher/src/env.rs create mode 100644 nyx-chain-watcher/src/error.rs create mode 100644 nyx-chain-watcher/src/models.rs diff --git a/.gitignore b/.gitignore index a07a3567ba..f931d9b0bb 100644 --- a/.gitignore +++ b/.gitignore @@ -56,4 +56,5 @@ nym-network-monitor/.envrc nym-network-monitor/.envrc -*.sqlite \ No newline at end of file +*.sqlite +.build \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index a8f3de6c1b..2255884f04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4984,6 +4984,7 @@ dependencies = [ "log", "nym-network-defaults", "serde", + "thiserror", "toml 0.8.14", "url", ] @@ -6844,15 +6845,19 @@ dependencies = [ "chrono", "clap 4.5.20", "nym-bin-common", + "nym-config", "nym-network-defaults", "nym-node-requests", "nym-task", + "nym-validator-client", "nyxd-scraper", "reqwest 0.12.4", "rocket", + "schemars", "serde", "serde_json", "sqlx", + "thiserror", "time", "tokio", "tokio-util", diff --git a/common/config/Cargo.toml 
b/common/config/Cargo.toml index 0723f2c381..d978fe4aee 100644 --- a/common/config/Cargo.toml +++ b/common/config/Cargo.toml @@ -12,7 +12,8 @@ dirs = { workspace = true, optional = true } handlebars = { workspace = true } log = { workspace = true } serde = { workspace = true, features = ["derive"] } -toml = { workspace = true } +thiserror = { workspace = true } +toml = { workspace = true, features = ["display"] } url = { workspace = true } nym-network-defaults = { path = "../network-defaults", features = ["utoipa"] } diff --git a/common/config/src/error.rs b/common/config/src/error.rs new file mode 100644 index 0000000000..9bbd3444f8 --- /dev/null +++ b/common/config/src/error.rs @@ -0,0 +1,10 @@ +use std::io; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum NymConfigTomlError { + #[error(transparent)] + FileIoFailure(#[from] io::Error), + #[error(transparent)] + TomlSerializeFailure(#[from] toml::ser::Error), +} diff --git a/common/config/src/lib.rs b/common/config/src/lib.rs index 932d7515a6..42dcea8610 100644 --- a/common/config/src/lib.rs +++ b/common/config/src/lib.rs @@ -13,6 +13,7 @@ pub use helpers::{parse_urls, OptionalSet}; pub use toml::de::Error as TomlDeError; pub mod defaults; +pub mod error; pub mod helpers; pub mod legacy_helpers; pub mod serde_helpers; @@ -95,6 +96,42 @@ where config.format_to_writer(file) } +pub fn save_unformatted_config_to_file( + config: &C, + path: P, +) -> Result<(), error::NymConfigTomlError> +where + C: Serialize + ?Sized, + P: AsRef, +{ + let path = path.as_ref(); + log::info!("saving config file to {}", path.display()); + + if let Some(parent) = path.parent() { + create_dir_all(parent)?; + } + + let mut file = File::create(path)?; + + // TODO: check for whether any of our configs store anything sensitive + // and change that to 0o644 instead + #[cfg(target_family = "unix")] + { + use std::os::unix::fs::PermissionsExt; + + let mut perms = fs::metadata(path)?.permissions(); + perms.set_mode(0o600); + 
fs::set_permissions(path, perms)?; + } + + // let serde format the TOML in whatever ugly way it chooses + // TODO: in https://docs.rs/toml/latest/toml/fn.to_string_pretty.html it recommends using + // https://docs.rs/toml_edit/latest/toml_edit/struct.DocumentMut.html to preserve formatting + let toml_string = toml::to_string_pretty(config)?; + + Ok(file.write_all(toml_string.as_bytes())?) +} + pub fn deserialize_config_from_toml_str(raw: &str) -> Result where C: DeserializeOwned, diff --git a/common/nyxd-scraper/src/scraper/mod.rs b/common/nyxd-scraper/src/scraper/mod.rs index 1b616f3247..e4d133341e 100644 --- a/common/nyxd-scraper/src/scraper/mod.rs +++ b/common/nyxd-scraper/src/scraper/mod.rs @@ -36,6 +36,8 @@ pub struct Config { pub pruning_options: PruningOptions, pub store_precommits: bool, + + pub start_block_height: Option, } pub struct NyxdScraperBuilder { @@ -47,10 +49,9 @@ pub struct NyxdScraperBuilder { } impl NyxdScraperBuilder { - pub async fn build_and_start( - self, - start_block: Option, - ) -> Result { + pub async fn build_and_start(self) -> Result { + let start_block_height = self.config.start_block_height.clone(); + let scraper = NyxdScraper::new(self.config).await?; let (processing_tx, processing_rx) = unbounded_channel(); @@ -93,7 +94,8 @@ impl NyxdScraperBuilder { ) .await?; - if let Some(height) = start_block { + // TODO: decide if this should be removed? 
+ if let Some(height) = start_block_height { scraper.process_block_range(Some(height), None).await?; } diff --git a/nym-validator-rewarder/src/config/mod.rs b/nym-validator-rewarder/src/config/mod.rs index f0369adde7..31c44b07e7 100644 --- a/nym-validator-rewarder/src/config/mod.rs +++ b/nym-validator-rewarder/src/config/mod.rs @@ -129,6 +129,7 @@ impl Config { database_path: self.storage_paths.nyxd_scraper.clone(), pruning_options: self.nyxd_scraper.pruning, store_precommits: self.nyxd_scraper.store_precommits, + start_block_height: None, } } diff --git a/nym-wallet/Cargo.lock b/nym-wallet/Cargo.lock index 6d1ec90fd4..39581d9cc1 100644 --- a/nym-wallet/Cargo.lock +++ b/nym-wallet/Cargo.lock @@ -3162,6 +3162,7 @@ dependencies = [ "log", "nym-network-defaults", "serde", + "thiserror", "toml 0.8.19", "url", ] diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 58564b3d12..1f9e87d4a2 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -1,5 +1,5 @@ # Copyright 2024 - Nym Technologies SA -# SPDX-License-Identifier: Apache-2.0 +# SPDX-License-Identifier: GPL-3.0-only [package] name = "nyx-chain-watcher" @@ -17,19 +17,23 @@ readme.workspace = true anyhow = { workspace = true } axum = { workspace = true, features = ["tokio"] } chrono = { workspace = true } -clap = { workspace = true, features = ["derive", "env"] } -nym-bin-common = { path = "../common/bin-common" } +clap = { workspace = true, features = ["cargo", "derive", "env"] } +nym-config = { path = "../common/config" } +nym-bin-common = { path = "../common/bin-common", features = ["output_format"] } nym-network-defaults = { path = "../common/network-defaults" } nym-task = { path = "../common/task" } nym-node-requests = { path = "../nym-node/nym-node-requests", features = [ "openapi", ] } +nym-validator-client = { path = "../common/client-libs/validator-client" } nyxd-scraper = {path = "../common/nyxd-scraper"} reqwest = {workspace= true, features = 
["rustls-tls"]} rocket = { workspace = true } +schemars = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sqlx = { workspace = true, features = ["runtime-tokio-rustls", "sqlite", "time"] } +thiserror = { workspace = true } time = {version = "0.3.36"} tokio = { workspace = true, features = ["process", "rt-multi-thread"] } tokio-util = { workspace = true } diff --git a/nyx-chain-watcher/README.md b/nyx-chain-watcher/README.md index 4142c4ecee..7c62d08296 100644 --- a/nyx-chain-watcher/README.md +++ b/nyx-chain-watcher/README.md @@ -2,14 +2,17 @@ A simple binary to watch addresses on the Nyx chain and to call webhooks when particular message types are in a block. +Look in [env.rs](./src/env.rs) for the names of environment variables that can be overridden. + ## Running locally ``` -DATABASE_URL=nyx_chain_watcher.sqlite \ -NYXD_WEBSOCKET_URL=wss://rpc.nymtech.net:443/websocket \ -NYXD_RPC_URL=https://rpc.nymtech.net \ -PAYMENT_RECEIVE_ADDRESS=n1... \ -WEBHOOK_URL=https://webhook.site/... \ -cargo run +NYX_CHAIN_WATCHER_HISTORY_DATABASE_PATH=chain_history.sqlite \ +NYX_CHAIN_WATCHER_DATABASE_PATH=nyx_chain_watcher.sqlite \ +NYX_CHAIN_WATCHER_WATCH_ACCOUNTS=n1...,n1...,n1... 
\ +NYX_CHAIN_WATCHER_WEBHOOK_URL="https://webhook.site" \ +NYX_CHAIN_WATCHER_WEBHOOK_AUTH=1234 \ +cargo run -- run ``` + diff --git a/nyx-chain-watcher/build.rs b/nyx-chain-watcher/build.rs index eb28ed3bf5..c1af059445 100644 --- a/nyx-chain-watcher/build.rs +++ b/nyx-chain-watcher/build.rs @@ -5,7 +5,9 @@ use std::{collections::HashMap, fs::File, path::PathBuf, str::FromStr}; #[tokio::main] async fn main() -> Result<()> { - let db_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("nyx_chain_watcher.sqlite"); + let db_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join(".build") + .join("nyx_chain_watcher.sqlite"); // Create the database directory if it doesn't exist if let Some(parent) = db_path.parent() { diff --git a/nyx-chain-watcher/docker-compose.yml b/nyx-chain-watcher/docker-compose.yml deleted file mode 100644 index b06a988e97..0000000000 --- a/nyx-chain-watcher/docker-compose.yml +++ /dev/null @@ -1,31 +0,0 @@ -services: - postgres: - image: postgres:13 - container_name: nym-data-observatory-pg - environment: - POSTGRES_PASSWORD: password - ports: - - "5432:5432" - volumes: - - pgdata:/var/lib/postgresql/data - - data-observatory: - depends_on: - - postgres - image: nym-data-observatory:latest - build: - context: ../ - dockerfile: nym-data-observatory/Dockerfile - container_name: nym-data-observatory - environment: - NYM_DATA_OBSERVATORY_CONNECTION_USERNAME: "postgres" - NYM_DATA_OBSERVATORY_CONNECTION_PASSWORD: "password" - NYM_DATA_OBSERVATORY_CONNECTION_HOST: "postgres" - NYM_DATA_OBSERVATORY_CONNECTION_PORT: "5432" - NYM_DATA_OBSERVATORY_CONNECTION_DB: "" - NYM_DATA_OBSERVATORY_HTTP_PORT: 8000 - env_file: - - ../envs/qa.env - -volumes: - pgdata: diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index 650b3f0c34..36634a5964 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -1,10 +1,9 @@ -use nyxd_scraper::{storage::ScraperStorage, 
Config, NyxdScraper, PruningOptions}; +use nyxd_scraper::{storage::ScraperStorage, NyxdScraper, PruningOptions}; -pub(crate) async fn run_chain_scraper() -> anyhow::Result { - let websocket_url = - std::env::var("NYXD_WEBSOCKET_URL").expect("NYXD_WEBSOCKET_URL not defined"); +pub(crate) async fn run_chain_scraper(config: &crate::config::Config) -> anyhow::Result { + let websocket_url = std::env::var("NYXD_WS").expect("NYXD_WS not defined"); - let rpc_url = std::env::var("NYXD_RPC_URL").expect("NYXD_RPC_URL not defined"); + let rpc_url = std::env::var("NYXD").expect("NYXD not defined"); let websocket_url = reqwest::Url::parse(&websocket_url)?; let rpc_url = reqwest::Url::parse(&rpc_url)?; @@ -12,15 +11,16 @@ pub(crate) async fn run_chain_scraper() -> anyhow::Result { .ok() .and_then(|value| value.parse::().ok()); - let scraper = NyxdScraper::builder(Config { + let scraper = NyxdScraper::builder(nyxd_scraper::Config { websocket_url, rpc_url, - database_path: "chain_history.sqlite".into(), + database_path: config.chain_scraper_database_path().into(), pruning_options: PruningOptions::nothing(), store_precommits: false, + start_block_height, }); - let instance = scraper.build_and_start(start_block_height).await?; + let instance = scraper.build_and_start().await?; Ok(instance.storage) } diff --git a/nyx-chain-watcher/src/cli/commands/build_info.rs b/nyx-chain-watcher/src/cli/commands/build_info.rs new file mode 100644 index 0000000000..38e144743c --- /dev/null +++ b/nyx-chain-watcher/src/cli/commands/build_info.rs @@ -0,0 +1,17 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::error::NyxChainWatcherError; +use nym_bin_common::bin_info_owned; +use nym_bin_common::output_format::OutputFormat; + +#[derive(clap::Args, Debug)] +pub(crate) struct Args { + #[clap(short, long, default_value_t = OutputFormat::default())] + output: OutputFormat, +} + +pub(crate) fn execute(args: Args) -> Result<(), NyxChainWatcherError> { + 
println!("{}", args.output.format(&bin_info_owned!())); + Ok(()) +} diff --git a/nyx-chain-watcher/src/cli/commands/init.rs b/nyx-chain-watcher/src/cli/commands/init.rs new file mode 100644 index 0000000000..163c6e9f1a --- /dev/null +++ b/nyx-chain-watcher/src/cli/commands/init.rs @@ -0,0 +1,19 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::config::{default_config_filepath, Config, ConfigBuilder}; +use crate::error::NyxChainWatcherError; +use nym_config::save_unformatted_config_to_file; + +#[derive(clap::Args, Debug)] +pub(crate) struct Args {} + +pub(crate) async fn execute(_args: Args) -> Result<(), NyxChainWatcherError> { + let config_path = default_config_filepath(); + let data_dir = Config::default_data_directory(&config_path)?; + + let builder = ConfigBuilder::new(config_path.clone(), data_dir); + let config = builder.build(); + + Ok(save_unformatted_config_to_file(&config, &config_path)?) +} diff --git a/nyx-chain-watcher/src/cli/commands/mod.rs b/nyx-chain-watcher/src/cli/commands/mod.rs new file mode 100644 index 0000000000..b1f63f4ae5 --- /dev/null +++ b/nyx-chain-watcher/src/cli/commands/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod build_info; +pub(crate) mod init; +pub(crate) mod run; diff --git a/nyx-chain-watcher/src/cli/commands/run/args.rs b/nyx-chain-watcher/src/cli/commands/run/args.rs new file mode 100644 index 0000000000..cc20ae2bac --- /dev/null +++ b/nyx-chain-watcher/src/cli/commands/run/args.rs @@ -0,0 +1,101 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::env::vars::*; +use nym_validator_client::nyxd::AccountId; + +#[derive(clap::Args, Debug)] +pub(crate) struct Args { + /// (Override) SQLite database file path for chain watcher + #[arg(long, env = NYX_CHAIN_WATCHER_DATABASE_PATH)] + pub(crate) chain_watcher_db_path: Option, + + /// (Override) SQLite database file path for chain scraper history + #[arg(long, env = 
NYX_CHAIN_WATCHER_HISTORY_DATABASE_PATH)] + pub(crate) chain_history_db_path: Option, + + /// (Override) Watch for transfers to these recipient accounts + #[clap( + long, + value_delimiter = ',', + env = NYX_CHAIN_WATCHER_WATCH_ACCOUNTS + )] + pub watch_for_transfer_recipient_accounts: Option>, + + /// (Override) Watch for chain messages of these types + #[clap( + long, + value_delimiter = ',', + env = NYX_CHAIN_WATCHER_WATCH_CHAIN_MESSAGE_TYPES + )] + pub watch_for_chain_message_types: Option>, + + /// (Override) The webhook to call when we find something + #[clap( + long, + env = NYX_CHAIN_WATCHER_WEBHOOK_URL + )] + pub webhook_url: Option, + + /// (Override) Optionally, authenticate with the webhook + #[clap( + long, + env = NYX_CHAIN_WATCHER_WEBHOOK_AUTH + )] + pub webhook_auth: Option, +} + +/*impl Args { + pub(super) fn take_mnemonic(&mut self) -> Option> { + self.entry_gateway.mnemonic.take().map(Zeroizing::new) + } +} + +impl Args { + pub(crate) fn build_config(self) -> Result { + let config_path = self.config.config_path(); + let data_dir = Config::default_data_directory(&config_path)?; + + let id = self + .config + .id() + .clone() + .ok_or(NymNodeError::MissingInitArg { + section: "global".to_string(), + name: "id".to_string(), + })?; + + let config = ConfigBuilder::new(id, config_path.clone(), data_dir.clone()) + .with_mode(self.mode.unwrap_or_default()) + .with_host(self.host.build_config_section()) + .with_http(self.http.build_config_section()) + .with_mixnet(self.mixnet.build_config_section()) + .with_wireguard(self.wireguard.build_config_section(&data_dir)) + .with_storage_paths(NymNodePaths::new(&data_dir)) + .with_mixnode(self.mixnode.build_config_section()) + .with_entry_gateway(self.entry_gateway.build_config_section(&data_dir)) + .with_exit_gateway(self.exit_gateway.build_config_section(&data_dir)) + .build(); + + Ok(config) + } + + pub(crate) fn override_config(self, mut config: Config) -> Config { + if let Some(mode) = self.mode { + 
config.mode = mode; + } + config.host = self.host.override_config_section(config.host); + config.http = self.http.override_config_section(config.http); + config.mixnet = self.mixnet.override_config_section(config.mixnet); + config.wireguard = self.wireguard.override_config_section(config.wireguard); + config.mixnode = self.mixnode.override_config_section(config.mixnode); + config.entry_gateway = self + .entry_gateway + .override_config_section(config.entry_gateway); + config.exit_gateway = self + .exit_gateway + .override_config_section(config.exit_gateway); + config + } +} +*/ diff --git a/nyx-chain-watcher/src/cli/commands/run/config.rs b/nyx-chain-watcher/src/cli/commands/run/config.rs new file mode 100644 index 0000000000..02e5a2fadc --- /dev/null +++ b/nyx-chain-watcher/src/cli/commands/run/config.rs @@ -0,0 +1,84 @@ +use crate::cli::commands::run::args::Args; +use crate::cli::DEFAULT_NYX_CHAIN_WATCHER_ID; +use crate::config::payments_watcher::{HttpAuthenticationOptions, PaymentWatcherEntry}; +use crate::config::{default_config_filepath, Config, ConfigBuilder, PaymentWatcherConfig}; +use crate::error::NyxChainWatcherError; +use tracing::{info, warn}; + +pub(crate) fn get_run_config(args: Args) -> Result { + info!("{args:#?}"); + + let Args { + ref watch_for_transfer_recipient_accounts, + mut watch_for_chain_message_types, + webhook_auth, + ref chain_watcher_db_path, + ref chain_history_db_path, + webhook_url, + } = args; + + // if there are no args set, then try load the config + if args.watch_for_transfer_recipient_accounts.is_none() + && args.watch_for_transfer_recipient_accounts.is_none() + && args.chain_watcher_db_path.is_none() + { + info!("Loading default config file..."); + return Config::read_from_toml_file_in_default_location(); + } + + // set default messages + if watch_for_chain_message_types.is_none() { + watch_for_chain_message_types = Some(vec!["/cosmos.bank.v1beta1.MsgSend".to_string()]); + } + + // warn if no accounts set + if 
watch_for_transfer_recipient_accounts.is_none() { + warn!( + "You did not specify any accounts to watch in {}. Only chain data will be stored.", + crate::env::vars::NYX_CHAIN_WATCHER_WATCH_ACCOUNTS + ); + } + + let config_path = default_config_filepath(); + let data_dir = Config::default_data_directory(&config_path)?; + + let mut builder = ConfigBuilder::new(config_path, data_dir); + + if let Some(db_path) = chain_watcher_db_path { + info!("Overriding database url with '{db_path}'"); + builder = builder.with_db_path(db_path.clone()); + } + + if let Some(db_path) = chain_history_db_path { + info!("Overriding chain history database url with '{db_path}'"); + builder = builder.with_chain_scraper_db_path(db_path.clone()); + } + + if let Some(webhook_url) = webhook_url { + let authentication = + webhook_auth.map(|token| HttpAuthenticationOptions::AuthorizationBearerToken { token }); + + let watcher_config = PaymentWatcherConfig { + watchers: vec![PaymentWatcherEntry { + id: DEFAULT_NYX_CHAIN_WATCHER_ID.to_string(), + description: None, + watch_for_transfer_recipient_accounts: watch_for_transfer_recipient_accounts + .clone(), + watch_for_chain_message_types, + webhook_url, + authentication, + }], + }; + + info!("Overriding watcher config with env vars"); + + builder = builder.with_payment_watcher_config(watcher_config); + } else { + warn!( + "You did not specify a webhook in {}. 
Only database items will be stored.", + crate::env::vars::NYX_CHAIN_WATCHER_WEBHOOK_URL + ); + } + + Ok(builder.build()) +} diff --git a/nyx-chain-watcher/src/cli/commands/run/mod.rs b/nyx-chain-watcher/src/cli/commands/run/mod.rs new file mode 100644 index 0000000000..84d3d8e1e5 --- /dev/null +++ b/nyx-chain-watcher/src/cli/commands/run/mod.rs @@ -0,0 +1,82 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::error::NyxChainWatcherError; +use tokio::join; +use tracing::{error, info, trace}; + +mod args; +mod config; + +use crate::chain_scraper::run_chain_scraper; +use crate::{db, http, payment_listener, price_scraper}; +pub(crate) use args::Args; +use nym_task::signal::wait_for_signal; + +pub(crate) async fn execute(args: Args, http_port: u16) -> Result<(), NyxChainWatcherError> { + trace!("passed arguments: {args:#?}"); + + let config = config::get_run_config(args)?; + + let db_path = config.database_path(); + + info!("Config is {config:#?}"); + info!("Database path is {:?}", std::path::Path::new(&db_path).canonicalize().unwrap_or_default()); + info!("Chain History Database path is {:?}", std::path::Path::new(&config.chain_scraper_database_path()).canonicalize().unwrap_or_default()); + + // Ensure parent directory exists + if let Some(parent) = std::path::Path::new(&db_path).parent() { + std::fs::create_dir_all(parent)?; + } + + let connection_url = format!("sqlite://{}?mode=rwc", db_path); + let storage = db::Storage::init(connection_url).await?; + let watcher_pool = storage.pool_owned().await; + + // Spawn the chain scraper and get its storage + + // Spawn the payment listener task + let payment_listener_handle = tokio::spawn({ + let obs_pool = watcher_pool.clone(); + let chain_storage = run_chain_scraper(&config).await?; + let payment_watcher_config = config.payment_watcher_config.unwrap_or_default(); + + async move { + if let Err(e) = payment_listener::run_payment_listener( + payment_watcher_config, + obs_pool, + 
chain_storage, + ) + .await + { + error!("Payment listener error: {}", e); + } + Ok::<_, anyhow::Error>(()) + } + }); + + // Clone pool for each task that needs it + //let background_pool = db_pool.clone(); + + let price_scraper_handle = tokio::spawn(async move { + price_scraper::run_price_scraper(&watcher_pool).await; + }); + + let shutdown_handles = http::server::start_http_api(storage.pool_owned().await, http_port) + .await + .expect("Failed to start server"); + + info!("Started HTTP server on port {}", http_port); + + // Wait for the short-lived tasks to complete + let _ = join!(price_scraper_handle, payment_listener_handle); + + // Wait for a signal to terminate the long-running task + wait_for_signal().await; + + if let Err(err) = shutdown_handles.shutdown().await { + error!("{err}"); + }; + + Ok(()) +} diff --git a/nyx-chain-watcher/src/cli/mod.rs b/nyx-chain-watcher/src/cli/mod.rs new file mode 100644 index 0000000000..d1f03551de --- /dev/null +++ b/nyx-chain-watcher/src/cli/mod.rs @@ -0,0 +1,67 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::cli::commands::{build_info, init, run}; +use crate::env::vars::*; +use crate::error::NyxChainWatcherError; +use clap::{Parser, Subcommand}; +use nym_bin_common::bin_info; +use std::sync::OnceLock; + +mod commands; + +pub const DEFAULT_NYX_CHAIN_WATCHER_ID: &str = "default-nyx-chain-watcher"; + +// Helper for passing LONG_VERSION to clap +fn pretty_build_info_static() -> &'static str { + static PRETTY_BUILD_INFORMATION: OnceLock = OnceLock::new(); + PRETTY_BUILD_INFORMATION.get_or_init(|| bin_info!().pretty_print()) +} + +#[derive(Parser, Debug)] +#[clap(author = "Nymtech", version, long_version = pretty_build_info_static(), about)] +pub(crate) struct Cli { + /// Path pointing to an env file that configures the nym-chain-watcher and overrides any preconfigured values. 
+ #[clap( + short, + long, + env = NYX_CHAIN_WATCHER_CONFIG_ENV_FILE_ARG + )] + pub(crate) config_env_file: Option, + + /// Flag used for disabling the printed banner in tty. + #[clap( + long, + env = NYX_CHAIN_WATCHER_NO_BANNER_ARG + )] + pub(crate) no_banner: bool, + + /// Port to listen on + #[arg(long, default_value_t = 8000, env = "NYX_CHAIN_WATCHER_HTTP_PORT")] + pub(crate) http_port: u16, + + #[clap(subcommand)] + command: Commands, +} + +impl Cli { + pub(crate) async fn execute(self) -> Result<(), NyxChainWatcherError> { + match self.command { + Commands::BuildInfo(args) => build_info::execute(args), + Commands::Run(args) => run::execute(*args, self.http_port).await, + Commands::Init(args) => init::execute(args).await, + } + } +} + +#[derive(Subcommand, Debug)] +pub(crate) enum Commands { + /// Show build information of this binary + BuildInfo(build_info::Args), + + /// Start this nym-chain-watcher + Run(Box), + + /// Initialise config + Init(init::Args), +} diff --git a/nyx-chain-watcher/src/config/mod.rs b/nyx-chain-watcher/src/config/mod.rs new file mode 100644 index 0000000000..73419a2e9e --- /dev/null +++ b/nyx-chain-watcher/src/config/mod.rs @@ -0,0 +1,249 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::config::template::CONFIG_TEMPLATE; +use nym_bin_common::logging::LoggingSettings; +use nym_config::{ + must_get_home, read_config_from_toml_file, save_unformatted_config_to_file, NymConfigTemplate, + DEFAULT_CONFIG_DIR, DEFAULT_CONFIG_FILENAME, DEFAULT_DATA_DIR, NYM_DIR, +}; +use serde::{Deserialize, Serialize}; +use std::path::{Path, PathBuf}; +use tracing::{debug, error}; + +pub(crate) mod payments_watcher; +mod template; + +pub use crate::config::payments_watcher::PaymentWatcherConfig; +use crate::error::NyxChainWatcherError; + +const DEFAULT_NYM_CHAIN_WATCHER_DIR: &str = "nym-chain-watcher"; + +pub(crate) const DEFAULT_NYM_CHAIN_WATCHER_DB_FILENAME: &str = "nyx_chain_watcher.sqlite"; +pub(crate) 
const DEFAULT_NYM_CHAIN_SCRAPER_HISTORY_DB_FILENAME: &str = "chain_history.sqlite"; + +/// Derive default path to nym-chain-watcher's config directory. +/// It should get resolved to `$HOME/.nym/nym-chain-watcher/config` +pub fn default_config_directory() -> PathBuf { + must_get_home() + .join(NYM_DIR) + .join(DEFAULT_NYM_CHAIN_WATCHER_DIR) + .join(DEFAULT_CONFIG_DIR) +} + +/// Derive default path to nym-chain-watcher's config file. +/// It should get resolved to `$HOME/.nym/nym-chain-watcher/config/config.toml` +pub fn default_config_filepath() -> PathBuf { + default_config_directory().join(DEFAULT_CONFIG_FILENAME) +} + +pub struct ConfigBuilder { + pub config_path: PathBuf, + + pub data_dir: PathBuf, + + pub db_path: Option, + + pub chain_scraper_db_path: Option, + + pub payment_watcher_config: Option, + + pub logging: Option, +} + +impl ConfigBuilder { + pub fn new(config_path: PathBuf, data_dir: PathBuf) -> Self { + ConfigBuilder { + config_path, + data_dir, + payment_watcher_config: None, + logging: None, + db_path: None, + chain_scraper_db_path: None, + } + } + + pub fn with_db_path(mut self, db_path: String) -> Self { + self.db_path = Some(db_path); + self + } + + pub fn with_chain_scraper_db_path(mut self, chain_scraper_db_path: String) -> Self { + self.chain_scraper_db_path = Some(chain_scraper_db_path); + self + } + + #[allow(dead_code)] + pub fn with_payment_watcher_config( + mut self, + payment_watcher_config: impl Into, + ) -> Self { + self.payment_watcher_config = Some(payment_watcher_config.into()); + self + } + + #[allow(dead_code)] + pub fn with_logging(mut self, section: impl Into>) -> Self { + self.logging = section.into(); + self + } + + pub fn build(self) -> Config { + Config { + logging: self.logging.unwrap_or_default(), + save_path: Some(self.config_path), + payment_watcher_config: self.payment_watcher_config, + data_dir: self.data_dir, + db_path: self.db_path, + chain_scraper_db_path: self.chain_scraper_db_path, + } + } +} + +#[derive(Debug, 
Clone, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct Config { + // additional metadata holding on-disk location of this config file + #[serde(skip)] + pub(crate) save_path: Option, + + #[serde(skip)] + pub(crate) data_dir: PathBuf, + + #[serde(skip)] + db_path: Option, + + #[serde(skip)] + chain_scraper_db_path: Option, + + pub payment_watcher_config: Option, + + #[serde(default)] + pub logging: LoggingSettings, +} + +impl NymConfigTemplate for Config { + fn template(&self) -> &'static str { + CONFIG_TEMPLATE + } +} + +impl Config { + #[allow(unused)] + pub fn save(&self) -> Result<(), NyxChainWatcherError> { + let save_location = self.save_location(); + debug!( + "attempting to save config file to '{}'", + save_location.display() + ); + save_unformatted_config_to_file(self, &save_location).map_err(|source| { + NyxChainWatcherError::UnformattedConfigSaveFailure { + path: save_location, + source, + } + }) + } + + #[allow(unused)] + pub fn save_location(&self) -> PathBuf { + self.save_path + .clone() + .unwrap_or(self.default_save_location()) + } + + #[allow(unused)] + pub fn default_save_location(&self) -> PathBuf { + default_config_filepath() + } + + pub fn default_data_directory>( + config_path: P, + ) -> Result { + let config_path = config_path.as_ref(); + + // we got a proper path to the .toml file + let Some(config_dir) = config_path.parent() else { + error!( + "'{}' does not have a parent directory. Have you pointed to the fs root?", + config_path.display() + ); + return Err(NyxChainWatcherError::DataDirDerivationFailure); + }; + + let Some(config_dir_name) = config_dir.file_name() else { + error!( + "could not obtain parent directory name of '{}'. Have you used relative paths?", + config_path.display() + ); + return Err(NyxChainWatcherError::DataDirDerivationFailure); + }; + + if config_dir_name != DEFAULT_CONFIG_DIR { + error!( + "the parent directory of '{}' ({}) is not {DEFAULT_CONFIG_DIR}. 
currently this is not supported", + config_path.display(), config_dir_name.to_str().unwrap_or("UNKNOWN") + ); + return Err(NyxChainWatcherError::DataDirDerivationFailure); + } + + let Some(node_dir) = config_dir.parent() else { + error!( + "'{}' does not have a parent directory. Have you pointed to the fs root?", + config_dir.display() + ); + return Err(NyxChainWatcherError::DataDirDerivationFailure); + }; + + Ok(node_dir.join(DEFAULT_DATA_DIR)) + } + + pub fn database_path(&self) -> String { + self.db_path.clone().unwrap_or_else(|| { + let mut path = self.data_dir.clone().to_path_buf(); + path.push(DEFAULT_NYM_CHAIN_WATCHER_DB_FILENAME); + path.to_str() + .unwrap_or(DEFAULT_NYM_CHAIN_WATCHER_DB_FILENAME) + .to_string() + }) + } + + pub fn chain_scraper_database_path(&self) -> String { + self.chain_scraper_db_path.clone().unwrap_or_else(|| { + let mut path = self.data_dir.clone().to_path_buf(); + path.push(DEFAULT_NYM_CHAIN_SCRAPER_HISTORY_DB_FILENAME); + path.to_str() + .unwrap_or(DEFAULT_NYM_CHAIN_SCRAPER_HISTORY_DB_FILENAME) + .to_string() + }) + } + + // simple wrapper that reads config file and assigns path location + fn read_from_path>(path: P, data_dir: P) -> Result { + let path = path.as_ref(); + let data_dir = data_dir.as_ref(); + let mut loaded: Config = read_config_from_toml_file(path).map_err(|source| { + NyxChainWatcherError::ConfigLoadFailure { + path: path.to_path_buf(), + source, + } + })?; + loaded.data_dir = data_dir.to_path_buf(); + loaded.save_path = Some(path.to_path_buf()); + debug!("loaded config file from {}", path.display()); + Ok(loaded) + } + + #[allow(unused)] + pub fn read_from_toml_file>( + path: P, + data_dir: P, + ) -> Result { + Self::read_from_path(path, data_dir) + } + + pub fn read_from_toml_file_in_default_location() -> Result { + let config_path = default_config_filepath(); + let data_dir = Config::default_data_directory(&config_path)?; + Self::read_from_path(config_path, data_dir) + } +} diff --git 
a/nyx-chain-watcher/src/config/payments_watcher.rs b/nyx-chain-watcher/src/config/payments_watcher.rs new file mode 100644 index 0000000000..a335830a85 --- /dev/null +++ b/nyx-chain-watcher/src/config/payments_watcher.rs @@ -0,0 +1,23 @@ +use nym_validator_client::nyxd::AccountId; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct PaymentWatcherConfig { + pub watchers: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PaymentWatcherEntry { + pub id: String, + pub description: Option, + pub webhook_url: String, + pub watch_for_transfer_recipient_accounts: Option>, + pub watch_for_chain_message_types: Option>, + pub authentication: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum HttpAuthenticationOptions { + AuthorizationBearerToken { token: String }, +} diff --git a/nyx-chain-watcher/src/config/template.rs b/nyx-chain-watcher/src/config/template.rs new file mode 100644 index 0000000000..f1dde4955c --- /dev/null +++ b/nyx-chain-watcher/src/config/template.rs @@ -0,0 +1,29 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +// While using normal toml marshalling would have been way simpler with less overhead, +// I think it's useful to have comments attached to the saved config file to explain behaviour of +// particular fields. +// Note: any changes to the template must be reflected in the appropriate structs. +pub(crate) const CONFIG_TEMPLATE: &str = r#" +# This is a TOML config file. 
+# For more information, see https://github.com/toml-lang/toml + +[payment_watcher_config] +{{#each payment_watcher_config.watchers }} +[[watchers]] +id={{this.id}} +description='{{this.description}}' +webhook_url='{{this.webhook_url}}' +{{/each}} + + + + +##### logging configuration options ##### + +[logging] + +# TODO + +"#; diff --git a/nyx-chain-watcher/src/env.rs b/nyx-chain-watcher/src/env.rs new file mode 100644 index 0000000000..5b66a63c92 --- /dev/null +++ b/nyx-chain-watcher/src/env.rs @@ -0,0 +1,25 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +#[allow(unused)] +pub mod vars { + pub const NYX_CHAIN_WATCHER_NO_BANNER_ARG: &str = "NYX_CHAIN_WATCHER_NO_BANNER"; + pub const NYX_CHAIN_WATCHER_CONFIG_ENV_FILE_ARG: &str = "NYX_CHAIN_WATCHER_CONFIG_ENV_FILE_ARG"; + + pub const NYX_CHAIN_WATCHER_DATABASE_PATH: &str = "NYX_CHAIN_WATCHER_DATABASE_PATH"; + pub const NYX_CHAIN_WATCHER_HISTORY_DATABASE_PATH: &str = + "NYX_CHAIN_WATCHER_HISTORY_DATABASE_PATH"; + + pub const NYXD_SCRAPER_START_HEIGHT: &str = "NYXD_SCRAPER_START_HEIGHT"; + + pub const NYX_CHAIN_WATCHER_ID_ARG: &str = "NYX_CHAIN_WATCHER_ID"; + pub const NYX_CHAIN_WATCHER_OUTPUT_ARG: &str = "NYX_CHAIN_WATCHER_OUTPUT"; + + pub const NYX_CHAIN_WATCHER_CONFIG_PATH_ARG: &str = "NYX_CHAIN_WATCHER_CONFIG"; + + pub const NYX_CHAIN_WATCHER_WATCH_ACCOUNTS: &str = "NYX_CHAIN_WATCHER_WATCH_ACCOUNTS"; + pub const NYX_CHAIN_WATCHER_WATCH_CHAIN_MESSAGE_TYPES: &str = + "NYX_CHAIN_WATCHER_WATCH_CHAIN_MESSAGE_TYPES"; + pub const NYX_CHAIN_WATCHER_WEBHOOK_URL: &str = "NYX_CHAIN_WATCHER_WEBHOOK_URL"; + pub const NYX_CHAIN_WATCHER_WEBHOOK_AUTH: &str = "NYX_CHAIN_WATCHER_WEBHOOK_AUTH"; +} diff --git a/nyx-chain-watcher/src/error.rs b/nyx-chain-watcher/src/error.rs new file mode 100644 index 0000000000..70230f50dc --- /dev/null +++ b/nyx-chain-watcher/src/error.rs @@ -0,0 +1,40 @@ +use std::io; +use std::path::PathBuf; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum 
NyxChainWatcherError { + // #[error("failed to save config file using path '{}'. detailed message: {source}", path.display())] + // ConfigSaveFailure { + // path: PathBuf, + // #[source] + // source: io::Error, + // }, + #[error("failed to save config file using path '{}'. detailed message: {source}", path.display())] + UnformattedConfigSaveFailure { + path: PathBuf, + #[source] + source: nym_config::error::NymConfigTomlError, + }, + + #[error("could not derive path to data directory of this nyx chain watcher")] + DataDirDerivationFailure, + + // #[error("could not derive path to config directory of this nyx chain watcher")] + // ConfigDirDerivationFailure, + #[error("failed to load config file using path '{}'. detailed message: {source}", path.display())] + ConfigLoadFailure { + path: PathBuf, + #[source] + source: io::Error, + }, + + #[error(transparent)] + FileIoFailure(#[from] io::Error), + + #[error(transparent)] + AnyhowFailure(#[from] anyhow::Error), + + #[error(transparent)] + NymConfigTomlE(#[from] nym_config::error::NymConfigTomlError), +} diff --git a/nyx-chain-watcher/src/main.rs b/nyx-chain-watcher/src/main.rs index 3eb513b889..b32649b994 100644 --- a/nyx-chain-watcher/src/main.rs +++ b/nyx-chain-watcher/src/main.rs @@ -1,90 +1,30 @@ -use chain_scraper::run_chain_scraper; -use clap::Parser; +use clap::{crate_name, crate_version, Parser}; +use nym_bin_common::logging::maybe_print_banner; use nym_network_defaults::setup_env; -use nym_task::signal::wait_for_signal; -use tokio::join; mod chain_scraper; +mod cli; +mod config; mod db; +mod env; +mod error; mod http; mod logging; +pub mod models; mod payment_listener; mod price_scraper; -#[derive(Parser, Debug)] -#[command(version, about, long_about = None)] -struct Args { - /// Port to listen on - #[arg(long, default_value_t = 8000, env = "NYX_CHAIN_WATCHER_HTTP_PORT")] - http_port: u16, - - /// Path to the environment variables file. If you don't provide one, variables for the mainnet will be used. 
- #[arg(short, long, default_value = None, env = "NYX_CHAIN_WATCHER_ENV_FILE")] - env_file: Option, - - /// SQLite database file path - #[arg( - long, - default_value = "nyx_chain_watcher.sqlite", - env = "DATABASE_URL" - )] - db_path: String, -} - #[tokio::main] async fn main() -> anyhow::Result<()> { + let cli = cli::Cli::parse(); + setup_env(cli.config_env_file.as_ref()); logging::setup_tracing_logger(); - let args = Args::parse(); - setup_env(args.env_file); // Defaults to mainnet if empty - - let db_path = args.db_path; - // Ensure parent directory exists - if let Some(parent) = std::path::Path::new(&db_path).parent() { - std::fs::create_dir_all(parent)?; + if !cli.no_banner { + maybe_print_banner(crate_name!(), crate_version!()); } - let connection_url = format!("sqlite://{}?mode=rwc", db_path); - let storage = db::Storage::init(connection_url).await?; - let watcher_pool = storage.pool_owned().await; - - // Spawn the chain scraper and get its storage - - // Spawn the payment listener task - let payment_listener_handle = tokio::spawn({ - let obs_pool = watcher_pool.clone(); - let chain_storage = run_chain_scraper().await?; - - async move { - if let Err(e) = payment_listener::run_payment_listener(obs_pool, chain_storage).await { - tracing::error!("Payment listener error: {}", e); - } - Ok::<_, anyhow::Error>(()) - } - }); - - // Clone pool for each task that needs it - //let background_pool = db_pool.clone(); - - let price_scraper_handle = tokio::spawn(async move { - price_scraper::run_price_scraper(&watcher_pool).await; - }); - - let shutdown_handles = http::server::start_http_api(storage.pool_owned().await, args.http_port) - .await - .expect("Failed to start server"); - - tracing::info!("Started HTTP server on port {}", args.http_port); - - // Wait for the short-lived tasks to complete - let _ = join!(price_scraper_handle, payment_listener_handle); - - // Wait for a signal to terminate the long-running task - wait_for_signal().await; - - if let Err(err) = 
shutdown_handles.shutdown().await { - tracing::error!("{err}"); - }; + cli.execute().await?; Ok(()) } diff --git a/nyx-chain-watcher/src/models.rs b/nyx-chain-watcher/src/models.rs new file mode 100644 index 0000000000..7972b1bdeb --- /dev/null +++ b/nyx-chain-watcher/src/models.rs @@ -0,0 +1,14 @@ +use rocket::serde::{Deserialize, Serialize}; +use schemars::JsonSchema; +use utoipa::ToSchema; + +#[derive(Serialize, Deserialize, Clone, JsonSchema, ToSchema)] +pub struct WebhookPayload { + pub transaction_hash: String, + pub message_index: u64, + pub sender_address: String, + pub receiver_address: String, + pub amount: String, + pub height: u128, + pub memo: Option, +} diff --git a/nyx-chain-watcher/src/payment_listener/mod.rs b/nyx-chain-watcher/src/payment_listener/mod.rs index bddfecbbda..e0ba3aa668 100644 --- a/nyx-chain-watcher/src/payment_listener/mod.rs +++ b/nyx-chain-watcher/src/payment_listener/mod.rs @@ -1,68 +1,130 @@ +use crate::config::PaymentWatcherConfig; use crate::db::queries; +use crate::models::WebhookPayload; +use nym_validator_client::nyxd::AccountId; use nyxd_scraper::storage::ScraperStorage; use reqwest::Client; -use serde_json::{json, Value}; +use rocket::form::validate::Contains; +use serde_json::Value; use sqlx::SqlitePool; -use std::env; +use std::str::FromStr; use tokio::time::{self, Duration}; +use tracing::{error, info}; #[derive(Debug)] struct TransferEvent { - recipient: String, - sender: String, + recipient: AccountId, + sender: AccountId, amount: String, + message_index: u64, } pub(crate) async fn run_payment_listener( + payment_watcher_config: PaymentWatcherConfig, watcher_pool: SqlitePool, chain_storage: ScraperStorage, ) -> anyhow::Result<()> { - let payment_receive_address = env::var("PAYMENT_RECEIVE_ADDRESS").map_err(|_| { - anyhow::anyhow!("Environment variable `PAYMENT_RECEIVE_ADDRESS` not defined") - })?; - let webhook_url = env::var("WEBHOOK_URL") - .map_err(|_| anyhow::anyhow!("Environment variable `WEBHOOK_URL` not 
defined"))?; - let client = Client::new(); + + let default_message_types = vec!["/cosmos.bank.v1beta1.MsgSend".to_string()]; + loop { - let last_checked_height = - queries::payments::get_last_checked_height(&watcher_pool).await?; - tracing::info!("Last checked height: {}", last_checked_height); - - let transactions = chain_storage - .get_transactions_after_height( - last_checked_height, - Some("/cosmos.bank.v1beta1.MsgSend"), - ) - .await?; - - for tx in transactions { - tracing::info!("Processing transaction: {}", tx.hash); - if let Some(raw_log) = tx.raw_log.as_deref() { - if let Some(transfer) = parse_transfer_from_raw_log(raw_log)? { - if transfer.recipient == payment_receive_address { - let amount: f64 = parse_unym_amount(&transfer.amount)?; - - queries::payments::insert_payment( - &watcher_pool, - tx.hash.clone(), - transfer.sender.clone(), - transfer.recipient.clone(), - amount, - tx.height, - tx.memo.clone(), - ) - .await?; - - let webhook_data = json!({ - "transaction_hash": tx.hash, - "sender_address": transfer.sender, - "receiver_address": transfer.recipient, - "amount": amount, - "height": tx.height, - "memo": tx.memo, - }); - let _ = client.post(&webhook_url).json(&webhook_data).send().await; + // 1. get the last height this watcher ran at + let last_checked_height = queries::payments::get_last_checked_height(&watcher_pool).await?; + info!("Last checked height: {}", last_checked_height); + + // 2. iterate through watchers + for watcher in &payment_watcher_config.watchers { + let watch_for_chain_message_types = watcher + .watch_for_chain_message_types + .as_ref() + .unwrap_or(&default_message_types); + + // 3. 
build up transactions that match the message types we are looking for + let mut transactions = vec![]; + for message_type in watch_for_chain_message_types { + match chain_storage + .get_transactions_after_height( + last_checked_height, + Some(message_type), + ) + .await { + Ok(txs) => { + for t in txs { + transactions.push(t); + } + } + Err(e) => error!("Failed to get transactions (message_type = {message_type}) from scraper database: {e}") + } + } + + for tx in transactions { + info!( + "[watcher = {}] Processing transaction: {}", + watcher.id, tx.hash + ); + if let Some(raw_log) = tx.raw_log.as_deref() { + if let Some(watch_for_transfer_recipient_accounts) = + &watcher.watch_for_transfer_recipient_accounts + { + // 4. match recipient accounts we are looking for + match parse_transfer_from_raw_log( + raw_log, + watch_for_transfer_recipient_accounts, + ) { + Ok(transfer_events) => { + for transfer in transfer_events { + let amount: f64 = parse_unym_amount(&transfer.amount)?; + + queries::payments::insert_payment( + &watcher_pool, + tx.hash.clone(), + transfer.sender.clone().to_string(), + transfer.recipient.clone().to_string(), + amount, + tx.height, + tx.memo.clone(), + ) + .await?; + + let webhook_data = WebhookPayload { + transaction_hash: tx.hash.clone(), + message_index: transfer.message_index, + sender_address: transfer.sender.to_string(), + receiver_address: transfer.recipient.to_string(), + amount: transfer.amount, + height: tx.height as u128, + memo: tx.memo.clone(), + }; + match client + .post(&watcher.webhook_url) + .json(&webhook_data) + .send() + .await + { + Ok(res) => info!( + "[watcher = {}] ✅ Webhook {} {} - tx {}, index {}", + watcher.id, + res.status(), + res.url(), + tx.hash, + transfer.message_index, + ), + Err(e) => error!( + "[watcher = {}] ❌ Webhook {:?} {:?} error = {}", + watcher.id, + e.status(), + e.url(), + e, + ), + } + } + } + Err(e) => error!( + "[watcher = {}] ❌ Parse logs for tx {} failed, error = {}", + watcher.id, tx.hash, e, + 
), + } } } } @@ -72,39 +134,56 @@ pub(crate) async fn run_payment_listener( } } -fn parse_transfer_from_raw_log(raw_log: &str) -> anyhow::Result> { +fn parse_transfer_from_raw_log( + raw_log: &str, + watch_for_transfer_recipient_accounts: &Vec, +) -> anyhow::Result> { let log_value: Value = serde_json::from_str(raw_log)?; + let mut transfers: Vec = vec![]; + if let Some(events) = log_value[0]["events"].as_array() { - if let Some(transfer_event) = events.iter().find(|e| e["type"] == "transfer") { + for transfer_event in events.iter().filter(|e| e["type"] == "transfer") { if let Some(attrs) = transfer_event["attributes"].as_array() { - let mut transfer = TransferEvent { - recipient: String::new(), - sender: String::new(), - amount: String::new(), - }; + let mut recipient: Option = None; + let mut sender: Option = None; + let mut amount: Option = None; + let message_index: Option = Some(0u64); for attr in attrs { match attr["key"].as_str() { Some("recipient") => { - transfer.recipient = attr["value"].as_str().unwrap_or("").to_string() + recipient = + AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); } Some("sender") => { - transfer.sender = attr["value"].as_str().unwrap_or("").to_string() + sender = AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); } Some("amount") => { - transfer.amount = attr["value"].as_str().unwrap_or("").to_string() + amount = Some(attr["value"].as_str().unwrap_or("").to_string()) } + // TODO: parse message index _ => continue, } } - return Ok(Some(transfer)); + if let (Some(recipient), Some(sender), Some(amount), Some(message_index)) = + (recipient, sender, amount, message_index) + { + if watch_for_transfer_recipient_accounts.contains(&recipient) { + transfers.push(TransferEvent { + recipient, + sender, + amount, + message_index, + }); + } + } } } } - Ok(None) + Ok(transfers) } fn parse_unym_amount(amount: &str) -> anyhow::Result { From 235165171b0e60211936bee8366d93df86fd1206 Mon Sep 17 00:00:00 2001 From: Mark 
Sinclair Date: Fri, 6 Dec 2024 20:10:36 +0000 Subject: [PATCH 33/64] Remove migration from seed app --- nyx-chain-watcher/migrations/000_init.sql | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 nyx-chain-watcher/migrations/000_init.sql diff --git a/nyx-chain-watcher/migrations/000_init.sql b/nyx-chain-watcher/migrations/000_init.sql deleted file mode 100644 index 5fa9e40e00..0000000000 --- a/nyx-chain-watcher/migrations/000_init.sql +++ /dev/null @@ -1,6 +0,0 @@ -CREATE TABLE responses ( - id SERIAL PRIMARY KEY, - joke_id VARCHAR NOT NULL UNIQUE, - joke TEXT NOT NULL, - date_created INTEGER NOT NULL -); From df004f834f930d348c4ed5c765b297d9099d57ae Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Fri, 6 Dec 2024 20:10:46 +0000 Subject: [PATCH 34/64] Add example to README --- nyx-chain-watcher/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/nyx-chain-watcher/README.md b/nyx-chain-watcher/README.md index 7c62d08296..956828d4ef 100644 --- a/nyx-chain-watcher/README.md +++ b/nyx-chain-watcher/README.md @@ -10,6 +10,7 @@ Look in [env.rs](./src/env.rs) for the names of environment variables that can b NYX_CHAIN_WATCHER_HISTORY_DATABASE_PATH=chain_history.sqlite \ NYX_CHAIN_WATCHER_DATABASE_PATH=nyx_chain_watcher.sqlite \ NYX_CHAIN_WATCHER_WATCH_ACCOUNTS=n1...,n1...,n1... 
\ +NYX_CHAIN_WATCHER_WATCH_CHAIN_MESSAGE_TYPES="/cosmos.bank.v1beta1.MsgSend,/ibc.applications.transfer.v1.MsgTransfer" NYX_CHAIN_WATCHER_WEBHOOK_URL="https://webhook.site" \ NYX_CHAIN_WATCHER_WEBHOOK_AUTH=1234 \ cargo run -- run From 5b6ae39dabfbfd377217d593366a76cd48bb8e63 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Fri, 6 Dec 2024 20:11:14 +0000 Subject: [PATCH 35/64] init saves example config --- nyx-chain-watcher/src/cli/commands/init.rs | 29 ++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/nyx-chain-watcher/src/cli/commands/init.rs b/nyx-chain-watcher/src/cli/commands/init.rs index 163c6e9f1a..a545552556 100644 --- a/nyx-chain-watcher/src/cli/commands/init.rs +++ b/nyx-chain-watcher/src/cli/commands/init.rs @@ -1,9 +1,14 @@ // Copyright 2024 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::config::{default_config_filepath, Config, ConfigBuilder}; +use crate::cli::DEFAULT_NYX_CHAIN_WATCHER_ID; +use crate::config::payments_watcher::HttpAuthenticationOptions::AuthorizationBearerToken; +use crate::config::payments_watcher::PaymentWatcherEntry; +use crate::config::{default_config_filepath, Config, ConfigBuilder, PaymentWatcherConfig}; use crate::error::NyxChainWatcherError; use nym_config::save_unformatted_config_to_file; +use nym_validator_client::nyxd::AccountId; +use std::str::FromStr; #[derive(clap::Args, Debug)] pub(crate) struct Args {} @@ -12,7 +17,27 @@ pub(crate) async fn execute(_args: Args) -> Result<(), NyxChainWatcherError> { let config_path = default_config_filepath(); let data_dir = Config::default_data_directory(&config_path)?; - let builder = ConfigBuilder::new(config_path.clone(), data_dir); + let builder = ConfigBuilder::new(config_path.clone(), data_dir).with_payment_watcher_config( + PaymentWatcherConfig { + watchers: vec![PaymentWatcherEntry { + id: DEFAULT_NYX_CHAIN_WATCHER_ID.to_string(), + webhook_url: "https://webhook.site".to_string(), + 
watch_for_transfer_recipient_accounts: Some(vec![AccountId::from_str( + "n17g9a2pwwkg8m60wf59pq6mv0c2wusg9ukparkz", + ) + .unwrap()]), + authentication: Some(AuthorizationBearerToken { + token: "1234".to_string(), + }), + description: None, + watch_for_chain_message_types: Some(vec![ + "/cosmos.bank.v1beta1.MsgSend".to_string(), + "/ibc.applications.transfer.v1.MsgTransfer".to_string(), + ]), + }], + }, + ); + let config = builder.build(); Ok(save_unformatted_config_to_file(&config, &config_path)?) From 156e892baa0ff09e30daa8b27bb0ea1cec2e29f8 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Fri, 6 Dec 2024 20:11:45 +0000 Subject: [PATCH 36/64] parse message index and process all log entries --- nyx-chain-watcher/src/chain_scraper/mod.rs | 4 +- nyx-chain-watcher/src/cli/commands/run/mod.rs | 14 ++- nyx-chain-watcher/src/payment_listener/mod.rs | 108 +++++++++++------- 3 files changed, 80 insertions(+), 46 deletions(-) diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index 36634a5964..3a00d7f665 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -1,6 +1,8 @@ use nyxd_scraper::{storage::ScraperStorage, NyxdScraper, PruningOptions}; -pub(crate) async fn run_chain_scraper(config: &crate::config::Config) -> anyhow::Result { +pub(crate) async fn run_chain_scraper( + config: &crate::config::Config, +) -> anyhow::Result { let websocket_url = std::env::var("NYXD_WS").expect("NYXD_WS not defined"); let rpc_url = std::env::var("NYXD").expect("NYXD not defined"); diff --git a/nyx-chain-watcher/src/cli/commands/run/mod.rs b/nyx-chain-watcher/src/cli/commands/run/mod.rs index 84d3d8e1e5..d3095fc37d 100644 --- a/nyx-chain-watcher/src/cli/commands/run/mod.rs +++ b/nyx-chain-watcher/src/cli/commands/run/mod.rs @@ -21,8 +21,18 @@ pub(crate) async fn execute(args: Args, http_port: u16) -> Result<(), NyxChainWa let db_path = config.database_path(); info!("Config is 
{config:#?}"); - info!("Database path is {:?}", std::path::Path::new(&db_path).canonicalize().unwrap_or_default()); - info!("Chain History Database path is {:?}", std::path::Path::new(&config.chain_scraper_database_path()).canonicalize().unwrap_or_default()); + info!( + "Database path is {:?}", + std::path::Path::new(&db_path) + .canonicalize() + .unwrap_or_default() + ); + info!( + "Chain History Database path is {:?}", + std::path::Path::new(&config.chain_scraper_database_path()) + .canonicalize() + .unwrap_or_default() + ); // Ensure parent directory exists if let Some(parent) = std::path::Path::new(&db_path).parent() { diff --git a/nyx-chain-watcher/src/payment_listener/mod.rs b/nyx-chain-watcher/src/payment_listener/mod.rs index e0ba3aa668..4df72b5a42 100644 --- a/nyx-chain-watcher/src/payment_listener/mod.rs +++ b/nyx-chain-watcher/src/payment_listener/mod.rs @@ -1,3 +1,4 @@ +use crate::config::payments_watcher::HttpAuthenticationOptions; use crate::config::PaymentWatcherConfig; use crate::db::queries; use crate::models::WebhookPayload; @@ -9,7 +10,7 @@ use serde_json::Value; use sqlx::SqlitePool; use std::str::FromStr; use tokio::time::{self, Duration}; -use tracing::{error, info}; +use tracing::{error, info, trace}; #[derive(Debug)] struct TransferEvent { @@ -59,10 +60,6 @@ pub(crate) async fn run_payment_listener( } for tx in transactions { - info!( - "[watcher = {}] Processing transaction: {}", - watcher.id, tx.hash - ); if let Some(raw_log) = tx.raw_log.as_deref() { if let Some(watch_for_transfer_recipient_accounts) = &watcher.watch_for_transfer_recipient_accounts @@ -73,6 +70,13 @@ pub(crate) async fn run_payment_listener( watch_for_transfer_recipient_accounts, ) { Ok(transfer_events) => { + if !transfer_events.is_empty() { + info!( + "[watcher = {}] Processing transaction: {} - {} payment events found", + watcher.id, tx.hash, transfer_events.len() + ); + } + for transfer in transfer_events { let amount: f64 = parse_unym_amount(&transfer.amount)?; @@ 
-96,12 +100,19 @@ pub(crate) async fn run_payment_listener( height: tx.height as u128, memo: tx.memo.clone(), }; - match client - .post(&watcher.webhook_url) - .json(&webhook_data) - .send() - .await - { + + let mut request_builder = + client.post(&watcher.webhook_url).json(&webhook_data); + + if let Some(auth) = &watcher.authentication { + match auth { + HttpAuthenticationOptions::AuthorizationBearerToken { token } => { + request_builder = request_builder.bearer_auth(token); + } + } + } + + match request_builder.send().await { Ok(res) => info!( "[watcher = {}] ✅ Webhook {} {} - tx {}, index {}", watcher.id, @@ -142,41 +153,52 @@ fn parse_transfer_from_raw_log( let mut transfers: Vec = vec![]; - if let Some(events) = log_value[0]["events"].as_array() { - for transfer_event in events.iter().filter(|e| e["type"] == "transfer") { - if let Some(attrs) = transfer_event["attributes"].as_array() { - let mut recipient: Option = None; - let mut sender: Option = None; - let mut amount: Option = None; - let message_index: Option = Some(0u64); - - for attr in attrs { - match attr["key"].as_str() { - Some("recipient") => { - recipient = - AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); - } - Some("sender") => { - sender = AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); - } - Some("amount") => { - amount = Some(attr["value"].as_str().unwrap_or("").to_string()) + let default_value = vec![]; + let log_entries: &Vec = log_value.as_array().unwrap_or(&default_value); + + trace!("contains {} log entries", log_entries.len()); + + for log_entry in log_entries { + let message_index = log_entry["msg_index"].as_u64().unwrap_or_default(); + + trace!("entry - {message_index}..."); + + if let Some(events) = log_entry["events"].as_array() { + for transfer_event in events.iter().filter(|e| e["type"] == "transfer") { + if let Some(attrs) = transfer_event["attributes"].as_array() { + let mut recipient: Option = None; + let mut sender: Option = None; + let mut 
amount: Option = None; + + for attr in attrs { + match attr["key"].as_str() { + Some("recipient") => { + recipient = + AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); + } + Some("sender") => { + sender = + AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); + } + Some("amount") => { + amount = Some(attr["value"].as_str().unwrap_or("").to_string()) + } + // TODO: parse message index + _ => continue, } - // TODO: parse message index - _ => continue, } - } - if let (Some(recipient), Some(sender), Some(amount), Some(message_index)) = - (recipient, sender, amount, message_index) - { - if watch_for_transfer_recipient_accounts.contains(&recipient) { - transfers.push(TransferEvent { - recipient, - sender, - amount, - message_index, - }); + if let (Some(recipient), Some(sender), Some(amount)) = + (recipient, sender, amount) + { + if watch_for_transfer_recipient_accounts.contains(&recipient) { + transfers.push(TransferEvent { + recipient, + sender, + amount, + message_index, + }); + } } } } From 39f525e88e37744c066ae3fd7a28ae730193e7e8 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Fri, 6 Dec 2024 20:27:39 +0000 Subject: [PATCH 37/64] Add Dockerfile and workflow to build --- nyx-chain-watcher/Dockerfile | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 nyx-chain-watcher/Dockerfile diff --git a/nyx-chain-watcher/Dockerfile b/nyx-chain-watcher/Dockerfile new file mode 100644 index 0000000000..6599e8a66e --- /dev/null +++ b/nyx-chain-watcher/Dockerfile @@ -0,0 +1,32 @@ +FROM rust:latest AS builder + +COPY ./ /usr/src/nym +WORKDIR /usr/src/nym/nyx-chain-watcher + +RUN cargo build --release + +#------------------------------------------------------------------- +# The following environment variables are required at runtime: +# +# NYX_CHAIN_WATCHER_DATABASE_PATH = /mnt/nyx-chain-watchter.sqlite +# NYX_CHAIN_WATCHER_HISTORY_DATABASE_PATH = /mnt/chain-history.sqlite +# NYX_CHAIN_WATCHER_WATCH_ACCOUNTS = 
"n1...,n1...,n1..." +# +# And optionally: +# +# NYX_CHAIN_WATCHER_WATCH_CHAIN_MESSAGE_TYPES = "/cosmos.bank.v1beta1.MsgSend,/ibc.applications.transfer.v1.MsgTransfer" +# NYX_CHAIN_WATCHER_CONFIG_ENV_FILE_ARG = /mnt/sandbox.env for sandbox environment +# +# see https://github.com/nymtech/nym/blob/develop/nyx-chain-watcher/src/cli/commands/run/args.rs for details +# and https://github.com/nymtech/nym/blob/develop/nyx-chain-watcher/src/env.rs for env vars +#------------------------------------------------------------------- + +FROM ubuntu:24.04 + +RUN apt update && apt install -yy curl ca-certificates + +WORKDIR /nym + +COPY --from=builder /usr/src/nym/target/release/nyx-chain-watcher ./ +ENTRYPOINT [ "/nym/nyx-chain-watcher" ] + From 541d46e8996e1bed556013a05498015569eb6f39 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Fri, 6 Dec 2024 21:42:29 +0000 Subject: [PATCH 38/64] Fix docker entry point and bump version --- nyx-chain-watcher/Cargo.toml | 2 +- nyx-chain-watcher/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 1f9e87d4a2..6104a7877f 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.0" +version = "0.1.1" authors.workspace = true repository.workspace = true homepage.workspace = true diff --git a/nyx-chain-watcher/Dockerfile b/nyx-chain-watcher/Dockerfile index 6599e8a66e..e4419d4664 100644 --- a/nyx-chain-watcher/Dockerfile +++ b/nyx-chain-watcher/Dockerfile @@ -28,5 +28,5 @@ RUN apt update && apt install -yy curl ca-certificates WORKDIR /nym COPY --from=builder /usr/src/nym/target/release/nyx-chain-watcher ./ -ENTRYPOINT [ "/nym/nyx-chain-watcher" ] +ENTRYPOINT [ "/nym/nyx-chain-watcher", "run" ] From d8a6ca48c1c56455afb6068eda9c5ab9b6d2c2de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Mon, 9 Dec 2024 13:43:49 +0000 Subject: [PATCH 
39/64] implemented starting block logic inside the chain scraper itself --- Cargo.lock | 2 +- .../nyxd-scraper/src/block_processor/mod.rs | 49 ++++++++++++++++++- common/nyxd-scraper/src/error.rs | 3 ++ common/nyxd-scraper/src/lib.rs | 2 +- common/nyxd-scraper/src/rpc_client.rs | 11 +++++ common/nyxd-scraper/src/scraper/mod.rs | 38 +++++++++----- .../src/cli/process_block.rs | 2 +- .../src/cli/process_until.rs | 2 +- nyx-chain-watcher/src/chain_scraper/mod.rs | 22 +++++++-- nyx-chain-watcher/src/env.rs | 2 + nyx-chain-watcher/src/main.rs | 5 ++ 11 files changed, 117 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2255884f04..91ac39c4ad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.0" +version = "0.1.1" dependencies = [ "anyhow", "axum 0.7.7", diff --git a/common/nyxd-scraper/src/block_processor/mod.rs b/common/nyxd-scraper/src/block_processor/mod.rs index c6dead9765..355c6d3907 100644 --- a/common/nyxd-scraper/src/block_processor/mod.rs +++ b/common/nyxd-scraper/src/block_processor/mod.rs @@ -46,6 +46,8 @@ impl PendingSync { pub struct BlockProcessorConfig { pub pruning_options: PruningOptions, pub store_precommits: bool, + pub explicit_starting_block_height: Option, + pub use_best_effort_start_height: bool, } impl Default for BlockProcessorConfig { @@ -53,15 +55,24 @@ impl Default for BlockProcessorConfig { Self { pruning_options: PruningOptions::nothing(), store_precommits: true, + explicit_starting_block_height: None, + use_best_effort_start_height: false, } } } impl BlockProcessorConfig { - pub fn new(pruning_options: PruningOptions, store_precommits: bool) -> Self { + pub fn new( + pruning_options: PruningOptions, + store_precommits: bool, + explicit_starting_block_height: Option, + use_best_effort_start_height: bool, + ) -> Self { Self { pruning_options, store_precommits, + explicit_starting_block_height, + use_best_effort_start_height, } } } @@ 
-403,6 +414,42 @@ impl BlockProcessor { let request_range = self.last_processed_height + 1..latest_block + 1; info!("we need to request {request_range:?} to resync"); self.request_missing_blocks(request_range).await?; + return Ok(()); + } + + // this is the first time starting up + if self.last_processed_height == 0 { + let Some(starting_height) = self.config.explicit_starting_block_height else { + // nothing to do + return Ok(()); + }; + + info!("attempting to start the scraper from block {starting_height}"); + let earliest_available = + self.rpc_client.earliest_available_block_height().await? as u32; + info!("earliest available block height: {earliest_available}"); + + if earliest_available > starting_height && self.config.use_best_effort_start_height { + error!("the earliest available block is higher than the desired starting height"); + return Err(ScraperError::BlocksUnavailable { + height: starting_height, + }); + } + + let starting_height = if earliest_available > starting_height { + // add few additional blocks to account for all the startup waiting + // because the node might have pruned few blocks since + earliest_available + 10 + } else { + starting_height + }; + + let request_range = starting_height..latest_block + 1; + + info!("going to start the scraper from block {starting_height}"); + info!("we need to request {request_range:?} before properly starting up"); + + self.request_missing_blocks(request_range).await?; } Ok(()) diff --git a/common/nyxd-scraper/src/error.rs b/common/nyxd-scraper/src/error.rs index 9876536aa5..6e413983bd 100644 --- a/common/nyxd-scraper/src/error.rs +++ b/common/nyxd-scraper/src/error.rs @@ -19,6 +19,9 @@ pub enum ScraperError { #[error("the block scraper is already running")] ScraperAlreadyRunning, + #[error("block information for height {height} is not available on the provided rpc endpoint")] + BlocksUnavailable { height: u32 }, + #[error("failed to establish websocket connection to {url}: {source}")] 
WebSocketConnectionFailure { url: String, diff --git a/common/nyxd-scraper/src/lib.rs b/common/nyxd-scraper/src/lib.rs index 332e38d214..806de871dc 100644 --- a/common/nyxd-scraper/src/lib.rs +++ b/common/nyxd-scraper/src/lib.rs @@ -16,5 +16,5 @@ pub mod storage; pub use block_processor::pruning::{PruningOptions, PruningStrategy}; pub use modules::{BlockModule, MsgModule, TxModule}; -pub use scraper::{Config, NyxdScraper}; +pub use scraper::{Config, NyxdScraper, StartingBlockOpts}; pub use storage::models; diff --git a/common/nyxd-scraper/src/rpc_client.rs b/common/nyxd-scraper/src/rpc_client.rs index d2c141f711..f5e3b8c035 100644 --- a/common/nyxd-scraper/src/rpc_client.rs +++ b/common/nyxd-scraper/src/rpc_client.rs @@ -117,6 +117,17 @@ impl RpcClient { Ok(info.last_block_height.value()) } + pub(crate) async fn earliest_available_block_height(&self) -> Result { + debug!("getting earliest available block height"); + + let status = self + .inner + .status() + .await + .map_err(|source| ScraperError::AbciInfoQueryFailure { source })?; + Ok(status.sync_info.earliest_block_height.value()) + } + async fn get_transaction_results( &self, raw: &[Vec], diff --git a/common/nyxd-scraper/src/scraper/mod.rs b/common/nyxd-scraper/src/scraper/mod.rs index e4d133341e..e0508b2c09 100644 --- a/common/nyxd-scraper/src/scraper/mod.rs +++ b/common/nyxd-scraper/src/scraper/mod.rs @@ -24,6 +24,15 @@ use url::Url; mod subscriber; +#[derive(Default, Clone, Copy)] +pub struct StartingBlockOpts { + pub start_block_height: Option, + + /// If the scraper fails to start from the desired height, rather than failing, + /// attempt to use the next available height + pub use_best_effort_start_height: bool, +} + pub struct Config { /// Url to the websocket endpoint of a validator, for example `wss://rpc.nymtech.net/websocket` pub websocket_url: Url, @@ -37,7 +46,7 @@ pub struct Config { pub store_precommits: bool, - pub start_block_height: Option, + pub start_block: StartingBlockOpts, } pub struct 
NyxdScraperBuilder { @@ -50,8 +59,6 @@ pub struct NyxdScraperBuilder { impl NyxdScraperBuilder { pub async fn build_and_start(self) -> Result { - let start_block_height = self.config.start_block_height.clone(); - let scraper = NyxdScraper::new(self.config).await?; let (processing_tx, processing_rx) = unbounded_channel(); @@ -70,6 +77,8 @@ impl NyxdScraperBuilder { let block_processor_config = BlockProcessorConfig::new( scraper.config.pruning_options, scraper.config.store_precommits, + scraper.config.start_block.start_block_height, + scraper.config.start_block.use_best_effort_start_height, ); let mut block_processor = BlockProcessor::new( @@ -94,11 +103,6 @@ impl NyxdScraperBuilder { ) .await?; - // TODO: decide if this should be removed? - if let Some(height) = start_block_height { - scraper.process_block_range(Some(height), None).await?; - } - scraper.start_tasks(block_requester, block_processor, chain_subscriber); Ok(scraper) @@ -175,7 +179,10 @@ impl NyxdScraper { self.task_tracker.close(); } - pub async fn process_single_block(&self, height: u32) -> Result<(), ScraperError> { + // DO NOT USE UNLESS YOU KNOW EXACTLY WHAT YOU'RE DOING + // AS THIS WILL NOT USE ANY OF YOUR REGISTERED MODULES + // YOU WILL BE FIRED IF YOU USE IT : ) + pub async fn unsafe_process_single_block(&self, height: u32) -> Result<(), ScraperError> { info!(height = height, "attempting to process a single block"); if !self.task_tracker.is_empty() { return Err(ScraperError::ScraperAlreadyRunning); @@ -194,7 +201,10 @@ impl NyxdScraper { block_processor.process_block(block.into()).await } - pub async fn process_block_range( + // DO NOT USE UNLESS YOU KNOW EXACTLY WHAT YOU'RE DOING + // AS THIS WILL NOT USE ANY OF YOUR REGISTERED MODULES + // YOU WILL BE FIRED IF YOU USE IT : ) + pub async fn unsafe_process_block_range( &self, starting_height: Option, end_height: Option, @@ -323,8 +333,12 @@ impl NyxdScraper { req_tx: Sender, processing_rx: UnboundedReceiver, ) -> Result { - let 
block_processor_config = - BlockProcessorConfig::new(self.config.pruning_options, self.config.store_precommits); + let block_processor_config = BlockProcessorConfig::new( + self.config.pruning_options, + self.config.store_precommits, + self.config.start_block.start_block_height, + self.config.start_block.use_best_effort_start_height, + ); BlockProcessor::new( block_processor_config, diff --git a/nym-validator-rewarder/src/cli/process_block.rs b/nym-validator-rewarder/src/cli/process_block.rs index b3d090cfda..c4f3563566 100644 --- a/nym-validator-rewarder/src/cli/process_block.rs +++ b/nym-validator-rewarder/src/cli/process_block.rs @@ -26,7 +26,7 @@ pub(crate) async fn execute(args: Args) -> Result<(), NymRewarderError> { NyxdScraper::new(config.scraper_config()) .await? - .process_single_block(args.height) + .unsafe_process_single_block(args.height) .await?; Ok(()) } diff --git a/nym-validator-rewarder/src/cli/process_until.rs b/nym-validator-rewarder/src/cli/process_until.rs index 3de3a3fff7..04a5809caf 100644 --- a/nym-validator-rewarder/src/cli/process_until.rs +++ b/nym-validator-rewarder/src/cli/process_until.rs @@ -39,7 +39,7 @@ pub(crate) async fn execute(args: Args) -> Result<(), NymRewarderError> { NyxdScraper::new(config.scraper_config()) .await? 
- .process_block_range(args.start_height, args.stop_height) + .unsafe_process_block_range(args.start_height, args.stop_height) .await?; Ok(()) } diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index 3a00d7f665..61fbf0f6db 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -1,3 +1,4 @@ +use crate::env::vars::{NYXD_SCRAPER_START_HEIGHT, NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT}; use nyxd_scraper::{storage::ScraperStorage, NyxdScraper, PruningOptions}; pub(crate) async fn run_chain_scraper( @@ -9,9 +10,19 @@ pub(crate) async fn run_chain_scraper( let websocket_url = reqwest::Url::parse(&websocket_url)?; let rpc_url = reqwest::Url::parse(&rpc_url)?; - let start_block_height = std::env::var("NYXD_SCRAPER_START_HEIGHT") - .ok() - .and_then(|value| value.parse::().ok()); + // why are those not part of CLI? : ( + let start_block_height = match std::env::var(NYXD_SCRAPER_START_HEIGHT).ok() { + None => None, + // blow up if passed malformed env value + Some(raw) => Some(raw.parse()?), + }; + + let use_best_effort_start_height = + match std::env::var(NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT).ok() { + None => false, + // blow up if passed malformed env value + Some(raw) => raw.parse()?, + }; let scraper = NyxdScraper::builder(nyxd_scraper::Config { websocket_url, @@ -19,7 +30,10 @@ pub(crate) async fn run_chain_scraper( database_path: config.chain_scraper_database_path().into(), pruning_options: PruningOptions::nothing(), store_precommits: false, - start_block_height, + start_block: nyxd_scraper::StartingBlockOpts { + start_block_height, + use_best_effort_start_height, + }, }); let instance = scraper.build_and_start().await?; diff --git a/nyx-chain-watcher/src/env.rs b/nyx-chain-watcher/src/env.rs index 5b66a63c92..009cbae10f 100644 --- a/nyx-chain-watcher/src/env.rs +++ b/nyx-chain-watcher/src/env.rs @@ -11,6 +11,8 @@ pub mod vars { 
"NYX_CHAIN_WATCHER_HISTORY_DATABASE_PATH"; pub const NYXD_SCRAPER_START_HEIGHT: &str = "NYXD_SCRAPER_START_HEIGHT"; + pub const NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT: &str = + "NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT"; pub const NYX_CHAIN_WATCHER_ID_ARG: &str = "NYX_CHAIN_WATCHER_ID"; pub const NYX_CHAIN_WATCHER_OUTPUT_ARG: &str = "NYX_CHAIN_WATCHER_OUTPUT"; diff --git a/nyx-chain-watcher/src/main.rs b/nyx-chain-watcher/src/main.rs index b32649b994..e6c3f8ea26 100644 --- a/nyx-chain-watcher/src/main.rs +++ b/nyx-chain-watcher/src/main.rs @@ -1,6 +1,8 @@ use clap::{crate_name, crate_version, Parser}; +use nym_bin_common::bin_info_owned; use nym_bin_common::logging::maybe_print_banner; use nym_network_defaults::setup_env; +use tracing::info; mod chain_scraper; mod cli; @@ -24,6 +26,9 @@ async fn main() -> anyhow::Result<()> { maybe_print_banner(crate_name!(), crate_version!()); } + let bin_info = bin_info_owned!(); + info!("using the following version: {bin_info}"); + cli.execute().await?; Ok(()) From a6f4f017c776a68024b44a2754359de5d4b92d22 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Mon, 9 Dec 2024 15:04:32 +0000 Subject: [PATCH 40/64] Bump version --- Cargo.lock | 2 +- nyx-chain-watcher/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 91ac39c4ad..878b8f1e1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.1" +version = "0.1.2" dependencies = [ "anyhow", "axum 0.7.7", diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 6104a7877f..99f9c16f26 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.1" +version = "0.1.2" authors.workspace = true repository.workspace = true homepage.workspace = true From 2b26a88d6c9eff2ea4838b169b72e8d4609a4fa6 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: 
Mon, 9 Dec 2024 15:29:28 +0000 Subject: [PATCH 41/64] Bump version --- Cargo.lock | 2 +- nyx-chain-watcher/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 878b8f1e1a..dbeb0d9a6d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.2" +version = "0.1.3" dependencies = [ "anyhow", "axum 0.7.7", diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 99f9c16f26..7a05e9d231 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.2" +version = "0.1.3" authors.workspace = true repository.workspace = true homepage.workspace = true From 1890367bfc5cf503a617790ce41d59ceed53ed02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Mon, 9 Dec 2024 16:13:02 +0000 Subject: [PATCH 42/64] allow conversion from CometBFT block subscription --- common/nyxd-scraper/src/block_processor/types.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/common/nyxd-scraper/src/block_processor/types.rs b/common/nyxd-scraper/src/block_processor/types.rs index 93ffb3ba76..6909ff32a0 100644 --- a/common/nyxd-scraper/src/block_processor/types.rs +++ b/common/nyxd-scraper/src/block_processor/types.rs @@ -84,13 +84,7 @@ impl TryFrom for BlockToProcess { // TODO: we're losing `result_begin_block` and `result_end_block` here but maybe that's fine? let maybe_block = match event.data { - // we don't care about `NewBlock` until CometBFT 0.38, i.e. until we upgrade to wasmd 0.50 - EventData::NewBlock { .. } => { - return Err(ScraperError::InvalidSubscriptionEvent { - query, - kind: "NewBlock".to_string(), - }) - } + EventData::NewBlock { block, .. } => block, EventData::LegacyNewBlock { block, .. } => block, EventData::Tx { .. 
} => { return Err(ScraperError::InvalidSubscriptionEvent { From 60e8e53f3bc6d4de4c466437ad84c85fc7f5a1ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Mon, 9 Dec 2024 16:13:15 +0000 Subject: [PATCH 43/64] explicitly build websocket client in 0.37 compat mode --- common/nyxd-scraper/src/scraper/subscriber.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/common/nyxd-scraper/src/scraper/subscriber.rs b/common/nyxd-scraper/src/scraper/subscriber.rs index c2b5bdbbfd..e296a80713 100644 --- a/common/nyxd-scraper/src/scraper/subscriber.rs +++ b/common/nyxd-scraper/src/scraper/subscriber.rs @@ -3,6 +3,7 @@ use crate::block_processor::types::BlockToProcess; use crate::error::ScraperError; +use tendermint_rpc::client::CompatMode; use tendermint_rpc::event::Event; use tendermint_rpc::query::EventType; use tendermint_rpc::{SubscriptionClient, WebSocketClient, WebSocketClientDriver}; @@ -38,7 +39,16 @@ impl ChainSubscriber { ) -> Result { // sure, we could have just used websocket client entirely, but let's keep the logic for // getting current blocks and historical blocks completely separate with the dual connection - let (client, driver) = WebSocketClient::new(websocket_endpoint.as_str()) + let websocket_url = websocket_endpoint.as_str().try_into().map_err(|source| { + ScraperError::WebSocketConnectionFailure { + url: websocket_endpoint.to_string(), + source, + } + })?; + + let (client, driver) = WebSocketClient::builder(websocket_url) + .compat_mode(CompatMode::V0_37) + .build() .await .map_err(|source| ScraperError::WebSocketConnectionFailure { url: websocket_endpoint.to_string(), From be185824b42f0ea3a0ed958fcbda297d096a70cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Mon, 9 Dec 2024 16:29:34 +0000 Subject: [PATCH 44/64] extra logs --- common/nyxd-scraper/src/block_processor/mod.rs | 9 ++++++++- common/nyxd-scraper/src/scraper/mod.rs | 6 +++++- 
common/nyxd-scraper/src/storage/mod.rs | 6 ++++++ nyx-chain-watcher/src/chain_scraper/mod.rs | 8 ++++++-- nyx-chain-watcher/src/cli/commands/run/mod.rs | 3 ++- 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/common/nyxd-scraper/src/block_processor/mod.rs b/common/nyxd-scraper/src/block_processor/mod.rs index 355c6d3907..7a23bb7751 100644 --- a/common/nyxd-scraper/src/block_processor/mod.rs +++ b/common/nyxd-scraper/src/block_processor/mod.rs @@ -117,6 +117,8 @@ impl BlockProcessor { let last_pruned = storage.get_pruned_height().await?; let last_pruned_height = last_pruned.try_into().unwrap_or_default(); + debug!(last_processed_height = %last_processed_height, pruned_height = %last_pruned_height, "setting up block processor..."); + Ok(BlockProcessor { config, cancel, @@ -399,12 +401,15 @@ impl BlockProcessor { // but we need it to help the compiler figure out the future is `Send` async fn startup_resync(&mut self) -> Result<(), ScraperError> { assert!(self.pending_sync.is_empty()); + info!("attempting to run startup resync..."); self.maybe_prune_storage().await?; let latest_block = self.rpc_client.current_block_height().await? 
as u32; + info!("obtained latest block height: {latest_block}"); if latest_block > self.last_processed_height && self.last_processed_height != 0 { + info!("we have already processed some blocks in the past - attempting to resume..."); // in case we were offline for a while, // make sure we don't request blocks we'd have to prune anyway let keep_recent = self.config.pruning_options.strategy_keep_recent(); @@ -419,7 +424,9 @@ impl BlockProcessor { // this is the first time starting up if self.last_processed_height == 0 { + info!("this is the first time starting up"); let Some(starting_height) = self.config.explicit_starting_block_height else { + info!("no starting block height set - will use the default behaviour"); // nothing to do return Ok(()); }; @@ -456,7 +463,7 @@ impl BlockProcessor { } pub(crate) async fn run(&mut self) { - info!("starting processing loop"); + info!("starting block processor processing loop"); // sure, we could be more efficient and reset it on every processed block, // but the overhead is so minimal that it doesn't matter diff --git a/common/nyxd-scraper/src/scraper/mod.rs b/common/nyxd-scraper/src/scraper/mod.rs index e0508b2c09..71cb535645 100644 --- a/common/nyxd-scraper/src/scraper/mod.rs +++ b/common/nyxd-scraper/src/scraper/mod.rs @@ -139,7 +139,7 @@ pub struct NyxdScraper { task_tracker: TaskTracker, cancel_token: CancellationToken, startup_sync: Arc, - pub storage: ScraperStorage, + storage: ScraperStorage, rpc_client: RpcClient, } @@ -163,6 +163,10 @@ impl NyxdScraper { }) } + pub fn storage(&self) -> ScraperStorage { + self.storage.clone() + } + fn start_tasks( &self, mut block_requester: BlockRequester, diff --git a/common/nyxd-scraper/src/storage/mod.rs b/common/nyxd-scraper/src/storage/mod.rs index 39a2842fa8..3b29438bbb 100644 --- a/common/nyxd-scraper/src/storage/mod.rs +++ b/common/nyxd-scraper/src/storage/mod.rs @@ -55,6 +55,12 @@ pub(crate) fn log_db_operation_time(op_name: &str, start_time: Instant) { impl ScraperStorage { 
#[instrument] pub async fn init + Debug>(database_path: P) -> Result { + let database_path = database_path.as_ref(); + debug!( + "initialising scraper database path to '{}'", + database_path.display() + ); + let opts = sqlx::sqlite::SqliteConnectOptions::new() .journal_mode(sqlx::sqlite::SqliteJournalMode::Wal) .synchronous(SqliteSynchronous::Normal) diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index 61fbf0f6db..83205e5f03 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -1,9 +1,10 @@ use crate::env::vars::{NYXD_SCRAPER_START_HEIGHT, NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT}; use nyxd_scraper::{storage::ScraperStorage, NyxdScraper, PruningOptions}; +use tracing::info; pub(crate) async fn run_chain_scraper( config: &crate::config::Config, -) -> anyhow::Result { +) -> anyhow::Result { let websocket_url = std::env::var("NYXD_WS").expect("NYXD_WS not defined"); let rpc_url = std::env::var("NYXD").expect("NYXD not defined"); @@ -38,5 +39,8 @@ pub(crate) async fn run_chain_scraper( let instance = scraper.build_and_start().await?; - Ok(instance.storage) + info!("🚧 blocking until the chain has caught up..."); + instance.wait_for_startup_sync().await; + + Ok(instance) } diff --git a/nyx-chain-watcher/src/cli/commands/run/mod.rs b/nyx-chain-watcher/src/cli/commands/run/mod.rs index d3095fc37d..96344a4014 100644 --- a/nyx-chain-watcher/src/cli/commands/run/mod.rs +++ b/nyx-chain-watcher/src/cli/commands/run/mod.rs @@ -48,7 +48,8 @@ pub(crate) async fn execute(args: Args, http_port: u16) -> Result<(), NyxChainWa // Spawn the payment listener task let payment_listener_handle = tokio::spawn({ let obs_pool = watcher_pool.clone(); - let chain_storage = run_chain_scraper(&config).await?; + let chain_scraper = run_chain_scraper(&config).await?; + let chain_storage = chain_scraper.storage(); let payment_watcher_config = config.payment_watcher_config.unwrap_or_default(); 
async move { From 98a4cb4ae8b3d55d8979ccb4fe617c448a44887e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Mon, 9 Dec 2024 16:49:00 +0000 Subject: [PATCH 45/64] even more logs --- common/nyxd-scraper/src/block_processor/mod.rs | 7 ++++++- nyx-chain-watcher/src/chain_scraper/mod.rs | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/common/nyxd-scraper/src/block_processor/mod.rs b/common/nyxd-scraper/src/block_processor/mod.rs index 7a23bb7751..60b21e19a0 100644 --- a/common/nyxd-scraper/src/block_processor/mod.rs +++ b/common/nyxd-scraper/src/block_processor/mod.rs @@ -417,7 +417,12 @@ impl BlockProcessor { self.last_processed_height = max(self.last_processed_height, last_to_keep); let request_range = self.last_processed_height + 1..latest_block + 1; - info!("we need to request {request_range:?} to resync"); + info!( + keep_recent = %keep_recent, + last_to_keep = %last_to_keep, + last_processed_height = %self.last_processed_height, + "we need to request {request_range:?} to resync" + ); self.request_missing_blocks(request_range).await?; return Ok(()); } diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index 83205e5f03..f2c37bb8a4 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -1,5 +1,5 @@ use crate::env::vars::{NYXD_SCRAPER_START_HEIGHT, NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT}; -use nyxd_scraper::{storage::ScraperStorage, NyxdScraper, PruningOptions}; +use nyxd_scraper::{NyxdScraper, PruningOptions}; use tracing::info; pub(crate) async fn run_chain_scraper( From b06349efd076600ffb2b45257b8990035ad69e19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Mon, 9 Dec 2024 16:58:40 +0000 Subject: [PATCH 46/64] added env variable to nuke the db --- nyx-chain-watcher/src/chain_scraper/mod.rs | 19 +++++++++++++++++-- nyx-chain-watcher/src/env.rs | 2 ++ 2 files changed, 19 
insertions(+), 2 deletions(-) diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index f2c37bb8a4..701dd467dc 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -1,6 +1,10 @@ -use crate::env::vars::{NYXD_SCRAPER_START_HEIGHT, NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT}; +use crate::env::vars::{ + NYXD_SCRAPER_START_HEIGHT, NYXD_SCRAPER_UNSAFE_NUKE_DB, + NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT, +}; use nyxd_scraper::{NyxdScraper, PruningOptions}; -use tracing::info; +use std::fs; +use tracing::{info, warn}; pub(crate) async fn run_chain_scraper( config: &crate::config::Config, @@ -25,6 +29,17 @@ pub(crate) async fn run_chain_scraper( Some(raw) => raw.parse()?, }; + let nuke_db: bool = match std::env::var(NYXD_SCRAPER_UNSAFE_NUKE_DB).ok() { + None => false, + // blow up if passed malformed env value + Some(raw) => raw.parse()?, + }; + + if nuke_db { + warn!("☢️☢️☢️ NUKING THE SCRAPER DATABASE"); + fs::remove_file(config.chain_scraper_database_path())?; + } + let scraper = NyxdScraper::builder(nyxd_scraper::Config { websocket_url, rpc_url, diff --git a/nyx-chain-watcher/src/env.rs b/nyx-chain-watcher/src/env.rs index 009cbae10f..fc8c1a9f61 100644 --- a/nyx-chain-watcher/src/env.rs +++ b/nyx-chain-watcher/src/env.rs @@ -14,6 +14,8 @@ pub mod vars { pub const NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT: &str = "NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT"; + pub const NYXD_SCRAPER_UNSAFE_NUKE_DB: &str = "NYXD_SCRAPER_UNSAFE_NUKE_DB"; + pub const NYX_CHAIN_WATCHER_ID_ARG: &str = "NYX_CHAIN_WATCHER_ID"; pub const NYX_CHAIN_WATCHER_OUTPUT_ARG: &str = "NYX_CHAIN_WATCHER_OUTPUT"; From eeea32fdca11f26ecf01bb113dd8f689826df19c Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Mon, 9 Dec 2024 17:26:47 +0000 Subject: [PATCH 47/64] add websocket rpcs to env files --- envs/canary.env | 3 ++- envs/qa.env | 4 ++-- envs/sandbox.env | 2 +- 3 files changed, 5 insertions(+), 4 
deletions(-) diff --git a/envs/canary.env b/envs/canary.env index 84aa65eefa..6bdd85494e 100644 --- a/envs/canary.env +++ b/envs/canary.env @@ -19,6 +19,7 @@ MULTISIG_CONTRACT_ADDRESS=n1zwv6feuzhy6a9wekh96cd57lsarmqlwxdypdsplw6zhfncqw6ftq COCONUT_DKG_CONTRACT_ADDRESS=n1aakfpghcanxtc45gpqlx8j3rq0zcpyf49qmhm9mdjrfx036h4z5sy2vfh9 EXPLORER_API=https://canary-explorer.performance.nymte.ch/api/ -NYXD=https://canary-validator.performance.nymte.ch +NYXD=https://rpc.canary-validator.performance.nymte.ch NYM_API=https://canary-api.performance.nymte.ch/api/ +NYXD_WS=wss://rpc.canary-validator.performance.nymte.ch/websocket NYM_VPN_API=https://nym-vpn-api-git-deploy-canary-nyx-network-staging.vercel.app/api/ diff --git a/envs/qa.env b/envs/qa.env index 0a53e1aa0c..2a4fddb0a7 100644 --- a/envs/qa.env +++ b/envs/qa.env @@ -19,7 +19,7 @@ VESTING_CONTRACT_ADDRESS=n1jlzdxnyces4hrhqz68dqk28mrw5jgwtcfq0c2funcwrmw0dx9l9s8 REWARDING_VALIDATOR_ADDRESS=n1rfvpsynktze6wvn6ldskj8xgwfzzk5v6pnff39 EXPLORER_API=https://qa-network-explorer.qa.nymte.ch/api/ -NYXD=https://qa-validator.qa.nymte.ch -NYXD_WS=wss://qa-validator.qa.nymte.ch/websocket/ +NYXD=https://rpc.qa-validator.qa.nymte.ch +NYXD_WS=wss://rpc.qa-validator.qa.nymte.ch/websocket NYM_API=https://qa-nym-api.qa.nymte.ch/api/ NYM_VPN_API=https://nym-vpn-api-git-deploy-qa-nyx-network-staging.vercel.app/api/ diff --git a/envs/sandbox.env b/envs/sandbox.env index 3033312368..68e51fec59 100644 --- a/envs/sandbox.env +++ b/envs/sandbox.env @@ -21,6 +21,6 @@ ECASH_CONTRACT_ADDRESS=n1v3vydvs2ued84yv3khqwtgldmgwn0elljsdh08dr5s2j9x4rc5fs9jl STATISTICS_SERVICE_DOMAIN_ADDRESS="http://0.0.0.0" EXPLORER_API=https://sandbox-explorer.nymtech.net/api/ NYXD=https://rpc.sandbox.nymtech.net -NYXD_WS=wss://rpc.sandbox.nymtech.net/websocket/ +NYXD_WS=wss://rpc.sandbox.nymtech.net/websocket NYM_API=https://sandbox-nym-api1.nymtech.net/api/ NYM_VPN_API=https://nym-vpn-api-git-deploy-sandbox-nyx-network-staging.vercel.app/api/ From 
e20bea9d32b39f156ac20eb5e6b07788e9ccef1f Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Mon, 9 Dec 2024 17:27:29 +0000 Subject: [PATCH 48/64] bump version --- Cargo.lock | 2 +- nyx-chain-watcher/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbeb0d9a6d..9fe47f5026 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "axum 0.7.7", diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 7a05e9d231..05c43ae147 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.3" +version = "0.1.4" authors.workspace = true repository.workspace = true homepage.workspace = true From 06c7394861799416fc1e7e3cdfba336c5b4d2f59 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Tue, 10 Dec 2024 17:43:33 +0000 Subject: [PATCH 49/64] change webhook payload to have a structured coin for `funds` --- nyx-chain-watcher/src/models.rs | 4 +++- nyx-chain-watcher/src/payment_listener/mod.rs | 13 ++++--------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/nyx-chain-watcher/src/models.rs b/nyx-chain-watcher/src/models.rs index 7972b1bdeb..7c0ac47683 100644 --- a/nyx-chain-watcher/src/models.rs +++ b/nyx-chain-watcher/src/models.rs @@ -1,6 +1,7 @@ use rocket::serde::{Deserialize, Serialize}; use schemars::JsonSchema; use utoipa::ToSchema; +use nym_validator_client::nyxd::CosmWasmCoin; #[derive(Serialize, Deserialize, Clone, JsonSchema, ToSchema)] pub struct WebhookPayload { @@ -8,7 +9,8 @@ pub struct WebhookPayload { pub message_index: u64, pub sender_address: String, pub receiver_address: String, - pub amount: String, + pub funds: CosmWasmCoin, pub height: u128, pub memo: Option, } + diff --git a/nyx-chain-watcher/src/payment_listener/mod.rs 
b/nyx-chain-watcher/src/payment_listener/mod.rs index 4df72b5a42..30a7765589 100644 --- a/nyx-chain-watcher/src/payment_listener/mod.rs +++ b/nyx-chain-watcher/src/payment_listener/mod.rs @@ -2,7 +2,7 @@ use crate::config::payments_watcher::HttpAuthenticationOptions; use crate::config::PaymentWatcherConfig; use crate::db::queries; use crate::models::WebhookPayload; -use nym_validator_client::nyxd::AccountId; +use nym_validator_client::nyxd::{AccountId, Coin}; use nyxd_scraper::storage::ScraperStorage; use reqwest::Client; use rocket::form::validate::Contains; @@ -78,7 +78,8 @@ pub(crate) async fn run_payment_listener( } for transfer in transfer_events { - let amount: f64 = parse_unym_amount(&transfer.amount)?; + let funds = Coin::from_str(&transfer.amount)?; + let amount: f64 = funds.amount as f64 / 1e6f64; // convert to major value, there will be precision loss queries::payments::insert_payment( &watcher_pool, @@ -96,7 +97,7 @@ pub(crate) async fn run_payment_listener( message_index: transfer.message_index, sender_address: transfer.sender.to_string(), receiver_address: transfer.recipient.to_string(), - amount: transfer.amount, + funds: funds.into(), height: tx.height as u128, memo: tx.memo.clone(), }; @@ -207,9 +208,3 @@ fn parse_transfer_from_raw_log( Ok(transfers) } - -fn parse_unym_amount(amount: &str) -> anyhow::Result { - let amount = amount.trim_end_matches("unym"); - let parsed: f64 = amount.parse()?; - Ok(parsed / 1_000_000.0) -} From 1113e0c599955f0cd6fe1754ed82a7ea98f62bf1 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Tue, 10 Dec 2024 19:58:21 +0000 Subject: [PATCH 50/64] formatting --- nyx-chain-watcher/src/models.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nyx-chain-watcher/src/models.rs b/nyx-chain-watcher/src/models.rs index 7c0ac47683..93a9f9e592 100644 --- a/nyx-chain-watcher/src/models.rs +++ b/nyx-chain-watcher/src/models.rs @@ -1,7 +1,7 @@ +use nym_validator_client::nyxd::CosmWasmCoin; use 
rocket::serde::{Deserialize, Serialize}; use schemars::JsonSchema; use utoipa::ToSchema; -use nym_validator_client::nyxd::CosmWasmCoin; #[derive(Serialize, Deserialize, Clone, JsonSchema, ToSchema)] pub struct WebhookPayload { @@ -13,4 +13,3 @@ pub struct WebhookPayload { pub height: u128, pub memo: Option, } - From c02e93004fd8292e296390024018e55af58ae7f6 Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Wed, 11 Dec 2024 15:33:18 +0000 Subject: [PATCH 51/64] nyx-chain-watcher: return average price over 24 hours --- Cargo.lock | 2 +- nyx-chain-watcher/Cargo.toml | 2 +- nyx-chain-watcher/src/db/models.rs | 8 ++-- nyx-chain-watcher/src/db/queries/price.rs | 48 ++++++++++++++++++++-- nyx-chain-watcher/src/http/api/mixnodes.rs | 21 ---------- nyx-chain-watcher/src/http/api/mod.rs | 9 +--- nyx-chain-watcher/src/http/api/price.rs | 25 +++++++++-- 7 files changed, 72 insertions(+), 43 deletions(-) delete mode 100644 nyx-chain-watcher/src/http/api/mixnodes.rs diff --git a/Cargo.lock b/Cargo.lock index 9fe47f5026..1df5d34b7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.4" +version = "0.1.5" dependencies = [ "anyhow", "axum 0.7.7", diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 05c43ae147..3b671ad14e 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.4" +version = "0.1.5" authors.workspace = true repository.workspace = true homepage.workspace = true diff --git a/nyx-chain-watcher/src/db/models.rs b/nyx-chain-watcher/src/db/models.rs index 8ad20e97c0..9a433ee8ec 100644 --- a/nyx-chain-watcher/src/db/models.rs +++ b/nyx-chain-watcher/src/db/models.rs @@ -24,10 +24,10 @@ pub(crate) struct PriceRecord { #[derive(Serialize, Deserialize, Debug, ToSchema)] pub(crate) struct PriceHistory { pub(crate) timestamp: i64, - pub(crate) chf: f32, - pub(crate) usd: f32, - 
pub(crate) eur: f32, - pub(crate) btc: f32, + pub(crate) chf: f64, + pub(crate) usd: f64, + pub(crate) eur: f64, + pub(crate) btc: f64, } #[derive(Serialize, Deserialize, Debug, ToSchema)] diff --git a/nyx-chain-watcher/src/db/queries/price.rs b/nyx-chain-watcher/src/db/queries/price.rs index 7c69863744..f61ee09750 100644 --- a/nyx-chain-watcher/src/db/queries/price.rs +++ b/nyx-chain-watcher/src/db/queries/price.rs @@ -1,5 +1,7 @@ use crate::db::models::{PriceHistory, PriceRecord}; use crate::db::DbPool; +use chrono::Local; +use std::ops::Sub; pub(crate) async fn insert_nym_prices( pool: &DbPool, @@ -38,9 +40,47 @@ pub(crate) async fn get_latest_price(pool: &DbPool) -> anyhow::Result anyhow::Result { + // now less 1 day + let earliest_timestamp = Local::now().sub(chrono::Duration::days(1)).timestamp(); + + let result = sqlx::query!( + "SELECT timestamp, chf, usd, eur, btc FROM price_history WHERE timestamp >= $1;", + earliest_timestamp + ) + .fetch_all(pool) + .await?; + + let count = result.len() as f64; + + let mut price = PriceHistory { + timestamp: Local::now().timestamp(), + chf: 0f64, + usd: 0f64, + eur: 0f64, + btc: 0f64, + }; + + for p in &result { + price.chf += p.chf; + price.usd += p.usd; + price.eur += p.eur; + price.btc += p.btc; + } + + if count > 0f64 { + price.chf /= count; + price.usd /= count; + price.eur /= count; + price.btc /= count; + } + + Ok(price) +} diff --git a/nyx-chain-watcher/src/http/api/mixnodes.rs b/nyx-chain-watcher/src/http/api/mixnodes.rs deleted file mode 100644 index 0d8ffe881a..0000000000 --- a/nyx-chain-watcher/src/http/api/mixnodes.rs +++ /dev/null @@ -1,21 +0,0 @@ -use axum::{extract::State, Json, Router}; - -use crate::http::{error::HttpResult, state::AppState}; - -pub(crate) fn routes() -> Router { - Router::new().route("/", axum::routing::get(mixnodes)) -} - -#[utoipa::path( - tag = "Mixnodes", - get, - path = "/v1/mixnodes", - responses( - (status = 200, body = String) - ) -)] -async fn mixnodes(State(_state): State) 
-> HttpResult> { - Ok(Json( - serde_json::json!({"message": "😎 Nothing to see here, move along 😎"}), - )) -} diff --git a/nyx-chain-watcher/src/http/api/mod.rs b/nyx-chain-watcher/src/http/api/mod.rs index 4ca198a729..145f07c9ee 100644 --- a/nyx-chain-watcher/src/http/api/mod.rs +++ b/nyx-chain-watcher/src/http/api/mod.rs @@ -7,7 +7,6 @@ use utoipa_swagger_ui::SwaggerUi; use crate::http::{api_docs, server::HttpServer, state::AppState}; -pub(crate) mod mixnodes; pub(crate) mod price; pub(crate) struct RouterBuilder { @@ -25,13 +24,7 @@ impl RouterBuilder { "/", axum::routing::get(|| async { Redirect::permanent("/swagger") }), ) - .nest( - "/v1", - Router::new() - //.nest("/jokes", jokes::routes()) - .nest("/mixnodes", mixnodes::routes()) - .nest("/price", price::routes()), - ); + .nest("/v1", Router::new().nest("/price", price::routes())); Self { unfinished_router: router, diff --git a/nyx-chain-watcher/src/http/api/price.rs b/nyx-chain-watcher/src/http/api/price.rs index 354d3cbfa7..1971f35f7d 100644 --- a/nyx-chain-watcher/src/http/api/price.rs +++ b/nyx-chain-watcher/src/http/api/price.rs @@ -1,23 +1,24 @@ use crate::db::models::PriceHistory; -use crate::db::queries::price::get_latest_price; +use crate::db::queries::price::{get_average_price, get_latest_price}; use crate::http::error::Error; use crate::http::error::HttpResult; use crate::http::state::AppState; use axum::{extract::State, Json, Router}; pub(crate) fn routes() -> Router { - Router::new().route("/", axum::routing::get(price)) + Router::new() + .route("/", axum::routing::get(price)) + .route("/average", axum::routing::get(average_price)) } #[utoipa::path( - tag = "Nym Price", + tag = "NYM Price", get, path = "/v1/price", responses( (status = 200, body = String) ) )] - /// Fetch the latest price cached by this API async fn price(State(state): State) -> HttpResult> { get_latest_price(state.db_pool()) @@ -25,3 +26,19 @@ async fn price(State(state): State) -> HttpResult> .map(Json::from) .map_err(|_| 
Error::internal()) } + +#[utoipa::path( + tag = "NYM Price", + get, + path = "/v1/price/average", + responses( + (status = 200, body = String) + ) +)] +/// Fetch the average price cached by this API +async fn average_price(State(state): State) -> HttpResult> { + get_average_price(state.db_pool()) + .await + .map(Json::from) + .map_err(|_| Error::internal()) +} From a507ffe3714f37e1b003d500e9c98ee6b2595176 Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Thu, 12 Dec 2024 01:09:18 +0530 Subject: [PATCH 52/64] chain-scraper : use tx module for parsing transactions --- Cargo.lock | 1 + common/nyxd-scraper/src/lib.rs | 1 + common/nyxd-scraper/src/storage/manager.rs | 52 ---- common/nyxd-scraper/src/storage/mod.rs | 12 - nyx-chain-watcher/Cargo.toml | 1 + .../003_create_transactions_table.sql | 12 + nyx-chain-watcher/src/chain_scraper/mod.rs | 141 +++++++++- nyx-chain-watcher/src/cli/commands/run/mod.rs | 15 +- nyx-chain-watcher/src/payment_listener/mod.rs | 248 ++++++------------ 9 files changed, 233 insertions(+), 250 deletions(-) create mode 100644 nyx-chain-watcher/migrations/003_create_transactions_table.sql diff --git a/Cargo.lock b/Cargo.lock index 1df5d34b7d..7bddfe9c66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6841,6 +6841,7 @@ name = "nyx-chain-watcher" version = "0.1.5" dependencies = [ "anyhow", + "async-trait", "axum 0.7.7", "chrono", "clap 4.5.20", diff --git a/common/nyxd-scraper/src/lib.rs b/common/nyxd-scraper/src/lib.rs index 806de871dc..7d22921a3e 100644 --- a/common/nyxd-scraper/src/lib.rs +++ b/common/nyxd-scraper/src/lib.rs @@ -15,6 +15,7 @@ pub(crate) mod scraper; pub mod storage; pub use block_processor::pruning::{PruningOptions, PruningStrategy}; +pub use block_processor::types::ParsedTransactionResponse; pub use modules::{BlockModule, MsgModule, TxModule}; pub use scraper::{Config, NyxdScraper, StartingBlockOpts}; pub use storage::models; diff --git a/common/nyxd-scraper/src/storage/manager.rs b/common/nyxd-scraper/src/storage/manager.rs 
index 5475f5d355..fb40a065b8 100644 --- a/common/nyxd-scraper/src/storage/manager.rs +++ b/common/nyxd-scraper/src/storage/manager.rs @@ -237,58 +237,6 @@ impl StorageManager { Ok(-1) } } - - #[allow(dead_code)] - pub async fn get_transactions_after_height( - &self, - min_height: i64, - message_type: Option<&str>, - ) -> Result, sqlx::Error> { - match message_type { - Some(msg_type) => { - sqlx::query_as!( - TransactionWithBlock, - r#" - SELECT t.hash, t.height, t.memo, t.raw_log - FROM message m - JOIN "transaction" t ON m.transaction_hash = t.hash - JOIN block b ON t.height = b.height - WHERE t.height > ? - AND m.type = ? - ORDER BY t.height ASC - "#, - min_height, - msg_type - ) - .fetch_all(&self.connection_pool) - .await - } - None => { - sqlx::query_as!( - TransactionWithBlock, - r#" - SELECT t.hash, t.height, t.memo, t.raw_log - FROM message m - JOIN "transaction" t ON m.transaction_hash = t.hash - JOIN block b ON t.height = b.height - WHERE t.height > ? - ORDER BY t.height ASC - "#, - min_height - ) - .fetch_all(&self.connection_pool) - .await - } - } - } -} - -#[derive(Debug, sqlx::FromRow)] -pub struct TransactionWithBlock { - pub hash: String, - pub height: i64, - pub memo: Option, - pub raw_log: Option, } // make those generic over executor so that they could be performed over connection pool and a tx diff --git a/common/nyxd-scraper/src/storage/mod.rs b/common/nyxd-scraper/src/storage/mod.rs index 3b29438bbb..2095bde1e3 100644 --- a/common/nyxd-scraper/src/storage/mod.rs +++ b/common/nyxd-scraper/src/storage/mod.rs @@ -13,7 +13,6 @@ use crate::{ models::{CommitSignature, Validator}, }, }; -use manager::TransactionWithBlock; use sqlx::{ sqlite::{SqliteAutoVacuum, SqliteSynchronous}, types::time::OffsetDateTime, @@ -221,17 +220,6 @@ impl ScraperStorage { pub async fn get_pruned_height(&self) -> Result { Ok(self.manager.get_pruned_height().await?) 
} - - pub async fn get_transactions_after_height( - &self, - min_height: i64, - message_type: Option<&str>, - ) -> Result, ScraperError> { - Ok(self - .manager - .get_transactions_after_height(min_height, message_type) - .await?) - } } pub async fn persist_block( diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 3b671ad14e..5c6fe60719 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -15,6 +15,7 @@ readme.workspace = true [dependencies] anyhow = { workspace = true } +async-trait.workspace = true axum = { workspace = true, features = ["tokio"] } chrono = { workspace = true } clap = { workspace = true, features = ["cargo", "derive", "env"] } diff --git a/nyx-chain-watcher/migrations/003_create_transactions_table.sql b/nyx-chain-watcher/migrations/003_create_transactions_table.sql new file mode 100644 index 0000000000..e707ddaf85 --- /dev/null +++ b/nyx-chain-watcher/migrations/003_create_transactions_table.sql @@ -0,0 +1,12 @@ +CREATE TABLE IF NOT EXISTS transactions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + tx_hash TEXT NOT NULL, + height INTEGER NOT NULL, + message_index INTEGER NOT NULL, + sender TEXT NOT NULL, + recipient TEXT NOT NULL, + amount TEXT NOT NULL, + memo TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + UNIQUE(tx_hash, message_index) +); \ No newline at end of file diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index 701dd467dc..2835a8d105 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -1,13 +1,20 @@ +use crate::config::PaymentWatcherConfig; use crate::env::vars::{ NYXD_SCRAPER_START_HEIGHT, NYXD_SCRAPER_UNSAFE_NUKE_DB, NYXD_SCRAPER_USE_BEST_EFFORT_START_HEIGHT, }; -use nyxd_scraper::{NyxdScraper, PruningOptions}; +use async_trait::async_trait; +use nyxd_scraper::{ + error::ScraperError, storage::StorageTransaction, NyxdScraper, ParsedTransactionResponse, + 
PruningOptions, TxModule, +}; +use sqlx::SqlitePool; use std::fs; use tracing::{info, warn}; pub(crate) async fn run_chain_scraper( config: &crate::config::Config, + db_pool: SqlitePool, ) -> anyhow::Result { let websocket_url = std::env::var("NYXD_WS").expect("NYXD_WS not defined"); @@ -40,6 +47,10 @@ pub(crate) async fn run_chain_scraper( fs::remove_file(config.chain_scraper_database_path())?; } + if config.payment_watcher_config.is_none() { + anyhow::bail!("No payment watcher config found, not running chain scraper"); + } + let scraper = NyxdScraper::builder(nyxd_scraper::Config { websocket_url, rpc_url, @@ -50,7 +61,11 @@ pub(crate) async fn run_chain_scraper( start_block_height, use_best_effort_start_height, }, - }); + }) + .with_tx_module(EventScraperModule::new( + db_pool, + config.payment_watcher_config.clone().unwrap_or_default(), + )); let instance = scraper.build_and_start().await?; @@ -59,3 +74,125 @@ pub(crate) async fn run_chain_scraper( Ok(instance) } + +pub struct EventScraperModule { + db_pool: SqlitePool, + payment_config: PaymentWatcherConfig, +} + +impl EventScraperModule { + pub fn new(db_pool: SqlitePool, payment_config: PaymentWatcherConfig) -> Self { + Self { + db_pool, + payment_config, + } + } + + #[allow(clippy::too_many_arguments)] + async fn store_transfer_event( + &self, + tx_hash: &str, + height: i64, + message_index: i64, + sender: String, + recipient: String, + amount: String, + memo: Option, + ) -> Result<(), sqlx::Error> { + sqlx::query!( + r#" + INSERT INTO transactions (tx_hash, height, message_index, sender, recipient, amount, memo) + VALUES (?, ?, ?, ?, ?, ?, ?) 
+ "#, + tx_hash, + height, + message_index, + sender, + recipient, + amount, + memo + ) + .execute(&self.db_pool) + .await?; + + Ok(()) + } +} + +#[async_trait] +impl TxModule for EventScraperModule { + async fn handle_tx( + &mut self, + tx: &ParsedTransactionResponse, + _: &mut StorageTransaction, + ) -> Result<(), ScraperError> { + let events = &tx.tx_result.events; + let height = tx.height.value() as i64; + let tx_hash = tx.hash.to_string(); + let memo = tx.tx.body.memo.clone(); + + // Don't process failed transactions + if !tx.tx_result.code.is_ok() { + return Ok(()); + } + + // Process each event + for event in events { + // Only process transfer events + if event.kind == "transfer" { + let mut recipient = None; + let mut sender = None; + let mut amount = None; + // TODO: get message index from event + let message_index = 0; + + // Extract transfer event attributes + for attr in &event.attributes { + if let (Ok(key), Ok(value)) = (attr.key_str(), attr.value_str()) { + match key { + "recipient" => recipient = Some(value.to_string()), + "sender" => sender = Some(value.to_string()), + "amount" => amount = Some(value.to_string()), + _ => continue, + } + } + } + + // If we have all required fields, check if recipient is watched and store + if let (Some(recipient), Some(sender), Some(amount)) = (recipient, sender, amount) { + // Check if any watcher is watching this recipient + let is_watched = self.payment_config.watchers.iter().any(|watcher| { + if let Some(watched_accounts) = + &watcher.watch_for_transfer_recipient_accounts + { + watched_accounts + .iter() + .any(|account| account.to_string() == recipient) + } else { + false + } + }); + + if is_watched { + if let Err(e) = self + .store_transfer_event( + &tx_hash, + height, + message_index, + sender, + recipient, + amount, + Some(memo.clone()), + ) + .await + { + warn!("Failed to store transfer event: {}", e); + } + } + } + } + } + + Ok(()) + } +} diff --git a/nyx-chain-watcher/src/cli/commands/run/mod.rs 
b/nyx-chain-watcher/src/cli/commands/run/mod.rs index 96344a4014..ef2a553995 100644 --- a/nyx-chain-watcher/src/cli/commands/run/mod.rs +++ b/nyx-chain-watcher/src/cli/commands/run/mod.rs @@ -47,18 +47,15 @@ pub(crate) async fn execute(args: Args, http_port: u16) -> Result<(), NyxChainWa // Spawn the payment listener task let payment_listener_handle = tokio::spawn({ - let obs_pool = watcher_pool.clone(); - let chain_scraper = run_chain_scraper(&config).await?; - let chain_storage = chain_scraper.storage(); + let price_scraper_pool = storage.pool_owned().await; + let scraper_pool = storage.pool_owned().await; + run_chain_scraper(&config, scraper_pool).await?; let payment_watcher_config = config.payment_watcher_config.unwrap_or_default(); async move { - if let Err(e) = payment_listener::run_payment_listener( - payment_watcher_config, - obs_pool, - chain_storage, - ) - .await + if let Err(e) = + payment_listener::run_payment_listener(payment_watcher_config, price_scraper_pool) + .await { error!("Payment listener error: {}", e); } diff --git a/nyx-chain-watcher/src/payment_listener/mod.rs b/nyx-chain-watcher/src/payment_listener/mod.rs index 30a7765589..826f23dfd8 100644 --- a/nyx-chain-watcher/src/payment_listener/mod.rs +++ b/nyx-chain-watcher/src/payment_listener/mod.rs @@ -2,33 +2,19 @@ use crate::config::payments_watcher::HttpAuthenticationOptions; use crate::config::PaymentWatcherConfig; use crate::db::queries; use crate::models::WebhookPayload; -use nym_validator_client::nyxd::{AccountId, Coin}; -use nyxd_scraper::storage::ScraperStorage; +use nym_validator_client::nyxd::Coin; use reqwest::Client; -use rocket::form::validate::Contains; -use serde_json::Value; use sqlx::SqlitePool; use std::str::FromStr; use tokio::time::{self, Duration}; -use tracing::{error, info, trace}; - -#[derive(Debug)] -struct TransferEvent { - recipient: AccountId, - sender: AccountId, - amount: String, - message_index: u64, -} +use tracing::{error, info}; pub(crate) async fn 
run_payment_listener( payment_watcher_config: PaymentWatcherConfig, watcher_pool: SqlitePool, - chain_storage: ScraperStorage, ) -> anyhow::Result<()> { let client = Client::new(); - let default_message_types = vec!["/cosmos.bank.v1beta1.MsgSend".to_string()]; - loop { // 1. get the last height this watcher ran at let last_checked_height = queries::payments::get_last_checked_height(&watcher_pool).await?; @@ -36,107 +22,82 @@ pub(crate) async fn run_payment_listener( // 2. iterate through watchers for watcher in &payment_watcher_config.watchers { - let watch_for_chain_message_types = watcher - .watch_for_chain_message_types - .as_ref() - .unwrap_or(&default_message_types); + if watcher.watch_for_transfer_recipient_accounts.is_some() { + // 3. Query new transactions for this watcher's recipient accounts + let transactions = sqlx::query!( + r#" + SELECT * FROM transactions + WHERE height > ? + ORDER BY height ASC, message_index ASC + "#, + last_checked_height + ) + .fetch_all(&watcher_pool) + .await?; + + if !transactions.is_empty() { + info!( + "[watcher = {}] Processing {} transactions", + watcher.id, + transactions.len() + ); + } - // 3. 
build up transactions that match the message types we are looking for - let mut transactions = vec![]; - for message_type in watch_for_chain_message_types { - match chain_storage - .get_transactions_after_height( - last_checked_height, - Some(message_type), + for tx in transactions { + let funds = Coin::from_str(&tx.amount)?; + let amount: f64 = funds.amount as f64 / 1e6f64; // convert to major value, there will be precision loss + + // Store transaction hash for later use + let tx_hash = tx.tx_hash.clone(); + let message_index = tx.message_index; + + queries::payments::insert_payment( + &watcher_pool, + tx.tx_hash, + tx.sender.clone(), + tx.recipient.clone(), + amount, + tx.height, + tx.memo.clone(), ) - .await { - Ok(txs) => { - for t in txs { - transactions.push(t); + .await?; + + let webhook_data = WebhookPayload { + transaction_hash: tx_hash.clone(), + message_index: message_index as u64, + sender_address: tx.sender, + receiver_address: tx.recipient, + funds: funds.into(), + height: tx.height as u128, + memo: tx.memo, + }; + + let mut request_builder = client.post(&watcher.webhook_url).json(&webhook_data); + + if let Some(auth) = &watcher.authentication { + match auth { + HttpAuthenticationOptions::AuthorizationBearerToken { token } => { + request_builder = request_builder.bearer_auth(token); + } } } - Err(e) => error!("Failed to get transactions (message_type = {message_type}) from scraper database: {e}") - } - } - - for tx in transactions { - if let Some(raw_log) = tx.raw_log.as_deref() { - if let Some(watch_for_transfer_recipient_accounts) = - &watcher.watch_for_transfer_recipient_accounts - { - // 4. 
match recipient accounts we are looking for - match parse_transfer_from_raw_log( - raw_log, - watch_for_transfer_recipient_accounts, - ) { - Ok(transfer_events) => { - if !transfer_events.is_empty() { - info!( - "[watcher = {}] Processing transaction: {} - {} payment events found", - watcher.id, tx.hash, transfer_events.len() - ); - } - - for transfer in transfer_events { - let funds = Coin::from_str(&transfer.amount)?; - let amount: f64 = funds.amount as f64 / 1e6f64; // convert to major value, there will be precision loss - - queries::payments::insert_payment( - &watcher_pool, - tx.hash.clone(), - transfer.sender.clone().to_string(), - transfer.recipient.clone().to_string(), - amount, - tx.height, - tx.memo.clone(), - ) - .await?; - let webhook_data = WebhookPayload { - transaction_hash: tx.hash.clone(), - message_index: transfer.message_index, - sender_address: transfer.sender.to_string(), - receiver_address: transfer.recipient.to_string(), - funds: funds.into(), - height: tx.height as u128, - memo: tx.memo.clone(), - }; - - let mut request_builder = - client.post(&watcher.webhook_url).json(&webhook_data); - - if let Some(auth) = &watcher.authentication { - match auth { - HttpAuthenticationOptions::AuthorizationBearerToken { token } => { - request_builder = request_builder.bearer_auth(token); - } - } - } - - match request_builder.send().await { - Ok(res) => info!( - "[watcher = {}] ✅ Webhook {} {} - tx {}, index {}", - watcher.id, - res.status(), - res.url(), - tx.hash, - transfer.message_index, - ), - Err(e) => error!( - "[watcher = {}] ❌ Webhook {:?} {:?} error = {}", - watcher.id, - e.status(), - e.url(), - e, - ), - } - } - } - Err(e) => error!( - "[watcher = {}] ❌ Parse logs for tx {} failed, error = {}", - watcher.id, tx.hash, e, - ), - } + match request_builder.send().await { + Ok(res) => info!( + "[watcher = {}] ✅ Webhook {} {} - tx {}, index {}", + watcher.id, + res.status(), + res.url(), + tx_hash, + message_index, + ), + Err(e) => error!( + "[watcher 
= {}] ❌ Webhook {:?} {:?} error = {}", + watcher.id, + e.status(), + e.url(), + e, + ), } } } @@ -145,66 +106,3 @@ pub(crate) async fn run_payment_listener( time::sleep(Duration::from_secs(10)).await; } } - -fn parse_transfer_from_raw_log( - raw_log: &str, - watch_for_transfer_recipient_accounts: &Vec, -) -> anyhow::Result> { - let log_value: Value = serde_json::from_str(raw_log)?; - - let mut transfers: Vec = vec![]; - - let default_value = vec![]; - let log_entries: &Vec = log_value.as_array().unwrap_or(&default_value); - - trace!("contains {} log entries", log_entries.len()); - - for log_entry in log_entries { - let message_index = log_entry["msg_index"].as_u64().unwrap_or_default(); - - trace!("entry - {message_index}..."); - - if let Some(events) = log_entry["events"].as_array() { - for transfer_event in events.iter().filter(|e| e["type"] == "transfer") { - if let Some(attrs) = transfer_event["attributes"].as_array() { - let mut recipient: Option = None; - let mut sender: Option = None; - let mut amount: Option = None; - - for attr in attrs { - match attr["key"].as_str() { - Some("recipient") => { - recipient = - AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); - } - Some("sender") => { - sender = - AccountId::from_str(attr["value"].as_str().unwrap_or("")).ok(); - } - Some("amount") => { - amount = Some(attr["value"].as_str().unwrap_or("").to_string()) - } - // TODO: parse message index - _ => continue, - } - } - - if let (Some(recipient), Some(sender), Some(amount)) = - (recipient, sender, amount) - { - if watch_for_transfer_recipient_accounts.contains(&recipient) { - transfers.push(TransferEvent { - recipient, - sender, - amount, - message_index, - }); - } - } - } - } - } - } - - Ok(transfers) -} From 7b92e471c8eecf44b6cd374a6be16be15a52f84e Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Thu, 12 Dec 2024 01:38:15 +0530 Subject: [PATCH 53/64] bugfix: dont manually set last_processed_height for pruning=nothing strat. 
--- common/nyxd-scraper/src/block_processor/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/common/nyxd-scraper/src/block_processor/mod.rs b/common/nyxd-scraper/src/block_processor/mod.rs index 60b21e19a0..e75d62b7df 100644 --- a/common/nyxd-scraper/src/block_processor/mod.rs +++ b/common/nyxd-scraper/src/block_processor/mod.rs @@ -414,7 +414,10 @@ impl BlockProcessor { // make sure we don't request blocks we'd have to prune anyway let keep_recent = self.config.pruning_options.strategy_keep_recent(); let last_to_keep = latest_block - keep_recent; - self.last_processed_height = max(self.last_processed_height, last_to_keep); + + if !self.config.pruning_options.strategy.is_nothing() { + self.last_processed_height = max(self.last_processed_height, last_to_keep); + } let request_range = self.last_processed_height + 1..latest_block + 1; info!( From 94ab78606a8fc1d1f8d56b843864bcba3c4881ee Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Wed, 11 Dec 2024 21:32:35 +0000 Subject: [PATCH 54/64] Bump version --- Cargo.lock | 2 +- nyx-chain-watcher/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7bddfe9c66..0036b717b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.5" +version = "0.1.6" dependencies = [ "anyhow", "async-trait", diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 5c6fe60719..57b4ca4675 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.5" +version = "0.1.6" authors.workspace = true repository.workspace = true homepage.workspace = true From 4f07343efdec27c97b2a3b5ee083aa84f3026022 Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Tue, 17 Dec 2024 15:05:24 +0530 Subject: [PATCH 55/64] api: fetch addresses from config. 
--- nyx-chain-watcher/src/http/api/mod.rs | 4 +- nyx-chain-watcher/src/http/api/server.rs | 91 ----------------------- nyx-chain-watcher/src/http/api/watcher.rs | 44 +++++++++++ 3 files changed, 47 insertions(+), 92 deletions(-) delete mode 100644 nyx-chain-watcher/src/http/api/server.rs create mode 100644 nyx-chain-watcher/src/http/api/watcher.rs diff --git a/nyx-chain-watcher/src/http/api/mod.rs b/nyx-chain-watcher/src/http/api/mod.rs index 145f07c9ee..9e405a439d 100644 --- a/nyx-chain-watcher/src/http/api/mod.rs +++ b/nyx-chain-watcher/src/http/api/mod.rs @@ -8,6 +8,7 @@ use utoipa_swagger_ui::SwaggerUi; use crate::http::{api_docs, server::HttpServer, state::AppState}; pub(crate) mod price; +pub(crate) mod watcher; pub(crate) struct RouterBuilder { unfinished_router: Router, @@ -24,7 +25,8 @@ impl RouterBuilder { "/", axum::routing::get(|| async { Redirect::permanent("/swagger") }), ) - .nest("/v1", Router::new().nest("/price", price::routes())); + .nest("/v1", Router::new().nest("/price", price::routes())) + .nest("/v1", Router::new().nest("/watcher", watcher::routes())); Self { unfinished_router: router, diff --git a/nyx-chain-watcher/src/http/api/server.rs b/nyx-chain-watcher/src/http/api/server.rs deleted file mode 100644 index 92ac268eae..0000000000 --- a/nyx-chain-watcher/src/http/api/server.rs +++ /dev/null @@ -1,91 +0,0 @@ -use axum::Router; -use core::net::SocketAddr; -use tokio::{net::TcpListener, task::JoinHandle}; -use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; - -use crate::{ - db::DbPool, - http::{api::RouterBuilder, state::AppState}, -}; - -/// Return handles that allow for graceful shutdown of server + awaiting its -/// background tokio task -pub(crate) async fn start_http_api( - db_pool: DbPool, - http_port: u16, - nym_http_cache_ttl: u64, -) -> anyhow::Result { - let router_builder = RouterBuilder::with_default_routes(); - - let state = AppState::new(db_pool, nym_http_cache_ttl); - let router = 
router_builder.with_state(state); - - let bind_addr = format!("0.0.0.0:{}", http_port); - let server = router.build_server(bind_addr).await?; - - Ok(start_server(server)) -} - -fn start_server(server: HttpServer) -> ShutdownHandles { - // one copy is stored to trigger a graceful shutdown later - let shutdown_button = CancellationToken::new(); - // other copy is given to server to listen for a shutdown - let shutdown_receiver = shutdown_button.clone(); - let shutdown_receiver = shutdown_receiver.cancelled_owned(); - - let server_handle = tokio::spawn(async move { server.run(shutdown_receiver).await }); - - ShutdownHandles { - server_handle, - shutdown_button, - } -} - -pub(crate) struct ShutdownHandles { - server_handle: JoinHandle>, - shutdown_button: CancellationToken, -} - -impl ShutdownHandles { - /// Send graceful shutdown signal to server and wait for server task to complete - pub(crate) async fn shutdown(self) -> anyhow::Result<()> { - self.shutdown_button.cancel(); - - match self.server_handle.await { - Ok(Ok(_)) => { - tracing::info!("HTTP server shut down without errors"); - } - Ok(Err(err)) => { - tracing::error!("HTTP server terminated with: {err}"); - anyhow::bail!(err) - } - Err(err) => { - tracing::error!("Server task panicked: {err}"); - } - }; - - Ok(()) - } -} - -pub(crate) struct HttpServer { - router: Router, - listener: TcpListener, -} - -impl HttpServer { - pub(crate) fn new(router: Router, listener: TcpListener) -> Self { - Self { router, listener } - } - - pub(crate) async fn run(self, receiver: WaitForCancellationFutureOwned) -> std::io::Result<()> { - // into_make_service_with_connect_info allows us to see client ip address - axum::serve( - self.listener, - self.router - .into_make_service_with_connect_info::(), - ) - .with_graceful_shutdown(receiver) - .await - } -} diff --git a/nyx-chain-watcher/src/http/api/watcher.rs b/nyx-chain-watcher/src/http/api/watcher.rs new file mode 100644 index 0000000000..07c2d9c422 --- /dev/null +++ 
b/nyx-chain-watcher/src/http/api/watcher.rs @@ -0,0 +1,44 @@ +use crate::config::Config; +use crate::http::error::Error; +use crate::http::error::HttpResult; +use crate::http::state::AppState; +use axum::{Json, Router}; + +pub(crate) fn routes() -> Router { + Router::new().route("/addresses", axum::routing::get(get_addresses)) +} + +#[utoipa::path( + tag = "Watcher Configuration", + get, + path = "/v1/watcher/addresses", + responses( + (status = 200, body = Vec) + ) +)] + +/// Fetch the addresses being watched by the chain watcher +async fn get_addresses() -> HttpResult>> { + let config = + Config::read_from_toml_file_in_default_location().map_err(|_| Error::internal())?; + + let addresses = config + .payment_watcher_config + .as_ref() + .and_then(|config| { + config.watchers.iter().find_map(|watcher| { + watcher + .watch_for_transfer_recipient_accounts + .as_ref() + .map(|accounts| { + accounts + .iter() + .map(|account| account.to_string()) + .collect::>() + }) + }) + }) + .unwrap_or_default(); + + Ok(Json(addresses)) +} From f5ca1ee20ac808255f18b6468f8b9f59c561704f Mon Sep 17 00:00:00 2001 From: Mark Sinclair Date: Tue, 17 Dec 2024 15:03:49 +0000 Subject: [PATCH 56/64] Bump version --- Cargo.lock | 2 +- nyx-chain-watcher/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0036b717b5..a60922da0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.6" +version = "0.1.7" dependencies = [ "anyhow", "async-trait", diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 57b4ca4675..1385bbaddb 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.6" +version = "0.1.7" authors.workspace = true repository.workspace = true homepage.workspace = true From c805aa79a4f0e371489d7a039940a0d86649aebf Mon Sep 17 00:00:00 2001 
From: Sachin Kamath Date: Thu, 19 Dec 2024 08:56:13 +0530 Subject: [PATCH 57/64] nyx-chain-watcher: fallback to env variable when reading config --- nyx-chain-watcher/src/chain_scraper/mod.rs | 4 -- nyx-chain-watcher/src/http/api/watcher.rs | 44 ++++++++++++---------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/nyx-chain-watcher/src/chain_scraper/mod.rs b/nyx-chain-watcher/src/chain_scraper/mod.rs index 2835a8d105..93f768a236 100644 --- a/nyx-chain-watcher/src/chain_scraper/mod.rs +++ b/nyx-chain-watcher/src/chain_scraper/mod.rs @@ -47,10 +47,6 @@ pub(crate) async fn run_chain_scraper( fs::remove_file(config.chain_scraper_database_path())?; } - if config.payment_watcher_config.is_none() { - anyhow::bail!("No payment watcher config found, not running chain scraper"); - } - let scraper = NyxdScraper::builder(nyxd_scraper::Config { websocket_url, rpc_url, diff --git a/nyx-chain-watcher/src/http/api/watcher.rs b/nyx-chain-watcher/src/http/api/watcher.rs index 07c2d9c422..68e6f4f91c 100644 --- a/nyx-chain-watcher/src/http/api/watcher.rs +++ b/nyx-chain-watcher/src/http/api/watcher.rs @@ -1,8 +1,9 @@ use crate::config::Config; -use crate::http::error::Error; +use crate::env; use crate::http::error::HttpResult; use crate::http::state::AppState; use axum::{Json, Router}; +use std::env::var; pub(crate) fn routes() -> Router { Router::new().route("/addresses", axum::routing::get(get_addresses)) @@ -19,26 +20,29 @@ pub(crate) fn routes() -> Router { /// Fetch the addresses being watched by the chain watcher async fn get_addresses() -> HttpResult>> { - let config = - Config::read_from_toml_file_in_default_location().map_err(|_| Error::internal())?; - - let addresses = config - .payment_watcher_config - .as_ref() - .and_then(|config| { - config.watchers.iter().find_map(|watcher| { - watcher - .watch_for_transfer_recipient_accounts - .as_ref() - .map(|accounts| { - accounts - .iter() - .map(|account| account.to_string()) - .collect::>() - }) + let addresses = 
match Config::read_from_toml_file_in_default_location() { + Ok(config) => config + .payment_watcher_config + .as_ref() + .and_then(|config| { + config.watchers.iter().find_map(|watcher| { + watcher + .watch_for_transfer_recipient_accounts + .as_ref() + .map(|accounts| { + accounts + .iter() + .map(|account| account.to_string()) + .collect::>() + }) + }) }) - }) - .unwrap_or_default(); + .unwrap_or_default(), + // If the config file doesn't exist, fall back to env variable + Err(_) => var(env::vars::NYX_CHAIN_WATCHER_WATCH_ACCOUNTS) + .map(|accounts| accounts.split(',').map(String::from).collect()) + .unwrap_or_default(), + }; Ok(Json(addresses)) } From 6e66cc2467d392bc5946682721d47164686d91e4 Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Thu, 19 Dec 2024 09:07:32 +0530 Subject: [PATCH 58/64] validator-rewarder: fix errors --- nym-validator-rewarder/src/config/mod.rs | 7 +++++-- .../src/rewarder/block_signing/mod.rs | 16 ++++++++++------ nym-validator-rewarder/src/rewarder/mod.rs | 4 ++-- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/nym-validator-rewarder/src/config/mod.rs b/nym-validator-rewarder/src/config/mod.rs index 31c44b07e7..59e43c2a02 100644 --- a/nym-validator-rewarder/src/config/mod.rs +++ b/nym-validator-rewarder/src/config/mod.rs @@ -12,7 +12,7 @@ use nym_config::{ DEFAULT_CONFIG_DIR, DEFAULT_CONFIG_FILENAME, DEFAULT_DATA_DIR, NYM_DIR, }; use nym_validator_client::nyxd::{AccountId, Coin}; -use nyxd_scraper::PruningOptions; +use nyxd_scraper::{PruningOptions, StartingBlockOpts}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr}; use std::io; @@ -129,7 +129,10 @@ impl Config { database_path: self.storage_paths.nyxd_scraper.clone(), pruning_options: self.nyxd_scraper.pruning, store_precommits: self.nyxd_scraper.store_precommits, - start_block_height: None, + start_block: StartingBlockOpts { + start_block_height: None, + use_best_effort_start_height: true, + }, } } diff --git 
a/nym-validator-rewarder/src/rewarder/block_signing/mod.rs b/nym-validator-rewarder/src/rewarder/block_signing/mod.rs index 19b149ce25..5a32069a04 100644 --- a/nym-validator-rewarder/src/rewarder/block_signing/mod.rs +++ b/nym-validator-rewarder/src/rewarder/block_signing/mod.rs @@ -31,7 +31,7 @@ impl EpochSigning { trace!("attempting to get pre-commit for {address} at height {height}"); if let Some(precommit) = self .nyxd_scraper - .storage + .storage() .get_precommit(address, height) .await? { @@ -102,7 +102,11 @@ impl EpochSigning { current_epoch.end_rfc3339() ); - let validators = self.nyxd_scraper.storage.get_all_known_validators().await?; + let validators = self + .nyxd_scraper + .storage() + .get_all_known_validators() + .await?; debug!("retrieved {} known validators", validators.len()); let epoch_start = current_epoch.start_time; @@ -110,7 +114,7 @@ impl EpochSigning { let Some(first_block) = self .nyxd_scraper - .storage + .storage() .get_first_block_height_after(epoch_start) .await? else { @@ -121,7 +125,7 @@ impl EpochSigning { let Some(last_block) = self .nyxd_scraper - .storage + .storage() .get_last_block_height_before(epoch_end) .await? 
else { @@ -168,7 +172,7 @@ impl EpochSigning { let signed = self .nyxd_scraper - .storage + .storage() .get_signed_between_times(&validator.consensus_address, epoch_start, epoch_end) .await?; signed_in_epoch.insert(validator, RawValidatorResult::new(signed, vp, whitelisted)); @@ -176,7 +180,7 @@ impl EpochSigning { let total = self .nyxd_scraper - .storage + .storage() .get_blocks_between(epoch_start, epoch_end) .await?; diff --git a/nym-validator-rewarder/src/rewarder/mod.rs b/nym-validator-rewarder/src/rewarder/mod.rs index b423c03dea..8766c924a2 100644 --- a/nym-validator-rewarder/src/rewarder/mod.rs +++ b/nym-validator-rewarder/src/rewarder/mod.rs @@ -439,7 +439,7 @@ impl Rewarder { if let Some(epoch_signing) = &self.epoch_signing { if epoch_signing .nyxd_scraper - .storage + .storage() .get_first_block_height_after(epoch_start) .await? .is_none() @@ -451,7 +451,7 @@ impl Rewarder { if epoch_signing .nyxd_scraper - .storage + .storage() .get_last_block_height_before(epoch_end) .await? 
.is_none() From bad85abff318be752ae93e27c985d3a6fd435020 Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Thu, 19 Dec 2024 09:13:32 +0530 Subject: [PATCH 59/64] chain-watcher: bump version --- Cargo.lock | 2 +- nyx-chain-watcher/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a60922da0b..6e8b6602d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6838,7 +6838,7 @@ dependencies = [ [[package]] name = "nyx-chain-watcher" -version = "0.1.7" +version = "0.1.8" dependencies = [ "anyhow", "async-trait", diff --git a/nyx-chain-watcher/Cargo.toml b/nyx-chain-watcher/Cargo.toml index 1385bbaddb..f5371c78c7 100644 --- a/nyx-chain-watcher/Cargo.toml +++ b/nyx-chain-watcher/Cargo.toml @@ -3,7 +3,7 @@ [package] name = "nyx-chain-watcher" -version = "0.1.7" +version = "0.1.8" authors.workspace = true repository.workspace = true homepage.workspace = true From 64373548e46060aaeb813c97321cbab3159b3dfe Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Thu, 19 Dec 2024 20:14:06 +0530 Subject: [PATCH 60/64] chain-watcher: windows workaround for db path, add sqlx --- ...ad476160b26ea84486c563e731cfd757d7904.json | 12 ++++ ...95710f4119917e5f0bf3af4103a428515843b.json | 44 ++++++++++++ ...3f3707065e7e34e920c49f2d50bef7ab9a968.json | 12 ++++ ...ba380e7d5f0127f4745705b8ac9af6c170d19.json | 68 +++++++++++++++++++ ...a8f6d4ac14e7e9b7b482bd57fae568ebd96ba.json | 20 ++++++ ...612f219ae969e98b331af04381429fef8716c.json | 12 ++++ ...6fffe5631446fe66f17b8a86cbdca704a3517.json | 44 ++++++++++++ nyx-chain-watcher/build.rs | 13 ++-- 8 files changed, 221 insertions(+), 4 deletions(-) create mode 100644 nyx-chain-watcher/.sqlx/query-26aa8b5b2d6d98257792d4726b3ad476160b26ea84486c563e731cfd757d7904.json create mode 100644 nyx-chain-watcher/.sqlx/query-5931f7d30101a718f49487e9f7895710f4119917e5f0bf3af4103a428515843b.json create mode 100644 nyx-chain-watcher/.sqlx/query-5fcd0f525ab790a7ebe8659d6cf3f3707065e7e34e920c49f2d50bef7ab9a968.json 
create mode 100644 nyx-chain-watcher/.sqlx/query-7b9abf4ff422b8d7a942955dc4fba380e7d5f0127f4745705b8ac9af6c170d19.json create mode 100644 nyx-chain-watcher/.sqlx/query-83df8a7c5fb24ba4d89f1feb300a8f6d4ac14e7e9b7b482bd57fae568ebd96ba.json create mode 100644 nyx-chain-watcher/.sqlx/query-a9d6227ecfa9866096da1367518612f219ae969e98b331af04381429fef8716c.json create mode 100644 nyx-chain-watcher/.sqlx/query-b3ced10067918010e613f61f6fb6fffe5631446fe66f17b8a86cbdca704a3517.json diff --git a/nyx-chain-watcher/.sqlx/query-26aa8b5b2d6d98257792d4726b3ad476160b26ea84486c563e731cfd757d7904.json b/nyx-chain-watcher/.sqlx/query-26aa8b5b2d6d98257792d4726b3ad476160b26ea84486c563e731cfd757d7904.json new file mode 100644 index 0000000000..f900b0740e --- /dev/null +++ b/nyx-chain-watcher/.sqlx/query-26aa8b5b2d6d98257792d4726b3ad476160b26ea84486c563e731cfd757d7904.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "INSERT INTO price_history\n (timestamp, chf, usd, eur, btc)\n VALUES\n ($1, $2, $3, $4, $5)\n ON CONFLICT(timestamp) DO UPDATE SET\n chf=excluded.chf,\n usd=excluded.usd,\n eur=excluded.eur,\n btc=excluded.btc;", + "describe": { + "columns": [], + "parameters": { + "Right": 5 + }, + "nullable": [] + }, + "hash": "26aa8b5b2d6d98257792d4726b3ad476160b26ea84486c563e731cfd757d7904" +} diff --git a/nyx-chain-watcher/.sqlx/query-5931f7d30101a718f49487e9f7895710f4119917e5f0bf3af4103a428515843b.json b/nyx-chain-watcher/.sqlx/query-5931f7d30101a718f49487e9f7895710f4119917e5f0bf3af4103a428515843b.json new file mode 100644 index 0000000000..2c705f58e4 --- /dev/null +++ b/nyx-chain-watcher/.sqlx/query-5931f7d30101a718f49487e9f7895710f4119917e5f0bf3af4103a428515843b.json @@ -0,0 +1,44 @@ +{ + "db_name": "SQLite", + "query": "SELECT timestamp, chf, usd, eur, btc FROM price_history WHERE timestamp >= $1;", + "describe": { + "columns": [ + { + "name": "timestamp", + "ordinal": 0, + "type_info": "Int64" + }, + { + "name": "chf", + "ordinal": 1, + "type_info": "Float" + }, + { + 
"name": "usd", + "ordinal": 2, + "type_info": "Float" + }, + { + "name": "eur", + "ordinal": 3, + "type_info": "Float" + }, + { + "name": "btc", + "ordinal": 4, + "type_info": "Float" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false, + false, + false, + false + ] + }, + "hash": "5931f7d30101a718f49487e9f7895710f4119917e5f0bf3af4103a428515843b" +} diff --git a/nyx-chain-watcher/.sqlx/query-5fcd0f525ab790a7ebe8659d6cf3f3707065e7e34e920c49f2d50bef7ab9a968.json b/nyx-chain-watcher/.sqlx/query-5fcd0f525ab790a7ebe8659d6cf3f3707065e7e34e920c49f2d50bef7ab9a968.json new file mode 100644 index 0000000000..db7977c254 --- /dev/null +++ b/nyx-chain-watcher/.sqlx/query-5fcd0f525ab790a7ebe8659d6cf3f3707065e7e34e920c49f2d50bef7ab9a968.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO transactions (tx_hash, height, message_index, sender, recipient, amount, memo)\n VALUES (?, ?, ?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 7 + }, + "nullable": [] + }, + "hash": "5fcd0f525ab790a7ebe8659d6cf3f3707065e7e34e920c49f2d50bef7ab9a968" +} diff --git a/nyx-chain-watcher/.sqlx/query-7b9abf4ff422b8d7a942955dc4fba380e7d5f0127f4745705b8ac9af6c170d19.json b/nyx-chain-watcher/.sqlx/query-7b9abf4ff422b8d7a942955dc4fba380e7d5f0127f4745705b8ac9af6c170d19.json new file mode 100644 index 0000000000..c873acca91 --- /dev/null +++ b/nyx-chain-watcher/.sqlx/query-7b9abf4ff422b8d7a942955dc4fba380e7d5f0127f4745705b8ac9af6c170d19.json @@ -0,0 +1,68 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT * FROM transactions\n WHERE height > ? 
\n ORDER BY height ASC, message_index ASC\n ", + "describe": { + "columns": [ + { + "name": "id", + "ordinal": 0, + "type_info": "Int64" + }, + { + "name": "tx_hash", + "ordinal": 1, + "type_info": "Text" + }, + { + "name": "height", + "ordinal": 2, + "type_info": "Int64" + }, + { + "name": "message_index", + "ordinal": 3, + "type_info": "Int64" + }, + { + "name": "sender", + "ordinal": 4, + "type_info": "Text" + }, + { + "name": "recipient", + "ordinal": 5, + "type_info": "Text" + }, + { + "name": "amount", + "ordinal": 6, + "type_info": "Text" + }, + { + "name": "memo", + "ordinal": 7, + "type_info": "Text" + }, + { + "name": "created_at", + "ordinal": 8, + "type_info": "Datetime" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false, + true, + true + ] + }, + "hash": "7b9abf4ff422b8d7a942955dc4fba380e7d5f0127f4745705b8ac9af6c170d19" +} diff --git a/nyx-chain-watcher/.sqlx/query-83df8a7c5fb24ba4d89f1feb300a8f6d4ac14e7e9b7b482bd57fae568ebd96ba.json b/nyx-chain-watcher/.sqlx/query-83df8a7c5fb24ba4d89f1feb300a8f6d4ac14e7e9b7b482bd57fae568ebd96ba.json new file mode 100644 index 0000000000..6af7f2a5bc --- /dev/null +++ b/nyx-chain-watcher/.sqlx/query-83df8a7c5fb24ba4d89f1feb300a8f6d4ac14e7e9b7b482bd57fae568ebd96ba.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "SELECT MAX(height) FROM payments", + "describe": { + "columns": [ + { + "name": "MAX(height)", + "ordinal": 0, + "type_info": "Int64" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + true + ] + }, + "hash": "83df8a7c5fb24ba4d89f1feb300a8f6d4ac14e7e9b7b482bd57fae568ebd96ba" +} diff --git a/nyx-chain-watcher/.sqlx/query-a9d6227ecfa9866096da1367518612f219ae969e98b331af04381429fef8716c.json b/nyx-chain-watcher/.sqlx/query-a9d6227ecfa9866096da1367518612f219ae969e98b331af04381429fef8716c.json new file mode 100644 index 0000000000..73a8c0e7ef --- /dev/null +++ 
b/nyx-chain-watcher/.sqlx/query-a9d6227ecfa9866096da1367518612f219ae969e98b331af04381429fef8716c.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO payments (\n transaction_hash, sender_address, receiver_address,\n amount, height, timestamp, memo\n ) VALUES (?, ?, ?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 7 + }, + "nullable": [] + }, + "hash": "a9d6227ecfa9866096da1367518612f219ae969e98b331af04381429fef8716c" +} diff --git a/nyx-chain-watcher/.sqlx/query-b3ced10067918010e613f61f6fb6fffe5631446fe66f17b8a86cbdca704a3517.json b/nyx-chain-watcher/.sqlx/query-b3ced10067918010e613f61f6fb6fffe5631446fe66f17b8a86cbdca704a3517.json new file mode 100644 index 0000000000..2bda55298c --- /dev/null +++ b/nyx-chain-watcher/.sqlx/query-b3ced10067918010e613f61f6fb6fffe5631446fe66f17b8a86cbdca704a3517.json @@ -0,0 +1,44 @@ +{ + "db_name": "SQLite", + "query": "SELECT timestamp, chf, usd, eur, btc FROM price_history ORDER BY timestamp DESC LIMIT 1;", + "describe": { + "columns": [ + { + "name": "timestamp", + "ordinal": 0, + "type_info": "Int64" + }, + { + "name": "chf", + "ordinal": 1, + "type_info": "Float" + }, + { + "name": "usd", + "ordinal": 2, + "type_info": "Float" + }, + { + "name": "eur", + "ordinal": 3, + "type_info": "Float" + }, + { + "name": "btc", + "ordinal": 4, + "type_info": "Float" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false, + false, + false, + false, + false + ] + }, + "hash": "b3ced10067918010e613f61f6fb6fffe5631446fe66f17b8a86cbdca704a3517" +} diff --git a/nyx-chain-watcher/build.rs b/nyx-chain-watcher/build.rs index c1af059445..4abd950a85 100644 --- a/nyx-chain-watcher/build.rs +++ b/nyx-chain-watcher/build.rs @@ -14,8 +14,13 @@ async fn main() -> Result<()> { std::fs::create_dir_all(parent)?; } + // Platform specific database URL for SQLite connection + #[cfg(target_family = "unix")] let db_url = format!("sqlite:{}", db_path.display()); + #[cfg(target_family = 
"windows")] + let db_url = format!("sqlite:///{}", db_path.display()); + // Ensure database file is created with proper permissions let connect_options = SqliteConnectOptions::from_str(&db_url)? .create_if_missing(true) @@ -25,11 +30,12 @@ async fn main() -> Result<()> { // Create initial connection to ensure database exists let mut conn = SqliteConnection::connect_with(&connect_options).await?; + sqlx::migrate!("./migrations").run(&mut conn).await?; export_db_variables(&db_url)?; - println!("cargo:rustc-env=SQLX_OFFLINE=false"); - // Run migrations after ensuring database exists - sqlx::migrate!("./migrations").run(&mut conn).await?; + // Force SQLx to prepare all queries during build + println!("cargo:rustc-env=SQLX_OFFLINE=true"); + println!("cargo:rustc-env=DATABASE_URL={}", db_url); // Add rerun-if-changed directives println!("cargo:rerun-if-changed=migrations"); @@ -45,7 +51,6 @@ fn export_db_variables(db_url: &str) -> Result<()> { let mut file = File::create(".env")?; for (var, value) in map.iter() { - println!("cargo:rustc-env={}={}", var, value); writeln!(file, "{}={}", var, value)?; } From d0722e5f632c3f9fa2abd0c783c101c6423c006e Mon Sep 17 00:00:00 2001 From: Sachin Kamath Date: Thu, 19 Dec 2024 21:01:21 +0530 Subject: [PATCH 61/64] chain-watcher: try fix windows path --- nyx-chain-watcher/build.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/nyx-chain-watcher/build.rs b/nyx-chain-watcher/build.rs index 4abd950a85..5cfb413716 100644 --- a/nyx-chain-watcher/build.rs +++ b/nyx-chain-watcher/build.rs @@ -14,12 +14,8 @@ async fn main() -> Result<()> { std::fs::create_dir_all(parent)?; } - // Platform specific database URL for SQLite connection - #[cfg(target_family = "unix")] - let db_url = format!("sqlite:{}", db_path.display()); - - #[cfg(target_family = "windows")] - let db_url = format!("sqlite:///{}", db_path.display()); + let db_path_str = db_path.display().to_string().replace('\\', "/"); + let db_url = format!("sqlite:{}", 
db_path_str); // Ensure database file is created with proper permissions let connect_options = SqliteConnectOptions::from_str(&db_url)? From 4f283f565c652b3a64350112735dc39ba7791287 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20H=C3=A4ggblad?= Date: Fri, 20 Dec 2024 01:16:39 +0100 Subject: [PATCH 62/64] Add assignes for the root cargo ecosystem (#5297) --- .github/dependabot.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 37d85d98d0..839c34c714 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -31,3 +31,5 @@ updates: update-types: - "patch" open-pull-requests-limit: 10 + assignees: + - "octol" From 7d5e3ef7d3609441d63c81b978abae9146ada48a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C4=99drzej=20Stuczy=C5=84ski?= Date: Fri, 20 Dec 2024 10:32:56 +0000 Subject: [PATCH 63/64] feature: expand nym-node prometheus metrics (#5298) * fixed bearer auth for prometheus route * basic prometheus metrics * added rates on global values * improved structure on the prometheus metrics * added additional metrics for ingress websockets and egress mixnet connections * some channel business metrics * fixed metrics registration and added additional variants * added counter for number of disk persisted packets * counter for pending egress packets * counter for pending egress forward packets * clippy --- Cargo.lock | 4 +- common/client-libs/mixnet-client/Cargo.toml | 4 +- .../client-libs/mixnet-client/src/client.rs | 217 ++++++--- common/nonexhaustive-delayqueue/src/lib.rs | 8 + common/nym-metrics/Cargo.toml | 2 +- common/nym-metrics/src/lib.rs | 461 +++++++++++++++--- .../node/client_handling/active_clients.rs | 7 + .../client_handling/websocket/common_state.rs | 2 + .../client_handling/websocket/listener.rs | 8 +- gateway/src/node/mod.rs | 1 + nym-node/nym-node-metrics/src/lib.rs | 9 + nym-node/nym-node-metrics/src/mixnet.rs | 17 +- nym-node/nym-node-metrics/src/network.rs | 32 ++ 
nym-node/nym-node-metrics/src/process.rs | 57 +++ .../src/prometheus_wrapper.rs | 390 +++++++++++++++ nym-node/src/config/metrics.rs | 17 +- nym-node/src/config/mod.rs | 1 + nym-node/src/node/http/error.rs | 2 +- .../node/http/router/api/v1/metrics/mod.rs | 33 +- .../http/router/api/v1/metrics/prometheus.rs | 26 +- nym-node/src/node/http/router/mod.rs | 7 + nym-node/src/node/http/state/metrics.rs | 2 - nym-node/src/node/http/state/mod.rs | 12 +- nym-node/src/node/metrics/aggregator.rs | 2 +- nym-node/src/node/metrics/events_listener.rs | 2 +- .../node/metrics/handler/client_sessions.rs | 11 +- .../at_last_update.rs | 219 +++++++++ .../handler/global_prometheus_updater/mod.rs | 223 +++++++++ .../metrics/handler/legacy_packet_data.rs | 2 +- nym-node/src/node/metrics/handler/mod.rs | 19 +- .../handler/pending_egress_packets_updater.rs | 60 +++ .../handler/prometheus_events_handler.rs | 6 + nym-node/src/node/mixnet/handler.rs | 9 +- nym-node/src/node/mixnet/packet_forwarding.rs | 25 +- nym-node/src/node/mod.rs | 62 ++- nym-node/src/node/shared_topology.rs | 6 +- .../ip-packet-router/src/error.rs | 2 +- .../src/request_filter/mod.rs | 11 +- 38 files changed, 1731 insertions(+), 247 deletions(-) create mode 100644 nym-node/nym-node-metrics/src/process.rs create mode 100644 nym-node/nym-node-metrics/src/prometheus_wrapper.rs create mode 100644 nym-node/src/node/metrics/handler/global_prometheus_updater/at_last_update.rs create mode 100644 nym-node/src/node/metrics/handler/global_prometheus_updater/mod.rs create mode 100644 nym-node/src/node/metrics/handler/pending_egress_packets_updater.rs create mode 100644 nym-node/src/node/metrics/handler/prometheus_events_handler.rs diff --git a/Cargo.lock b/Cargo.lock index 6e8b6602d6..df18e60636 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5655,18 +5655,20 @@ version = "0.1.0" dependencies = [ "dashmap", "lazy_static", - "log", "prometheus", + "tracing", ] [[package]] name = "nym-mixnet-client" version = "0.1.0" dependencies = [ + 
"dashmap", "futures", "nym-sphinx", "nym-task", "tokio", + "tokio-stream", "tokio-util", "tracing", ] diff --git a/common/client-libs/mixnet-client/Cargo.toml b/common/client-libs/mixnet-client/Cargo.toml index 25dd62f702..b240aab513 100644 --- a/common/client-libs/mixnet-client/Cargo.toml +++ b/common/client-libs/mixnet-client/Cargo.toml @@ -8,10 +8,12 @@ license.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +dashmap = { workspace = true } futures = { workspace = true } tracing = { workspace = true } -tokio = { workspace = true, features = ["time"] } +tokio = { workspace = true, features = ["time", "sync"] } tokio-util = { workspace = true, features = ["codec"], optional = true } +tokio-stream = { workspace = true } # internal nym-sphinx = { path = "../../nymsphinx" } diff --git a/common/client-libs/mixnet-client/src/client.rs b/common/client-libs/mixnet-client/src/client.rs index b8eebcdcc5..94e32f8404 100644 --- a/common/client-libs/mixnet-client/src/client.rs +++ b/common/client-libs/mixnet-client/src/client.rs @@ -1,21 +1,24 @@ -// Copyright 2021 - Nym Technologies SA +// Copyright 2021-2024 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -use futures::channel::mpsc; +use dashmap::DashMap; use futures::StreamExt; use nym_sphinx::addressing::nodes::NymNodeRoutingAddress; use nym_sphinx::framing::codec::NymCodec; use nym_sphinx::framing::packet::FramedNymPacket; use nym_sphinx::params::PacketType; use nym_sphinx::NymPacket; -use std::collections::HashMap; use std::io; use std::net::SocketAddr; -use std::sync::atomic::{AtomicU32, Ordering}; +use std::ops::Deref; +use std::sync::atomic::{AtomicU32, AtomicUsize, Ordering}; use std::sync::Arc; use std::time::Duration; use tokio::net::TcpStream; +use tokio::sync::mpsc; +use tokio::sync::mpsc::error::TrySendError; use tokio::time::sleep; +use tokio_stream::wrappers::ReceiverStream; use tokio_util::codec::Framed; use 
tracing::*; @@ -55,11 +58,37 @@ pub trait SendWithoutResponse { } pub struct Client { - conn_new: HashMap, + active_connections: ActiveConnections, + connections_count: Arc, config: Config, } -struct ConnectionSender { +#[derive(Default, Clone)] +pub struct ActiveConnections { + inner: Arc>, +} + +impl ActiveConnections { + pub fn pending_packets(&self) -> usize { + self.inner + .iter() + .map(|sender| { + let max_capacity = sender.channel.max_capacity(); + let capacity = sender.channel.capacity(); + max_capacity - capacity + }) + .sum() + } +} + +impl Deref for ActiveConnections { + type Target = DashMap; + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +pub struct ConnectionSender { channel: mpsc::Sender, current_reconnection_attempt: Arc, } @@ -73,46 +102,53 @@ impl ConnectionSender { } } -impl Client { - pub fn new(config: Config) -> Client { - Client { - conn_new: HashMap::new(), - config, - } - } +struct ManagedConnection { + address: SocketAddr, + message_receiver: ReceiverStream, + connection_timeout: Duration, + current_reconnection: Arc, +} - async fn manage_connection( +impl ManagedConnection { + fn new( address: SocketAddr, - receiver: mpsc::Receiver, + message_receiver: mpsc::Receiver, connection_timeout: Duration, - current_reconnection: &AtomicU32, - ) { + current_reconnection: Arc, + ) -> Self { + ManagedConnection { + address, + message_receiver: ReceiverStream::new(message_receiver), + connection_timeout, + current_reconnection, + } + } + + async fn run(self) { + let address = self.address; let connection_fut = TcpStream::connect(address); - let conn = match tokio::time::timeout(connection_timeout, connection_fut).await { + let conn = match tokio::time::timeout(self.connection_timeout, connection_fut).await { Ok(stream_res) => match stream_res { Ok(stream) => { - debug!("Managed to establish connection to {}", address); + debug!("Managed to establish connection to {}", self.address); // if we managed to connect, reset the reconnection 
count (whatever it might have been) - current_reconnection.store(0, Ordering::Release); + self.current_reconnection.store(0, Ordering::Release); Framed::new(stream, NymCodec) } Err(err) => { - debug!( - "failed to establish connection to {} (err: {})", - address, err - ); + debug!("failed to establish connection to {address} (err: {err})",); return; } }, Err(_) => { debug!( - "failed to connect to {} within {:?}", - address, connection_timeout + "failed to connect to {address} within {:?}", + self.connection_timeout ); // we failed to connect - increase reconnection attempt - current_reconnection.fetch_add(1, Ordering::SeqCst); + self.current_reconnection.fetch_add(1, Ordering::SeqCst); return; } }; @@ -120,15 +156,28 @@ impl Client { // Take whatever the receiver channel produces and put it on the connection. // We could have as well used conn.send_all(receiver.map(Ok)), but considering we don't care // about neither receiver nor the connection, it doesn't matter which one gets consumed - if let Err(err) = receiver.map(Ok).forward(conn).await { - warn!("Failed to forward packets to {} - {err}", address); + if let Err(err) = self.message_receiver.map(Ok).forward(conn).await { + warn!("Failed to forward packets to {address}: {err}"); } debug!( - "connection manager to {} is finished. Either the connection failed or mixnet client got dropped", - address + "connection manager to {address} is finished. Either the connection failed or mixnet client got dropped", ); } +} + +impl Client { + pub fn new(config: Config, connections_count: Arc) -> Client { + Client { + active_connections: Default::default(), + connections_count, + config, + } + } + + pub fn active_connections(&self) -> ActiveConnections { + self.active_connections.clone() + } /// If we're trying to reconnect, determine how long we should wait. 
fn determine_backoff(&self, current_attempt: u32) -> Option { @@ -148,7 +197,7 @@ impl Client { } fn make_connection(&mut self, address: NymNodeRoutingAddress, pending_packet: FramedNymPacket) { - let (mut sender, receiver) = mpsc::channel(self.config.maximum_connection_buffer_size); + let (sender, receiver) = mpsc::channel(self.config.maximum_connection_buffer_size); // this CAN'T fail because we just created the channel which has a non-zero capacity if self.config.maximum_connection_buffer_size > 0 { @@ -156,15 +205,16 @@ impl Client { } // if we already tried to connect to `address` before, grab the current attempt count - let current_reconnection_attempt = if let Some(existing) = self.conn_new.get_mut(&address) { - existing.channel = sender; - Arc::clone(&existing.current_reconnection_attempt) - } else { - let new_entry = ConnectionSender::new(sender); - let current_attempt = Arc::clone(&new_entry.current_reconnection_attempt); - self.conn_new.insert(address, new_entry); - current_attempt - }; + let current_reconnection_attempt = + if let Some(mut existing) = self.active_connections.get_mut(&address) { + existing.channel = sender; + Arc::clone(&existing.current_reconnection_attempt) + } else { + let new_entry = ConnectionSender::new(sender); + let current_attempt = Arc::clone(&new_entry.current_reconnection_attempt); + self.active_connections.insert(address, new_entry); + current_attempt + }; // load the actual value. 
let reconnection_attempt = current_reconnection_attempt.load(Ordering::Acquire); @@ -173,6 +223,7 @@ impl Client { // copy the value before moving into another task let initial_connection_timeout = self.config.initial_connection_timeout; + let connections_count = self.connections_count.clone(); tokio::spawn(async move { // before executing the manager, wait for what was specified, if anything if let Some(backoff) = backoff { @@ -180,13 +231,16 @@ impl Client { sleep(backoff).await; } - Self::manage_connection( + connections_count.fetch_add(1, Ordering::SeqCst); + ManagedConnection::new( address.into(), receiver, initial_connection_timeout, - ¤t_reconnection_attempt, + current_reconnection_attempt, ) - .await + .run() + .await; + connections_count.fetch_sub(1, Ordering::SeqCst); }); } } @@ -201,49 +255,47 @@ impl SendWithoutResponse for Client { trace!("Sending packet to {address:?}"); let framed_packet = FramedNymPacket::new(packet, packet_type); - if let Some(sender) = self.conn_new.get_mut(&address) { - if let Err(err) = sender.channel.try_send(framed_packet) { - if err.is_full() { - debug!("Connection to {} seems to not be able to handle all the traffic - dropping the current packet", address); + let Some(sender) = self.active_connections.get_mut(&address) else { + // there was never a connection to begin with + debug!("establishing initial connection to {}", address); + // it's not a 'big' error, but we did not manage to send the packet, but queue the packet + // for sending for as soon as the connection is created + self.make_connection(address, framed_packet); + return Err(io::Error::new( + io::ErrorKind::NotConnected, + "connection is in progress", + )); + }; + + let sending_res = sender.channel.try_send(framed_packet); + drop(sender); + + sending_res.map_err(|err| { + match err { + TrySendError::Full(_) => { + debug!("Connection to {address} seems to not be able to handle all the traffic - dropping the current packet"); // it's not a 'big' error, but we did 
not manage to send the packet // if the queue is full, we can't really do anything but to drop the packet - Err(io::Error::new( + io::Error::new( io::ErrorKind::WouldBlock, "connection queue is full", - )) - } else if err.is_disconnected() { + ) + } + TrySendError::Closed(dropped) => { debug!( - "Connection to {} seems to be dead. attempting to re-establish it...", - address + "Connection to {address} seems to be dead. attempting to re-establish it...", ); + // it's not a 'big' error, but we did not manage to send the packet, but queue // it up to send it as soon as the connection is re-established - self.make_connection(address, err.into_inner()); - Err(io::Error::new( + self.make_connection(address, dropped); + io::Error::new( io::ErrorKind::ConnectionAborted, "reconnection attempt is in progress", - )) - } else { - // this can't really happen, but let's safe-guard against it in case something changes in futures library - Err(io::Error::new( - io::ErrorKind::Other, - "unknown connection buffer error", - )) + ) } - } else { - Ok(()) } - } else { - // there was never a connection to begin with - debug!("establishing initial connection to {}", address); - // it's not a 'big' error, but we did not manage to send the packet, but queue the packet - // for sending for as soon as the connection is created - self.make_connection(address, framed_packet); - Err(io::Error::new( - io::ErrorKind::NotConnected, - "connection is in progress", - )) - } + } ) } } @@ -252,12 +304,15 @@ mod tests { use super::*; fn dummy_client() -> Client { - Client::new(Config { - initial_reconnection_backoff: Duration::from_millis(10_000), - maximum_reconnection_backoff: Duration::from_millis(300_000), - initial_connection_timeout: Duration::from_millis(1_500), - maximum_connection_buffer_size: 128, - }) + Client::new( + Config { + initial_reconnection_backoff: Duration::from_millis(10_000), + maximum_reconnection_backoff: Duration::from_millis(300_000), + initial_connection_timeout: 
Duration::from_millis(1_500), + maximum_connection_buffer_size: 128, + }, + Default::default(), + ) } #[test] diff --git a/common/nonexhaustive-delayqueue/src/lib.rs b/common/nonexhaustive-delayqueue/src/lib.rs index ee8c9f214c..6c4853f675 100644 --- a/common/nonexhaustive-delayqueue/src/lib.rs +++ b/common/nonexhaustive-delayqueue/src/lib.rs @@ -65,6 +65,14 @@ impl NonExhaustiveDelayQueue { pub fn remove(&mut self, key: &QueueKey) -> Expired { self.inner.remove(key) } + + pub fn len(&self) -> usize { + self.inner.len() + } + + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } } impl Default for NonExhaustiveDelayQueue { diff --git a/common/nym-metrics/Cargo.toml b/common/nym-metrics/Cargo.toml index 4310ae28f6..1ac3bce16a 100644 --- a/common/nym-metrics/Cargo.toml +++ b/common/nym-metrics/Cargo.toml @@ -12,6 +12,6 @@ license.workspace = true [dependencies] prometheus = { workspace = true } -log = { workspace = true } +tracing = { workspace = true } dashmap = { workspace = true } lazy_static = { workspace = true } diff --git a/common/nym-metrics/src/lib.rs b/common/nym-metrics/src/lib.rs index 565d62cd5f..7c6373ccc7 100644 --- a/common/nym-metrics/src/lib.rs +++ b/common/nym-metrics/src/lib.rs @@ -1,14 +1,18 @@ use dashmap::DashMap; -pub use log::error; -use log::{debug, warn}; use std::fmt; -pub use std::time::Instant; +use tracing::{debug, error, warn}; + +use prometheus::{ + core::Collector, Encoder as _, Gauge, Histogram, HistogramOpts, IntCounter, IntGauge, Registry, + TextEncoder, +}; -use prometheus::{core::Collector, Encoder as _, IntCounter, IntGauge, Registry, TextEncoder}; +pub use prometheus::HistogramTimer; +pub use std::time::Instant; #[macro_export] macro_rules! prepend_package_name { - ($name: literal) => { + ($name: tt) => { &format!( "{}_{}", std::module_path!() @@ -23,15 +27,29 @@ macro_rules! prepend_package_name { #[macro_export] macro_rules! 
inc_by { + ($name:literal, $x:expr, $help: expr) => { + $crate::REGISTRY.maybe_register_and_inc_by( + $crate::prepend_package_name!($name), + $x as i64, + $help, + ); + }; ($name:literal, $x:expr) => { - $crate::REGISTRY.inc_by($crate::prepend_package_name!($name), $x as i64); + $crate::REGISTRY.maybe_register_and_inc_by( + $crate::prepend_package_name!($name), + $x as i64, + None, + ); }; } #[macro_export] macro_rules! inc { + ($name:literal, $help: expr) => { + $crate::REGISTRY.maybe_register_and_inc($crate::prepend_package_name!($name), $help); + }; ($name:literal) => { - $crate::REGISTRY.inc($crate::prepend_package_name!($name)); + $crate::REGISTRY.maybe_register_and_inc($crate::prepend_package_name!($name), None); }; } @@ -42,6 +60,71 @@ macro_rules! metrics { }; } +#[macro_export] +macro_rules! set_metric { + ($name:literal, $x:expr, $help: expr) => { + $crate::REGISTRY.maybe_register_and_set( + $crate::prepend_package_name!($name), + $x as i64, + $help, + ); + }; + ($name:literal, $x:expr) => { + $crate::REGISTRY.maybe_register_and_set( + $crate::prepend_package_name!($name), + $x as i64, + None, + ); + }; +} + +#[macro_export] +macro_rules! set_metric_float { + ($name:literal, $x:expr, $help: expr) => { + $crate::REGISTRY.maybe_register_and_set_float( + $crate::prepend_package_name!($name), + $x as f64, + $help, + ); + }; + ($name:literal, $x:expr) => { + $crate::REGISTRY.maybe_register_and_set_float( + $crate::prepend_package_name!($name), + $x as f64, + None, + ); + }; +} + +#[macro_export] +macro_rules! 
add_histogram_obs { + ($name:expr, $x:expr, $b:expr, $help:expr) => { + $crate::REGISTRY.maybe_register_and_add_to_histogram( + $crate::prepend_package_name!($name), + $x as f64, + Some($b), + $help, + ); + }; + + ($name:expr, $x:expr, $b:expr) => { + $crate::REGISTRY.maybe_register_and_add_to_histogram( + $crate::prepend_package_name!($name), + $x as f64, + Some($b), + None, + ); + }; + ($name:expr, $x:expr) => { + $crate::REGISTRY.maybe_register_and_add_to_histogram( + $crate::prepend_package_name!($name), + $x as f64, + None, + None, + ); + }; +} + #[macro_export] macro_rules! nanos { ( $name:literal, $x:expr ) => {{ @@ -50,7 +133,7 @@ macro_rules! nanos { let r = $x; let duration = start.elapsed().as_nanos() as i64; let name = $crate::prepend_package_name!($name); - $crate::REGISTRY.inc_by(&format!("{}_nanos", $name), duration); + $crate::REGISTRY.maybe_register_and_inc_by(&format!("{}_nanos", $name), duration, None); r }}; } @@ -59,15 +142,100 @@ lazy_static::lazy_static! { pub static ref REGISTRY: MetricsController = MetricsController::default(); } +pub fn metrics_registry() -> &'static MetricsController { + ®ISTRY +} + #[derive(Default)] pub struct MetricsController { registry: Registry, registry_index: DashMap, } -enum Metric { - C(Box), - G(Box), +pub enum Metric { + IntCounter(Box), + IntGauge(Box), + FloatGauge(Box), + Histogram(Box), +} + +impl Metric { + pub fn new_int_counter(name: &str, help: &str) -> Option { + match IntCounter::new(sanitize_metric_name(name), help) { + Ok(c) => Some(c.into()), + Err(err) => { + error!("Failed to create counter {name:?}: {err}"); + None + } + } + } + + pub fn new_int_gauge(name: &str, help: &str) -> Option { + match IntGauge::new(sanitize_metric_name(name), help) { + Ok(g) => Some(g.into()), + Err(err) => { + error!("Failed to create gauge {name:?}: {err}"); + None + } + } + } + + pub fn new_float_gauge(name: &str, help: &str) -> Option { + match Gauge::new(sanitize_metric_name(name), help) { + Ok(g) => 
Some(g.into()), + Err(err) => { + error!("Failed to create gauge {name:?}: {err}"); + None + } + } + } + + pub fn new_histogram(name: &str, help: &str, buckets: Option<&[f64]>) -> Option { + let mut opts = HistogramOpts::new(sanitize_metric_name(name), help); + if let Some(buckets) = buckets { + opts = opts.buckets(buckets.to_vec()) + } + match Histogram::with_opts(opts) { + Ok(h) => Some(Metric::Histogram(Box::new(h))), + Err(err) => { + error!("failed to create histogram {name:?}: {err}"); + None + } + } + } + + fn as_collector(&self) -> Box { + match self { + Metric::IntCounter(c) => c.clone(), + Metric::IntGauge(g) => g.clone(), + Metric::FloatGauge(g) => g.clone(), + Metric::Histogram(h) => h.clone(), + } + } +} + +impl From for Metric { + fn from(v: IntCounter) -> Self { + Metric::IntCounter(Box::new(v)) + } +} + +impl From for Metric { + fn from(v: IntGauge) -> Self { + Metric::IntGauge(Box::new(v)) + } +} + +impl From for Metric { + fn from(v: Gauge) -> Self { + Metric::FloatGauge(Box::new(v)) + } +} + +impl From for Metric { + fn from(v: Histogram) -> Self { + Metric::Histogram(Box::new(v)) + } } fn fq_name(c: &dyn Collector) -> String { @@ -81,34 +249,92 @@ impl Metric { #[inline(always)] fn fq_name(&self) -> String { match self { - Metric::C(c) => fq_name(c.as_ref()), - Metric::G(g) => fq_name(g.as_ref()), + Metric::IntCounter(c) => fq_name(c.as_ref()), + Metric::IntGauge(g) => fq_name(g.as_ref()), + Metric::FloatGauge(g) => fq_name(g.as_ref()), + Metric::Histogram(h) => fq_name(h.as_ref()), } } #[inline(always)] fn inc(&self) { match self { - Metric::C(c) => c.inc(), - Metric::G(g) => g.inc(), + Metric::IntCounter(c) => c.inc(), + Metric::IntGauge(g) => g.inc(), + Metric::FloatGauge(g) => g.inc(), + Metric::Histogram(_) => { + warn!("invalid operation: attempted to call increment on a histogram") + } } } #[inline(always)] fn inc_by(&self, value: i64) { match self { - Metric::C(c) => c.inc_by(value as u64), - Metric::G(g) => g.add(value), + 
Metric::IntCounter(c) => c.inc_by(value as u64), + Metric::IntGauge(g) => g.add(value), + Metric::FloatGauge(g) => { + warn!("attempted to increment a float gauge ('{}') by an integer - this is most likely a bug", self.fq_name()); + g.add(value as f64) + } + Metric::Histogram(_) => { + warn!("invalid operation: attempted to call increment on a histogram") + } } } #[inline(always)] fn set(&self, value: i64) { match self { - Metric::C(_c) => { + Metric::IntCounter(_c) => { + warn!("Cannot set value for counter {:?}", self.fq_name()); + } + Metric::IntGauge(g) => g.set(value), + Metric::FloatGauge(g) => { + warn!("attempted to set a float gauge ('{}') to an integer value - this is most likely a bug", self.fq_name()); + g.set(value as f64) + } + Metric::Histogram(_) => { + warn!("invalid operation: attempted to call set on a histogram") + } + } + } + + #[inline(always)] + fn set_float(&self, value: f64) { + match self { + Metric::IntCounter(_c) => { warn!("Cannot set value for counter {:?}", self.fq_name()); } - Metric::G(g) => g.set(value), + Metric::IntGauge(g) => { + warn!("attempted to set a integer gauge ('{}') to a float value - this is most likely a bug", self.fq_name()); + g.set(value as i64) + } + Metric::FloatGauge(g) => g.set(value), + Metric::Histogram(_) => { + warn!("invalid operation: attempted to call increment on a histogram") + } + } + } + + #[inline(always)] + fn add_histogram_observation(&self, value: f64) { + match self { + Metric::Histogram(h) => { + h.observe(value); + } + _ => warn!("attempted to add histogram observation on a non-histogram metric"), + } + } + + #[inline(always)] + fn start_timer(&self) -> Option { + match self { + Metric::Histogram(h) => Some(h.start_timer()), + _ => { + warn!("attempted to start histogram observation on a non-histogram metric"); + None + } } } } @@ -145,93 +371,165 @@ impl MetricsController { } } - pub fn set(&self, name: &str, value: i64) { + pub fn register_int_gauge<'a>(&self, name: &str, help: impl Into>) 
{ + let Some(metric) = Metric::new_int_gauge(name, help.into().unwrap_or(name)) else { + return; + }; + self.register_metric(metric); + } + + pub fn register_float_gauge<'a>(&self, name: &str, help: impl Into>) { + let Some(metric) = Metric::new_float_gauge(name, help.into().unwrap_or(name)) else { + return; + }; + self.register_metric(metric); + } + + pub fn register_int_counter<'a>(&self, name: &str, help: impl Into>) { + let Some(metric) = Metric::new_int_counter(name, help.into().unwrap_or(name)) else { + return; + }; + self.register_metric(metric); + } + + pub fn register_histogram<'a>( + &self, + name: &str, + help: impl Into>, + buckets: Option<&[f64]>, + ) { + let Some(metric) = Metric::new_histogram(name, help.into().unwrap_or(name), buckets) else { + return; + }; + self.register_metric(metric); + } + + pub fn set(&self, name: &str, value: i64) -> bool { if let Some(metric) = self.registry_index.get(name) { metric.set(value); + true } else { - let gauge = match IntGauge::new(sanitize_metric_name(name), name) { - Ok(g) => g, - Err(e) => { - debug!("Failed to create gauge {:?}:\n{}", name, e); - return; - } - }; - self.register_gauge(Box::new(gauge)); - self.set(name, value) + false } } - pub fn inc(&self, name: &str) { + pub fn set_float(&self, name: &str, value: f64) -> bool { + if let Some(metric) = self.registry_index.get(name) { + metric.set_float(value); + true + } else { + false + } + } + + pub fn add_to_histogram(&self, name: &str, value: f64) -> bool { + if let Some(metric) = self.registry_index.get(name) { + metric.add_histogram_observation(value); + true + } else { + false + } + } + + pub fn start_timer(&self, name: &str) -> Option { + self.registry_index + .get(name) + .and_then(|metric| metric.start_timer()) + } + + pub fn inc(&self, name: &str) -> bool { if let Some(metric) = self.registry_index.get(name) { metric.inc(); + true } else { - let counter = match IntCounter::new(sanitize_metric_name(name), name) { - Ok(c) => c, - Err(e) => { - 
debug!("Failed to create counter {:?}:\n{}", name, e); - return; - } - }; - self.register_counter(Box::new(counter)); - self.inc(name) + false } } - pub fn inc_by(&self, name: &str, value: i64) { + pub fn inc_by(&self, name: &str, value: i64) -> bool { if let Some(metric) = self.registry_index.get(name) { metric.inc_by(value); + true } else { - let counter = match IntCounter::new(sanitize_metric_name(name), name) { - Ok(c) => c, - Err(e) => { - debug!("Failed to create counter {:?}:\n{}", name, e); - return; - } - }; - self.register_counter(Box::new(counter)); - self.inc_by(name, value) + false } } - fn register_gauge(&self, metric: Box) { - let fq_name = metric - .desc() - .first() - .map(|d| d.fq_name.clone()) - .unwrap_or_default(); + pub fn maybe_register_and_set<'a>( + &self, + name: &str, + value: i64, + help: impl Into>, + ) { + if !self.set(name, value) { + let help = help.into(); + self.register_int_gauge(name, help); + self.set(name, value); + } + } - if self.registry_index.contains_key(&fq_name) { - return; + pub fn maybe_register_and_set_float<'a>( + &self, + name: &str, + value: f64, + help: impl Into>, + ) { + if !self.set_float(name, value) { + let help = help.into(); + self.register_float_gauge(name, help); + self.set_float(name, value); } + } - match self.registry.register(metric.clone()) { - Ok(_) => { - self.registry_index - .insert(fq_name, Metric::G(metric.clone())); - } - Err(e) => { - debug!("Failed to register {:?}:\n{}", fq_name, e) - } + pub fn maybe_register_and_add_to_histogram<'a>( + &self, + name: &str, + value: f64, + buckets: Option<&[f64]>, + help: impl Into>, + ) { + if !self.add_to_histogram(name, value) { + let help = help.into(); + self.register_histogram(name, help, buckets); + self.add_to_histogram(name, value); + } + } + + pub fn maybe_register_and_inc<'a>(&self, name: &str, help: impl Into>) { + if !self.inc(name) { + let help = help.into(); + self.register_int_counter(name, help); + self.inc(name); } } - fn 
register_counter(&self, metric: Box) { - let fq_name = metric - .desc() - .first() - .map(|d| d.fq_name.clone()) - .unwrap_or_default(); + pub fn maybe_register_and_inc_by<'a>( + &self, + name: &str, + value: i64, + help: impl Into>, + ) { + if !self.inc_by(name, value) { + let help = help.into(); + self.register_int_counter(name, help); + self.inc_by(name, value); + } + } + + pub fn register_metric(&self, metric: impl Into) { + let m = metric.into(); + let fq_name = m.fq_name(); if self.registry_index.contains_key(&fq_name) { return; } - match self.registry.register(metric.clone()) { + + match self.registry.register(m.as_collector()) { Ok(_) => { - self.registry_index - .insert(fq_name, Metric::C(metric.clone())); + self.registry_index.insert(fq_name, m); } - Err(e) => { - debug!("Failed to register {:?}:\n{}", fq_name, e) + Err(err) => { + debug!("Failed to register '{fq_name}': {err}") } } } @@ -275,4 +573,15 @@ mod tests { "packets_sent_34_242_65_133:1789" ) } + + #[test] + fn prepend_package_name() { + let literal = prepend_package_name!("foo"); + assert_eq!(literal, "nym_metrics_foo"); + + let bar = "bar"; + let format = format!("foomp_{}", bar); + let formatted = prepend_package_name!(format); + assert_eq!(formatted, "nym_metrics_foomp_bar"); + } } diff --git a/gateway/src/node/client_handling/active_clients.rs b/gateway/src/node/client_handling/active_clients.rs index 3dacfd3f63..4a3e55bef5 100644 --- a/gateway/src/node/client_handling/active_clients.rs +++ b/gateway/src/node/client_handling/active_clients.rs @@ -163,4 +163,11 @@ impl ActiveClientsStore { pub(crate) fn size(&self) -> usize { self.inner.len() } + + pub fn pending_packets(&self) -> usize { + self.inner + .iter() + .map(|client| client.get_sender_ref().len()) + .sum() + } } diff --git a/gateway/src/node/client_handling/websocket/common_state.rs b/gateway/src/node/client_handling/websocket/common_state.rs index 19a5943c53..223c534b26 100644 --- 
a/gateway/src/node/client_handling/websocket/common_state.rs +++ b/gateway/src/node/client_handling/websocket/common_state.rs @@ -7,6 +7,7 @@ use nym_crypto::asymmetric::identity; use nym_gateway_storage::GatewayStorage; use nym_mixnet_client::forwarder::MixForwardingSender; use nym_node_metrics::events::MetricEventsSender; +use nym_node_metrics::NymNodeMetrics; use std::sync::Arc; // I can see this being possible expanded with say storage or client store @@ -17,6 +18,7 @@ pub(crate) struct CommonHandlerState { pub(crate) local_identity: Arc, pub(crate) only_coconut_credentials: bool, pub(crate) bandwidth_cfg: BandwidthFlushingBehaviourConfig, + pub(crate) metrics: NymNodeMetrics, pub(crate) metrics_sender: MetricEventsSender, pub(crate) outbound_mix_sender: MixForwardingSender, pub(crate) active_clients_store: ActiveClientsStore, diff --git a/gateway/src/node/client_handling/websocket/listener.rs b/gateway/src/node/client_handling/websocket/listener.rs index ed484d17b6..faa659f0fa 100644 --- a/gateway/src/node/client_handling/websocket/listener.rs +++ b/gateway/src/node/client_handling/websocket/listener.rs @@ -61,7 +61,13 @@ impl Listener { remote_addr, shutdown, ); - tokio::spawn(handle.start_handling()); + tokio::spawn(async move { + // TODO: refactor it similarly to the mixnet listener on the nym-node + let metrics_ref = handle.shared_state.metrics.clone(); + metrics_ref.network.new_ingress_websocket_client(); + handle.start_handling().await; + metrics_ref.network.disconnected_ingress_websocket_client(); + }); } Err(err) => warn!("failed to get client: {err}"), } diff --git a/gateway/src/node/mod.rs b/gateway/src/node/mod.rs index b3b41961a1..3304f223b3 100644 --- a/gateway/src/node/mod.rs +++ b/gateway/src/node/mod.rs @@ -252,6 +252,7 @@ impl GatewayTasksBuilder { local_identity: Arc::clone(&self.identity_keypair), only_coconut_credentials: self.config.gateway.enforce_zk_nyms, bandwidth_cfg: (&self.config).into(), + metrics: self.metrics.clone(), 
metrics_sender: self.metrics_sender.clone(), outbound_mix_sender: self.mix_packet_sender.clone(), active_clients_store: active_clients_store.clone(), diff --git a/nym-node/nym-node-metrics/src/lib.rs b/nym-node/nym-node-metrics/src/lib.rs index 57a8c74bb3..904cdc1407 100644 --- a/nym-node/nym-node-metrics/src/lib.rs +++ b/nym-node/nym-node-metrics/src/lib.rs @@ -1,9 +1,15 @@ // Copyright 2024 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 +#![warn(clippy::expect_used)] +#![warn(clippy::unwrap_used)] +#![warn(clippy::todo)] +#![warn(clippy::dbg_macro)] + use crate::entry::EntryStats; use crate::mixnet::MixingStats; use crate::network::NetworkStats; +use crate::process::NodeStats; use crate::wireguard::WireguardStats; use std::ops::Deref; use std::sync::Arc; @@ -12,6 +18,8 @@ pub mod entry; pub mod events; pub mod mixnet; pub mod network; +pub mod process; +pub mod prometheus_wrapper; pub mod wireguard; #[derive(Clone, Default)] @@ -39,4 +47,5 @@ pub struct NymNodeMetricsInner { pub wireguard: WireguardStats, pub network: NetworkStats, + pub process: NodeStats, } diff --git a/nym-node/nym-node-metrics/src/mixnet.rs b/nym-node/nym-node-metrics/src/mixnet.rs index 043d2d1e7c..32d84e509b 100644 --- a/nym-node/nym-node-metrics/src/mixnet.rs +++ b/nym-node/nym-node-metrics/src/mixnet.rs @@ -131,13 +131,6 @@ impl MixingStats { .or_default() .dropped += 1; } - - pub fn egress_dropped_final_hop_packet(&self) { - todo!() - // self.egress - // .final_hop_packets_dropped - // .fetch_add(1, Ordering::Relaxed); - } } #[derive(Clone, Copy, Default, PartialEq)] @@ -148,6 +141,8 @@ pub struct EgressRecipientStats { #[derive(Default)] pub struct EgressMixingStats { + disk_persisted_packets: AtomicUsize, + // this includes ACKS! 
forward_hop_packets_sent: AtomicUsize, @@ -159,6 +154,14 @@ pub struct EgressMixingStats { } impl EgressMixingStats { + pub fn add_disk_persisted_packet(&self) { + self.disk_persisted_packets.fetch_add(1, Ordering::Relaxed); + } + + pub fn disk_persisted_packets(&self) -> usize { + self.disk_persisted_packets.load(Ordering::Relaxed) + } + pub fn forward_hop_packets_sent(&self) -> usize { self.forward_hop_packets_sent.load(Ordering::Relaxed) } diff --git a/nym-node/nym-node-metrics/src/network.rs b/nym-node/nym-node-metrics/src/network.rs index de00d78560..7de912fc3c 100644 --- a/nym-node/nym-node-metrics/src/network.rs +++ b/nym-node/nym-node-metrics/src/network.rs @@ -2,11 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::Arc; #[derive(Default)] pub struct NetworkStats { // for now just experiment with basic data, we could always extend it active_ingress_mixnet_connections: AtomicUsize, + + active_ingress_websocket_connections: AtomicUsize, + + // the reason for additional `Arc` on this one is that the handler wasn't + // designed with metrics in mind and this single counter has been woven through + // the call stack + active_egress_mixnet_connections: Arc, } impl NetworkStats { @@ -20,8 +28,32 @@ impl NetworkStats { .fetch_sub(1, Ordering::Relaxed); } + pub fn new_ingress_websocket_client(&self) { + self.active_ingress_websocket_connections + .fetch_add(1, Ordering::Relaxed); + } + + pub fn disconnected_ingress_websocket_client(&self) { + self.active_ingress_websocket_connections + .fetch_sub(1, Ordering::Relaxed); + } + pub fn active_ingress_mixnet_connections_count(&self) -> usize { self.active_ingress_mixnet_connections .load(Ordering::Relaxed) } + + pub fn active_ingress_websocket_connections_count(&self) -> usize { + self.active_ingress_websocket_connections + .load(Ordering::Relaxed) + } + + pub fn active_egress_mixnet_connections_counter(&self) -> Arc { + 
self.active_egress_mixnet_connections.clone() + } + + pub fn active_egress_mixnet_connections_count(&self) -> usize { + self.active_egress_mixnet_connections + .load(Ordering::Relaxed) + } } diff --git a/nym-node/nym-node-metrics/src/process.rs b/nym-node/nym-node-metrics/src/process.rs new file mode 100644 index 0000000000..8e19688249 --- /dev/null +++ b/nym-node/nym-node-metrics/src/process.rs @@ -0,0 +1,57 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: Apache-2.0 + +use std::sync::atomic::{AtomicUsize, Ordering}; + +#[derive(Default)] +pub struct NodeStats { + pub final_hop_packets_pending_delivery: AtomicUsize, + + pub forward_hop_packets_pending_delivery: AtomicUsize, + + pub forward_hop_packets_being_delayed: AtomicUsize, + + // packets that haven't yet been delayed and are waiting for their chance + pub packet_forwarder_queue_size: AtomicUsize, +} + +impl NodeStats { + pub fn update_final_hop_packets_pending_delivery(&self, current: usize) { + self.final_hop_packets_pending_delivery + .store(current, Ordering::Relaxed); + } + + pub fn final_hop_packets_pending_delivery_count(&self) -> usize { + self.final_hop_packets_pending_delivery + .load(Ordering::Relaxed) + } + + pub fn update_forward_hop_packets_pending_delivery(&self, current: usize) { + self.forward_hop_packets_pending_delivery + .store(current, Ordering::Relaxed); + } + + pub fn forward_hop_packets_pending_delivery_count(&self) -> usize { + self.forward_hop_packets_pending_delivery + .load(Ordering::Relaxed) + } + + pub fn update_forward_hop_packets_being_delayed(&self, current: usize) { + self.forward_hop_packets_being_delayed + .store(current, Ordering::Relaxed); + } + + pub fn forward_hop_packets_being_delayed_count(&self) -> usize { + self.forward_hop_packets_being_delayed + .load(Ordering::Relaxed) + } + + pub fn update_packet_forwarder_queue_size(&self, current: usize) { + self.packet_forwarder_queue_size + .store(current, Ordering::Relaxed); + } + + pub fn 
packet_forwarder_queue_size(&self) -> usize {
+        self.packet_forwarder_queue_size.load(Ordering::Relaxed)
+    }
+}
diff --git a/nym-node/nym-node-metrics/src/prometheus_wrapper.rs b/nym-node/nym-node-metrics/src/prometheus_wrapper.rs
new file mode 100644
index 0000000000..2d61ee55c4
--- /dev/null
+++ b/nym-node/nym-node-metrics/src/prometheus_wrapper.rs
@@ -0,0 +1,390 @@
+// Copyright 2024 - Nym Technologies SA
+// SPDX-License-Identifier: GPL-3.0-only
+
+use nym_metrics::{metrics_registry, HistogramTimer, Metric};
+use std::sync::LazyLock;
+use strum::{Display, EnumCount, EnumIter, EnumProperty, IntoEnumIterator};
+
+pub static PROMETHEUS_METRICS: LazyLock<NymNodePrometheusMetrics> =
+    LazyLock::new(NymNodePrometheusMetrics::initialise);
+
+const CLIENT_SESSION_DURATION_BUCKETS: &[f64] = &[
+    // sub 3s (implicitly)
+    3., // 3s - 15s
+    15., // 15s - 70s
+    70., // 70s - 2min
+    120., // 2 min - 5 min
+    300., // 5min - 15min
+    900., // 15min - 1h
+    3600., // 1h - 12h
+    43200., // 12h - 23.5h
+    84600., // 23.5h - 24.5h
+    88200., // 24.5h - 72h
+    259200., // 72h+ (implicitly)
+];
+
+#[derive(Clone, Debug, EnumIter, Display, EnumProperty, EnumCount, Eq, Hash, PartialEq)]
+#[strum(serialize_all = "snake_case", prefix = "nym_node_")]
+pub enum PrometheusMetric {
+    // # MIXNET
+    // ## INGRESS
+    #[strum(props(help = "The number of ingress forward hop sphinx packets received"))]
+    MixnetIngressForwardPacketsReceived,
+
+    #[strum(props(help = "The number of ingress final hop sphinx packets received"))]
+    MixnetIngressFinalHopPacketsReceived,
+
+    #[strum(props(help = "The number of ingress malformed sphinx packets received"))]
+    MixnetIngressMalformedPacketsReceived,
+
+    #[strum(props(
+        help = "The number of ingress forward sphinx packets that specified excessive delay received"
+    ))]
+    MixnetIngressExcessiveDelayPacketsReceived,
+
+    #[strum(props(help = "The number of ingress forward hop sphinx packets dropped"))]
+    MixnetIngressForwardPacketsDropped,
+
+    #[strum(props(help = "The number of ingress final 
hop sphinx packets dropped"))] + MixnetIngressFinalHopPacketsDropped, + + #[strum(props(help = "The current rate of receiving ingress forward hop sphinx packets"))] + MixnetIngressForwardPacketsReceivedRate, + + #[strum(props(help = "The current rate of receiving ingress final hop sphinx packets"))] + MixnetIngressFinalHopPacketsReceivedRate, + + #[strum(props(help = "The current rate of receiving ingress malformed sphinx packets"))] + MixnetIngressMalformedPacketsReceivedRate, + + #[strum(props( + help = "The current rate of receiving ingress sphinx packets that specified excessive delay" + ))] + MixnetIngressExcessiveDelayPacketsReceivedRate, + + #[strum(props(help = "The current rate of dropping ingress forward hop sphinx packets"))] + MixnetIngressForwardPacketsDroppedRate, + + #[strum(props(help = "The current rate of dropping ingress final hop sphinx packets"))] + MixnetIngressFinalHopPacketsDroppedRate, + + // ## EGRESS + #[strum(props( + help = "The number of unwrapped final hop packets stored on disk for offline clients" + ))] + MixnetEgressStoredOnDiskFinalHopPackets, + + #[strum(props(help = "The number of egress forward hop sphinx packets sent/forwarded"))] + MixnetEgressForwardPacketsSent, + + #[strum(props( + help = "The number of egress forward hop sphinx packets sent/forwarded (acks only)" + ))] + MixnetEgressAckSent, + + #[strum(props(help = "The number of egress forward hop sphinx packets dropped"))] + MixnetEgressForwardPacketsDropped, + + #[strum(props( + help = "The current rate of sending/forwarding egress forward hop sphinx packets" + ))] + MixnetEgressForwardPacketsSendRate, + + #[strum(props( + help = "The current rate of sending/forwarding egress forward hop sphinx packets (acks only)" + ))] + MixnetEgressAckSendRate, + + #[strum(props(help = "The current rate of dropping egress forward hop sphinx packets"))] + MixnetEgressForwardPacketsDroppedRate, + + // # ENTRY + #[strum(props(help = "The number of unique users"))] + 
EntryClientUniqueUsers, + + #[strum(props(help = "The number of client sessions started"))] + EntryClientSessionsStarted, + + #[strum(props(help = "The number of client sessions finished"))] + EntryClientSessionsFinished, + + #[strum(to_string = "entry_client_sessions_durations_{typ}")] + #[strum(props(help = "The distribution of client sessions duration of the specified type"))] + EntryClientSessionsDurations { typ: String }, + + // # WIREGUARD + #[strum(props(help = "The amount of bytes transmitted via wireguard"))] + WireguardBytesTx, + + #[strum(props(help = "The amount of bytes received via wireguard"))] + WireguardBytesRx, + + #[strum(props(help = "The current number of all registered wireguard peers"))] + WireguardTotalPeers, + + #[strum(props(help = "The current number of active wireguard peers"))] + WireguardActivePeers, + + #[strum(props(help = "The current sending rate of wireguard"))] + WireguardBytesTxRate, + + #[strum(props(help = "The current receiving rate of wireguard"))] + WireguardBytesRxRate, + + // # NETWORK + #[strum(props(help = "The number of active ingress mixnet connections"))] + NetworkActiveIngressMixnetConnections, + + #[strum(props(help = "The number of active ingress websocket connections"))] + NetworkActiveIngressWebSocketConnections, + + #[strum(props(help = "The number of active egress mixnet connections"))] + NetworkActiveEgressMixnetConnections, + + // # PROCESS + #[strum(props(help = "The current number of packets being delayed"))] + ProcessForwardHopPacketsBeingDelayed, + + #[strum(props( + help = "The current number of packets waiting in the queue to get delayed and sent into the mixnet" + ))] + ProcessPacketForwarderQueueSize, + + #[strum(props( + help = "The latency distribution of attempting to retrieve network topology (from nym-api)" + ))] + ProcessTopologyQueryResolutionLatency, + + #[strum(props( + help = "The current number of final hop packets stuck in channels waiting to get delivered to appropriate websocket 
connections" + ))] + ProcessFinalHopPacketsPendingDelivery, + + #[strum(props( + help = "The current number of forward hop packets stuck in channels waiting to get delivered to appropriate TCP connections" + ))] + ProcessForwardHopPacketsPendingDelivery, +} + +impl PrometheusMetric { + fn name(&self) -> String { + self.to_string() + } + + fn help(&self) -> &'static str { + // SAFETY: every variant has a `help` prop defined (and there's a unit test is checking for that) + #[allow(clippy::unwrap_used)] + self.get_str("help").unwrap() + } + + fn is_complex(&self) -> bool { + matches!(self, PrometheusMetric::EntryClientSessionsDurations { .. }) + // match self { + // PrometheusMetric::EntryClientSessionsDurations { .. } => true, + // _ => false, + // } + } + + fn to_registrable_metric(&self) -> Option { + let name = self.name(); + let help = self.help(); + + match self { + PrometheusMetric::MixnetIngressForwardPacketsReceived => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetIngressFinalHopPacketsReceived => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetIngressMalformedPacketsReceived => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetIngressExcessiveDelayPacketsReceived => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetIngressForwardPacketsDropped => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetIngressFinalHopPacketsDropped => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetIngressForwardPacketsReceivedRate => { + Metric::new_float_gauge(&name, help) + } + PrometheusMetric::MixnetIngressFinalHopPacketsReceivedRate => { + Metric::new_float_gauge(&name, help) + } + PrometheusMetric::MixnetIngressMalformedPacketsReceivedRate => { + Metric::new_float_gauge(&name, help) + } + PrometheusMetric::MixnetIngressExcessiveDelayPacketsReceivedRate => { + Metric::new_float_gauge(&name, help) + } + 
PrometheusMetric::MixnetIngressForwardPacketsDroppedRate => { + Metric::new_float_gauge(&name, help) + } + PrometheusMetric::MixnetIngressFinalHopPacketsDroppedRate => { + Metric::new_float_gauge(&name, help) + } + PrometheusMetric::MixnetEgressStoredOnDiskFinalHopPackets => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetEgressForwardPacketsSent => Metric::new_int_gauge(&name, help), + PrometheusMetric::MixnetEgressAckSent => Metric::new_int_gauge(&name, help), + PrometheusMetric::MixnetEgressForwardPacketsDropped => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::MixnetEgressForwardPacketsSendRate => { + Metric::new_float_gauge(&name, help) + } + PrometheusMetric::MixnetEgressAckSendRate => Metric::new_float_gauge(&name, help), + PrometheusMetric::MixnetEgressForwardPacketsDroppedRate => { + Metric::new_float_gauge(&name, help) + } + PrometheusMetric::EntryClientUniqueUsers => Metric::new_int_gauge(&name, help), + PrometheusMetric::EntryClientSessionsStarted => Metric::new_int_gauge(&name, help), + PrometheusMetric::EntryClientSessionsFinished => Metric::new_int_gauge(&name, help), + PrometheusMetric::EntryClientSessionsDurations { .. 
} => { + Metric::new_histogram(&name, help, Some(CLIENT_SESSION_DURATION_BUCKETS)) + } + PrometheusMetric::WireguardBytesTx => Metric::new_int_gauge(&name, help), + PrometheusMetric::WireguardBytesRx => Metric::new_int_gauge(&name, help), + PrometheusMetric::WireguardTotalPeers => Metric::new_int_gauge(&name, help), + PrometheusMetric::WireguardActivePeers => Metric::new_int_gauge(&name, help), + PrometheusMetric::WireguardBytesTxRate => Metric::new_float_gauge(&name, help), + PrometheusMetric::WireguardBytesRxRate => Metric::new_float_gauge(&name, help), + PrometheusMetric::NetworkActiveIngressMixnetConnections => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::NetworkActiveIngressWebSocketConnections => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::NetworkActiveEgressMixnetConnections => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::ProcessForwardHopPacketsBeingDelayed => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::ProcessPacketForwarderQueueSize => Metric::new_int_gauge(&name, help), + PrometheusMetric::ProcessTopologyQueryResolutionLatency => { + Metric::new_histogram(&name, help, None) + } + PrometheusMetric::ProcessFinalHopPacketsPendingDelivery => { + Metric::new_int_gauge(&name, help) + } + PrometheusMetric::ProcessForwardHopPacketsPendingDelivery => { + Metric::new_int_gauge(&name, help) + } + } + } + + fn set(&self, value: i64) { + metrics_registry().set(&self.name(), value); + } + + fn set_float(&self, value: f64) { + metrics_registry().set_float(&self.name(), value); + } + + fn inc(&self) { + metrics_registry().inc(&self.name()); + } + + fn inc_by(&self, value: i64) { + metrics_registry().inc_by(&self.name(), value); + } + + fn observe_histogram(&self, value: f64) { + let reg = metrics_registry(); + if !reg.add_to_histogram(&self.name(), value) { + if let Some(registrable) = self.to_registrable_metric() { + reg.register_metric(registrable); + reg.add_to_histogram(&self.name(), value); + 
} + } + } + + fn start_timer(&self) -> Option { + metrics_registry().start_timer(&self.name()) + } +} + +#[non_exhaustive] +pub struct NymNodePrometheusMetrics {} + +impl NymNodePrometheusMetrics { + // initialise all fields on startup with default values so that they'd be immediately available for query + pub(crate) fn initialise() -> Self { + let registry = metrics_registry(); + + // we can't initialise complex metrics as their names will only be fully known at runtime + for kind in PrometheusMetric::iter() { + if !kind.is_complex() { + if let Some(metric) = kind.to_registrable_metric() { + registry.register_metric(metric); + } + } + } + + NymNodePrometheusMetrics {} + } + + pub fn set(&self, metric: PrometheusMetric, value: i64) { + metric.set(value) + } + + pub fn set_float(&self, metric: PrometheusMetric, value: f64) { + metric.set_float(value) + } + + pub fn inc(&self, metric: PrometheusMetric) { + metric.inc() + } + + pub fn inc_by(&self, metric: PrometheusMetric, value: i64) { + metric.inc_by(value) + } + + pub fn observe_histogram(&self, metric: PrometheusMetric, value: f64) { + metric.observe_histogram(value) + } + + pub fn start_timer(&self, metric: PrometheusMetric) -> Option { + metric.start_timer() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use strum::IntoEnumIterator; + + #[test] + fn prometheus_metrics() { + // a sanity check for anyone adding new metrics. 
if this test fails, + // make sure any methods on `PrometheusMetric` enum don't need updating + // or require custom Display impl + assert_eq!(37, PrometheusMetric::COUNT) + } + + #[test] + fn every_variant_has_help_property() { + for variant in PrometheusMetric::iter() { + assert!(variant.get_str("help").is_some()) + } + } + + #[test] + fn prometheus_metrics_names() { + // make sure nothing changed in our serialisation + let simple = PrometheusMetric::MixnetIngressForwardPacketsReceived.to_string(); + assert_eq!("nym_node_mixnet_ingress_forward_packets_received", simple); + + let parameterised = + PrometheusMetric::EntryClientSessionsDurations { typ: "vpn".into() }.to_string(); + assert_eq!( + "nym_node_entry_client_sessions_durations_vpn", + parameterised + ) + } +} diff --git a/nym-node/src/config/metrics.rs b/nym-node/src/config/metrics.rs index 80f9dcea6d..5bdec5bdbf 100644 --- a/nym-node/src/config/metrics.rs +++ b/nym-node/src/config/metrics.rs @@ -25,6 +25,14 @@ pub struct Debug { #[serde(with = "humantime_serde")] pub stale_mixnet_metrics_cleaner_rate: Duration, + /// Specify the target rate of updating global prometheus counters. + #[serde(with = "humantime_serde")] + pub global_prometheus_counters_update_rate: Duration, + + /// Specify the target rate of updating egress packets pending delivery counter. 
+ #[serde(with = "humantime_serde")] + pub pending_egress_packets_update_rate: Duration, + /// Specify the rate of updating clients sessions #[serde(with = "humantime_serde")] pub clients_sessions_update_rate: Duration, @@ -42,8 +50,10 @@ impl Debug { const DEFAULT_CONSOLE_LOGGING_INTERVAL: Duration = Duration::from_millis(60_000); const DEFAULT_LEGACY_MIXING_UPDATE_RATE: Duration = Duration::from_millis(30_000); const DEFAULT_AGGREGATOR_UPDATE_RATE: Duration = Duration::from_secs(5); - const DEFAULT_STALE_MIXNET_ETRICS_UPDATE_RATE: Duration = Duration::from_secs(3600); + const DEFAULT_STALE_MIXNET_METRICS_UPDATE_RATE: Duration = Duration::from_secs(3600); const DEFAULT_CLIENT_SESSIONS_UPDATE_RATE: Duration = Duration::from_secs(3600); + const GLOBAL_PROMETHEUS_COUNTERS_UPDATE_INTERVAL: Duration = Duration::from_secs(30); + const DEFAULT_PENDING_EGRESS_PACKETS_UPDATE_RATE: Duration = Duration::from_secs(30); } impl Default for Debug { @@ -53,7 +63,10 @@ impl Default for Debug { console_logging_update_interval: Self::DEFAULT_CONSOLE_LOGGING_INTERVAL, legacy_mixing_metrics_update_rate: Self::DEFAULT_LEGACY_MIXING_UPDATE_RATE, aggregator_update_rate: Self::DEFAULT_AGGREGATOR_UPDATE_RATE, - stale_mixnet_metrics_cleaner_rate: Self::DEFAULT_STALE_MIXNET_ETRICS_UPDATE_RATE, + stale_mixnet_metrics_cleaner_rate: Self::DEFAULT_STALE_MIXNET_METRICS_UPDATE_RATE, + global_prometheus_counters_update_rate: + Self::GLOBAL_PROMETHEUS_COUNTERS_UPDATE_INTERVAL, + pending_egress_packets_update_rate: Self::DEFAULT_PENDING_EGRESS_PACKETS_UPDATE_RATE, clients_sessions_update_rate: Self::DEFAULT_CLIENT_SESSIONS_UPDATE_RATE, } } diff --git a/nym-node/src/config/mod.rs b/nym-node/src/config/mod.rs index 6b996f24be..8f0cb9f220 100644 --- a/nym-node/src/config/mod.rs +++ b/nym-node/src/config/mod.rs @@ -444,6 +444,7 @@ pub struct Http { /// An optional bearer token for accessing certain http endpoints. /// Currently only used for obtaining mixnode's stats. 
#[serde(default)] + #[serde(deserialize_with = "de_maybe_stringified")] pub access_token: Option, /// Specify whether basic system information should be exposed. diff --git a/nym-node/src/node/http/error.rs b/nym-node/src/node/http/error.rs index 8019c5fd58..251705f520 100644 --- a/nym-node/src/node/http/error.rs +++ b/nym-node/src/node/http/error.rs @@ -1,5 +1,5 @@ // Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 +// SPDX-License-Identifier: GPL-3.0-only use std::io; use std::net::SocketAddr; diff --git a/nym-node/src/node/http/router/api/v1/metrics/mod.rs b/nym-node/src/node/http/router/api/v1/metrics/mod.rs index 71a9760273..51af714434 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/mod.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/mod.rs @@ -10,7 +10,12 @@ use crate::node::http::state::metrics::MetricsAppState; use axum::extract::FromRef; use axum::routing::get; use axum::Router; +use nym_http_api_common::middleware::bearer_auth::AuthLayer; use nym_node_requests::routes::api::v1::metrics; +use nym_node_requests::routes::api::v1::metrics::prometheus_absolute; +use std::sync::Arc; +use tracing::info; +use zeroize::Zeroizing; pub mod legacy_mixing; pub mod packets_stats; @@ -21,16 +26,23 @@ pub mod wireguard; #[derive(Debug, Clone, Default)] pub struct Config { - // + pub bearer_token: Option>>, } #[allow(deprecated)] -pub(super) fn routes(_config: Config) -> Router +pub(super) fn routes(config: Config) -> Router where S: Send + Sync + 'static + Clone, MetricsAppState: FromRef, { - Router::new() + if config.bearer_token.is_none() { + info!( + "bearer token hasn't been set. 
'{}' route will not be exposed", + prometheus_absolute() + ) + } + + let router = Router::new() .route( metrics::LEGACY_MIXING, get(legacy_mixing::legacy_mixing_stats), @@ -38,6 +50,17 @@ where .route(metrics::PACKETS_STATS, get(packets_stats)) .route(metrics::WIREGUARD_STATS, get(wireguard_stats)) .route(metrics::SESSIONS, get(sessions_stats)) - .route(metrics::VERLOC, get(verloc_stats)) - .route(metrics::PROMETHEUS, get(prometheus_metrics)) + .route(metrics::VERLOC, get(verloc_stats)); + + let auth_middleware = config.bearer_token.map(AuthLayer::new); + + // don't expose prometheus route without bearer token set + if let Some(auth_middleware) = auth_middleware { + router.route( + metrics::PROMETHEUS, + get(prometheus_metrics).route_layer(auth_middleware), + ) + } else { + router + } } diff --git a/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs b/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs index bdb0050d79..2bd8a3356b 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/prometheus.rs @@ -1,12 +1,6 @@ // Copyright 2024 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::node::http::state::metrics::MetricsAppState; -use axum::extract::State; -use axum::http::StatusCode; -use axum_extra::TypedHeader; -use headers::authorization::Bearer; -use headers::Authorization; use nym_metrics::metrics; /// Returns `prometheus` compatible metrics @@ -25,21 +19,7 @@ use nym_metrics::metrics; ("prometheus_token" = []) ) )] -pub(crate) async fn prometheus_metrics( - TypedHeader(authorization): TypedHeader>, - State(state): State, -) -> Result { - if authorization.token().is_empty() { - return Err(StatusCode::UNAUTHORIZED); - } - // TODO: is 500 the correct error code here? 
- let Some(metrics_key) = state.prometheus_access_token else { - return Err(StatusCode::INTERNAL_SERVER_ERROR); - }; - - if metrics_key != authorization.token() { - return Err(StatusCode::UNAUTHORIZED); - } - - Ok(metrics!()) +// the AuthLayer is protecting access to this endpoint +pub(crate) async fn prometheus_metrics() -> String { + metrics!() } diff --git a/nym-node/src/node/http/router/mod.rs b/nym-node/src/node/http/router/mod.rs index 449638c8d6..3d40e9a67e 100644 --- a/nym-node/src/node/http/router/mod.rs +++ b/nym-node/src/node/http/router/mod.rs @@ -20,6 +20,8 @@ use nym_node_requests::api::SignedHostInformation; use nym_node_requests::routes; use std::net::SocketAddr; use std::path::Path; +use std::sync::Arc; +use zeroize::Zeroizing; pub mod api; pub mod landing_page; @@ -115,6 +117,11 @@ impl HttpServerConfig { self.api.v1_config.authenticator.details = Some(authenticator); self } + + pub fn with_prometheus_bearer_token(mut self, bearer_token: Option) -> Self { + self.api.v1_config.metrics.bearer_token = bearer_token.map(|b| Arc::new(Zeroizing::new(b))); + self + } } pub struct NymNodeRouter { diff --git a/nym-node/src/node/http/state/metrics.rs b/nym-node/src/node/http/state/metrics.rs index c9156c021e..8b10fb0a30 100644 --- a/nym-node/src/node/http/state/metrics.rs +++ b/nym-node/src/node/http/state/metrics.rs @@ -9,8 +9,6 @@ pub use nym_verloc::measurements::metrics::SharedVerlocStats; #[derive(Clone)] pub struct MetricsAppState { - pub(crate) prometheus_access_token: Option, - pub(crate) metrics: NymNodeMetrics, pub(crate) verloc: SharedVerlocStats, diff --git a/nym-node/src/node/http/state/mod.rs b/nym-node/src/node/http/state/mod.rs index b9b58c0cce..0e2fa98786 100644 --- a/nym-node/src/node/http/state/mod.rs +++ b/nym-node/src/node/http/state/mod.rs @@ -24,17 +24,7 @@ impl AppState { // does it have to be? // also no. 
startup_time: Instant::now(), - metrics: MetricsAppState { - prometheus_access_token: None, - metrics, - verloc, - }, + metrics: MetricsAppState { metrics, verloc }, } } - - #[must_use] - pub fn with_metrics_key(mut self, bearer_token: impl Into>) -> Self { - self.metrics.prometheus_access_token = bearer_token.into(); - self - } } diff --git a/nym-node/src/node/metrics/aggregator.rs b/nym-node/src/node/metrics/aggregator.rs index c166eb0d3c..8f76b14edb 100644 --- a/nym-node/src/node/metrics/aggregator.rs +++ b/nym-node/src/node/metrics/aggregator.rs @@ -44,7 +44,7 @@ impl MetricsAggregator { self.event_sender.clone() } - pub fn register_handler(&mut self, handler: H, update_interval: Duration) + pub fn register_handler(&mut self, handler: H, update_interval: impl Into>) where H: MetricsHandler, { diff --git a/nym-node/src/node/metrics/events_listener.rs b/nym-node/src/node/metrics/events_listener.rs index 755fb6cc8b..939f19b3a9 100644 --- a/nym-node/src/node/metrics/events_listener.rs +++ b/nym-node/src/node/metrics/events_listener.rs @@ -1,2 +1,2 @@ // Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 +// SPDX-License-Identifier: GPL-3.0-only diff --git a/nym-node/src/node/metrics/handler/client_sessions.rs b/nym-node/src/node/metrics/handler/client_sessions.rs index 8394eb1d89..2f2326c541 100644 --- a/nym-node/src/node/metrics/handler/client_sessions.rs +++ b/nym-node/src/node/metrics/handler/client_sessions.rs @@ -1,5 +1,5 @@ // Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 +// SPDX-License-Identifier: GPL-3.0-only use crate::node::metrics::handler::{ MetricsHandler, OnStartMetricsHandler, OnUpdateMetricsHandler, @@ -10,6 +10,8 @@ use nym_gateway_stats_storage::error::StatsStorageError; use nym_gateway_stats_storage::models::{TicketType, ToSessionType}; use nym_node_metrics::entry::{ActiveSession, ClientSessions, FinishedSession}; use nym_node_metrics::events::GatewaySessionEvent; +use 
nym_node_metrics::prometheus_wrapper::PrometheusMetric::EntryClientSessionsDurations; +use nym_node_metrics::prometheus_wrapper::PROMETHEUS_METRICS; use nym_node_metrics::NymNodeMetrics; use nym_sphinx_types::DestinationAddressBytes; use time::{Date, Duration, OffsetDateTime}; @@ -53,6 +55,13 @@ impl GatewaySessionStatsHandler { ) -> Result<(), StatsStorageError> { if let Some(session) = self.storage.get_active_session(client).await? { if let Some(finished_session) = session.end_at(stop_time) { + PROMETHEUS_METRICS.observe_histogram( + EntryClientSessionsDurations { + typ: finished_session.typ.to_string(), + }, + finished_session.duration.as_secs_f64(), + ); + self.storage .insert_finished_session(self.current_day, finished_session) .await?; diff --git a/nym-node/src/node/metrics/handler/global_prometheus_updater/at_last_update.rs b/nym-node/src/node/metrics/handler/global_prometheus_updater/at_last_update.rs new file mode 100644 index 0000000000..af16be7ecb --- /dev/null +++ b/nym-node/src/node/metrics/handler/global_prometheus_updater/at_last_update.rs @@ -0,0 +1,219 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use nym_node_metrics::mixnet::{EgressMixingStats, IngressMixingStats, MixingStats}; +use nym_node_metrics::wireguard::WireguardStats; +use nym_node_metrics::NymNodeMetrics; +use time::OffsetDateTime; + +// used to calculate traffic rates +#[derive(Debug)] +pub(crate) struct AtLastUpdate { + time: OffsetDateTime, + + mixnet: LastMixnet, + wireguard: LastWireguard, +} + +impl AtLastUpdate { + pub(crate) fn is_initial(&self) -> bool { + self.time == OffsetDateTime::UNIX_EPOCH + } + + pub(crate) fn rates(&self, previous: &Self) -> RateSinceUpdate { + let delta_secs = (self.time - previous.time).as_seconds_f64(); + + RateSinceUpdate { + mixnet: self.mixnet.rates(&previous.mixnet, delta_secs), + wireguard: self.wireguard.rates(&previous.wireguard, delta_secs), + } + } +} + +impl Default for AtLastUpdate { + fn 
default() -> Self { + AtLastUpdate { + time: OffsetDateTime::now_utc(), + mixnet: Default::default(), + wireguard: Default::default(), + } + } +} + +impl From<&NymNodeMetrics> for AtLastUpdate { + fn from(metrics: &NymNodeMetrics) -> Self { + AtLastUpdate { + time: OffsetDateTime::now_utc(), + mixnet: (&metrics.mixnet).into(), + wireguard: (&metrics.wireguard).into(), + } + } +} + +#[derive(Debug, Default)] +struct LastMixnet { + ingres: LastMixnetIngress, + egress: LastMixnetEgress, +} + +impl LastMixnet { + fn rates(&self, previous: &Self, time_delta_secs: f64) -> MixnetRateSinceUpdate { + MixnetRateSinceUpdate { + ingress: self.ingres.rates(&previous.ingres, time_delta_secs), + egress: self.egress.rates(&previous.egress, time_delta_secs), + } + } +} + +impl From<&MixingStats> for LastMixnet { + fn from(value: &MixingStats) -> Self { + LastMixnet { + ingres: (&value.ingress).into(), + egress: (&value.egress).into(), + } + } +} + +#[derive(Debug, Default)] +struct LastMixnetIngress { + forward_hop_packets_received: usize, + final_hop_packets_received: usize, + malformed_packets_received: usize, + excessive_delay_packets: usize, + forward_hop_packets_dropped: usize, + final_hop_packets_dropped: usize, +} + +impl LastMixnetIngress { + fn rates(&self, previous: &Self, time_delta_secs: f64) -> MixnetIngressRateSinceUpdate { + let forward_hop_packets_received_delta = + self.forward_hop_packets_received - previous.forward_hop_packets_received; + let final_hop_packets_received_delta = + self.final_hop_packets_received - previous.final_hop_packets_received; + let malformed_packets_received_delta = + self.malformed_packets_received - previous.malformed_packets_received; + let excessive_delay_packets_delta = + self.excessive_delay_packets - previous.excessive_delay_packets; + let forward_hop_packets_dropped_delta = + self.forward_hop_packets_dropped - previous.forward_hop_packets_dropped; + let final_hop_packets_dropped_delta = + self.final_hop_packets_dropped - 
previous.final_hop_packets_dropped; + + MixnetIngressRateSinceUpdate { + forward_hop_packets_received_sec: forward_hop_packets_received_delta as f64 + / time_delta_secs, + final_hop_packets_received_sec: final_hop_packets_received_delta as f64 + / time_delta_secs, + malformed_packets_received_sec: malformed_packets_received_delta as f64 + / time_delta_secs, + excessive_delay_packets_sec: excessive_delay_packets_delta as f64 / time_delta_secs, + forward_hop_packets_dropped_sec: forward_hop_packets_dropped_delta as f64 + / time_delta_secs, + final_hop_packets_dropped_sec: final_hop_packets_dropped_delta as f64 / time_delta_secs, + } + } +} + +impl From<&IngressMixingStats> for LastMixnetIngress { + fn from(value: &IngressMixingStats) -> Self { + LastMixnetIngress { + forward_hop_packets_received: value.forward_hop_packets_received(), + final_hop_packets_received: value.final_hop_packets_received(), + malformed_packets_received: value.malformed_packets_received(), + excessive_delay_packets: value.excessive_delay_packets(), + forward_hop_packets_dropped: value.forward_hop_packets_dropped(), + final_hop_packets_dropped: value.final_hop_packets_dropped(), + } + } +} + +#[derive(Debug, Default)] +struct LastMixnetEgress { + forward_hop_packets_sent: usize, + ack_packets_sent: usize, + forward_hop_packets_dropped: usize, +} + +impl LastMixnetEgress { + fn rates(&self, previous: &Self, time_delta_secs: f64) -> MixnetEgressRateSinceUpdate { + let forward_hop_packets_sent_delta = + self.forward_hop_packets_sent - previous.forward_hop_packets_sent; + let ack_packets_sent_delta = self.ack_packets_sent - previous.ack_packets_sent; + let forward_hop_packets_dropped_delta = + self.forward_hop_packets_dropped - previous.forward_hop_packets_dropped; + + MixnetEgressRateSinceUpdate { + forward_hop_packets_sent_sec: forward_hop_packets_sent_delta as f64 / time_delta_secs, + ack_packets_sent_sec: ack_packets_sent_delta as f64 / time_delta_secs, + forward_hop_packets_dropped_sec: 
forward_hop_packets_dropped_delta as f64 + / time_delta_secs, + } + } +} + +impl From<&EgressMixingStats> for LastMixnetEgress { + fn from(value: &EgressMixingStats) -> Self { + LastMixnetEgress { + forward_hop_packets_sent: value.forward_hop_packets_sent(), + ack_packets_sent: value.ack_packets_sent(), + forward_hop_packets_dropped: value.forward_hop_packets_dropped(), + } + } +} + +#[derive(Debug, Default)] +struct LastWireguard { + bytes_tx: usize, + bytes_rx: usize, +} + +impl LastWireguard { + fn rates(&self, previous: &Self, time_delta_secs: f64) -> WireguardRateSinceUpdate { + let bytes_tx_delta = self.bytes_tx - previous.bytes_tx; + let bytes_rx_delta = self.bytes_rx - previous.bytes_rx; + + WireguardRateSinceUpdate { + bytes_tx_sec: bytes_tx_delta as f64 / time_delta_secs, + bytes_rx_sec: bytes_rx_delta as f64 / time_delta_secs, + } + } +} + +impl From<&WireguardStats> for LastWireguard { + fn from(value: &WireguardStats) -> Self { + LastWireguard { + bytes_tx: value.bytes_tx(), + bytes_rx: value.bytes_rx(), + } + } +} + +pub(crate) struct RateSinceUpdate { + pub(crate) mixnet: MixnetRateSinceUpdate, + pub(crate) wireguard: WireguardRateSinceUpdate, +} + +pub(crate) struct MixnetRateSinceUpdate { + pub(crate) ingress: MixnetIngressRateSinceUpdate, + pub(crate) egress: MixnetEgressRateSinceUpdate, +} + +pub(crate) struct MixnetIngressRateSinceUpdate { + pub(crate) forward_hop_packets_received_sec: f64, + pub(crate) final_hop_packets_received_sec: f64, + pub(crate) malformed_packets_received_sec: f64, + pub(crate) excessive_delay_packets_sec: f64, + pub(crate) forward_hop_packets_dropped_sec: f64, + pub(crate) final_hop_packets_dropped_sec: f64, +} + +pub(crate) struct MixnetEgressRateSinceUpdate { + pub(crate) forward_hop_packets_sent_sec: f64, + pub(crate) ack_packets_sent_sec: f64, + pub(crate) forward_hop_packets_dropped_sec: f64, +} + +pub(crate) struct WireguardRateSinceUpdate { + pub(crate) bytes_tx_sec: f64, + pub(crate) bytes_rx_sec: f64, +} diff 
--git a/nym-node/src/node/metrics/handler/global_prometheus_updater/mod.rs b/nym-node/src/node/metrics/handler/global_prometheus_updater/mod.rs new file mode 100644 index 0000000000..81c61d9db8 --- /dev/null +++ b/nym-node/src/node/metrics/handler/global_prometheus_updater/mod.rs @@ -0,0 +1,223 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node::metrics::handler::global_prometheus_updater::at_last_update::AtLastUpdate; +use crate::node::metrics::handler::{ + MetricsHandler, OnStartMetricsHandler, OnUpdateMetricsHandler, +}; +use async_trait::async_trait; +use nym_node_metrics::prometheus_wrapper::{ + NymNodePrometheusMetrics, PrometheusMetric, PROMETHEUS_METRICS, +}; +use nym_node_metrics::NymNodeMetrics; + +mod at_last_update; + +// it can be anything, we just need a unique type_id to register our handler +pub struct GlobalPrometheusData; + +pub struct PrometheusGlobalNodeMetricsRegistryUpdater { + metrics: NymNodeMetrics, + prometheus_wrapper: &'static NymNodePrometheusMetrics, + at_last_update: AtLastUpdate, +} + +impl PrometheusGlobalNodeMetricsRegistryUpdater { + pub(crate) fn new(metrics: NymNodeMetrics) -> Self { + Self { + metrics, + prometheus_wrapper: &PROMETHEUS_METRICS, + at_last_update: Default::default(), + } + } +} + +#[async_trait] +impl OnStartMetricsHandler for PrometheusGlobalNodeMetricsRegistryUpdater {} + +#[async_trait] +impl OnUpdateMetricsHandler for PrometheusGlobalNodeMetricsRegistryUpdater { + async fn on_update(&mut self) { + let entry_guard = self.metrics.entry.client_sessions().await; + use PrometheusMetric::*; + + // # MIXNET + // ## INGRESS + self.prometheus_wrapper.set( + MixnetIngressForwardPacketsReceived, + self.metrics.mixnet.ingress.forward_hop_packets_received() as i64, + ); + self.prometheus_wrapper.set( + MixnetIngressFinalHopPacketsReceived, + self.metrics.mixnet.ingress.final_hop_packets_received() as i64, + ); + self.prometheus_wrapper.set( + 
MixnetIngressMalformedPacketsReceived, + self.metrics.mixnet.ingress.malformed_packets_received() as i64, + ); + self.prometheus_wrapper.set( + MixnetIngressExcessiveDelayPacketsReceived, + self.metrics.mixnet.ingress.excessive_delay_packets() as i64, + ); + self.prometheus_wrapper.set( + MixnetEgressForwardPacketsDropped, + self.metrics.mixnet.ingress.forward_hop_packets_dropped() as i64, + ); + self.prometheus_wrapper.set( + MixnetIngressFinalHopPacketsDropped, + self.metrics.mixnet.ingress.final_hop_packets_dropped() as i64, + ); + + // ## EGRESS + self.prometheus_wrapper.set( + MixnetEgressStoredOnDiskFinalHopPackets, + self.metrics.mixnet.egress.disk_persisted_packets() as i64, + ); + self.prometheus_wrapper.set( + MixnetEgressForwardPacketsSent, + self.metrics.mixnet.egress.forward_hop_packets_sent() as i64, + ); + self.prometheus_wrapper.set( + MixnetEgressAckSent, + self.metrics.mixnet.egress.ack_packets_sent() as i64, + ); + self.prometheus_wrapper.set( + MixnetEgressForwardPacketsDropped, + self.metrics.mixnet.egress.forward_hop_packets_dropped() as i64, + ); + + // # ENTRY + self.prometheus_wrapper.set( + EntryClientUniqueUsers, + entry_guard.unique_users.len() as i64, + ); + self.prometheus_wrapper.set( + EntryClientSessionsStarted, + entry_guard.sessions_started as i64, + ); + self.prometheus_wrapper.set( + EntryClientSessionsFinished, + entry_guard.finished_sessions.len() as i64, + ); + + // # WIREGUARD + self.prometheus_wrapper + .set(WireguardBytesRx, self.metrics.wireguard.bytes_rx() as i64); + self.prometheus_wrapper + .set(WireguardBytesTx, self.metrics.wireguard.bytes_tx() as i64); + self.prometheus_wrapper.set( + WireguardTotalPeers, + self.metrics.wireguard.total_peers() as i64, + ); + self.prometheus_wrapper.set( + WireguardActivePeers, + self.metrics.wireguard.active_peers() as i64, + ); + + // # NETWORK + self.prometheus_wrapper.set( + NetworkActiveIngressMixnetConnections, + self.metrics + .network + 
.active_ingress_mixnet_connections_count() as i64, + ); + self.prometheus_wrapper.set( + NetworkActiveIngressWebSocketConnections, + self.metrics + .network + .active_ingress_websocket_connections_count() as i64, + ); + self.prometheus_wrapper.set( + NetworkActiveIngressWebSocketConnections, + self.metrics + .network + .active_egress_mixnet_connections_count() as i64, + ); + + // # PROCESS + self.prometheus_wrapper.set( + ProcessForwardHopPacketsBeingDelayed, + self.metrics + .process + .forward_hop_packets_being_delayed_count() as i64, + ); + self.prometheus_wrapper.set( + ProcessPacketForwarderQueueSize, + self.metrics.process.packet_forwarder_queue_size() as i64, + ); + self.prometheus_wrapper.set( + ProcessFinalHopPacketsPendingDelivery, + self.metrics + .process + .final_hop_packets_pending_delivery_count() as i64, + ); + self.prometheus_wrapper.set( + ProcessForwardHopPacketsPendingDelivery, + self.metrics + .process + .forward_hop_packets_pending_delivery_count() as i64, + ); + + let updated = AtLastUpdate::from(&self.metrics); + + // # RATES + if !self.at_last_update.is_initial() { + let diff = updated.rates(&self.at_last_update); + + self.prometheus_wrapper.set_float( + MixnetIngressForwardPacketsReceivedRate, + diff.mixnet.ingress.forward_hop_packets_received_sec, + ); + self.prometheus_wrapper.set_float( + MixnetIngressFinalHopPacketsReceivedRate, + diff.mixnet.ingress.final_hop_packets_received_sec, + ); + self.prometheus_wrapper.set_float( + MixnetIngressMalformedPacketsReceivedRate, + diff.mixnet.ingress.malformed_packets_received_sec, + ); + self.prometheus_wrapper.set_float( + MixnetIngressExcessiveDelayPacketsReceivedRate, + diff.mixnet.ingress.excessive_delay_packets_sec, + ); + self.prometheus_wrapper.set_float( + MixnetIngressForwardPacketsDroppedRate, + diff.mixnet.ingress.forward_hop_packets_dropped_sec, + ); + self.prometheus_wrapper.set_float( + MixnetIngressFinalHopPacketsDroppedRate, + diff.mixnet.ingress.final_hop_packets_dropped_sec, + 
); + + // ## EGRESS + self.prometheus_wrapper.set_float( + MixnetEgressForwardPacketsSendRate, + diff.mixnet.egress.forward_hop_packets_sent_sec, + ); + self.prometheus_wrapper.set_float( + MixnetEgressAckSendRate, + diff.mixnet.egress.ack_packets_sent_sec, + ); + self.prometheus_wrapper.set_float( + MixnetEgressForwardPacketsDroppedRate, + diff.mixnet.egress.forward_hop_packets_dropped_sec, + ); + + // # WIREGUARD + self.prometheus_wrapper + .set_float(WireguardBytesRxRate, diff.wireguard.bytes_rx_sec); + self.prometheus_wrapper + .set_float(WireguardBytesTxRate, diff.wireguard.bytes_tx_sec); + } + self.at_last_update = updated; + } +} + +#[async_trait] +impl MetricsHandler for PrometheusGlobalNodeMetricsRegistryUpdater { + type Events = GlobalPrometheusData; + + async fn handle_event(&mut self, _event: Self::Events) { + panic!("this should have never been called! MetricsHandler has been incorrectly called on PrometheusNodeMetricsRegistryUpdater") + } +} diff --git a/nym-node/src/node/metrics/handler/legacy_packet_data.rs b/nym-node/src/node/metrics/handler/legacy_packet_data.rs index 4b649e831a..6240993797 100644 --- a/nym-node/src/node/metrics/handler/legacy_packet_data.rs +++ b/nym-node/src/node/metrics/handler/legacy_packet_data.rs @@ -1,5 +1,5 @@ // Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 +// SPDX-License-Identifier: GPL-3.0-only use crate::node::metrics::handler::{ MetricsHandler, OnStartMetricsHandler, OnUpdateMetricsHandler, diff --git a/nym-node/src/node/metrics/handler/mod.rs b/nym-node/src/node/metrics/handler/mod.rs index 025b1419b7..764acc0ff9 100644 --- a/nym-node/src/node/metrics/handler/mod.rs +++ b/nym-node/src/node/metrics/handler/mod.rs @@ -1,5 +1,5 @@ // Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 +// SPDX-License-Identifier: GPL-3.0-only use async_trait::async_trait; use std::any; @@ -9,8 +9,11 @@ use tokio::time::Instant; use tracing::trace; pub(crate) mod client_sessions; 
+pub(crate) mod global_prometheus_updater; pub(crate) mod legacy_packet_data; pub(crate) mod mixnet_data_cleaner; +pub(crate) mod pending_egress_packets_updater; +pub(crate) mod prometheus_events_handler; pub(crate) trait RegistrableHandler: Downcast + OnStartMetricsHandler + OnUpdateMetricsHandler + Send + Sync + 'static @@ -63,23 +66,23 @@ pub(crate) trait OnStartMetricsHandler { #[async_trait] pub(crate) trait OnUpdateMetricsHandler { - async fn on_update(&mut self); + async fn on_update(&mut self) {} } pub(crate) struct HandlerWrapper { handler: Box>, - update_interval: Duration, + update_interval: Option, last_updated: Instant, } impl HandlerWrapper { - pub fn new(update_interval: Duration, handler: U) -> Self + pub fn new(update_interval: impl Into>, handler: U) -> Self where U: MetricsHandler, { HandlerWrapper { handler: Box::new(handler), - update_interval, + update_interval: update_interval.into(), last_updated: Instant::now(), } } @@ -107,11 +110,15 @@ impl OnStartMetricsHandler for HandlerWrapper { #[async_trait] impl OnUpdateMetricsHandler for HandlerWrapper { async fn on_update(&mut self) { + let Some(update_interval) = self.update_interval else { + return; + }; + let name = any::type_name::(); trace!("on update for handler for events of type {name}"); let elapsed = self.last_updated.elapsed(); - if elapsed < self.update_interval { + if elapsed < update_interval { trace!("too soon for updates"); return; } diff --git a/nym-node/src/node/metrics/handler/pending_egress_packets_updater.rs b/nym-node/src/node/metrics/handler/pending_egress_packets_updater.rs new file mode 100644 index 0000000000..d1fde40f70 --- /dev/null +++ b/nym-node/src/node/metrics/handler/pending_egress_packets_updater.rs @@ -0,0 +1,60 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +use crate::node::metrics::handler::{ + MetricsHandler, OnStartMetricsHandler, OnUpdateMetricsHandler, +}; +use async_trait::async_trait; +use 
nym_gateway::node::ActiveClientsStore; +use nym_mixnet_client::client::ActiveConnections; +use nym_node_metrics::NymNodeMetrics; + +// it can be anything, we just need a unique type_id to register our handler +pub struct PendingEgressPackets; + +pub struct PendingEgressPacketsUpdater { + metrics: NymNodeMetrics, + active_websocket_clients: ActiveClientsStore, + active_mixnet_connections: ActiveConnections, +} + +impl PendingEgressPacketsUpdater { + pub(crate) fn new( + metrics: NymNodeMetrics, + active_clients: ActiveClientsStore, + active_mixnet_connections: ActiveConnections, + ) -> Self { + PendingEgressPacketsUpdater { + metrics, + active_websocket_clients: active_clients, + active_mixnet_connections, + } + } +} + +#[async_trait] +impl OnStartMetricsHandler for PendingEgressPacketsUpdater {} + +#[async_trait] +impl OnUpdateMetricsHandler for PendingEgressPacketsUpdater { + async fn on_update(&mut self) { + let pending_final = self.active_websocket_clients.pending_packets(); + self.metrics + .process + .update_final_hop_packets_pending_delivery(pending_final); + + let pending_forward = self.active_mixnet_connections.pending_packets(); + self.metrics + .process + .update_forward_hop_packets_pending_delivery(pending_forward) + } +} + +#[async_trait] +impl MetricsHandler for PendingEgressPacketsUpdater { + type Events = PendingEgressPackets; + + async fn handle_event(&mut self, _event: Self::Events) { + panic!("this should have never been called! 
MetricsHandler has been incorrectly called on PendingEgressPacketsUpdater") + } +} diff --git a/nym-node/src/node/metrics/handler/prometheus_events_handler.rs b/nym-node/src/node/metrics/handler/prometheus_events_handler.rs new file mode 100644 index 0000000000..28a1696278 --- /dev/null +++ b/nym-node/src/node/metrics/handler/prometheus_events_handler.rs @@ -0,0 +1,6 @@ +// Copyright 2024 - Nym Technologies SA +// SPDX-License-Identifier: GPL-3.0-only + +// pub struct PrometheusEventsHandler { +// // +// } diff --git a/nym-node/src/node/mixnet/handler.rs b/nym-node/src/node/mixnet/handler.rs index 6cfb7d50d1..e645636c96 100644 --- a/nym-node/src/node/mixnet/handler.rs +++ b/nym-node/src/node/mixnet/handler.rs @@ -112,7 +112,14 @@ impl ConnectionHandler { .await { Err(err) => error!("Failed to store client data - {err}"), - Ok(_) => trace!("Stored packet for {client}"), + Ok(_) => { + self.shared + .metrics + .mixnet + .egress + .add_disk_persisted_packet(); + trace!("Stored packet for {client}") + } } } Ok(_) => trace!("Pushed received packet to {client}"), diff --git a/nym-node/src/node/mixnet/packet_forwarding.rs b/nym-node/src/node/mixnet/packet_forwarding.rs index bcd51a52a9..5e70841c86 100644 --- a/nym-node/src/node/mixnet/packet_forwarding.rs +++ b/nym-node/src/node/mixnet/packet_forwarding.rs @@ -1,5 +1,5 @@ // Copyright 2024 - Nym Technologies SA -// SPDX-License-Identifier: Apache-2.0 +// SPDX-License-Identifier: GPL-3.0-only use futures::StreamExt; use nym_mixnet_client::forwarder::{ @@ -80,7 +80,7 @@ impl PacketForwarder { C: SendWithoutResponse, { let delayed_packet = packet.into_inner(); - self.forward_packet(delayed_packet) + self.forward_packet(delayed_packet); } fn handle_new_packet(&mut self, new_packet: PacketToForward) @@ -102,6 +102,18 @@ impl PacketForwarder { } } + fn update_queue_len_metric(&self) { + self.metrics + .process + .update_forward_hop_packets_being_delayed(self.delay_queue.len()); + } + + fn update_channel_size_metric(&self, 
channel_size: usize) { + self.metrics + .process + .update_packet_forwarder_queue_size(channel_size) + } + pub async fn run(&mut self) where C: SendWithoutResponse, @@ -125,18 +137,21 @@ impl PacketForwarder { // and hence it can't happen that ALL senders are dropped #[allow(clippy::unwrap_used)] self.handle_new_packet(new_packet.unwrap()); + let channel_len = self.packet_sender.len(); if processed % 1000 == 0 { - let queue_len = self.packet_sender.len(); - match queue_len { + match channel_len { n if n > 200 => error!("there are currently {n} mix packets waiting to get forwarded!"), n if n > 50 => warn!("there are currently {n} mix packets waiting to get forwarded"), n => trace!("there are currently {n} mix packets waiting to get forwarded"), } } - + self.update_channel_size_metric(channel_len); processed += 1; } } + + // update the metrics on either new packet being inserted or packet being removed + self.update_queue_len_metric(); } trace!("PacketForwarder: Exiting"); } diff --git a/nym-node/src/node/mod.rs b/nym-node/src/node/mod.rs index 0f2ac89fe0..f91fce211b 100644 --- a/nym-node/src/node/mod.rs +++ b/nym-node/src/node/mod.rs @@ -21,8 +21,10 @@ use crate::node::http::{HttpServerConfig, NymNodeHttpServer, NymNodeRouter}; use crate::node::metrics::aggregator::MetricsAggregator; use crate::node::metrics::console_logger::ConsoleLogger; use crate::node::metrics::handler::client_sessions::GatewaySessionStatsHandler; +use crate::node::metrics::handler::global_prometheus_updater::PrometheusGlobalNodeMetricsRegistryUpdater; use crate::node::metrics::handler::legacy_packet_data::LegacyMixingStatsUpdater; use crate::node::metrics::handler::mixnet_data_cleaner::MixnetMetricsCleaner; +use crate::node::metrics::handler::pending_egress_packets_updater::PendingEgressPacketsUpdater; use crate::node::mixnet::packet_forwarding::PacketForwarder; use crate::node::mixnet::shared::ProcessingConfig; use crate::node::mixnet::SharedFinalHopData; @@ -30,6 +32,7 @@ use 
crate::node::shared_topology::NymNodeTopologyProvider; use nym_bin_common::bin_info; use nym_crypto::asymmetric::{ed25519, x25519}; use nym_gateway::node::{ActiveClientsStore, GatewayTasksBuilder}; +use nym_mixnet_client::client::ActiveConnections; use nym_mixnet_client::forwarder::MixForwardingSender; use nym_network_requester::{ set_active_gateway, setup_fs_gateways_storage, store_gateway_details, CustomGatewayDetails, @@ -751,7 +754,8 @@ impl NymNode { .with_authenticator_details(auth_details) .with_used_exit_policy(exit_policy_details) .with_description(self.description.clone()) - .with_auxiliary_details(auxiliary_details); + .with_auxiliary_details(auxiliary_details) + .with_prometheus_bearer_token(self.config.http.access_token.clone()); if self.config.http.expose_system_info { config = config.with_system_info(get_system_info( @@ -772,8 +776,7 @@ impl NymNode { config.api.v1_config.node.roles.ip_packet_router_enabled = true; } - let app_state = AppState::new(self.metrics.clone(), self.verloc_stats.clone()) - .with_metrics_key(self.config.http.access_token.clone()); + let app_state = AppState::new(self.metrics.clone(), self.verloc_stats.clone()); Ok(NymNodeRouter::new(config, app_state) .build_server(&self.config.http.bind_address) @@ -844,7 +847,12 @@ impl NymNode { tokio::spawn(async move { verloc_measurer.run().await }); } - pub(crate) fn setup_metrics_backend(&self, shutdown: TaskClient) -> MetricEventsSender { + pub(crate) fn setup_metrics_backend( + &self, + active_clients_store: ActiveClientsStore, + active_egress_mixnet_connections: ActiveConnections, + shutdown: TaskClient, + ) -> MetricEventsSender { info!("setting up node metrics..."); // aggregator (to listen for any metrics events) @@ -870,12 +878,35 @@ impl NymNode { self.config.metrics.debug.clients_sessions_update_rate, ); - // handler for periodically cleaning up stale recipient/sender darta + // handler for periodically cleaning up stale recipient/sender data 
metrics_aggregator.register_handler( MixnetMetricsCleaner::new(self.metrics.clone()), self.config.metrics.debug.stale_mixnet_metrics_cleaner_rate, ); + // handler for updating the value of forward/final hop packets pending delivery + metrics_aggregator.register_handler( + PendingEgressPacketsUpdater::new( + self.metrics.clone(), + active_clients_store, + active_egress_mixnet_connections, + ), + self.config.metrics.debug.pending_egress_packets_update_rate, + ); + + // handler for updating the prometheus registry from the global atomic metrics counters + // such as number of packets received + metrics_aggregator.register_handler( + PrometheusGlobalNodeMetricsRegistryUpdater::new(self.metrics.clone()), + self.config + .metrics + .debug + .global_prometheus_counters_update_rate, + ); + + // handler for handling prometheus metrics events + // metrics_aggregator.register_handler(PrometheusEventsHandler{}, None); + // note: we're still measuring things such as number of mixed packets, // but since they're stored as atomic integers, they are incremented directly at source // rather than going through event pipeline @@ -909,7 +940,7 @@ impl NymNode { &self, active_clients_store: &ActiveClientsStore, shutdown: TaskClient, - ) -> MixForwardingSender { + ) -> (MixForwardingSender, ActiveConnections) { let processing_config = ProcessingConfig::new(&self.config); // we're ALWAYS listening for mixnet packets, either for forward or final hops (or both) @@ -926,7 +957,13 @@ impl NymNode { self.config.mixnet.debug.initial_connection_timeout, self.config.mixnet.debug.maximum_connection_buffer_size, ); - let mixnet_client = nym_mixnet_client::Client::new(mixnet_client_config); + let mixnet_client = nym_mixnet_client::Client::new( + mixnet_client_config, + self.metrics + .network + .active_egress_mixnet_connections_counter(), + ); + let active_connections = mixnet_client.active_connections(); let mut packet_forwarder = PacketForwarder::new( mixnet_client, @@ -951,7 +988,7 @@ impl 
NymNode { ); mixnet::Listener::new(self.config.mixnet.bind_address, shared).start(); - mix_packet_sender + (mix_packet_sender, active_connections) } pub(crate) async fn run(mut self) -> Result<(), NymNodeError> { @@ -981,14 +1018,19 @@ impl NymNode { self.start_verloc_measurements(task_manager.subscribe_named("verloc-measurements")); - let metrics_sender = self.setup_metrics_backend(task_manager.subscribe_named("metrics")); let active_clients_store = ActiveClientsStore::new(); - let mix_packet_sender = self.start_mixnet_listener( + let (mix_packet_sender, active_egress_mixnet_connections) = self.start_mixnet_listener( &active_clients_store, task_manager.subscribe_named("mixnet-traffic"), ); + let metrics_sender = self.setup_metrics_backend( + active_clients_store.clone(), + active_egress_mixnet_connections, + task_manager.subscribe_named("metrics"), + ); + self.start_gateway_tasks( metrics_sender, active_clients_store, diff --git a/nym-node/src/node/shared_topology.rs b/nym-node/src/node/shared_topology.rs index b65f1e1a5b..f3ba72fd00 100644 --- a/nym-node/src/node/shared_topology.rs +++ b/nym-node/src/node/shared_topology.rs @@ -3,6 +3,7 @@ use async_trait::async_trait; use nym_gateway::node::{NymApiTopologyProvider, NymApiTopologyProviderConfig, UserAgent}; +use nym_node_metrics::prometheus_wrapper::{PrometheusMetric, PROMETHEUS_METRICS}; use nym_topology::node::RoutingNode; use nym_topology::{NymTopology, Role, TopologyProvider}; use std::sync::Arc; @@ -11,7 +12,6 @@ use time::OffsetDateTime; use tokio::sync::Mutex; use tracing::debug; use url::Url; - // I wouldn't be surprised if this became the start of the node topology cache #[derive(Clone)] @@ -97,6 +97,10 @@ impl TopologyProvider for NymNodeTopologyProvider { if let Some(cached) = guard.cached_topology() { return Some(cached); } + + // the observation will be included on drop + let _timer = + PROMETHEUS_METRICS.start_timer(PrometheusMetric::ProcessTopologyQueryResolutionLatency); guard.update_cache().await 
} } diff --git a/service-providers/ip-packet-router/src/error.rs b/service-providers/ip-packet-router/src/error.rs index 28e21e6edd..7c8a4590d5 100644 --- a/service-providers/ip-packet-router/src/error.rs +++ b/service-providers/ip-packet-router/src/error.rs @@ -29,7 +29,7 @@ pub enum IpPacketRouterError { #[error("failed to connect to mixnet: {source}")] FailedToConnectToMixnet { source: nym_sdk::Error }, - #[error("the entity wrapping the network requester has disconnected")] + #[error("the entity wrapping the ip packet router has disconnected")] DisconnectedParent, #[error("received packet has an invalid version: {0}")] diff --git a/service-providers/ip-packet-router/src/request_filter/mod.rs b/service-providers/ip-packet-router/src/request_filter/mod.rs index c11fb6c9d6..599a00a1d2 100644 --- a/service-providers/ip-packet-router/src/request_filter/mod.rs +++ b/service-providers/ip-packet-router/src/request_filter/mod.rs @@ -56,13 +56,10 @@ impl RequestFilter { pub(crate) async fn check_address(&self, address: &SocketAddr) -> bool { match &*self.inner { RequestFilterInner::ExitPolicy { policy_filter } => { - match policy_filter.check(address).await { - Err(err) => { - warn!("failed to validate '{address}' against the exit policy: {err}"); - false - } - Ok(res) => res, - } + policy_filter.check(address).await.unwrap_or_else(|err| { + warn!("failed to validate '{address}' against the exit policy: {err}"); + false + }) } } } From 41fb17a31bb7b82bb7e73fa503970506a2209bae Mon Sep 17 00:00:00 2001 From: dynco-nym <173912580+dynco-nym@users.noreply.github.com> Date: Fri, 20 Dec 2024 12:18:45 +0100 Subject: [PATCH 64/64] Extend swagger docs (#5235) * WIP adding derive(ToSchema) * Derive ToSchema for more types * ContractBuildInformation on /nym_contracts_detailed * rustfmt * Add cfg_attr * A bunch of annotations * Compiles with utoipa 5.2 * WIP * Post rebase fixes * Gitattributes to ignore .sqlx diffs * generate Sqlx schema files * Improvements * Move ecash schema out 
of ecash crate * Move redocly config to nym-api/ * Move redocly config to nym-api/ * Remove ErrorResponse * Move generated openapi spec to .gitignore * Include BSL licence * Remove utoipa from ecash toml file * Remove placeholder annotations * Chain-watcher rebase changes * Update licence info * Treat Scalar as String in OpenAPI --- .gitattributes | 1 + .gitignore | 3 +- Cargo.lock | 142 ++++----- Cargo.toml | 6 +- LICENSES/BSL-1.0 | 23 ++ .../contracts-common/Cargo.toml | 4 +- .../contracts-common/src/types.rs | 1 + .../mixnet-contract/src/gateway.rs | 3 + .../mixnet-contract/src/mixnode.rs | 15 + .../mixnet-contract/src/nym_node.rs | 11 + .../mixnet-contract/src/reward_params.rs | 11 + .../mixnet-contract/src/rewarding/mod.rs | 5 + .../mixnet-contract/src/types.rs | 8 + common/credentials-interface/Cargo.toml | 1 + .../exit-policy/src/policy/address_policy.rs | 7 +- common/http-api-common/src/lib.rs | 1 - common/nym_offline_compact_ecash/Cargo.toml | 2 +- .../src/common_types.rs | 2 +- common/nym_offline_compact_ecash/src/lib.rs | 2 +- common/ticketbooks-merkle/Cargo.toml | 3 +- common/ticketbooks-merkle/src/lib.rs | 15 +- common/types/Cargo.toml | 1 + common/types/src/monitoring.rs | 6 +- deny.toml | 1 + nym-api/Cargo.toml | 2 +- nym-api/nym-api-requests/Cargo.toml | 2 +- nym-api/nym-api-requests/src/ecash/models.rs | 272 ++++++++++++++---- nym-api/nym-api-requests/src/legacy.rs | 2 +- nym-api/nym-api-requests/src/models.rs | 45 ++- nym-api/nym-api-requests/src/nym_nodes.rs | 10 +- nym-api/redocly/.redocly.lint-ignore.yaml | 47 +++ nym-api/redocly/.redocly.yaml | 12 + nym-api/redocly/readme.MD | 27 ++ nym-api/redocly/redocly.sh | 5 + nym-api/src/ecash/api_routes/issued.rs | 4 +- .../src/ecash/api_routes/partial_signing.rs | 6 +- nym-api/src/ecash/api_routes/spending.rs | 6 +- nym-api/src/network/handlers.rs | 18 +- nym-api/src/network/models.rs | 5 +- .../handlers/without_monitor.rs | 20 +- nym-api/src/node_status_api/models.rs | 14 +- 
nym-api/src/nym_nodes/handlers/legacy.rs | 4 +- .../nym_nodes/handlers/unstable/full_fat.rs | 2 +- .../src/nym_nodes/handlers/unstable/mod.rs | 1 + .../handlers/unstable/semi_skimmed.rs | 2 +- .../nym_nodes/handlers/unstable/skimmed.rs | 33 ++- nym-api/src/support/http/helpers.rs | 2 + nym-api/src/support/http/openapi.rs | 80 +----- .../src/api/v1/mod.rs | 2 - .../src/api/v1/ticketbook/models.rs | 3 + .../src/http/router/api/v1/openapi.rs | 3 - .../src/http/router/api/v1/ticketbook/mod.rs | 46 +-- .../http/router/api/v1/ticketbook/shares.rs | 12 +- .../nym-credential-proxy/src/http/types.rs | 5 +- .../src/http/api/gateways.rs | 4 +- .../src/http/api/metrics/sessions.rs | 2 +- .../src/http/api/mixnodes.rs | 2 +- .../src/http/api/services/mod.rs | 2 +- .../nym-node-status-api/src/http/api_docs.rs | 1 - .../nym-node-status-api/src/http/mod.rs | 20 +- nym-node/nym-node-requests/Cargo.toml | 2 +- nym-node/nym-node-requests/src/api/mod.rs | 18 +- .../src/api/v1/metrics/models.rs | 17 +- .../src/api/v1/node/models.rs | 3 + .../http/router/api/v1/authenticator/root.rs | 4 +- .../api/v1/gateway/client_interfaces/mod.rs | 12 +- .../node/http/router/api/v1/gateway/root.rs | 4 +- .../src/node/http/router/api/v1/health.rs | 4 +- .../router/api/v1/ip_packet_router/root.rs | 4 +- .../router/api/v1/metrics/legacy_mixing.rs | 4 +- .../router/api/v1/metrics/packets_stats.rs | 4 +- .../http/router/api/v1/metrics/sessions.rs | 4 +- .../node/http/router/api/v1/metrics/verloc.rs | 15 +- .../http/router/api/v1/metrics/wireguard.rs | 4 +- .../node/http/router/api/v1/mixnode/root.rs | 4 +- .../api/v1/network_requester/exit_policy.rs | 4 +- .../router/api/v1/network_requester/root.rs | 4 +- .../node/http/router/api/v1/node/auxiliary.rs | 4 +- .../router/api/v1/node/build_information.rs | 4 +- .../http/router/api/v1/node/description.rs | 4 +- .../node/http/router/api/v1/node/hardware.rs | 6 +- .../router/api/v1/node/host_information.rs | 6 +- .../src/node/http/router/api/v1/node/roles.rs | 4 
+- .../src/node/http/router/api/v1/openapi.rs | 4 - nym-node/src/node/http/router/types.rs | 4 +- ...9146f8b40d46d14aa0ecb7237d5d78a9f463f.json | 12 + ...3381fe8e839998717e6d1a0502f54438fc9b0.json | 12 + ...e836c7cff2646b6b76559543b4e4056094d58.json | 12 + ...91d372e750a74e2eb5a252a787cf571c326e4.json | 12 + ...738f6bda8532a5bd2d944884cd87784eefe43.json | 26 ++ ...a93e944b0d44ed1f7c1036f306e34372da11c.json | 20 ++ ...c8643ae77e22ad908147b910d9406234911f0.json | 12 + ...b9036df67d74966a35d9af74d43e93d3524eb.json | 12 + ...36dcb76982d7859e406daa12c97f671e6fd3b.json | 20 ++ ...1cb75bd6df809fb1f17620ab888d184291f0b.json | 12 + ...3f419a849d4ec45af40b052a4cbf09b44f3ec.json | 20 ++ ...57ed06dbebb3615b912fa59d9e22a097ded57.json | 12 + ...8b10dd0f01a04f77634d09af260b97a155264.json | 12 + ...efa10eee3e1488f7bb990847374c09e8a5944.json | 12 + ...e35e94e172738678c7299334c47a54caf5c50.json | 12 + ...ecb9fbe7b3636feb0d36cbad0610a0685cccc.json | 12 + ...00a9e2b6064f2252c66882fd4a7dd17e187cc.json | 12 + ...8d33ad532e06c270adc6f62c4fd6c40ecabcb.json | 12 + ...18384da48966d5a2b2de842e747ee1d7a5769.json | 12 + ...b310d02d77083207b6a42eb1f8e4dd80b00a8.json | 12 + ...9ce71582635df47f52dcf3fd1df4e7be6b96d.json | 20 ++ ...7472d7a3c6080766ab0f02aba4c776545adad.json | 20 ++ ...873717ba4b9d8f83bcb05f8a39094f0ff7c32.json | 20 ++ ...4f40adb4d74539bfe36432ae62a2b5268f5fd.json | 12 + ...a989aecff01e835cb8fc04acee1b83480a970.json | 20 ++ ...ab220797a39071754ad20bc14819fcced6c56.json | 12 + ...bb5ca4bbbb50e7a7fcaacc5361dde3157247a.json | 20 ++ nyx-chain-watcher/src/db/models.rs | 2 +- nyx-chain-watcher/src/models.rs | 11 + 114 files changed, 1116 insertions(+), 443 deletions(-) create mode 100644 .gitattributes create mode 100644 LICENSES/BSL-1.0 create mode 100644 nym-api/redocly/.redocly.lint-ignore.yaml create mode 100644 nym-api/redocly/.redocly.yaml create mode 100644 nym-api/redocly/readme.MD create mode 100755 nym-api/redocly/redocly.sh create mode 100644 
nym-validator-rewarder/.sqlx/query-0a802236d0b9cc7679971f884a89146f8b40d46d14aa0ecb7237d5d78a9f463f.json create mode 100644 nym-validator-rewarder/.sqlx/query-0e03fcbde46a0296e029624ae083381fe8e839998717e6d1a0502f54438fc9b0.json create mode 100644 nym-validator-rewarder/.sqlx/query-1d3938bc8d8d6829289ef7ff78ee836c7cff2646b6b76559543b4e4056094d58.json create mode 100644 nym-validator-rewarder/.sqlx/query-1ebd5510a96bc84a88cc91316f891d372e750a74e2eb5a252a787cf571c326e4.json create mode 100644 nym-validator-rewarder/.sqlx/query-227e0cd05334ac228ca55f309f3738f6bda8532a5bd2d944884cd87784eefe43.json create mode 100644 nym-validator-rewarder/.sqlx/query-2561fb016951ea4cd29e43fb9a4a93e944b0d44ed1f7c1036f306e34372da11c.json create mode 100644 nym-validator-rewarder/.sqlx/query-2db92c21c933bc1eadc486867f7c8643ae77e22ad908147b910d9406234911f0.json create mode 100644 nym-validator-rewarder/.sqlx/query-3227631b516dd16b8474e050393b9036df67d74966a35d9af74d43e93d3524eb.json create mode 100644 nym-validator-rewarder/.sqlx/query-397bde15134e32921ad87037e9436dcb76982d7859e406daa12c97f671e6fd3b.json create mode 100644 nym-validator-rewarder/.sqlx/query-3b75821c30e4e2a87fea04c92a91cb75bd6df809fb1f17620ab888d184291f0b.json create mode 100644 nym-validator-rewarder/.sqlx/query-3bdf81a9db6075f6f77224c30553f419a849d4ec45af40b052a4cbf09b44f3ec.json create mode 100644 nym-validator-rewarder/.sqlx/query-422a516baacf8ba26ea2dca46fa57ed06dbebb3615b912fa59d9e22a097ded57.json create mode 100644 nym-validator-rewarder/.sqlx/query-6be3f8abfa7a2e05721e533e4128b10dd0f01a04f77634d09af260b97a155264.json create mode 100644 nym-validator-rewarder/.sqlx/query-7c2aea05703247a865d5639bd84efa10eee3e1488f7bb990847374c09e8a5944.json create mode 100644 nym-validator-rewarder/.sqlx/query-84e200c64ff49be7241d43841d0e35e94e172738678c7299334c47a54caf5c50.json create mode 100644 nym-validator-rewarder/.sqlx/query-98b2ac25c05850c31990ce0b48fecb9fbe7b3636feb0d36cbad0610a0685cccc.json create mode 100644 
nym-validator-rewarder/.sqlx/query-a3db765a00f07d80656e58a543500a9e2b6064f2252c66882fd4a7dd17e187cc.json create mode 100644 nym-validator-rewarder/.sqlx/query-b865ffb57fa5f31de74e0dce28a8d33ad532e06c270adc6f62c4fd6c40ecabcb.json create mode 100644 nym-validator-rewarder/.sqlx/query-b9e16b5c6e11cfa2d3dfee8c0bf18384da48966d5a2b2de842e747ee1d7a5769.json create mode 100644 nym-validator-rewarder/.sqlx/query-be654926e94fb6a07ebb94dd526b310d02d77083207b6a42eb1f8e4dd80b00a8.json create mode 100644 nym-validator-rewarder/.sqlx/query-c88d07fecc3f33deaa6e93db1469ce71582635df47f52dcf3fd1df4e7be6b96d.json create mode 100644 nym-validator-rewarder/.sqlx/query-ceb15dea9ac66e69cfc2fa8fbd57472d7a3c6080766ab0f02aba4c776545adad.json create mode 100644 nym-validator-rewarder/.sqlx/query-d67b6b3fc1099b3ca48eed945d9873717ba4b9d8f83bcb05f8a39094f0ff7c32.json create mode 100644 nym-validator-rewarder/.sqlx/query-dcb00d96a003c9ad0b6213ac6974f40adb4d74539bfe36432ae62a2b5268f5fd.json create mode 100644 nym-validator-rewarder/.sqlx/query-e08a6456f6bd3cc5e8201d18dc3a989aecff01e835cb8fc04acee1b83480a970.json create mode 100644 nym-validator-rewarder/.sqlx/query-eba74b6531013fe5a83287bd50dab220797a39071754ad20bc14819fcced6c56.json create mode 100644 nym-validator-rewarder/.sqlx/query-fa2ea62ed8ccb08d0ef70bc212cbb5ca4bbbb50e7a7fcaacc5361dde3157247a.json diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..24e0b39b6a --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +nym-validator-rewarder/.sqlx/** diff=nodiff diff --git a/.gitignore b/.gitignore index f931d9b0bb..5646f38f53 100644 --- a/.gitignore +++ b/.gitignore @@ -54,7 +54,8 @@ nym-network-monitor/__pycache__ nym-network-monitor/*.key nym-network-monitor/.envrc nym-network-monitor/.envrc +nym-api/redocly/formatted-openapi.json *.sqlite -.build \ No newline at end of file +.build diff --git a/Cargo.lock b/Cargo.lock index df18e60636..30a01aa784 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3836,6 +3836,12 @@ 
dependencies = [ "scopeguard", ] +[[package]] +name = "lockfree-object-pool" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9374ef4228402d4b7e403e5838cb880d9ee663314b0a900d5a6aabf0c213552e" + [[package]] name = "log" version = "0.4.22" @@ -4355,27 +4361,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" -dependencies = [ - "num_enum_derive", -] - -[[package]] -name = "num_enum_derive" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.90", -] - [[package]] name = "num_threads" version = "0.1.7" @@ -5001,6 +4986,7 @@ dependencies = [ "serde", "serde_json", "thiserror", + "utoipa", "vergen", ] @@ -5192,6 +5178,7 @@ dependencies = [ "strum 0.26.3", "thiserror", "time", + "utoipa", ] [[package]] @@ -6530,6 +6517,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "time", + "utoipa", ] [[package]] @@ -6593,6 +6581,7 @@ dependencies = [ "thiserror", "ts-rs", "url", + "utoipa", "x25519-dalek", ] @@ -7485,39 +7474,6 @@ dependencies = [ "log", ] -[[package]] -name = "proc-macro-crate" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" -dependencies = [ - "toml_edit 0.21.1", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.109", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = 
"1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] - [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -7949,7 +7905,6 @@ checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" dependencies = [ "base64 0.22.1", "bytes", - "futures-channel", "futures-core", "futures-util", "http 1.1.0", @@ -8875,6 +8830,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "siphasher" version = "0.3.11" @@ -9932,7 +9893,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.14", + "toml_edit", ] [[package]] @@ -9944,17 +9905,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_edit" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" -dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow 0.5.40", -] - [[package]] name = "toml_edit" version = "0.22.14" @@ -9965,7 +9915,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.13", + "winnow", ] [[package]] @@ -10612,9 +10562,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "utoipa" -version = "4.2.3" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" +checksum = "514a48569e4e21c86d0b84b5612b5e73c0b2cf09db63260134ba426d4e8ea714" dependencies = [ "indexmap 2.2.6", "serde", @@ -10624,11 +10574,10 @@ dependencies = [ [[package]] name = "utoipa-gen" -version = "4.3.0" +version = "5.2.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bf0e16c02bc4bf5322ab65f10ab1149bdbcaa782cba66dc7057370a3f8190be" +checksum = "5629efe65599d0ccd5d493688cbf6e03aa7c1da07fe59ff97cf5977ed0637f66" dependencies = [ - "proc-macro-error", "proc-macro2", "quote", "regex", @@ -10638,14 +10587,13 @@ dependencies = [ [[package]] name = "utoipa-swagger-ui" -version = "7.1.0" +version = "8.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943e0ff606c6d57d410fd5663a4d7c074ab2c5f14ab903b9514565e59fa1189e" +checksum = "a5c80b4dd79ea382e8374d67dcce22b5c6663fa13a82ad3886441d1bbede5e35" dependencies = [ "axum 0.7.7", "mime_guess", "regex", - "reqwest 0.12.4", "rust-embed", "serde", "serde_json", @@ -10656,18 +10604,18 @@ dependencies = [ [[package]] name = "utoipauto" -version = "0.1.14" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608b8f2279483be386261655b562e40877ea434eb92093c894a644fda2021860" +checksum = "cba36db2c397c614110554a60fbb4bb97d5f8c6823775c766e6f455e37377047" dependencies = [ "utoipauto-macro", ] [[package]] name = "utoipauto-core" -version = "0.1.12" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17e82ab96c5a55263b5bed151b8426410d93aa909a453acdbd4b6792b5af7d64" +checksum = "268d76aaebb80eba79240b805972e52d7d410d4bcc52321b951318b0f440cd60" dependencies = [ "proc-macro2", "quote", @@ -10676,9 +10624,9 @@ dependencies = [ [[package]] name = "utoipauto-macro" -version = "0.1.12" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8338dc3c9526011ffaa2aa6bd60ddfda9d49d2123108690755c6e34844212" +checksum = "382673bda1d05c85b4550d32fd4192ccd4cffe9a908543a0795d1e7682b36246" dependencies = [ "proc-macro2", "quote", @@ -11269,15 +11217,6 @@ version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" -[[package]] -name = "winnow" -version = "0.5.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] - [[package]] name = "winnow" version = "0.6.13" @@ -11390,9 +11329,9 @@ dependencies = [ [[package]] name = "zip" -version = "1.1.4" +version = "2.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cc23c04387f4da0374be4533ad1208cbb091d5c11d070dfef13676ad6497164" +checksum = "40dd8c92efc296286ce1fbd16657c5dbefff44f1b4ca01cc5f517d8b7b3d3e2e" dependencies = [ "arbitrary", "crc32fast", @@ -11400,8 +11339,9 @@ dependencies = [ "displaydoc", "flate2", "indexmap 2.2.6", - "num_enum", + "memchr", "thiserror", + "zopfli", ] [[package]] @@ -11431,3 +11371,17 @@ dependencies = [ "wasmtimer", "zeroize", ] + +[[package]] +name = "zopfli" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5019f391bac5cf252e93bbcc53d039ffd62c7bfb7c150414d61369afe57e946" +dependencies = [ + "bumpalo", + "crc32fast", + "lockfree-object-pool", + "log", + "once_cell", + "simd-adler32", +] diff --git a/Cargo.toml b/Cargo.toml index cd44c15136..122784eb5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -347,9 +347,9 @@ tracing-log = "0.2" ts-rs = "10.0.0" tungstenite = { version = "0.20.1", default-features = false } url = "2.5" -utoipa = "4.2" -utoipa-swagger-ui = "7.1" -utoipauto = "0.1" +utoipa = "5.2" +utoipa-swagger-ui = "8.0" +utoipauto = "0.2" uuid = "*" vergen = { version = "=8.3.1", default-features = false } walkdir = "2" diff --git a/LICENSES/BSL-1.0 b/LICENSES/BSL-1.0 new file mode 100644 index 0000000000..36b7cd93cd --- /dev/null +++ b/LICENSES/BSL-1.0 @@ -0,0 +1,23 @@ +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of 
the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
diff --git a/common/cosmwasm-smart-contracts/contracts-common/Cargo.toml b/common/cosmwasm-smart-contracts/contracts-common/Cargo.toml index fa8a511222..4a80db6afa 100644 --- a/common/cosmwasm-smart-contracts/contracts-common/Cargo.toml +++ b/common/cosmwasm-smart-contracts/contracts-common/Cargo.toml @@ -13,6 +13,7 @@ cosmwasm-std = { workspace = true } cosmwasm-schema = { workspace = true } cw-storage-plus = { workspace = true } schemars = { workspace = true } +utoipa = { workspace = true, optional = true } serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } @@ -23,4 +24,5 @@ serde_json = { workspace = true } vergen = { workspace = true, features = ["build", "git", "gitcl", "rustc", "cargo"] } [features] -naive_float = [] \ No newline at end of file +naive_float = [] +utoipa = ["dep:utoipa"] diff --git a/common/cosmwasm-smart-contracts/contracts-common/src/types.rs b/common/cosmwasm-smart-contracts/contracts-common/src/types.rs index 320658662e..456fd45a76 100644 --- a/common/cosmwasm-smart-contracts/contracts-common/src/types.rs +++ b/common/cosmwasm-smart-contracts/contracts-common/src/types.rs @@ -221,6 +221,7 @@ fn default_unknown() -> String { // TODO: there's no reason this couldn't be used for proper binaries, but in that case // perhaps the struct should get renamed and moved to a "more" common crate #[cw_serde] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct ContractBuildInformation { /// Provides the name of the binary, i.e. the content of `CARGO_PKG_NAME` environmental variable. 
#[serde(default = "default_unknown")] diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/gateway.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/gateway.rs index ed0ee864e8..e21ad52d69 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/gateway.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/gateway.rs @@ -42,9 +42,11 @@ pub struct Gateway { #[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct GatewayBond { /// Original amount pledged by the operator of this node. + #[cfg_attr(feature = "utoipa", schema(value_type = crate::CoinSchema))] pub pledge_amount: Coin, /// Address of the owner of this gateway. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub owner: Addr, /// Block height at which this gateway has been bonded. @@ -55,6 +57,7 @@ pub struct GatewayBond { /// Entity who bonded this gateway on behalf of the owner. /// If exists, it's most likely the address of the vesting contract. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub proxy: Option, } diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs index bbae58a935..763082a1f1 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/mixnode.rs @@ -82,20 +82,25 @@ impl MixNodeDetails { // currently this struct is shared between mixnodes and nymnodes #[cw_serde] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct NodeRewarding { /// Information provided by the operator that influence the cost function. pub cost_params: NodeCostParams, /// Total pledge and compounded reward earned by the node operator. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub operator: Decimal, /// Total delegation and compounded reward earned by all node delegators. 
+ #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub delegates: Decimal, /// Cumulative reward earned by the "unit delegation" since the block 0. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub total_unit_reward: Decimal, /// Value of the theoretical "unit delegation" that has delegated to this node at block 0. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub unit_delegation: Decimal, /// Marks the epoch when this node was last rewarded so that we wouldn't accidentally attempt @@ -492,14 +497,17 @@ impl NodeRewarding { ::cosmwasm_schema::schemars::JsonSchema, )] #[schemars(crate = "::cosmwasm_schema::schemars")] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct MixNodeBond { /// Unique id assigned to the bonded mixnode. pub mix_id: NodeId, /// Address of the owner of this mixnode. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub owner: Addr, /// Original amount pledged by the operator of this node. + #[cfg_attr(feature = "utoipa", schema(value_type = crate::CoinSchema))] pub original_pledge: Coin, // REMOVED (but might be needed due to legacy things, idk yet) @@ -510,6 +518,7 @@ pub struct MixNodeBond { /// Entity who bonded this mixnode on behalf of the owner. /// If exists, it's most likely the address of the vesting contract. + #[cfg_attr(feature = "utoipa", schema(value_type = Option))] pub proxy: Option, /// Block height at which this mixnode has been bonded. 
@@ -545,6 +554,7 @@ impl MixNodeBond { feature = "generate-ts", ts(export, export_to = "ts-packages/types/src/types/rust/Mixnode.ts") )] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct MixNode { /// Network address of this mixnode, for example 1.1.1.1 or foo.mixnode.com pub host: String, @@ -571,11 +581,14 @@ pub struct MixNode { /// The cost parameters, or the cost function, defined for the particular mixnode that influences /// how the rewards should be split between the node operator and its delegators. #[cw_serde] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct NodeCostParams { /// The profit margin of the associated node, i.e. the desired percent of the reward to be distributed to the operator. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub profit_margin_percent: Percent, /// Operating cost of the associated node per the entire interval. + #[cfg_attr(feature = "utoipa", schema(value_type = crate::CoinSchema))] pub interval_operating_cost: Coin, } @@ -680,7 +693,9 @@ pub struct PendingMixNodeChanges { } #[derive(Default, Copy, Clone, Debug, Serialize, Deserialize, JsonSchema)] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct LegacyPendingMixNodeChanges { + #[cfg_attr(feature = "utoipa", schema(value_type = Option))] pub pledge_change: Option, } diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/nym_node.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/nym_node.rs index 375fd34374..4087489cc3 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/nym_node.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/nym_node.rs @@ -231,6 +231,7 @@ pub struct RoleMetadata { /// Full details associated with given node. #[cw_serde] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct NymNodeDetails { /// Basic bond information of this node, such as owner address, original pledge, etc. 
pub bond_information: NymNodeBond, @@ -288,14 +289,19 @@ impl NymNodeDetails { } #[cw_serde] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct NymNodeBond { /// Unique id assigned to the bonded node. + #[cfg_attr(feature = "utoipa", schema(value_type = u32))] pub node_id: NodeId, /// Address of the owner of this nym-node. + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub owner: Addr, /// Original amount pledged by the operator of this node. + + #[cfg_attr(feature = "utoipa", schema(value_type = crate::CoinSchema))] pub original_pledge: Coin, /// Block height at which this nym-node has been bonded. @@ -348,6 +354,7 @@ impl NymNodeBond { feature = "generate-ts", ts(export, export_to = "ts-packages/types/src/types/rust/NymNode.ts") )] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct NymNode { /// Network address of this nym-node, for example 1.1.1.1 or foo.mixnode.com /// that is used to discover other capabilities of this node. @@ -358,6 +365,7 @@ pub struct NymNode { pub custom_http_port: Option, /// Base58-encoded ed25519 EdDSA public key. 
+ #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub identity_key: IdentityKey, // TODO: I don't think we want to include sphinx keys here, // given we want to rotate them and keeping that in sync with contract will be a PITA @@ -435,8 +443,11 @@ pub struct NodeConfigUpdate { export_to = "ts-packages/types/src/types/rust/PendingNodeChanges.ts" ) )] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct PendingNodeChanges { + #[cfg_attr(feature = "utoipa", schema(value_type = Option))] pub pledge_change: Option, + #[cfg_attr(feature = "utoipa", schema(value_type = Option))] pub cost_params_change: Option, } diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/reward_params.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/reward_params.rs index 40e34b2888..55bba8f9a6 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/reward_params.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/reward_params.rs @@ -21,31 +21,37 @@ pub type WorkFactor = Decimal; )] #[cw_serde] #[derive(Copy)] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct IntervalRewardParams { /// Current value of the rewarding pool. /// It is expected to be constant throughout the interval. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub reward_pool: Decimal, /// Current value of the staking supply. /// It is expected to be constant throughout the interval. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub staking_supply: Decimal, /// Defines the percentage of stake needed to reach saturation for all of the nodes in the rewarded set. /// Also known as `beta`. 
#[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub staking_supply_scale_factor: Percent, // computed values /// Current value of the computed reward budget per epoch, per node. /// It is expected to be constant throughout the interval. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub epoch_reward_budget: Decimal, /// Current value of the stake saturation point. /// It is expected to be constant throughout the interval. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub stake_saturation_point: Decimal, // constants(-ish) @@ -54,6 +60,7 @@ pub struct IntervalRewardParams { /// It is not really expected to be changing very often. /// As a matter of fact, unless there's a very specific reason, it should remain constant. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub sybil_resistance: Percent, // default: 10 @@ -61,6 +68,7 @@ pub struct IntervalRewardParams { /// It is not really expected to be changing very often. /// As a matter of fact, unless there's a very specific reason, it should remain constant. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub active_set_work_factor: Decimal, // default: 2% @@ -70,6 +78,7 @@ pub struct IntervalRewardParams { /// It is not really expected to be changing very often. /// As a matter of fact, unless there's a very specific reason, it should remain constant. 
#[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub interval_pool_emission: Percent, } @@ -90,6 +99,7 @@ impl IntervalRewardParams { )] #[cw_serde] #[derive(Copy)] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct RewardingParams { /// Parameters that should remain unchanged throughout an interval. pub interval: IntervalRewardParams, @@ -254,6 +264,7 @@ impl RewardingParams { )] #[cw_serde] #[derive(Copy)] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct RewardedSetParams { /// The expected number of nodes assigned entry gateway role (i.e. [`Role::EntryGateway`]) pub entry_gateways: u32, diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/mod.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/mod.rs index e1c320f806..ce54caab33 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/mod.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/rewarding/mod.rs @@ -17,10 +17,12 @@ pub mod simulator; )] #[cw_serde] #[derive(Copy, Default)] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub struct RewardEstimate { /// The amount of **decimal** coins that are going to get distributed to the node, /// i.e. the operator and all its delegators. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub total_node_reward: Decimal, // note that operator reward includes the operating_cost, @@ -28,14 +30,17 @@ pub struct RewardEstimate { // in that case the operator reward would still be `1nym` as opposed to 0 /// The share of the reward that is going to get distributed to the node operator. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub operator: Decimal, /// The share of the reward that is going to get distributed among the node delegators. 
#[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub delegates: Decimal, /// The operating cost of this node. Note: it's already included in the operator reward. #[cfg_attr(feature = "generate-ts", ts(type = "string"))] + #[cfg_attr(feature = "utoipa", schema(value_type = String))] pub operating_cost: Decimal, } diff --git a/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs b/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs index d722f5af93..b3c4e5b2be 100644 --- a/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs +++ b/common/cosmwasm-smart-contracts/mixnet-contract/src/types.rs @@ -175,6 +175,14 @@ where } } +#[cfg(feature = "utoipa")] +#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] +#[cfg_attr(feature = "utoipa", schema(title = "Coin"))] +pub struct CoinSchema { + pub denom: String, + pub amount: String, +} + /// The current state of the mixnet contract. #[cw_serde] pub struct ContractState { diff --git a/common/credentials-interface/Cargo.toml b/common/credentials-interface/Cargo.toml index 2142acf14a..f7f957d68b 100644 --- a/common/credentials-interface/Cargo.toml +++ b/common/credentials-interface/Cargo.toml @@ -16,6 +16,7 @@ serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } strum = { workspace = true, features = ["derive"] } time = { workspace = true, features = ["serde"] } +utoipa = { workspace = true } rand = { workspace = true } nym-compact-ecash = { path = "../nym_offline_compact_ecash" } diff --git a/common/exit-policy/src/policy/address_policy.rs b/common/exit-policy/src/policy/address_policy.rs index 621fd291e9..07a1595d93 100644 --- a/common/exit-policy/src/policy/address_policy.rs +++ b/common/exit-policy/src/policy/address_policy.rs @@ -86,7 +86,6 @@ impl Display for AddressPolicyAction { /// ``` #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] #[cfg_attr(feature = "openapi", 
derive(utoipa::ToSchema))] -#[cfg_attr(feature = "openapi", aliases(ExitPolicy))] pub struct AddressPolicy { /// A list of rules to apply to find out whether an address is /// contained by this policy. @@ -727,10 +726,10 @@ mod test { let policy = AddressPolicy::parse_from_torrc( r#" ExitPolicy reject 1.2.3.4/32:* -ExitPolicy reject 1.2.3.5:* +ExitPolicy reject 1.2.3.5:* ExitPolicy reject 1.2.3.6/16:* -ExitPolicy reject 1.2.3.6/16:123-456 -ExitPolicy accept *:53 +ExitPolicy reject 1.2.3.6/16:123-456 +ExitPolicy accept *:53 ExitPolicy accept6 *6:119 ExitPolicy accept *4:120 ExitPolicy reject6 [FC00::]/7:* diff --git a/common/http-api-common/src/lib.rs b/common/http-api-common/src/lib.rs index dbc3bc0125..ffd258de95 100644 --- a/common/http-api-common/src/lib.rs +++ b/common/http-api-common/src/lib.rs @@ -10,7 +10,6 @@ use serde::{Deserialize, Serialize}; pub mod middleware; #[derive(Debug, Clone)] -#[cfg_attr(feature = "utoipa", derive(utoipa::ToSchema))] pub enum FormattedResponse { Json(Json), Yaml(Yaml), diff --git a/common/nym_offline_compact_ecash/Cargo.toml b/common/nym_offline_compact_ecash/Cargo.toml index 2497edabd9..5b6e17f262 100644 --- a/common/nym_offline_compact_ecash/Cargo.toml +++ b/common/nym_offline_compact_ecash/Cargo.toml @@ -63,4 +63,4 @@ par_signing = ["rayon"] # but given it's not done very frequently, it shouldn't be too much of a problem # furthermore, we can't and shouldn't dedicate the entire nym-api CPU just for verification, # but this feature might potentially be desirable for clients. 
-par_verify = ["rayon"] \ No newline at end of file +par_verify = ["rayon"] diff --git a/common/nym_offline_compact_ecash/src/common_types.rs b/common/nym_offline_compact_ecash/src/common_types.rs index 14f9b1cb66..bc450f97a4 100644 --- a/common/nym_offline_compact_ecash/src/common_types.rs +++ b/common/nym_offline_compact_ecash/src/common_types.rs @@ -4,10 +4,10 @@ use crate::ecash_group_parameters; use crate::error::Result; use crate::helpers::{g1_tuple_to_bytes, recover_g1_tuple}; -use bls12_381::{G1Projective, Scalar}; use serde::{Deserialize, Serialize}; use subtle::Choice; +pub use bls12_381::{G1Projective, G2Projective, Scalar}; pub type SignerIndex = u64; #[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] diff --git a/common/nym_offline_compact_ecash/src/lib.rs b/common/nym_offline_compact_ecash/src/lib.rs index c6346a159f..77e1ce5199 100644 --- a/common/nym_offline_compact_ecash/src/lib.rs +++ b/common/nym_offline_compact_ecash/src/lib.rs @@ -36,7 +36,7 @@ pub mod common_types; pub mod constants; pub mod error; mod helpers; -mod proofs; +pub mod proofs; pub mod scheme; pub mod tests; mod traits; diff --git a/common/ticketbooks-merkle/Cargo.toml b/common/ticketbooks-merkle/Cargo.toml index 5b6576da78..3c3f278c6b 100644 --- a/common/ticketbooks-merkle/Cargo.toml +++ b/common/ticketbooks-merkle/Cargo.toml @@ -14,6 +14,7 @@ readme.workspace = true sha2 = { workspace = true } rs_merkle = { workspace = true } schemars = { workspace = true } +utoipa = { workspace = true } serde = { workspace = true, features = ["derive"] } time = { workspace = true } @@ -23,4 +24,4 @@ nym-serde-helpers = { path = "../serde-helpers", features = ["date", "base64", " [dev-dependencies] rand_chacha = { workspace = true } rand = { workspace = true } -serde_json = { workspace = true } \ No newline at end of file +serde_json = { workspace = true } diff --git a/common/ticketbooks-merkle/src/lib.rs b/common/ticketbooks-merkle/src/lib.rs index f031a3dfc4..acaf088949 100644 
--- a/common/ticketbooks-merkle/src/lib.rs +++ b/common/ticketbooks-merkle/src/lib.rs @@ -14,15 +14,18 @@ use serde::{Deserialize, Serialize}; use sha2::Digest; use std::fmt::{Debug, Formatter}; use time::Date; +use utoipa::ToSchema; // no point in importing the entire contract commons just for this one type pub type DepositId = u32; pub type DKGEpochId = u64; -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, JsonSchema)] +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, JsonSchema, ToSchema)] #[serde(rename_all = "camelCase")] pub struct IssuedTicketbook { + #[schema(value_type = u32)] pub deposit_id: DepositId, + #[schema(value_type = u32)] pub epoch_id: DKGEpochId, // 96 bytes serialised 'BlindedSignature' @@ -37,9 +40,11 @@ pub struct IssuedTicketbook { #[schemars(with = "String")] #[serde(with = "nym_serde_helpers::date")] + #[schema(value_type = String)] pub expiration_date: Date, #[schemars(with = "String")] + #[schema(value_type = String)] pub ticketbook_type: TicketType, } @@ -80,7 +85,7 @@ pub struct InsertedMerkleLeaf { pub leaf: MerkleLeaf, } -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, PartialOrd, PartialEq, Eq)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, PartialOrd, PartialEq, Eq, ToSchema)] pub struct MerkleLeaf { #[schemars(with = "String")] #[serde(with = "nym_serde_helpers::hex")] @@ -162,16 +167,14 @@ impl IssuedTicketbooksMerkleTree { } } -#[derive(Serialize, Deserialize, JsonSchema)] +#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] pub struct IssuedTicketbooksFullMerkleProof { #[schemars(with = "String")] #[serde(with = "inner_proof_base64_serde")] + #[schema(value_type = String)] inner_proof: MerkleProof, - included_leaves: Vec, - total_leaves: usize, - #[schemars(with = "String")] #[serde(with = "nym_serde_helpers::hex")] root: Vec, diff --git a/common/types/Cargo.toml b/common/types/Cargo.toml index 77f30ad7e2..6ca8a3d9e4 100644 --- a/common/types/Cargo.toml +++ 
b/common/types/Cargo.toml @@ -22,6 +22,7 @@ strum = { workspace = true, features = ["derive"] } thiserror = { workspace = true } ts-rs = { workspace = true } url = { workspace = true } +utoipa = { workspace = true } x25519-dalek = { workspace = true, features = ["static_secrets"] } cosmwasm-std = { workspace = true } diff --git a/common/types/src/monitoring.rs b/common/types/src/monitoring.rs index 4768964fc1..09024caaa8 100644 --- a/common/types/src/monitoring.rs +++ b/common/types/src/monitoring.rs @@ -3,6 +3,7 @@ use nym_mixnet_contract_common::NodeId; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{collections::HashSet, sync::LazyLock, time::SystemTime}; +use utoipa::ToSchema; static NETWORK_MONITORS: LazyLock> = LazyLock::new(|| { let mut nm = HashSet::new(); @@ -10,8 +11,9 @@ static NETWORK_MONITORS: LazyLock> = LazyLock::new(|| { nm }); -#[derive(Debug, Serialize, Deserialize, JsonSchema, Clone)] +#[derive(Debug, Serialize, Deserialize, JsonSchema, Clone, ToSchema)] pub struct NodeResult { + #[schema(value_type = u32)] pub node_id: NodeId, pub identity: String, pub reliability: u8, @@ -34,7 +36,7 @@ pub enum MonitorResults { Gateway(Vec), } -#[derive(Serialize, Deserialize, JsonSchema)] +#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] pub struct MonitorMessage { results: Vec, signature: String, diff --git a/deny.toml b/deny.toml index 83982be952..4da5bf5c59 100644 --- a/deny.toml +++ b/deny.toml @@ -95,6 +95,7 @@ allow = [ "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause", + "BSL-1.0", "ISC", "0BSD", "MPL-2.0", diff --git a/nym-api/Cargo.toml b/nym-api/Cargo.toml index 27814af1db..e931625fdf 100644 --- a/nym-api/Cargo.toml +++ b/nym-api/Cargo.toml @@ -105,7 +105,7 @@ nym-gateway-client = { path = "../common/client-libs/gateway-client" } nym-inclusion-probability = { path = "../common/inclusion-probability" } nym-mixnet-contract-common = { path = "../common/cosmwasm-smart-contracts/mixnet-contract", features = ["utoipa"] } 
nym-vesting-contract-common = { path = "../common/cosmwasm-smart-contracts/vesting-contract" } -nym-contracts-common = { path = "../common/cosmwasm-smart-contracts/contracts-common", features = ["naive_float"] } +nym-contracts-common = { path = "../common/cosmwasm-smart-contracts/contracts-common", features = ["naive_float", "utoipa"] } nym-multisig-contract-common = { path = "../common/cosmwasm-smart-contracts/multisig-contract" } nym-coconut = { path = "../common/nymcoconut", features = ["key-zeroize"] } nym-sphinx = { path = "../common/nymsphinx" } diff --git a/nym-api/nym-api-requests/Cargo.toml b/nym-api/nym-api-requests/Cargo.toml index 27e70e9361..a67960e1e3 100644 --- a/nym-api/nym-api-requests/Cargo.toml +++ b/nym-api/nym-api-requests/Cargo.toml @@ -31,7 +31,7 @@ nym-crypto = { path = "../../common/crypto", features = ["serde", "asymmetric"] nym-ecash-time = { path = "../../common/ecash-time" } nym-compact-ecash = { path = "../../common/nym_offline_compact_ecash" } nym-contracts-common = { path = "../../common/cosmwasm-smart-contracts/contracts-common", features = ["naive_float"] } -nym-mixnet-contract-common = { path = "../../common/cosmwasm-smart-contracts/mixnet-contract" } +nym-mixnet-contract-common = { path = "../../common/cosmwasm-smart-contracts/mixnet-contract", features = ["utoipa"] } nym-node-requests = { path = "../../nym-node/nym-node-requests", default-features = false, features = ["openapi"] } nym-network-defaults = { path = "../../common/network-defaults" } nym-ticketbooks-merkle = { path = "../../common/ticketbooks-merkle" } diff --git a/nym-api/nym-api-requests/src/ecash/models.rs b/nym-api/nym-api-requests/src/ecash/models.rs index ca18f8c15b..71488b98a5 100644 --- a/nym-api/nym-api-requests/src/ecash/models.rs +++ b/nym-api/nym-api-requests/src/ecash/models.rs @@ -1,7 +1,6 @@ // Copyright 2023-2024 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -use crate::helpers::PlaceholderJsonSchemaImpl; use cosmrs::AccountId; use 
nym_compact_ecash::scheme::coin_indices_signatures::AnnotatedCoinIndexSignature; use nym_compact_ecash::scheme::expiration_date_signatures::AnnotatedExpirationDateSignature; @@ -14,7 +13,6 @@ use nym_credentials_interface::{ }; use nym_crypto::asymmetric::{ed25519, identity}; use nym_ticketbooks_merkle::{IssuedTicketbook, IssuedTicketbooksFullMerkleProof}; -use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use sha2::Digest; use std::collections::BTreeMap; @@ -23,25 +21,25 @@ use thiserror::Error; use time::Date; use utoipa::ToSchema; -#[derive(Serialize, Deserialize, Clone, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, Clone, ToSchema)] pub struct VerifyEcashTicketBody { /// The cryptographic material required for spending the underlying credential. - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = String)] pub credential: CredentialSpendingData, /// Cosmos address of the sender of the credential - #[schemars(with = "String")] + #[schema(value_type = String)] pub gateway_cosmos_addr: AccountId, } -#[derive(Serialize, Deserialize, Clone, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, Clone, ToSchema)] pub struct VerifyEcashCredentialBody { /// The cryptographic material required for spending the underlying credential. 
- #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::CredentialSpendingData)] pub credential: CredentialSpendingData, /// Cosmos address of the sender of the credential - #[schemars(with = "String")] + #[schema(value_type = String)] pub gateway_cosmos_addr: AccountId, /// Multisig proposal for releasing funds for the provided bandwidth credential @@ -62,8 +60,16 @@ impl VerifyEcashCredentialBody { } } -#[derive(Debug, Serialize, Deserialize, JsonSchema, ToSchema)] +/// Used exclusively as part of OpenAPI docs +#[derive(ToSchema)] +pub enum EcashTicketVerificationResult { + Ok(()), + EcashTicketVerificationRejection, +} + +#[derive(Debug, Serialize, Deserialize, ToSchema)] pub struct EcashTicketVerificationResponse { + #[schema(value_type = EcashTicketVerificationResult)] pub verified: Result<(), EcashTicketVerificationRejection>, } @@ -75,18 +81,18 @@ impl EcashTicketVerificationResponse { } } -#[derive(Debug, Error, Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Debug, Error, Serialize, Deserialize, ToSchema)] pub enum EcashTicketVerificationRejection { #[error("invalid ticket spent date. 
expected either today's ({today}) or yesterday's* ({yesterday}) date but got {received} instead\n*assuming it's before 1AM UTC")] InvalidSpentDate { - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] today: Date, - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] yesterday: Date, - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] received: Date, }, @@ -106,26 +112,26 @@ pub enum EcashTicketVerificationRejection { } // All strings are base58 encoded representations of structs -#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, JsonSchema, ToSchema)] +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, ToSchema)] pub struct BlindSignRequestBody { - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::WithdrawalRequest)] pub inner_sign_request: WithdrawalRequest, /// the id of the associated deposit pub deposit_id: u32, /// Signature on the inner sign request and the tx hash - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = String)] pub signature: identity::Signature, - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::PublicKeyUser)] pub ecash_pubkey: PublicKeyUser, - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] pub expiration_date: Date, - #[schemars(with = "String")] + #[schema(value_type = String)] pub ticketbook_type: TicketType, } @@ -180,9 +186,9 @@ impl BlindSignRequestBody { } } -#[derive(Debug, Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Debug, Serialize, Deserialize, ToSchema)] pub struct BlindedSignatureResponse { - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = 
openapi_schema::BlindedSignature)] pub blinded_signature: BlindedSignature, } @@ -211,9 +217,9 @@ impl BlindedSignatureResponse { } } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, ToSchema)] pub struct MasterVerificationKeyResponse { - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::VerificationKeyAuth)] pub key: VerificationKeyAuth, } @@ -223,9 +229,9 @@ impl MasterVerificationKeyResponse { } } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, ToSchema)] pub struct VerificationKeyResponse { - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::VerificationKeyAuth)] pub key: VerificationKeyAuth, } @@ -235,9 +241,8 @@ impl VerificationKeyResponse { } } -#[derive(Serialize, Deserialize, JsonSchema)] +#[derive(Serialize, Deserialize)] pub struct CosmosAddressResponse { - #[schemars(with = "String")] pub addr: AccountId, } @@ -247,45 +252,189 @@ impl CosmosAddressResponse { } } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, ToSchema)] pub struct PartialExpirationDateSignatureResponse { pub epoch_id: u64, - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] - #[schema(value_type = String)] + #[schema(value_type = String, example = "1970-01-01")] pub expiration_date: Date, - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::AnnotatedExpirationDateSignature)] pub signatures: Vec, } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, ToSchema)] pub struct PartialCoinIndicesSignatureResponse { pub epoch_id: u64, - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::AnnotatedCoinIndexSignature)] pub signatures: Vec, } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, ToSchema)] pub struct 
AggregatedExpirationDateSignatureResponse { pub epoch_id: u64, - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] pub expiration_date: Date, - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = Vec)] pub signatures: Vec, } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Serialize, Deserialize, ToSchema)] pub struct AggregatedCoinIndicesSignatureResponse { pub epoch_id: u64, - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = openapi_schema::Signature)] pub signatures: Vec, } -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema)] +/// duplicate types from `nym-compact-ecash`, but these do derive `ToSchema``` +pub mod openapi_schema { + #![allow(dead_code)] + use nym_compact_ecash::common_types::{G2Projective, Scalar}; + + use super::*; + + #[derive(ToSchema)] + pub struct AnnotatedExpirationDateSignature { + pub signature: Signature, + pub expiration_timestamp: u32, + pub spending_timestamp: u32, + } + + #[derive(ToSchema)] + pub struct AnnotatedCoinIndexSignature { + pub signature: Signature, + pub index: u64, + } + + #[derive(ToSchema)] + pub struct Signature { + #[schema(value_type = String)] + pub(crate) h: G1Projective, + #[schema(value_type = String)] + pub(crate) s: G1Projective, + } + + #[derive(ToSchema)] + pub struct PublicKeyUser { + #[schema(value_type = String)] + pub(crate) pk: G1Projective, + } + + #[derive(ToSchema)] + pub struct BlindedSignature { + #[schema(value_type = String)] + pub h: G1Projective, + #[schema(value_type = String)] + pub c: G1Projective, + } + + #[derive(ToSchema)] + pub struct VerificationKeyAuth { + #[schema(value_type = String)] + pub(crate) alpha: G2Projective, + #[schema(value_type = Vec)] + pub(crate) beta_g1: Vec, + #[schema(value_type = Vec)] + pub(crate) beta_g2: Vec, + } + + #[derive(ToSchema)] + pub struct WithdrawalRequest { + #[schema(value_type = String)] + 
joined_commitment_hash: G1Projective, + #[schema(value_type = String)] + joined_commitment: G1Projective, + #[schema(value_type = Vec)] + private_attributes_commitments: Vec, + zk_proof: WithdrawalReqProof, + } + + #[derive(ToSchema)] + pub struct WithdrawalReqProof { + #[schema(value_type = String)] + challenge: Scalar, + #[schema(value_type = String)] + response_opening: Scalar, + #[schema(value_type = Vec)] + response_openings: Vec, + #[schema(value_type = Vec)] + response_attributes: Vec, + } + + #[derive(ToSchema)] + pub struct CredentialSpendingData { + pub payment: Payment, + + #[schema(value_type = [u8; 72], format = Binary)] + pub pay_info: PayInfo, + + pub spend_date: Date, + + // pub value: u64, + /// The (DKG) epoch id under which the credential has been issued so that the verifier could use correct verification key for validation. + pub epoch_id: u64, + } + + #[derive(ToSchema)] + pub struct Payment { + #[schema(value_type = String)] + pub kappa: G2Projective, + #[schema(value_type = String)] + pub kappa_e: G2Projective, + pub sig: Signature, + + pub sig_exp: Signature, + #[schema(value_type = Vec)] + pub kappa_k: Vec, + pub omega: Vec, + #[schema(value_type = Vec)] + pub ss: Vec, + #[schema(value_type = Vec)] + pub tt: Vec, + #[schema(value_type = Vec)] + pub aa: Vec, + pub spend_value: u64, + #[schema(value_type = String)] + pub cc: G1Projective, + pub t_type: u8, + pub zk_proof: SpendProof, + } + + #[derive(PartialEq, Eq, Debug, Clone, Copy, ToSchema)] + pub struct PayInfo { + #[schema(content_encoding = "base16")] + pub pay_info_bytes: [u8; 72], + } + + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ToSchema)] + pub struct SpendProof { + #[schema(value_type = String)] + challenge: Scalar, + #[schema(value_type = String)] + response_r: Scalar, + #[schema(value_type = String)] + response_r_e: Scalar, + #[schema(value_type = Vec)] + responses_r_k: Vec, + #[schema(value_type = Vec)] + responses_l: Vec, + #[schema(value_type = Vec)] + 
responses_o_a: Vec, + #[schema(value_type = String)] + response_o_c: Scalar, + #[schema(value_type = Vec)] + responses_mu: Vec, + #[schema(value_type = Vec)] + responses_o_mu: Vec, + #[schema(value_type = Vec)] + responses_attributes: Vec, + } +} + +#[derive(Clone, Serialize, Deserialize, Debug)] pub struct Pagination { /// last_key is the last value returned in the previous query. /// it's used to indicate the start of the next (this) page. @@ -297,12 +446,8 @@ pub struct Pagination { pub limit: Option, } -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema, PartialEq)] -pub struct SerialNumberWrapper( - #[serde(with = "nym_serde_helpers::bs58")] - #[schemars(with = "String")] - Vec, -); +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, ToSchema)] +pub struct SerialNumberWrapper(#[serde(with = "nym_serde_helpers::bs58")] Vec); impl Deref for SerialNumberWrapper { type Target = Vec; @@ -323,14 +468,13 @@ impl From> for SerialNumberWrapper { } } -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema, PartialEq)] +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, ToSchema)] pub struct BatchRedeemTicketsBody { #[serde(with = "nym_serde_helpers::bs58")] - #[schemars(with = "String")] pub digest: Vec, pub included_serial_numbers: Vec, pub proposal_id: u64, - #[schemars(with = "String")] + #[schema(value_type = String)] pub gateway_cosmos_addr: AccountId, } @@ -366,16 +510,15 @@ impl BatchRedeemTicketsBody { } } -#[derive(Debug, Serialize, Deserialize, JsonSchema, ToSchema)] +#[derive(Debug, Serialize, Deserialize, ToSchema)] pub struct EcashBatchTicketRedemptionResponse { pub proposal_accepted: bool, } -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema, ToSchema)] +#[derive(Clone, Serialize, Deserialize, Debug, ToSchema)] #[serde(rename_all = "camelCase")] pub struct SpentCredentialsResponse { #[serde(with = "nym_serde_helpers::base64")] - #[schemars(with = "String")] #[schema(value_type = String)] pub bitmap: Vec, } @@ -388,18 +531,19 
@@ impl SpentCredentialsResponse { pub type DepositId = u32; -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema, ToSchema, PartialEq, Eq)] +#[derive(Clone, Serialize, Deserialize, Debug, ToSchema, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct CommitedDeposit { + #[schema(value_type = u32)] pub deposit_id: DepositId, pub merkle_index: usize, } -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema, ToSchema)] +#[derive(Clone, Serialize, Deserialize, Debug, ToSchema)] #[serde(rename_all = "camelCase")] pub struct IssuedTicketbooksForResponseBody { - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] pub expiration_date: Date, pub deposits: Vec, pub merkle_root: Option<[u8; 32]>, @@ -419,13 +563,13 @@ impl IssuedTicketbooksForResponseBody { } } -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema, ToSchema)] +#[derive(Clone, Serialize, Deserialize, Debug, ToSchema)] #[serde(rename_all = "camelCase")] pub struct IssuedTicketbooksForResponse { pub body: IssuedTicketbooksForResponseBody, - /// Signature on the body - #[schemars(with = "PlaceholderJsonSchemaImpl")] + /// Signature on the body + #[schema(value_type = String)] pub signature: identity::Signature, } @@ -437,22 +581,24 @@ impl IssuedTicketbooksForResponse { } } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema, Debug)] +#[derive(Serialize, Deserialize, ToSchema, Debug)] #[serde(rename_all = "camelCase")] pub struct IssuedTicketbooksChallengeRequest { - #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] pub expiration_date: Date, + #[schema(value_type = Vec)] pub deposits: Vec, } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema, Clone, Debug)] +#[derive(Serialize, Deserialize, ToSchema, Clone, Debug)] #[serde(rename_all = "camelCase")] pub struct IssuedTicketbooksChallengeResponseBody { - #[schemars(with = 
"String")] #[serde(with = "crate::helpers::date_serde")] + #[schema(value_type = String, example = "1970-01-01")] pub expiration_date: Date, + #[schema(value_type = BTreeMap)] pub partial_ticketbooks: BTreeMap, pub merkle_proof: IssuedTicketbooksFullMerkleProof, } @@ -486,12 +632,12 @@ impl IssuedTicketbooksChallengeResponseBody { } } -#[derive(Serialize, Deserialize, JsonSchema, ToSchema, Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize, ToSchema)] #[serde(rename_all = "camelCase")] pub struct IssuedTicketbooksChallengeResponse { pub body: IssuedTicketbooksChallengeResponseBody, - #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = String)] pub signature: identity::Signature, } diff --git a/nym-api/nym-api-requests/src/legacy.rs b/nym-api/nym-api-requests/src/legacy.rs index f0810ffa8e..cf1250653f 100644 --- a/nym-api/nym-api-requests/src/legacy.rs +++ b/nym-api/nym-api-requests/src/legacy.rs @@ -52,7 +52,7 @@ impl From for MixNodeBond { } } -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, ToSchema)] pub struct LegacyMixNodeDetailsWithLayer { /// Basic bond information of this mixnode, such as owner address, original pledge, etc. 
pub bond_information: LegacyMixNodeBondWithLayer, diff --git a/nym-api/nym-api-requests/src/models.rs b/nym-api/nym-api-requests/src/models.rs index 720553ccf3..ba5d6a94b2 100644 --- a/nym-api/nym-api-requests/src/models.rs +++ b/nym-api/nym-api-requests/src/models.rs @@ -72,7 +72,7 @@ impl Display for RequestError { } } -#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema, ToSchema)] #[cfg_attr(feature = "generate-ts", derive(ts_rs::TS))] #[cfg_attr( feature = "generate-ts", @@ -145,7 +145,7 @@ pub struct NodePerformance { pub last_24h: Performance, } -#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, JsonSchema)] +#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, JsonSchema, ToSchema)] #[serde(rename_all = "camelCase")] #[cfg_attr(feature = "generate-ts", derive(ts_rs::TS))] #[cfg_attr( @@ -190,7 +190,7 @@ impl From for Role { // imo for now there's no point in exposing more than that, // nym-api shouldn't be calculating apy or stake saturation for you. 
// it should just return its own metrics (performance) and then you can do with it as you wish -#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema)] +#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema, ToSchema)] #[cfg_attr(feature = "generate-ts", derive(ts_rs::TS))] #[cfg_attr( feature = "generate-ts", @@ -202,13 +202,14 @@ impl From for Role { pub struct NodeAnnotation { #[cfg_attr(feature = "generate-ts", ts(type = "string"))] // legacy + #[schema(value_type = String)] pub last_24h_performance: Performance, pub current_role: Option, pub detailed_performance: DetailedNodePerformance, } -#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema)] +#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema, ToSchema)] #[cfg_attr(feature = "generate-ts", derive(ts_rs::TS))] #[cfg_attr( feature = "generate-ts", @@ -244,7 +245,7 @@ impl DetailedNodePerformance { } } -#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema)] +#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema, ToSchema)] #[cfg_attr(feature = "generate-ts", derive(ts_rs::TS))] #[cfg_attr( feature = "generate-ts", @@ -266,7 +267,7 @@ impl RoutingScore { } } -#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema)] +#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema, ToSchema)] #[cfg_attr(feature = "generate-ts", derive(ts_rs::TS))] #[cfg_attr( feature = "generate-ts", @@ -397,12 +398,15 @@ pub struct MixNodeBondAnnotated { #[schema(value_type = String)] pub performance: Performance, pub node_performance: NodePerformance, + #[schema(value_type = String)] pub estimated_operator_apy: Decimal, + #[schema(value_type = String)] pub estimated_delegators_apy: Decimal, pub blacklisted: bool, // a rather temporary thing until we query self-described endpoints of mixnodes #[serde(default)] + #[schema(value_type = Vec)] pub ip_addresses: Vec, } @@ -471,11 +475,13 @@ 
pub struct GatewayBondAnnotated { pub self_described: Option, // NOTE: the performance field is deprecated in favour of node_performance + #[schema(value_type = String)] pub performance: Performance, pub node_performance: NodePerformance, pub blacklisted: bool, #[serde(default)] + #[schema(value_type = Vec)] pub ip_addresses: Vec, } @@ -526,21 +532,24 @@ impl GatewayBondAnnotated { } } -#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema)] +#[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, ToSchema)] pub struct GatewayDescription { // for now only expose what we need. this struct will evolve in the future (or be incorporated into nym-node properly) } #[derive(Debug, Serialize, Deserialize, JsonSchema, ToSchema, IntoParams)] pub struct ComputeRewardEstParam { - #[schema(value_type = String)] + #[schema(value_type = Option)] + #[param(value_type = Option)] pub performance: Option, pub active_in_rewarded_set: Option, pub pledge_amount: Option, pub total_delegation: Option, - #[schema(value_type = CoinSchema)] + #[schema(value_type = Option)] + #[param(value_type = Option)] pub interval_operating_cost: Option, - #[schema(value_type = String)] + #[schema(value_type = Option)] + #[param(value_type = Option)] pub profit_margin_percent: Option, } @@ -698,8 +707,11 @@ pub struct MixnodeStatusReportResponse { pub mix_id: NodeId, pub identity: IdentityKey, pub owner: String, + #[schema(value_type = u8)] pub most_recent: Uptime, + #[schema(value_type = u8)] pub last_hour: Uptime, + #[schema(value_type = u8)] pub last_day: Uptime, } @@ -825,6 +837,7 @@ pub struct CirculatingSupplyResponse { #[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] pub struct HostInformation { + #[schema(value_type = Vec)] pub ip_address: Vec, pub hostname: Option, pub keys: HostKeys, @@ -844,15 +857,18 @@ impl From for HostInf pub struct HostKeys { #[serde(with = "bs58_ed25519_pubkey")] #[schemars(with = "String")] + #[schema(value_type = String)] pub 
ed25519: ed25519::PublicKey, #[serde(with = "bs58_x25519_pubkey")] #[schemars(with = "String")] + #[schema(value_type = String)] pub x25519: x25519::PublicKey, #[serde(default)] #[serde(with = "option_bs58_x25519_pubkey")] #[schemars(with = "Option")] + #[schema(value_type = String)] pub x25519_noise: Option, } @@ -896,6 +912,7 @@ pub struct OffsetDateTimeJsonSchemaWrapper( default = "unix_epoch", with = "crate::helpers::overengineered_offset_date_time_serde" )] + #[schema(inline)] pub OffsetDateTime, ); @@ -1233,13 +1250,13 @@ pub struct SignerInformationResponse { pub verification_key: Option, } -#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, Default)] +#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, Default, ToSchema)] pub struct TestNode { pub node_id: Option, pub identity_key: Option, } -#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema)] +#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] pub struct TestRoute { pub gateway: TestNode, pub layer1: TestNode, @@ -1274,10 +1291,12 @@ pub struct NetworkMonitorRunDetailsResponse { pub struct NoiseDetails { #[schemars(with = "String")] #[serde(with = "bs58_x25519_pubkey")] + #[schema(value_type = String)] pub x25119_pubkey: x25519::PublicKey, pub mixnet_port: u16, + #[schema(value_type = Vec)] pub ip_addresses: Vec, } @@ -1285,12 +1304,14 @@ pub struct NoiseDetails { pub struct NodeRefreshBody { #[serde(with = "bs58_ed25519_pubkey")] #[schemars(with = "String")] + #[schema(value_type = String)] pub node_identity: ed25519::PublicKey, // a poor man's nonce pub request_timestamp: i64, #[schemars(with = "PlaceholderJsonSchemaImpl")] + #[schema(value_type = String)] pub signature: ed25519::Signature, } diff --git a/nym-api/nym-api-requests/src/nym_nodes.rs b/nym-api/nym-api-requests/src/nym_nodes.rs index d8574e36d5..2d91940fe4 100644 --- a/nym-api/nym-api-requests/src/nym_nodes.rs +++ b/nym-api/nym-api-requests/src/nym_nodes.rs @@ 
-14,19 +14,19 @@ use std::net::IpAddr; use time::OffsetDateTime; use utoipa::ToSchema; -#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema)] -pub struct CachedNodesResponse { +#[derive(Clone, Debug, Serialize, Deserialize, schemars::JsonSchema, ToSchema)] +pub struct CachedNodesResponse { pub refreshed_at: OffsetDateTimeJsonSchemaWrapper, pub nodes: Vec, } -impl From> for CachedNodesResponse { +impl From> for CachedNodesResponse { fn from(nodes: Vec) -> Self { CachedNodesResponse::new(nodes) } } -impl CachedNodesResponse { +impl CachedNodesResponse { pub fn new(nodes: Vec) -> Self { CachedNodesResponse { refreshed_at: OffsetDateTime::now_utc().into(), @@ -130,6 +130,7 @@ pub struct SkimmedNode { #[serde(with = "bs58_ed25519_pubkey")] #[schemars(with = "String")] + #[schema(value_type = String)] pub ed25519_identity_pubkey: ed25519::PublicKey, #[schema(value_type = Vec)] @@ -139,6 +140,7 @@ pub struct SkimmedNode { #[serde(with = "bs58_x25519_pubkey")] #[schemars(with = "String")] + #[schema(value_type = String)] pub x25519_sphinx_pubkey: x25519::PublicKey, #[serde(alias = "epoch_role")] diff --git a/nym-api/redocly/.redocly.lint-ignore.yaml b/nym-api/redocly/.redocly.lint-ignore.yaml new file mode 100644 index 0000000000..4cd758f99f --- /dev/null +++ b/nym-api/redocly/.redocly.lint-ignore.yaml @@ -0,0 +1,47 @@ +# This file instructs Redocly's linter to ignore the rules contained for specific parts of your API. +# See https://redocly.com/docs/cli/ for more information. 
+formatted-openapi.json: + path-parameters-defined: + - >- + #/paths/~1v1~1status~1mixnode~1{mix_id}~1compute-reward-estimation/post/parameters/0/name + - >- + #/paths/~1v1~1status~1mixnode~1{mix_id}~1compute-reward-estimation/post/parameters/1/name + - >- + #/paths/~1v1~1status~1mixnode~1{mix_id}~1compute-reward-estimation/post/parameters/2/name + - >- + #/paths/~1v1~1status~1mixnode~1{mix_id}~1compute-reward-estimation/post/parameters/3/name + - >- + #/paths/~1v1~1status~1mixnode~1{mix_id}~1compute-reward-estimation/post/parameters/4/name + - >- + #/paths/~1v1~1status~1mixnode~1{mix_id}~1compute-reward-estimation/post/parameters/5/name + operation-operationId-unique: + - >- + #/paths/~1v1~1status~1mixnodes~1active~1detailed/get/get_active_set_detailed + - '#/paths/~1v1~1status~1mixnodes~1detailed/get/get_mixnodes_detailed' + - >- + #/paths/~1v1~1status~1mixnodes~1rewarded~1detailed/get/get_rewarded_set_detailed + no-unused-components: + - '#/components/schemas/AxumErrorResponse' + - '#/components/schemas/DateQuery' + - '#/components/schemas/EcashTicketVerificationRejection' + - '#/components/schemas/ExpirationDatePathParam' + - '#/components/schemas/FullFatNode' + - '#/components/schemas/G2ProjectiveSchema' + - '#/components/schemas/HistoricalPerformanceResponse' + - '#/components/schemas/HistoricalUptimeResponse' + - '#/components/schemas/MasterVerificationKeyResponse' + - '#/components/schemas/MixnodeStatusReport' + - '#/components/schemas/NodeId' + - '#/components/schemas/NodeRoleQueryParam' + - '#/components/schemas/NoiseDetails' + - '#/components/schemas/NymNodeDescription' + - '#/components/schemas/NymNodeDetails' + - '#/components/schemas/PaginationRequest' + - '#/components/schemas/PartialCoinIndicesSignatureResponse' + - '#/components/schemas/PayInfo' + - '#/components/schemas/SpentCredentialsResponse' + - '#/components/schemas/UptimeHistoryResponse' + - '#/components/schemas/VerifyEcashCredentialBody' + - '#/components/responses/AxumErrorResponse' + - 
'#/components/responses/CirculatingSupplyResponse' + - '#/components/responses/RequestError' diff --git a/nym-api/redocly/.redocly.yaml b/nym-api/redocly/.redocly.yaml new file mode 100644 index 0000000000..99a71053f0 --- /dev/null +++ b/nym-api/redocly/.redocly.yaml @@ -0,0 +1,12 @@ +extends: + - minimal +apis: + nym-api: + root: ./formatted-openapi.json +rules: + # https://redocly.com/docs/cli/rules/oas/operation-summary + operation-summary: off + # https://redocly.com/docs/cli/rules/oas/security-defined + security-defined: off + # https://redocly.com/docs/cli/rules/oas/operation-2xx-response + operation-2xx-response: off diff --git a/nym-api/redocly/readme.MD b/nym-api/redocly/readme.MD new file mode 100644 index 0000000000..79ac2fbb9f --- /dev/null +++ b/nym-api/redocly/readme.MD @@ -0,0 +1,27 @@ +# Test / validate OpenAPI spec + +`redocly` CLI is an [OpenAPI linter][docs] that enforces good practices by +checking whether a series of lints are applied to your OpenAPI spec. + +## Install + +You need `npm` and `npx` ([official instructions][instructions]) + +## Run + +``` +./redocly.sh +``` + +## Configuration + +- redocly.yaml is the main [config file](https://redocly.com/docs/redoc/config) + +## Ignore file + +- specifies lints to ignore (some lints may be false alarms/not applicable) +- if you want to add current CLI warnings to an ignore file, run redocly CLI + with `--generate-ignore-file` + +[docs]: https://redocly.com/docs/redoc +[instructions]: https://redocly.com/docs/cli/installation diff --git a/nym-api/redocly/redocly.sh b/nym-api/redocly/redocly.sh new file mode 100755 index 0000000000..4e7d67693d --- /dev/null +++ b/nym-api/redocly/redocly.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +curl -s http://localhost:8000/api-docs/openapi.json | jq . 
> formatted-openapi.json +npx @redocly/cli@latest lint --config .redocly.yaml \ + # --generate-ignore-file diff --git a/nym-api/src/ecash/api_routes/issued.rs b/nym-api/src/ecash/api_routes/issued.rs index 376b532475..8c2fe10053 100644 --- a/nym-api/src/ecash/api_routes/issued.rs +++ b/nym-api/src/ecash/api_routes/issued.rs @@ -48,7 +48,7 @@ pub(crate) struct ExpirationDatePathParam { context_path = "/v1/ecash", responses( (status = 200, body = IssuedTicketbooksForResponse), - (status = 400, body = ErrorResponse, description = "this nym-api is not an ecash signer in the current epoch"), + (status = 400, body = String, description = "this nym-api is not an ecash signer in the current epoch"), ) )] async fn issued_ticketbooks_for( @@ -73,7 +73,7 @@ async fn issued_ticketbooks_for( context_path = "/v1/ecash", responses( (status = 200, body = IssuedTicketbooksChallengeResponse), - (status = 400, body = ErrorResponse, description = "this nym-api is not an ecash signer in the current epoch"), + (status = 400, body = String, description = "this nym-api is not an ecash signer in the current epoch"), ) )] async fn issued_ticketbooks_challenge( diff --git a/nym-api/src/ecash/api_routes/partial_signing.rs b/nym-api/src/ecash/api_routes/partial_signing.rs index afe522cbee..b5d53a5ed0 100644 --- a/nym-api/src/ecash/api_routes/partial_signing.rs +++ b/nym-api/src/ecash/api_routes/partial_signing.rs @@ -42,7 +42,7 @@ pub(crate) fn partial_signing_routes() -> Router { path = "/v1/ecash/blind-sign", responses( (status = 200, body = BlindedSignatureResponse), - (status = 400, body = ErrorResponse, description = "this nym-api is not an ecash signer in the current epoch"), + (status = 400, body = String, description = "this nym-api is not an ecash signer in the current epoch"), ) )] @@ -118,7 +118,7 @@ struct ExpirationDateParam { path = "/v1/ecash/partial-expiration-date-signatures", responses( (status = 200, body = PartialExpirationDateSignatureResponse), - (status = 400, body = 
ErrorResponse, description = "this nym-api is not an ecash signer in the current epoch"), + (status = 400, body = String, description = "this nym-api is not an ecash signer in the current epoch"), ) )] async fn partial_expiration_date_signatures( @@ -156,7 +156,7 @@ async fn partial_expiration_date_signatures( path = "/v1/ecash/partial-coin-indices-signatures", responses( (status = 200, body = PartialExpirationDateSignatureResponse), - (status = 400, body = ErrorResponse, description = "this nym-api is not an ecash signer in the current epoch"), + (status = 400, body = String, description = "this nym-api is not an ecash signer in the current epoch"), ) )] async fn partial_coin_indices_signatures( diff --git a/nym-api/src/ecash/api_routes/spending.rs b/nym-api/src/ecash/api_routes/spending.rs index f52aeda2eb..873d9044d0 100644 --- a/nym-api/src/ecash/api_routes/spending.rs +++ b/nym-api/src/ecash/api_routes/spending.rs @@ -52,7 +52,7 @@ fn reject_ticket( path = "/v1/ecash/verify-ecash-ticket", responses( (status = 200, body = EcashTicketVerificationResponse), - (status = 400, body = ErrorResponse, description = "this nym-api is not an ecash signer in the current epoch"), + (status = 400, body = String, description = "this nym-api is not an ecash signer in the current epoch"), ) )] async fn verify_ticket( @@ -155,7 +155,7 @@ async fn verify_ticket( path = "/v1/ecash/batch-redeem-ecash-tickets", responses( (status = 200, body = EcashBatchTicketRedemptionResponse), - (status = 400, body = ErrorResponse, description = "this nym-api is not an ecash signer in the current epoch"), + (status = 400, body = String, description = "this nym-api is not an ecash signer in the current epoch"), ) )] async fn batch_redeem_tickets( @@ -235,7 +235,7 @@ async fn batch_redeem_tickets( get, path = "/v1/ecash/double-spending-filter-v1", responses( - (status = 500, body = ErrorResponse, description = "bloomfilters got disabled"), + (status = 500, body = String, description = "bloomfilters 
got disabled"), ) )] #[deprecated] diff --git a/nym-api/src/network/handlers.rs b/nym-api/src/network/handlers.rs index 7bb77bc3fc..ff398a16cb 100644 --- a/nym-api/src/network/handlers.rs +++ b/nym-api/src/network/handlers.rs @@ -47,12 +47,19 @@ pub(crate) struct ContractVersionSchemaResponse { pub version: String, } +#[allow(dead_code)] // not dead, used in OpenAPI docs +#[derive(ToSchema)] +pub struct ContractInformationContractVersion { + pub(crate) address: Option, + pub(crate) details: Option, +} + #[utoipa::path( tag = "network", get, path = "/v1/network/nym-contracts", responses( - (status = 200, body = HashMap>) + (status = 200, body = HashMap) ) )] async fn nym_contracts( @@ -73,12 +80,19 @@ async fn nym_contracts( .into() } +#[allow(dead_code)] // not dead, used in OpenAPI docs +#[derive(ToSchema)] +pub struct ContractInformationBuildInformation { + pub(crate) address: Option, + pub(crate) details: Option, +} + #[utoipa::path( tag = "network", get, path = "/v1/network/nym-contracts-detailed", responses( - (status = 200, body = HashMap>) + (status = 200, body = HashMap) ) )] async fn nym_contracts_detailed( diff --git a/nym-api/src/network/models.rs b/nym-api/src/network/models.rs index 66d0324c26..1819ce024d 100644 --- a/nym-api/src/network/models.rs +++ b/nym-api/src/network/models.rs @@ -4,8 +4,9 @@ use nym_config::defaults::NymNetworkDetails; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; +use utoipa::ToSchema; -#[derive(Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[derive(Clone, Serialize, Deserialize, JsonSchema, ToSchema)] pub struct NetworkDetails { pub(crate) connected_nyxd: String, pub(crate) network: NymNetworkDetails, @@ -20,7 +21,7 @@ impl NetworkDetails { } } -#[derive(Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[derive(Clone, Serialize, Deserialize, JsonSchema, ToSchema)] #[serde(rename_all = "snake_case")] pub struct ContractInformation { pub(crate) address: Option, diff --git 
a/nym-api/src/node_status_api/handlers/without_monitor.rs b/nym-api/src/node_status_api/handlers/without_monitor.rs index 4aaa6f8253..e364e1ee31 100644 --- a/nym-api/src/node_status_api/handlers/without_monitor.rs +++ b/nym-api/src/node_status_api/handlers/without_monitor.rs @@ -1,6 +1,8 @@ // Copyright 2021-2024 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only +// we want to mark the routes as deprecated in swagger, but still expose them +#![allow(deprecated)] use crate::node_status_api::handlers::MixIdParam; use crate::node_status_api::helpers::{ _get_active_set_legacy_mixnodes_detailed, _get_legacy_mixnodes_detailed, @@ -20,8 +22,6 @@ use nym_mixnet_contract_common::NodeId; use nym_types::monitoring::MonitorMessage; use tracing::error; -// we want to mark the routes as deprecated in swagger, but still expose them -#[allow(deprecated)] pub(super) fn mandatory_routes() -> Router { Router::new() .route( @@ -63,9 +63,9 @@ pub(super) fn mandatory_routes() -> Router { path = "/v1/status/submit-gateway-monitoring-results", responses( (status = 200), - (status = 400, body = ErrorResponse, description = "TBD"), - (status = 403, body = ErrorResponse, description = "TBD"), - (status = 500, body = ErrorResponse, description = "TBD"), + (status = 400, body = String, description = "TBD"), + (status = 403, body = String, description = "TBD"), + (status = 500, body = String, description = "TBD"), ), )] pub(crate) async fn submit_gateway_monitoring_results( @@ -107,9 +107,9 @@ pub(crate) async fn submit_gateway_monitoring_results( path = "/v1/status/submit-node-monitoring-results", responses( (status = 200), - (status = 400, body = ErrorResponse, description = "TBD"), - (status = 403, body = ErrorResponse, description = "TBD"), - (status = 500, body = ErrorResponse, description = "TBD"), + (status = 400, body = String, description = "TBD"), + (status = 403, body = String, description = "TBD"), + (status = 500, body = String, description = "TBD"), ), )] pub(crate) 
async fn submit_node_monitoring_results( @@ -198,7 +198,7 @@ async fn get_mixnode_stake_saturation( ), path = "/v1/status/mixnode/{mix_id}/inclusion-probability", responses( - (status = 200, body = InclusionProbabilityResponse) + (status = 200, body = nym_api_requests::models::InclusionProbabilityResponse) ) )] #[deprecated] @@ -217,7 +217,7 @@ async fn get_mixnode_inclusion_probability( get, path = "/v1/status/mixnodes/inclusion-probability", responses( - (status = 200, body = AllInclusionProbabilitiesResponse) + (status = 200, body = nym_api_requests::models::AllInclusionProbabilitiesResponse) ) )] #[deprecated] diff --git a/nym-api/src/node_status_api/models.rs b/nym-api/src/node_status_api/models.rs index 958188db9d..2e33fd8025 100644 --- a/nym-api/src/node_status_api/models.rs +++ b/nym-api/src/node_status_api/models.rs @@ -22,6 +22,7 @@ use std::fmt::Display; use thiserror::Error; use time::{Date, OffsetDateTime}; use tracing::error; +use utoipa::ToSchema; #[derive(Error, Debug)] #[error("Received uptime value was within 0-100 range (got {received})")] @@ -128,14 +129,17 @@ impl From for Performance { } } -#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema)] +#[derive(Clone, Serialize, Deserialize, Debug, JsonSchema, ToSchema)] pub struct MixnodeStatusReport { + #[schema(value_type = u32)] pub(crate) mix_id: NodeId, + #[schema(value_type = String)] pub(crate) identity: IdentityKey, - + #[schema(value_type = u8)] pub(crate) most_recent: Uptime, - + #[schema(value_type = u8)] pub(crate) last_hour: Uptime, + #[schema(value_type = u8)] pub(crate) last_day: Uptime, } @@ -315,8 +319,12 @@ impl From for OldHistoricalUptimeResponse { // TODO rocket remove smurf name after eliminating `rocket` pub(crate) type AxumResult = Result; + +// #[derive(ToSchema, ToResponse)] +// #[schema(title = "ErrorResponse")] pub(crate) struct AxumErrorResponse { message: RequestError, + // #[schema(value_type = u16)] status: StatusCode, } diff --git 
a/nym-api/src/nym_nodes/handlers/legacy.rs b/nym-api/src/nym_nodes/handlers/legacy.rs index 3dd6a51f74..3262f997c1 100644 --- a/nym-api/src/nym_nodes/handlers/legacy.rs +++ b/nym-api/src/nym_nodes/handlers/legacy.rs @@ -27,7 +27,7 @@ pub(crate) fn legacy_nym_node_routes() -> Router { get, path = "/v1/gateways/described", responses( - (status = 200, body = Vec) + (status = 200, body = Vec) ) )] #[deprecated] @@ -81,7 +81,7 @@ async fn get_gateways_described( get, path = "/v1/mixnodes/described", responses( - (status = 200, body = Vec) + (status = 200, body = Vec) ) )] #[deprecated] diff --git a/nym-api/src/nym_nodes/handlers/unstable/full_fat.rs b/nym-api/src/nym_nodes/handlers/unstable/full_fat.rs index d16d68e240..debc284208 100644 --- a/nym-api/src/nym_nodes/handlers/unstable/full_fat.rs +++ b/nym-api/src/nym_nodes/handlers/unstable/full_fat.rs @@ -12,7 +12,7 @@ use nym_api_requests::nym_nodes::{CachedNodesResponse, FullFatNode}; tag = "Unstable Nym Nodes", get, params(NodesParamsWithRole), - path = "/", + path = "", context_path = "/v1/unstable/nym-nodes/full-fat", responses( // (status = 200, body = CachedNodesResponse) diff --git a/nym-api/src/nym_nodes/handlers/unstable/mod.rs b/nym-api/src/nym_nodes/handlers/unstable/mod.rs index 88d0338e6f..c629834919 100644 --- a/nym-api/src/nym_nodes/handlers/unstable/mod.rs +++ b/nym-api/src/nym_nodes/handlers/unstable/mod.rs @@ -88,6 +88,7 @@ struct NodesParamsWithRole { } #[derive(Debug, Deserialize, utoipa::IntoParams)] +#[into_params(parameter_in = Query)] struct NodesParams { #[allow(dead_code)] semver_compatibility: Option, diff --git a/nym-api/src/nym_nodes/handlers/unstable/semi_skimmed.rs b/nym-api/src/nym_nodes/handlers/unstable/semi_skimmed.rs index 294ec222e5..e8b9ffb2bc 100644 --- a/nym-api/src/nym_nodes/handlers/unstable/semi_skimmed.rs +++ b/nym-api/src/nym_nodes/handlers/unstable/semi_skimmed.rs @@ -12,7 +12,7 @@ use nym_api_requests::nym_nodes::{CachedNodesResponse, SemiSkimmedNode}; tag = "Unstable Nym 
Nodes", get, params(NodesParamsWithRole), - path = "/", + path = "", context_path = "/v1/unstable/nym-nodes/semi-skimmed", responses( // (status = 200, body = CachedNodesResponse) diff --git a/nym-api/src/nym_nodes/handlers/unstable/skimmed.rs b/nym-api/src/nym_nodes/handlers/unstable/skimmed.rs index b46234082e..9d712d0462 100644 --- a/nym-api/src/nym_nodes/handlers/unstable/skimmed.rs +++ b/nym-api/src/nym_nodes/handlers/unstable/skimmed.rs @@ -9,16 +9,20 @@ use crate::support::caching::Cache; use crate::support::http::state::AppState; use axum::extract::{Query, State}; use axum::Json; -use nym_api_requests::models::{NodeAnnotation, NymNodeDescription}; +use nym_api_requests::models::{ + NodeAnnotation, NymNodeDescription, OffsetDateTimeJsonSchemaWrapper, +}; use nym_api_requests::nym_nodes::{ CachedNodesResponse, NodeRole, NodeRoleQueryParam, PaginatedCachedNodesResponse, SkimmedNode, }; +use nym_api_requests::pagination::PaginatedResponse; use nym_mixnet_contract_common::NodeId; use nym_topology::CachedEpochRewardedSet; use std::collections::HashMap; use std::future::Future; use tokio::sync::RwLockReadGuard; use tracing::trace; +use utoipa::ToSchema; pub type PaginatedSkimmedNodes = AxumResult>>; @@ -270,16 +274,25 @@ async fn nodes_basic( ))) } +#[allow(dead_code)] // not dead, used in OpenAPI docs +#[derive(ToSchema)] +#[schema(title = "PaginatedCachedNodesResponse")] +pub struct PaginatedCachedNodesResponseSchema { + pub refreshed_at: OffsetDateTimeJsonSchemaWrapper, + #[schema(value_type = SkimmedNode)] + pub nodes: PaginatedResponse, +} + /// Return all Nym Nodes and optionally legacy mixnodes/gateways (if `no-legacy` flag is not used) /// that are currently bonded. 
#[utoipa::path( tag = "Unstable Nym Nodes", get, params(NodesParamsWithRole), - path = "/", + path = "", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status = 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn nodes_basic_all( @@ -312,7 +325,7 @@ pub(super) async fn nodes_basic_all( path = "/active", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status = 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn nodes_basic_active( @@ -364,7 +377,7 @@ async fn mixnodes_basic( path = "/mixnodes/all", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status = 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn mixnodes_basic_all( @@ -383,7 +396,7 @@ pub(super) async fn mixnodes_basic_all( path = "/mixnodes/active", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status = 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn mixnodes_basic_active( @@ -421,7 +434,7 @@ async fn entry_gateways_basic( path = "/entry-gateways/active", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status = 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn entry_gateways_basic_active( @@ -440,7 +453,7 @@ pub(super) async fn entry_gateways_basic_active( path = "/entry-gateways/all", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status = 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn entry_gateways_basic_all( @@ -478,7 +491,7 @@ async fn exit_gateways_basic( path = "/exit-gateways/active", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status 
= 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn exit_gateways_basic_active( @@ -497,7 +510,7 @@ pub(super) async fn exit_gateways_basic_active( path = "/exit-gateways/all", context_path = "/v1/unstable/nym-nodes/skimmed", responses( - (status = 200, body = PaginatedCachedNodesResponse) + (status = 200, body = PaginatedCachedNodesResponseSchema) ) )] pub(super) async fn exit_gateways_basic_all( diff --git a/nym-api/src/support/http/helpers.rs b/nym-api/src/support/http/helpers.rs index 4ea95d4cd2..92f6d5585a 100644 --- a/nym-api/src/support/http/helpers.rs +++ b/nym-api/src/support/http/helpers.rs @@ -14,6 +14,8 @@ pub struct PaginationRequest { } #[derive(Deserialize, IntoParams, ToSchema)] +#[schema(title = "NodeId")] +#[schema(as = NodeId)] #[into_params(parameter_in = Path)] pub(crate) struct NodeIdParam { #[schema(value_type = u32)] diff --git a/nym-api/src/support/http/openapi.rs b/nym-api/src/support/http/openapi.rs index 99713e1650..9d2f49af93 100644 --- a/nym-api/src/support/http/openapi.rs +++ b/nym-api/src/support/http/openapi.rs @@ -2,9 +2,7 @@ // SPDX-License-Identifier: GPL-3.0-only #![allow(deprecated)] - use crate::network::handlers::ContractVersionSchemaResponse; -use nym_api_requests::models; use utoipa::OpenApi; use utoipauto::utoipauto; @@ -13,7 +11,15 @@ use utoipauto::utoipauto; // for automatic model discovery based on ToSchema / IntoParams implementation. // Then you can remove `components(schemas)` manual imports below -#[utoipauto(paths = "./nym-api/src")] +// dependencies which have derive(ToSchema) behind a feature flag with cfg_attr +// cannot be autodiscovered because proc macros run before feature flags. 
+// Tracking issue: https://github.com/ProbablyClem/utoipauto/issues/13 + +#[utoipauto(paths = "./nym-api/src, + ./nym-api/nym-api-requests/src from nym-api-requests, + ./common/config/src from nym-config, + ./common/ticketbooks-merkle/src from nym-ticketbooks-merkle, + ./common/nym_offline_compact_ecash/src from nym_compact_ecash")] #[derive(OpenApi)] #[openapi( info(title = "Nym API"), @@ -24,79 +30,19 @@ use utoipauto::utoipauto; ), tags(), components(schemas( - models::CirculatingSupplyResponse, - models::CoinSchema, nym_mixnet_contract_common::Interval, - nym_api_requests::models::NodeRefreshBody, - nym_api_requests::models::GatewayStatusReportResponse, - nym_api_requests::models::GatewayUptimeHistoryResponse, - nym_api_requests::models::GatewayCoreStatusResponse, - nym_api_requests::models::GatewayUptimeResponse, - nym_api_requests::models::RewardEstimationResponse, - nym_api_requests::models::UptimeResponse, - nym_api_requests::models::ComputeRewardEstParam, - nym_api_requests::models::MixNodeBondAnnotated, - nym_api_requests::models::GatewayBondAnnotated, - nym_api_requests::models::MixnodeTestResultResponse, - nym_api_requests::models::StakeSaturationResponse, - nym_api_requests::models::InclusionProbabilityResponse, - nym_api_requests::models::AllInclusionProbabilitiesResponse, - nym_api_requests::models::InclusionProbability, - nym_api_requests::models::SelectionChance, - crate::network::models::NetworkDetails, + nym_mixnet_contract_common::IntervalRewardParams, + nym_mixnet_contract_common::RewardingParams, + nym_mixnet_contract_common::reward_params::RewardedSetParams, nym_config::defaults::NymNetworkDetails, nym_config::defaults::ChainDetails, nym_config::defaults::DenomDetailsOwned, nym_config::defaults::ValidatorDetails, nym_config::defaults::NymContracts, ContractVersionSchemaResponse, - crate::network::models::ContractInformation, - nym_api_requests::models::ApiHealthResponse, - nym_api_requests::models::ApiStatus, 
nym_bin_common::build_information::BinaryBuildInformationOwned, - nym_api_requests::models::SignerInformationResponse, - nym_api_requests::models::LegacyDescribedGateway, - nym_mixnet_contract_common::Gateway, - nym_mixnet_contract_common::GatewayBond, - nym_api_requests::models::NymNodeDescription, - nym_api_requests::models::HostInformation, - nym_api_requests::models::HostKeys, nym_node_requests::api::v1::node::models::AuxiliaryDetails, - nym_api_requests::models::NetworkRequesterDetails, - nym_api_requests::models::IpPacketRouterDetails, - nym_api_requests::models::AuthenticatorDetails, - nym_api_requests::models::WebSockets, - nym_api_requests::nym_nodes::NodeRole, - nym_api_requests::models::LegacyDescribedMixNode, - nym_api_requests::ecash::VerificationKeyResponse, - nym_api_requests::ecash::models::AggregatedExpirationDateSignatureResponse, - nym_api_requests::ecash::models::AggregatedCoinIndicesSignatureResponse, - nym_api_requests::ecash::models::MasterVerificationKeyResponse, - nym_api_requests::ecash::models::BlindedSignatureResponse, - nym_api_requests::ecash::models::BlindSignRequestBody, - nym_api_requests::ecash::models::PartialExpirationDateSignatureResponse, - nym_api_requests::ecash::models::PartialCoinIndicesSignatureResponse, - nym_api_requests::ecash::models::EcashTicketVerificationResponse, - nym_api_requests::ecash::models::EcashTicketVerificationRejection, - nym_api_requests::ecash::models::EcashBatchTicketRedemptionResponse, - nym_api_requests::ecash::models::VerifyEcashTicketBody, - nym_api_requests::ecash::models::VerifyEcashCredentialBody, - nym_api_requests::ecash::models::CommitedDeposit, - nym_api_requests::ecash::models::IssuedTicketbooksForResponseBody, - nym_api_requests::ecash::models::IssuedTicketbooksForResponse, - nym_api_requests::ecash::models::IssuedTicketbooksChallengeRequest, - nym_api_requests::ecash::models::IssuedTicketbooksChallengeResponseBody, - nym_api_requests::ecash::models::IssuedTicketbooksChallengeResponse, - 
nym_api_requests::nym_nodes::SkimmedNode, - nym_api_requests::nym_nodes::SemiSkimmedNode, - nym_api_requests::nym_nodes::FullFatNode, - nym_api_requests::nym_nodes::BasicEntryInformation, - nym_api_requests::nym_nodes::NodeRoleQueryParam, - nym_api_requests::models::AnnotationResponse, - nym_api_requests::models::NodePerformanceResponse, - nym_api_requests::models::NodeDatePerformanceResponse, - nym_api_requests::models::PerformanceHistoryResponse, - nym_api_requests::models::UptimeHistoryResponse, + nym_contracts_common::ContractBuildInformation )) )] pub(crate) struct ApiDoc; diff --git a/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/mod.rs b/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/mod.rs index 2e131231cd..d2c5dc46e3 100644 --- a/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/mod.rs +++ b/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/mod.rs @@ -10,9 +10,7 @@ use uuid::Uuid; pub mod ticketbook; #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] -#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] pub struct ErrorResponse { - #[cfg_attr(feature = "openapi",schema(value_type = Option, example = "c48f9ce3-a1e9-4886-8000-13f290f34501"))] pub uuid: Option, pub message: String, } diff --git a/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/ticketbook/models.rs b/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/ticketbook/models.rs index 81082ea154..22f04a7483 100644 --- a/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/ticketbook/models.rs +++ b/nym-credential-proxy/nym-credential-proxy-requests/src/api/v1/ticketbook/models.rs @@ -46,6 +46,7 @@ pub struct TicketbookRequest { // needs to be explicit in case user creates request at 23:59:59.999, but it reaches vpn-api at 00:00:00.001 #[schemars(with = "String")] #[serde(with = "crate::helpers::date_serde")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] pub expiration_date: Date, 
#[schemars(with = "String")] @@ -246,10 +247,12 @@ pub struct WebhookTicketbookWalletShares { #[schemars(with = "String")] #[serde(with = "time::serde::rfc3339")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] pub created: OffsetDateTime, #[schemars(with = "String")] #[serde(with = "time::serde::rfc3339")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] pub updated: OffsetDateTime, } diff --git a/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/openapi.rs b/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/openapi.rs index 2315a658a1..812392056a 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/openapi.rs +++ b/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/openapi.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::http::router::api; -use crate::http::types::RequestError; use axum::Router; use nym_credential_proxy_requests::api as api_requests; use nym_credential_proxy_requests::routes::api::{v1, v1_absolute}; @@ -69,7 +68,6 @@ pub(crate) struct ApiDoc; schemas( api::Output, api::OutputParams, - api_requests::v1::ErrorResponse, api_requests::v1::ticketbook::models::DepositResponse, api_requests::v1::ticketbook::models::PartialVerificationKeysResponse, api_requests::v1::ticketbook::models::CurrentEpochResponse, @@ -90,7 +88,6 @@ pub(crate) struct ApiDoc; api_requests::v1::ticketbook::models::SharesQueryParams, api_requests::v1::ticketbook::models::PlaceholderJsonSchemaImpl, ), - responses(RequestError), ), modifiers(&SecurityAddon), )] diff --git a/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/mod.rs b/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/mod.rs index 335107f3db..530d2bdad1 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/mod.rs +++ b/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/mod.rs @@ -48,14 
+48,14 @@ pub type FormattedTicketbookWalletSharesAsyncResponse = ), responses( (status = 200, content( - ("application/json" = TicketbookWalletSharesResponse), - ("application/yaml" = TicketbookWalletSharesResponse), + (TicketbookWalletSharesResponse = "application/json"), + (TicketbookWalletSharesResponse = "application/yaml"), )), (status = 400, description = "the provided request hasn't been created against correct attributes"), (status = 401, description = "authentication token is missing or is invalid"), (status = 422, description = "provided request was malformed"), - (status = 500, body = ErrorResponse, description = "failed to obtain a ticketbook"), - (status = 503, body = ErrorResponse, description = "ticketbooks can't be issued at this moment: the epoch transition is probably taking place"), + (status = 500, body = String, description = "failed to obtain a ticketbook"), + (status = 503, body = String, description = "ticketbooks can't be issued at this moment: the epoch transition is probably taking place"), ), params(TicketbookObtainQueryParams), security( @@ -138,15 +138,15 @@ pub(crate) async fn obtain_ticketbook_shares( ), responses( (status = 200, content( - ("application/json" = TicketbookWalletSharesAsyncResponse), - ("application/yaml" = TicketbookWalletSharesAsyncResponse), + (TicketbookWalletSharesAsyncResponse = "application/json"), + (TicketbookWalletSharesAsyncResponse = "application/yaml"), )), (status = 400, description = "the provided request hasn't been created against correct attributes"), (status = 401, description = "authentication token is missing or is invalid"), (status = 409, description = "shares were already requested"), (status = 422, description = "provided request was malformed"), - (status = 500, body = ErrorResponse, description = "failed to obtain a ticketbook"), - (status = 503, body = ErrorResponse, description = "ticketbooks can't be issued at this moment: the epoch transition is probably taking place"), + (status = 500, 
body = String, description = "failed to obtain a ticketbook"), + (status = 503, body = String, description = "ticketbooks can't be issued at this moment: the epoch transition is probably taking place"), ), params(TicketbookObtainQueryParams), security( @@ -235,11 +235,11 @@ pub(crate) async fn obtain_ticketbook_shares_async( tag = "Ticketbook", responses( (status = 200, content( - ("application/json" = DepositResponse), - ("application/yaml" = DepositResponse), + (DepositResponse = "application/json"), + (DepositResponse = "application/yaml"), )), (status = 401, description = "authentication token is missing or is invalid"), - (status = 500, body = ErrorResponse, description = "failed to obtain current deposit information"), + (status = 500, body = String, description = "failed to obtain current deposit information"), ), params(OutputParams), security( @@ -270,12 +270,12 @@ pub(crate) async fn current_deposit( tag = "Ticketbook", responses( (status = 200, content( - ("application/json" = PartialVerificationKeysResponse), - ("application/yaml" = PartialVerificationKeysResponse), + (PartialVerificationKeysResponse = "application/json"), + (PartialVerificationKeysResponse = "application/yaml"), )), (status = 401, description = "authentication token is missing or is invalid"), - (status = 500, body = ErrorResponse, description = "failed to obtain current epoch information"), - (status = 503, body = ErrorResponse, description = "credentials can't be issued at this moment: the epoch transition is probably taking place"), + (status = 500, body = String, description = "failed to obtain current epoch information"), + (status = 503, body = String, description = "credentials can't be issued at this moment: the epoch transition is probably taking place"), ), params(OutputParams), security( @@ -320,12 +320,12 @@ pub(crate) async fn partial_verification_keys( tag = "Ticketbook", responses( (status = 200, content( - ("application/json" = MasterVerificationKeyResponse), - 
("application/yaml" = MasterVerificationKeyResponse), + (MasterVerificationKeyResponse = "application/json"), + (MasterVerificationKeyResponse = "application/yaml"), )), (status = 401, description = "authentication token is missing or is invalid"), - (status = 500, body = ErrorResponse, description = "failed to obtain current epoch information"), - (status = 503, body = ErrorResponse, description = "credentials can't be issued at this moment: the epoch transition is probably taking place"), + (status = 500, body = String, description = "failed to obtain current epoch information"), + (status = 503, body = String, description = "credentials can't be issued at this moment: the epoch transition is probably taking place"), ), params(OutputParams), security( @@ -365,12 +365,12 @@ pub(crate) async fn master_verification_key( tag = "Ticketbook", responses( (status = 200, content( - ("application/json" = CurrentEpochResponse), - ("application/yaml" = CurrentEpochResponse), + (CurrentEpochResponse = "application/json"), + (CurrentEpochResponse = "application/yaml"), )), (status = 401, description = "authentication token is missing or is invalid"), - (status = 500, body = ErrorResponse, description = "failed to obtain current epoch information"), - (status = 503, body = ErrorResponse, description = "credentials can't be issued at this moment: the epoch transition is probably taking place"), + (status = 500, body = String, description = "failed to obtain current epoch information"), + (status = 503, body = String, description = "credentials can't be issued at this moment: the epoch transition is probably taking place"), ), params(OutputParams), security( diff --git a/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/shares.rs b/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/shares.rs index cfe4893a69..8b3dacb82d 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/shares.rs +++ 
b/nym-credential-proxy/nym-credential-proxy/src/http/router/api/v1/ticketbook/shares.rs @@ -80,12 +80,12 @@ async fn shares_to_response( tag = "Ticketbook Wallet Shares", responses( (status = 200, content( - ("application/json" = TicketbookWalletSharesResponse), - ("application/yaml" = TicketbookWalletSharesResponse), + (TicketbookWalletSharesResponse = "application/json"), + (TicketbookWalletSharesResponse = "application/yaml"), )), (status = 404, description = "share_id not found"), (status = 401, description = "authentication token is missing or is invalid"), - (status = 500, body = ErrorResponse, description = "failed to query for bandwidth blinded shares"), + (status = 500, body = String, description = "failed to query for bandwidth blinded shares"), ), params(OutputParams), security( @@ -155,12 +155,12 @@ pub(crate) async fn query_for_shares_by_id( tag = "Ticketbook Wallet Shares", responses( (status = 200, content( - ("application/json" = TicketbookWalletSharesResponse), - ("application/yaml" = TicketbookWalletSharesResponse), + (TicketbookWalletSharesResponse = "application/json"), + (TicketbookWalletSharesResponse = "application/yaml"), )), (status = 404, description = "share_id not found"), (status = 401, description = "authentication token is missing or is invalid"), - (status = 500, body = ErrorResponse, description = "failed to query for bandwidth blinded shares"), + (status = 500, body = String, description = "failed to query for bandwidth blinded shares"), ), params(SharesQueryParams), security( diff --git a/nym-credential-proxy/nym-credential-proxy/src/http/types.rs b/nym-credential-proxy/nym-credential-proxy/src/http/types.rs index 8602103456..6538c5a9a7 100644 --- a/nym-credential-proxy/nym-credential-proxy/src/http/types.rs +++ b/nym-credential-proxy/nym-credential-proxy/src/http/types.rs @@ -6,14 +6,11 @@ use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use axum::Json; use 
nym_credential_proxy_requests::api::v1::ErrorResponse; -use utoipa::ToResponse; use uuid::Uuid; -#[derive(Debug, Clone, ToResponse)] -#[response(description = "Error response with additional message")] +#[derive(Debug, Clone)] pub struct RequestError { pub inner: ErrorResponse, - pub status: StatusCode, } diff --git a/nym-node-status-api/nym-node-status-api/src/http/api/gateways.rs b/nym-node-status-api/nym-node-status-api/src/http/api/gateways.rs index c1f4073767..80e870f198 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/api/gateways.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/api/gateways.rs @@ -27,7 +27,7 @@ pub(crate) fn routes() -> Router { ), path = "/v2/gateways", responses( - (status = 200, body = PagedGateway) + (status = 200, body = PagedResult) ) )] async fn gateways( @@ -48,7 +48,7 @@ async fn gateways( ), path = "/v2/gateways/skinny", responses( - (status = 200, body = PagedGatewaySkinny) + (status = 200, body = PagedResult) ) )] async fn gateways_skinny( diff --git a/nym-node-status-api/nym-node-status-api/src/http/api/metrics/sessions.rs b/nym-node-status-api/nym-node-status-api/src/http/api/metrics/sessions.rs index e2e4cd9ced..409c042753 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/api/metrics/sessions.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/api/metrics/sessions.rs @@ -35,7 +35,7 @@ pub(crate) struct SessionQueryParams { ), path = "/v2/metrics/sessions", responses( - (status = 200, body = PagedSessionStats) + (status = 200, body = PagedResult) ) )] #[instrument(level = tracing::Level::DEBUG, skip(state))] diff --git a/nym-node-status-api/nym-node-status-api/src/http/api/mixnodes.rs b/nym-node-status-api/nym-node-status-api/src/http/api/mixnodes.rs index f42d0bf91c..ae9842e1df 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/api/mixnodes.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/api/mixnodes.rs @@ -28,7 +28,7 @@ pub(crate) fn routes() -> Router { ), path = 
"/v2/mixnodes", responses( - (status = 200, body = PagedMixnode) + (status = 200, body = PagedResult) ) )] #[instrument(level = tracing::Level::DEBUG, skip_all, fields(page=pagination.page, size=pagination.size))] diff --git a/nym-node-status-api/nym-node-status-api/src/http/api/services/mod.rs b/nym-node-status-api/nym-node-status-api/src/http/api/services/mod.rs index 5650684c43..4a817ab759 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/api/services/mod.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/api/services/mod.rs @@ -35,7 +35,7 @@ pub(crate) struct ServicesQueryParams { ), path = "/v2/services", responses( - (status = 200, body = PagedService) + (status = 200, body = PagedResult) ) )] #[instrument(level = tracing::Level::DEBUG, skip(state))] diff --git a/nym-node-status-api/nym-node-status-api/src/http/api_docs.rs b/nym-node-status-api/nym-node-status-api/src/http/api_docs.rs index fec4d25cd5..124c7ebee0 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/api_docs.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/api_docs.rs @@ -1,4 +1,3 @@ -use crate::http::{Gateway, GatewaySkinny, Mixnode, Service, SessionStats}; use utoipa::OpenApi; use utoipauto::utoipauto; diff --git a/nym-node-status-api/nym-node-status-api/src/http/mod.rs b/nym-node-status-api/nym-node-status-api/src/http/mod.rs index b1a7bf742d..2fa8373318 100644 --- a/nym-node-status-api/nym-node-status-api/src/http/mod.rs +++ b/nym-node-status-api/nym-node-status-api/src/http/mod.rs @@ -1,4 +1,4 @@ -use models::{Gateway, GatewaySkinny, Mixnode, Service, SessionStats}; +use utoipa::ToSchema; pub(crate) mod api; pub(crate) mod api_docs; @@ -8,28 +8,14 @@ pub(crate) mod server; pub(crate) mod state; #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, utoipa::ToSchema)] -// exclude generic from auto-discovery -#[utoipauto::utoipa_ignore] -// https://docs.rs/utoipa/latest/utoipa/derive.ToSchema.html#generic-schemas-with-aliases -// Generic structs 
can only be included via aliases, not directly, because they -// it would cause an error in generated Swagger docs. -// Instead, you have to manually monomorphize each generic struct that -// you wish to document -#[aliases( - PagedGateway = PagedResult, - PagedGatewaySkinny = PagedResult, - PagedMixnode = PagedResult, - PagedService = PagedResult, - PagedSessionStats = PagedResult -)] -pub struct PagedResult { +pub struct PagedResult { pub page: usize, pub size: usize, pub total: usize, pub items: Vec, } -impl PagedResult { +impl PagedResult { pub fn paginate(pagination: Pagination, res: Vec) -> Self { let total = res.len(); let (size, mut page) = pagination.intoto_inner_values(); diff --git a/nym-node/nym-node-requests/Cargo.toml b/nym-node/nym-node-requests/Cargo.toml index fc1d0853c3..8b8849c480 100644 --- a/nym-node/nym-node-requests/Cargo.toml +++ b/nym-node/nym-node-requests/Cargo.toml @@ -35,7 +35,7 @@ async-trait = { workspace = true, optional = true } nym-http-api-client = { path = "../../common/http-api-client", optional = true } ## openapi: -utoipa = { workspace = true, optional = true } +utoipa = { workspace = true, features = ["time"], optional = true } nym-bin-common = { path = "../../common/bin-common", features = [ "bin_info_schema", ] } diff --git a/nym-node/nym-node-requests/src/api/mod.rs b/nym-node/nym-node-requests/src/api/mod.rs index 6bf586f71b..5e09d150b7 100644 --- a/nym-node/nym-node-requests/src/api/mod.rs +++ b/nym-node/nym-node-requests/src/api/mod.rs @@ -1,15 +1,14 @@ // Copyright 2023 - Nym Technologies SA // SPDX-License-Identifier: Apache-2.0 -use crate::api::v1::node::models::{ - HostInformation, LegacyHostInformation, LegacyHostInformationV2, -}; +use crate::api::v1::node::models::{LegacyHostInformation, LegacyHostInformationV2}; use crate::error::Error; use nym_crypto::asymmetric::identity; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; use std::ops::Deref; +use 
utoipa::ToSchema; #[cfg(feature = "client")] pub mod client; @@ -19,12 +18,16 @@ pub mod v1; pub use client::Client; // create the type alias manually if openapi is not enabled -#[cfg(not(feature = "openapi"))] -pub type SignedHostInformation = SignedData; +pub type SignedHostInformation = SignedData; + +#[derive(ToSchema)] +pub struct SignedDataHostInfo { + // #[serde(flatten)] + pub data: crate::api::v1::node::models::HostInformation, + pub signature: String, +} #[derive(Debug, Clone, Serialize, Deserialize)] -#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] -#[cfg_attr(feature = "openapi", aliases(SignedHostInformation = SignedData))] pub struct SignedData { // #[serde(flatten)] pub data: T, @@ -90,7 +93,6 @@ impl Deref for SignedData { } #[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)] -#[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] pub struct ErrorResponse { pub message: String, } diff --git a/nym-node/nym-node-requests/src/api/v1/metrics/models.rs b/nym-node/nym-node-requests/src/api/v1/metrics/models.rs index 0ee4170dfe..555982bb2a 100644 --- a/nym-node/nym-node-requests/src/api/v1/metrics/models.rs +++ b/nym-node/nym-node-requests/src/api/v1/metrics/models.rs @@ -97,6 +97,13 @@ pub mod verloc { use serde::{Deserialize, Serialize}; use std::time::Duration; use time::OffsetDateTime; + #[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)] + #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] + pub struct VerlocNodeResult { + #[serde(with = "bs58_ed25519_pubkey")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] + pub node_identity: ed25519::PublicKey, + } #[derive(Serialize, Deserialize, Default, Debug, Clone)] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] @@ -129,15 +136,6 @@ pub mod verloc { pub results: Vec, } - #[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)] - #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] - pub struct VerlocNodeResult { - 
#[serde(with = "bs58_ed25519_pubkey")] - pub node_identity: ed25519::PublicKey, - - pub latest_measurement: Option, - } - #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] pub struct VerlocMeasurement { @@ -174,6 +172,7 @@ pub mod session { #[cfg_attr(feature = "openapi", derive(utoipa::ToSchema))] pub struct SessionStats { #[serde(with = "time::serde::rfc3339")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] pub update_time: OffsetDateTime, pub unique_active_users: u32, diff --git a/nym-node/nym-node-requests/src/api/v1/node/models.rs b/nym-node/nym-node-requests/src/api/v1/node/models.rs index d0f1e3df03..5f047912fa 100644 --- a/nym-node/nym-node-requests/src/api/v1/node/models.rs +++ b/nym-node/nym-node-requests/src/api/v1/node/models.rs @@ -111,6 +111,7 @@ pub struct HostKeys { #[serde(alias = "ed25519")] #[serde(with = "bs58_ed25519_pubkey")] #[schemars(with = "String")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] pub ed25519_identity: ed25519::PublicKey, /// Base58-encoded x25519 public key of this node used for sphinx/outfox packet creation. @@ -118,12 +119,14 @@ pub struct HostKeys { #[serde(alias = "x25519")] #[serde(with = "bs58_x25519_pubkey")] #[schemars(with = "String")] + #[cfg_attr(feature = "openapi", schema(value_type = String))] pub x25519_sphinx: x25519::PublicKey, /// Base58-encoded x25519 public key of this node used for the noise protocol. 
#[serde(default)] #[serde(with = "option_bs58_x25519_pubkey")] #[schemars(with = "Option")] + #[cfg_attr(feature = "openapi", schema(value_type = Option))] pub x25519_noise: Option, } diff --git a/nym-node/src/node/http/router/api/v1/authenticator/root.rs b/nym-node/src/node/http/router/api/v1/authenticator/root.rs index 054d224779..6c6ed9c835 100644 --- a/nym-node/src/node/http/router/api/v1/authenticator/root.rs +++ b/nym-node/src/node/http/router/api/v1/authenticator/root.rs @@ -15,8 +15,8 @@ use nym_node_requests::api::v1::authenticator::models::Authenticator; responses( (status = 501, description = "the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = Authenticator), - ("application/yaml" = Authenticator) + (Authenticator = "application/json"), + (Authenticator = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/gateway/client_interfaces/mod.rs b/nym-node/src/node/http/router/api/v1/gateway/client_interfaces/mod.rs index 96215f7192..6e2b5c57d2 100644 --- a/nym-node/src/node/http/router/api/v1/gateway/client_interfaces/mod.rs +++ b/nym-node/src/node/http/router/api/v1/gateway/client_interfaces/mod.rs @@ -45,8 +45,8 @@ pub(crate) fn routes( responses( (status = 501, description = "the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = ClientInterfaces), - ("application/yaml" = ClientInterfaces) + (ClientInterfaces = "application/json"), + (ClientInterfaces = "application/yaml") )) ), params(OutputParams) @@ -71,8 +71,8 @@ pub type ClientInterfacesResponse = FormattedResponse; responses( (status = 501, description = "the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = WebSockets), - ("application/yaml" = WebSockets) + (WebSockets = "application/json"), + (WebSockets = "application/yaml") )) ), params(OutputParams) @@ -97,8 +97,8 @@ pub type MixnetWebSocketsResponse = FormattedResponse; responses( 
(status = 501, description = "the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = Wireguard), - ("application/yaml" = Wireguard) + (Wireguard = "application/json"), + (Wireguard = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/gateway/root.rs b/nym-node/src/node/http/router/api/v1/gateway/root.rs index 4738acb30e..2d96374b2e 100644 --- a/nym-node/src/node/http/router/api/v1/gateway/root.rs +++ b/nym-node/src/node/http/router/api/v1/gateway/root.rs @@ -15,8 +15,8 @@ use nym_node_requests::api::v1::gateway::models::Gateway; responses( (status = 501, description = "the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = Gateway), - ("application/yaml" = Gateway) + (Gateway = "application/json"), + (Gateway = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/health.rs b/nym-node/src/node/http/router/api/v1/health.rs index 0972ea6339..510107039f 100644 --- a/nym-node/src/node/http/router/api/v1/health.rs +++ b/nym-node/src/node/http/router/api/v1/health.rs @@ -14,8 +14,8 @@ use nym_node_requests::api::v1::health::models::NodeHealth; tag = "Health", responses( (status = 200, content( - ("application/json" = Vec), - ("application/yaml" = Vec) + (Vec = "application/json"), + (Vec = "application/yaml") ), description = "the api is available and healthy") ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/ip_packet_router/root.rs b/nym-node/src/node/http/router/api/v1/ip_packet_router/root.rs index 18c503bd5c..e85cee897e 100644 --- a/nym-node/src/node/http/router/api/v1/ip_packet_router/root.rs +++ b/nym-node/src/node/http/router/api/v1/ip_packet_router/root.rs @@ -15,8 +15,8 @@ use nym_node_requests::api::v1::ip_packet_router::models::IpPacketRouter; responses( (status = 501, description = "the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = 
IpPacketRouter), - ("application/yaml" = IpPacketRouter) + (IpPacketRouter = "application/json"), + (IpPacketRouter = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/metrics/legacy_mixing.rs b/nym-node/src/node/http/router/api/v1/metrics/legacy_mixing.rs index 23cfd0607d..de9126aead 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/legacy_mixing.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/legacy_mixing.rs @@ -16,8 +16,8 @@ use nym_node_requests::api::v1::metrics::models::LegacyMixingStats; tag = "Metrics", responses( (status = 200, content( - ("application/json" = LegacyMixingStats), - ("application/yaml" = LegacyMixingStats) + (LegacyMixingStats = "application/json"), + (LegacyMixingStats = "application/yaml") )) ), params(OutputParams), diff --git a/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs b/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs index 490bcffb57..3faf5be346 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/packets_stats.rs @@ -18,8 +18,8 @@ use nym_node_requests::api::v1::metrics::models::packets::{ tag = "Metrics", responses( (status = 200, content( - ("application/json" = PacketsStats), - ("application/yaml" = PacketsStats) + (PacketsStats = "application/json"), + (PacketsStats = "application/yaml") )) ), params(OutputParams), diff --git a/nym-node/src/node/http/router/api/v1/metrics/sessions.rs b/nym-node/src/node/http/router/api/v1/metrics/sessions.rs index 0b76e83205..e2fd969e17 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/sessions.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/sessions.rs @@ -17,8 +17,8 @@ use time::macros::time; tag = "Metrics", responses( (status = 200, content( - ("application/json" = SessionStats), - ("application/yaml" = SessionStats) + (SessionStats = "application/json"), + (SessionStats = "application/yaml") )) ), 
params(OutputParams), diff --git a/nym-node/src/node/http/router/api/v1/metrics/verloc.rs b/nym-node/src/node/http/router/api/v1/metrics/verloc.rs index 1a15986373..8a2a046637 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/verloc.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/verloc.rs @@ -1,14 +1,15 @@ // Copyright 2024 - Nym Technologies SA // SPDX-License-Identifier: GPL-3.0-only -use crate::node::http::state::metrics::MetricsAppState; use axum::extract::{Query, State}; use nym_http_api_common::{FormattedResponse, OutputParams}; use nym_node_requests::api::v1::metrics::models::{ - VerlocMeasurement, VerlocNodeResult, VerlocResult, VerlocResultData, VerlocStats, + VerlocNodeResult, VerlocResult, VerlocResultData, VerlocStats, }; use nym_verloc::measurements::SharedVerlocStats; +use crate::node::http::state::metrics::MetricsAppState; + /// If applicable, returns verloc statistics information of this node. #[utoipa::path( get, @@ -17,8 +18,8 @@ use nym_verloc::measurements::SharedVerlocStats; tag = "Metrics", responses( (status = 200, content( - ("application/json" = VerlocStats), - ("application/yaml" = VerlocStats) + (VerlocStats = "application/json"), + (VerlocStats = "application/yaml") )) ), params(OutputParams), @@ -42,12 +43,6 @@ async fn build_response(verloc_stats: &SharedVerlocStats) -> VerlocStats { .iter() .map(|r| VerlocNodeResult { node_identity: r.node_identity, - latest_measurement: r.latest_measurement.map(|l| VerlocMeasurement { - minimum: l.minimum, - mean: l.mean, - maximum: l.maximum, - standard_deviation: l.standard_deviation, - }), }) .collect(), } diff --git a/nym-node/src/node/http/router/api/v1/metrics/wireguard.rs b/nym-node/src/node/http/router/api/v1/metrics/wireguard.rs index 3519ff30dd..1e4668a805 100644 --- a/nym-node/src/node/http/router/api/v1/metrics/wireguard.rs +++ b/nym-node/src/node/http/router/api/v1/metrics/wireguard.rs @@ -16,8 +16,8 @@ use nym_node_requests::api::v1::metrics::models::WireguardStats; tag = 
"Metrics", responses( (status = 200, content( - ("application/json" = WireguardStats), - ("application/yaml" = WireguardStats) + (WireguardStats = "application/json"), + (WireguardStats = "application/yaml") )) ), params(OutputParams), diff --git a/nym-node/src/node/http/router/api/v1/mixnode/root.rs b/nym-node/src/node/http/router/api/v1/mixnode/root.rs index 4f1133ebc3..dff028e6c2 100644 --- a/nym-node/src/node/http/router/api/v1/mixnode/root.rs +++ b/nym-node/src/node/http/router/api/v1/mixnode/root.rs @@ -15,8 +15,8 @@ use nym_node_requests::api::v1::mixnode::models::Mixnode; responses( (status = 501, description = "the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = Mixnode), - ("application/yaml" = Mixnode) + (Mixnode = "application/json"), + (Mixnode = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/network_requester/exit_policy.rs b/nym-node/src/node/http/router/api/v1/network_requester/exit_policy.rs index a58cd8862c..1640fb0d66 100644 --- a/nym-node/src/node/http/router/api/v1/network_requester/exit_policy.rs +++ b/nym-node/src/node/http/router/api/v1/network_requester/exit_policy.rs @@ -13,8 +13,8 @@ use nym_node_requests::api::v1::network_requester::exit_policy::models::UsedExit tag = "Network Requester", responses( (status = 200, content( - ("application/json" = UsedExitPolicy), - ("application/yaml" = UsedExitPolicy) + (UsedExitPolicy = "application/json"), + (UsedExitPolicy = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/network_requester/root.rs b/nym-node/src/node/http/router/api/v1/network_requester/root.rs index 426614b07f..fe9f2c07a2 100644 --- a/nym-node/src/node/http/router/api/v1/network_requester/root.rs +++ b/nym-node/src/node/http/router/api/v1/network_requester/root.rs @@ -15,8 +15,8 @@ use nym_node_requests::api::v1::network_requester::models::NetworkRequester; responses( (status = 501, description = 
"the endpoint hasn't been implemented yet"), (status = 200, content( - ("application/json" = NetworkRequester), - ("application/yaml" = NetworkRequester) + (NetworkRequester = "application/json"), + (NetworkRequester = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/node/auxiliary.rs b/nym-node/src/node/http/router/api/v1/node/auxiliary.rs index 9db2b5f6dd..b163371ef2 100644 --- a/nym-node/src/node/http/router/api/v1/node/auxiliary.rs +++ b/nym-node/src/node/http/router/api/v1/node/auxiliary.rs @@ -14,8 +14,8 @@ use nym_node_requests::api::v1::node::models::AuxiliaryDetails; tag = "Node", responses( (status = 200, content( - ("application/json" = AuxiliaryDetails), - ("application/yaml" = AuxiliaryDetails) + (AuxiliaryDetails = "application/json"), + (AuxiliaryDetails = "application/yaml") )), ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/node/build_information.rs b/nym-node/src/node/http/router/api/v1/node/build_information.rs index 6073482c3e..7761a99f4d 100644 --- a/nym-node/src/node/http/router/api/v1/node/build_information.rs +++ b/nym-node/src/node/http/router/api/v1/node/build_information.rs @@ -13,8 +13,8 @@ use nym_node_requests::api::v1::node::models::BinaryBuildInformationOwned; tag = "Node", responses( (status = 200, content( - ("application/json" = BinaryBuildInformationOwned), - ("application/yaml" = BinaryBuildInformationOwned) + (BinaryBuildInformationOwned = "application/json"), + (BinaryBuildInformationOwned = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/node/description.rs b/nym-node/src/node/http/router/api/v1/node/description.rs index ff569c78fb..bd6e4ee8b5 100644 --- a/nym-node/src/node/http/router/api/v1/node/description.rs +++ b/nym-node/src/node/http/router/api/v1/node/description.rs @@ -14,8 +14,8 @@ use nym_node_requests::api::v1::node::models::NodeDescription; tag = "Node", responses( (status = 200, content( - 
("application/json" = NodeDescription), - ("application/yaml" = NodeDescription) + (NodeDescription = "application/json"), + (NodeDescription = "application/yaml") )), ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/node/hardware.rs b/nym-node/src/node/http/router/api/v1/node/hardware.rs index 969e93458f..b0f5e124d3 100644 --- a/nym-node/src/node/http/router/api/v1/node/hardware.rs +++ b/nym-node/src/node/http/router/api/v1/node/hardware.rs @@ -15,10 +15,10 @@ use nym_node_requests::api::v1::node::models::HostSystem; tag = "Node", responses( (status = 200, content( - ("application/json" = HostSystem), - ("application/yaml" = HostSystem) + (HostSystem = "application/json"), + (HostSystem = "application/yaml") )), - (status = 403, body = ErrorResponse, description = "the node does not wish to expose the system information") + (status = 403, body = String, description = "the node does not wish to expose the system information") ), params(OutputParams) )] diff --git a/nym-node/src/node/http/router/api/v1/node/host_information.rs b/nym-node/src/node/http/router/api/v1/node/host_information.rs index d6cea0acde..76eb5ddfa0 100644 --- a/nym-node/src/node/http/router/api/v1/node/host_information.rs +++ b/nym-node/src/node/http/router/api/v1/node/host_information.rs @@ -3,7 +3,7 @@ use axum::extract::Query; use nym_http_api_common::{FormattedResponse, OutputParams}; -use nym_node_requests::api::v1::node::models::SignedHostInformation; +use nym_node_requests::api::{v1::node::models::SignedHostInformation, SignedDataHostInfo}; /// Returns host information of this node. 
#[utoipa::path( @@ -13,8 +13,8 @@ use nym_node_requests::api::v1::node::models::SignedHostInformation; tag = "Node", responses( (status = 200, content( - ("application/json" = SignedHostInformation), - ("application/yaml" = SignedHostInformation) + (SignedDataHostInfo = "application/json"), + (SignedDataHostInfo = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/node/roles.rs b/nym-node/src/node/http/router/api/v1/node/roles.rs index 40b98d868a..c1228a5188 100644 --- a/nym-node/src/node/http/router/api/v1/node/roles.rs +++ b/nym-node/src/node/http/router/api/v1/node/roles.rs @@ -13,8 +13,8 @@ use nym_node_requests::api::v1::node::models::NodeRoles; tag = "Node", responses( (status = 200, content( - ("application/json" = NodeRoles), - ("application/yaml" = NodeRoles) + (NodeRoles = "application/json"), + (NodeRoles = "application/yaml") )) ), params(OutputParams) diff --git a/nym-node/src/node/http/router/api/v1/openapi.rs b/nym-node/src/node/http/router/api/v1/openapi.rs index a46812464f..6b209c450f 100644 --- a/nym-node/src/node/http/router/api/v1/openapi.rs +++ b/nym-node/src/node/http/router/api/v1/openapi.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: GPL-3.0-only use crate::node::http::router::api; -use crate::node::http::router::types::{ErrorResponse, RequestError}; use axum::Router; use nym_node_requests::api as api_requests; use nym_node_requests::routes::api::{v1, v1_absolute}; @@ -35,13 +34,11 @@ use utoipa_swagger_ui::SwaggerUi; ), components( schemas( - ErrorResponse, nym_http_api_common::Output, nym_http_api_common::OutputParams, api_requests::v1::health::models::NodeHealth, api_requests::v1::health::models::NodeStatus, api_requests::v1::node::models::BinaryBuildInformationOwned, - api_requests::v1::node::models::SignedHostInformation, api_requests::v1::node::models::HostInformation, api_requests::v1::node::models::HostKeys, api_requests::v1::node::models::NodeRoles, @@ -71,7 +68,6 @@ use 
utoipa_swagger_ui::SwaggerUi; api_requests::v1::network_requester::exit_policy::models::UsedExitPolicy, api_requests::v1::ip_packet_router::models::IpPacketRouter, ), - responses(RequestError), ), modifiers(&SecurityAddon), )] diff --git a/nym-node/src/node/http/router/types.rs b/nym-node/src/node/http/router/types.rs index 7a20492fc6..c8b692bda9 100644 --- a/nym-node/src/node/http/router/types.rs +++ b/nym-node/src/node/http/router/types.rs @@ -5,10 +5,8 @@ use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use axum::Json; pub use nym_node_requests::api::ErrorResponse; -use utoipa::ToResponse; -#[derive(Debug, Clone, ToResponse)] -#[response(description = "Error response with additional message")] +#[derive(Debug, Clone)] pub(crate) struct RequestError { pub(crate) inner: ErrorResponse, diff --git a/nym-validator-rewarder/.sqlx/query-0a802236d0b9cc7679971f884a89146f8b40d46d14aa0ecb7237d5d78a9f463f.json b/nym-validator-rewarder/.sqlx/query-0a802236d0b9cc7679971f884a89146f8b40d46d14aa0ecb7237d5d78a9f463f.json new file mode 100644 index 0000000000..e130a550d3 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-0a802236d0b9cc7679971f884a89146f8b40d46d14aa0ecb7237d5d78a9f463f.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO block_signing_rewarding_details(\n rewarding_epoch_id,\n total_voting_power_at_epoch_start,\n num_blocks,\n spent,\n rewarding_tx,\n rewarding_error,\n monitor_only\n ) VALUES (?, ?, ?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 7 + }, + "nullable": [] + }, + "hash": "0a802236d0b9cc7679971f884a89146f8b40d46d14aa0ecb7237d5d78a9f463f" +} diff --git a/nym-validator-rewarder/.sqlx/query-0e03fcbde46a0296e029624ae083381fe8e839998717e6d1a0502f54438fc9b0.json b/nym-validator-rewarder/.sqlx/query-0e03fcbde46a0296e029624ae083381fe8e839998717e6d1a0502f54438fc9b0.json new file mode 100644 index 0000000000..53b4bf4730 --- /dev/null +++ 
b/nym-validator-rewarder/.sqlx/query-0e03fcbde46a0296e029624ae083381fe8e839998717e6d1a0502f54438fc9b0.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "UPDATE pruning SET last_pruned_height = ?", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "0e03fcbde46a0296e029624ae083381fe8e839998717e6d1a0502f54438fc9b0" +} diff --git a/nym-validator-rewarder/.sqlx/query-1d3938bc8d8d6829289ef7ff78ee836c7cff2646b6b76559543b4e4056094d58.json b/nym-validator-rewarder/.sqlx/query-1d3938bc8d8d6829289ef7ff78ee836c7cff2646b6b76559543b4e4056094d58.json new file mode 100644 index 0000000000..1d6a5bdaf6 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-1d3938bc8d8d6829289ef7ff78ee836c7cff2646b6b76559543b4e4056094d58.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO ticketbook_issuance_reward(\n ticketbook_expiration_date,\n api_endpoint,\n operator_account,\n whitelisted,\n banned,\n amount,\n issued_partial_ticketbooks,\n share_of_issued_ticketbooks,\n skipped_verification,\n subsample_size\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 10 + }, + "nullable": [] + }, + "hash": "1d3938bc8d8d6829289ef7ff78ee836c7cff2646b6b76559543b4e4056094d58" +} diff --git a/nym-validator-rewarder/.sqlx/query-1ebd5510a96bc84a88cc91316f891d372e750a74e2eb5a252a787cf571c326e4.json b/nym-validator-rewarder/.sqlx/query-1ebd5510a96bc84a88cc91316f891d372e750a74e2eb5a252a787cf571c326e4.json new file mode 100644 index 0000000000..f189ef3675 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-1ebd5510a96bc84a88cc91316f891d372e750a74e2eb5a252a787cf571c326e4.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "DELETE FROM \"transaction\" WHERE height < ?", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "1ebd5510a96bc84a88cc91316f891d372e750a74e2eb5a252a787cf571c326e4" +} diff --git 
a/nym-validator-rewarder/.sqlx/query-227e0cd05334ac228ca55f309f3738f6bda8532a5bd2d944884cd87784eefe43.json b/nym-validator-rewarder/.sqlx/query-227e0cd05334ac228ca55f309f3738f6bda8532a5bd2d944884cd87784eefe43.json new file mode 100644 index 0000000000..c21d9bd39e --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-227e0cd05334ac228ca55f309f3738f6bda8532a5bd2d944884cd87784eefe43.json @@ -0,0 +1,26 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT * FROM validator \n WHERE EXISTS (\n SELECT 1 FROM pre_commit\n WHERE height == ?\n AND pre_commit.validator_address = validator.consensus_address\n )\n ", + "describe": { + "columns": [ + { + "name": "consensus_address", + "ordinal": 0, + "type_info": "Text" + }, + { + "name": "consensus_pubkey", + "ordinal": 1, + "type_info": "Text" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false + ] + }, + "hash": "227e0cd05334ac228ca55f309f3738f6bda8532a5bd2d944884cd87784eefe43" +} diff --git a/nym-validator-rewarder/.sqlx/query-2561fb016951ea4cd29e43fb9a4a93e944b0d44ed1f7c1036f306e34372da11c.json b/nym-validator-rewarder/.sqlx/query-2561fb016951ea4cd29e43fb9a4a93e944b0d44ed1f7c1036f306e34372da11c.json new file mode 100644 index 0000000000..c4d9958ff8 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-2561fb016951ea4cd29e43fb9a4a93e944b0d44ed1f7c1036f306e34372da11c.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT height\n FROM block\n ORDER BY height ASC\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "name": "height", + "ordinal": 0, + "type_info": "Int64" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + true + ] + }, + "hash": "2561fb016951ea4cd29e43fb9a4a93e944b0d44ed1f7c1036f306e34372da11c" +} diff --git a/nym-validator-rewarder/.sqlx/query-2db92c21c933bc1eadc486867f7c8643ae77e22ad908147b910d9406234911f0.json b/nym-validator-rewarder/.sqlx/query-2db92c21c933bc1eadc486867f7c8643ae77e22ad908147b910d9406234911f0.json new file mode 100644 index 
0000000000..13c8d883b8 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-2db92c21c933bc1eadc486867f7c8643ae77e22ad908147b910d9406234911f0.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO ticketbook_issuance_rewarding_details(\n ticketbook_expiration_date,\n approximate_deposits,\n spent,\n rewarding_tx,\n rewarding_error,\n monitor_only\n ) VALUES (?, ?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 6 + }, + "nullable": [] + }, + "hash": "2db92c21c933bc1eadc486867f7c8643ae77e22ad908147b910d9406234911f0" +} diff --git a/nym-validator-rewarder/.sqlx/query-3227631b516dd16b8474e050393b9036df67d74966a35d9af74d43e93d3524eb.json b/nym-validator-rewarder/.sqlx/query-3227631b516dd16b8474e050393b9036df67d74966a35d9af74d43e93d3524eb.json new file mode 100644 index 0000000000..dd4aa3560e --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-3227631b516dd16b8474e050393b9036df67d74966a35d9af74d43e93d3524eb.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "DELETE FROM block WHERE height < ?", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "3227631b516dd16b8474e050393b9036df67d74966a35d9af74d43e93d3524eb" +} diff --git a/nym-validator-rewarder/.sqlx/query-397bde15134e32921ad87037e9436dcb76982d7859e406daa12c97f671e6fd3b.json b/nym-validator-rewarder/.sqlx/query-397bde15134e32921ad87037e9436dcb76982d7859e406daa12c97f671e6fd3b.json new file mode 100644 index 0000000000..a1147e12e0 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-397bde15134e32921ad87037e9436dcb76982d7859e406daa12c97f671e6fd3b.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT height\n FROM block\n WHERE timestamp < ?\n ORDER BY timestamp DESC\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "name": "height", + "ordinal": 0, + "type_info": "Int64" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true + ] + }, + "hash": 
"397bde15134e32921ad87037e9436dcb76982d7859e406daa12c97f671e6fd3b" +} diff --git a/nym-validator-rewarder/.sqlx/query-3b75821c30e4e2a87fea04c92a91cb75bd6df809fb1f17620ab888d184291f0b.json b/nym-validator-rewarder/.sqlx/query-3b75821c30e4e2a87fea04c92a91cb75bd6df809fb1f17620ab888d184291f0b.json new file mode 100644 index 0000000000..b979eb62c4 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-3b75821c30e4e2a87fea04c92a91cb75bd6df809fb1f17620ab888d184291f0b.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO block_signing_rewarding_epoch (id, start_time, end_time, budget, disabled)\n VALUES (?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 5 + }, + "nullable": [] + }, + "hash": "3b75821c30e4e2a87fea04c92a91cb75bd6df809fb1f17620ab888d184291f0b" +} diff --git a/nym-validator-rewarder/.sqlx/query-3bdf81a9db6075f6f77224c30553f419a849d4ec45af40b052a4cbf09b44f3ec.json b/nym-validator-rewarder/.sqlx/query-3bdf81a9db6075f6f77224c30553f419a849d4ec45af40b052a4cbf09b44f3ec.json new file mode 100644 index 0000000000..0f91e82479 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-3bdf81a9db6075f6f77224c30553f419a849d4ec45af40b052a4cbf09b44f3ec.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT last_pruned_height FROM pruning\n ", + "describe": { + "columns": [ + { + "name": "last_pruned_height", + "ordinal": 0, + "type_info": "Int64" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false + ] + }, + "hash": "3bdf81a9db6075f6f77224c30553f419a849d4ec45af40b052a4cbf09b44f3ec" +} diff --git a/nym-validator-rewarder/.sqlx/query-422a516baacf8ba26ea2dca46fa57ed06dbebb3615b912fa59d9e22a097ded57.json b/nym-validator-rewarder/.sqlx/query-422a516baacf8ba26ea2dca46fa57ed06dbebb3615b912fa59d9e22a097ded57.json new file mode 100644 index 0000000000..fe59f444cf --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-422a516baacf8ba26ea2dca46fa57ed06dbebb3615b912fa59d9e22a097ded57.json @@ -0,0 +1,12 @@ 
+{ + "db_name": "SQLite", + "query": "\n INSERT INTO \"transaction\" (hash, height, \"index\", success, num_messages, memo, gas_wanted, gas_used, raw_log)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n ON CONFLICT (hash) DO UPDATE\n SET height = excluded.height,\n \"index\" = excluded.\"index\",\n success = excluded.success,\n num_messages = excluded.num_messages,\n memo = excluded.memo,\n gas_wanted = excluded.gas_wanted,\n gas_used = excluded.gas_used,\n raw_log = excluded.raw_log\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 9 + }, + "nullable": [] + }, + "hash": "422a516baacf8ba26ea2dca46fa57ed06dbebb3615b912fa59d9e22a097ded57" +} diff --git a/nym-validator-rewarder/.sqlx/query-6be3f8abfa7a2e05721e533e4128b10dd0f01a04f77634d09af260b97a155264.json b/nym-validator-rewarder/.sqlx/query-6be3f8abfa7a2e05721e533e4128b10dd0f01a04f77634d09af260b97a155264.json new file mode 100644 index 0000000000..edf75914cf --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-6be3f8abfa7a2e05721e533e4128b10dd0f01a04f77634d09af260b97a155264.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO block_signing_reward (\n rewarding_epoch_id,\n validator_consensus_address,\n operator_account,\n whitelisted,\n amount,\n voting_power,\n voting_power_share,\n signed_blocks,\n signed_blocks_percent\n ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 9 + }, + "nullable": [] + }, + "hash": "6be3f8abfa7a2e05721e533e4128b10dd0f01a04f77634d09af260b97a155264" +} diff --git a/nym-validator-rewarder/.sqlx/query-7c2aea05703247a865d5639bd84efa10eee3e1488f7bb990847374c09e8a5944.json b/nym-validator-rewarder/.sqlx/query-7c2aea05703247a865d5639bd84efa10eee3e1488f7bb990847374c09e8a5944.json new file mode 100644 index 0000000000..929cec2080 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-7c2aea05703247a865d5639bd84efa10eee3e1488f7bb990847374c09e8a5944.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": 
"DELETE FROM pre_commit WHERE height < ?", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "7c2aea05703247a865d5639bd84efa10eee3e1488f7bb990847374c09e8a5944" +} diff --git a/nym-validator-rewarder/.sqlx/query-84e200c64ff49be7241d43841d0e35e94e172738678c7299334c47a54caf5c50.json b/nym-validator-rewarder/.sqlx/query-84e200c64ff49be7241d43841d0e35e94e172738678c7299334c47a54caf5c50.json new file mode 100644 index 0000000000..efc1aefbbd --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-84e200c64ff49be7241d43841d0e35e94e172738678c7299334c47a54caf5c50.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO block (height, hash, num_txs, total_gas, proposer_address, timestamp)\n VALUES (?, ?, ?, ?, ?, ?)\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 6 + }, + "nullable": [] + }, + "hash": "84e200c64ff49be7241d43841d0e35e94e172738678c7299334c47a54caf5c50" +} diff --git a/nym-validator-rewarder/.sqlx/query-98b2ac25c05850c31990ce0b48fecb9fbe7b3636feb0d36cbad0610a0685cccc.json b/nym-validator-rewarder/.sqlx/query-98b2ac25c05850c31990ce0b48fecb9fbe7b3636feb0d36cbad0610a0685cccc.json new file mode 100644 index 0000000000..c994b23894 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-98b2ac25c05850c31990ce0b48fecb9fbe7b3636feb0d36cbad0610a0685cccc.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO message (transaction_hash, \"index\", type, height)\n VALUES (?, ?, ?, ?)\n ON CONFLICT (transaction_hash, \"index\") DO UPDATE\n SET height = excluded.height,\n type = excluded.type\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 4 + }, + "nullable": [] + }, + "hash": "98b2ac25c05850c31990ce0b48fecb9fbe7b3636feb0d36cbad0610a0685cccc" +} diff --git a/nym-validator-rewarder/.sqlx/query-a3db765a00f07d80656e58a543500a9e2b6064f2252c66882fd4a7dd17e187cc.json 
b/nym-validator-rewarder/.sqlx/query-a3db765a00f07d80656e58a543500a9e2b6064f2252c66882fd4a7dd17e187cc.json new file mode 100644 index 0000000000..af56095d43 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-a3db765a00f07d80656e58a543500a9e2b6064f2252c66882fd4a7dd17e187cc.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "DELETE FROM message WHERE height < ?", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "a3db765a00f07d80656e58a543500a9e2b6064f2252c66882fd4a7dd17e187cc" +} diff --git a/nym-validator-rewarder/.sqlx/query-b865ffb57fa5f31de74e0dce28a8d33ad532e06c270adc6f62c4fd6c40ecabcb.json b/nym-validator-rewarder/.sqlx/query-b865ffb57fa5f31de74e0dce28a8d33ad532e06c270adc6f62c4fd6c40ecabcb.json new file mode 100644 index 0000000000..69132402be --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-b865ffb57fa5f31de74e0dce28a8d33ad532e06c270adc6f62c4fd6c40ecabcb.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO ticketbook_issuance_epoch(\n expiration_date,\n total_budget,\n whitelist_size,\n budget_per_operator,\n disabled\n ) VALUES (?, ?, ?, ?, ?)\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 5 + }, + "nullable": [] + }, + "hash": "b865ffb57fa5f31de74e0dce28a8d33ad532e06c270adc6f62c4fd6c40ecabcb" +} diff --git a/nym-validator-rewarder/.sqlx/query-b9e16b5c6e11cfa2d3dfee8c0bf18384da48966d5a2b2de842e747ee1d7a5769.json b/nym-validator-rewarder/.sqlx/query-b9e16b5c6e11cfa2d3dfee8c0bf18384da48966d5a2b2de842e747ee1d7a5769.json new file mode 100644 index 0000000000..d17600cd48 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-b9e16b5c6e11cfa2d3dfee8c0bf18384da48966d5a2b2de842e747ee1d7a5769.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO banned_ticketbook_issuer(\n operator_account,\n api_endpoint,\n banned_on,\n associated_ticketbook_expiration_date,\n reason,\n evidence\n ) VALUES (?, ?, ?, ?, ?, ?)\n ", + "describe": { + 
"columns": [], + "parameters": { + "Right": 6 + }, + "nullable": [] + }, + "hash": "b9e16b5c6e11cfa2d3dfee8c0bf18384da48966d5a2b2de842e747ee1d7a5769" +} diff --git a/nym-validator-rewarder/.sqlx/query-be654926e94fb6a07ebb94dd526b310d02d77083207b6a42eb1f8e4dd80b00a8.json b/nym-validator-rewarder/.sqlx/query-be654926e94fb6a07ebb94dd526b310d02d77083207b6a42eb1f8e4dd80b00a8.json new file mode 100644 index 0000000000..ca3f00b385 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-be654926e94fb6a07ebb94dd526b310d02d77083207b6a42eb1f8e4dd80b00a8.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "UPDATE metadata SET last_processed_height = MAX(last_processed_height, ?)", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "be654926e94fb6a07ebb94dd526b310d02d77083207b6a42eb1f8e4dd80b00a8" +} diff --git a/nym-validator-rewarder/.sqlx/query-c88d07fecc3f33deaa6e93db1469ce71582635df47f52dcf3fd1df4e7be6b96d.json b/nym-validator-rewarder/.sqlx/query-c88d07fecc3f33deaa6e93db1469ce71582635df47f52dcf3fd1df4e7be6b96d.json new file mode 100644 index 0000000000..e588b0a25a --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-c88d07fecc3f33deaa6e93db1469ce71582635df47f52dcf3fd1df4e7be6b96d.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT last_processed_height FROM metadata\n ", + "describe": { + "columns": [ + { + "name": "last_processed_height", + "ordinal": 0, + "type_info": "Int64" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false + ] + }, + "hash": "c88d07fecc3f33deaa6e93db1469ce71582635df47f52dcf3fd1df4e7be6b96d" +} diff --git a/nym-validator-rewarder/.sqlx/query-ceb15dea9ac66e69cfc2fa8fbd57472d7a3c6080766ab0f02aba4c776545adad.json b/nym-validator-rewarder/.sqlx/query-ceb15dea9ac66e69cfc2fa8fbd57472d7a3c6080766ab0f02aba4c776545adad.json new file mode 100644 index 0000000000..38d47056a8 --- /dev/null +++ 
b/nym-validator-rewarder/.sqlx/query-ceb15dea9ac66e69cfc2fa8fbd57472d7a3c6080766ab0f02aba4c776545adad.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT operator_account\n FROM banned_ticketbook_issuer\n ", + "describe": { + "columns": [ + { + "name": "operator_account", + "ordinal": 0, + "type_info": "Text" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false + ] + }, + "hash": "ceb15dea9ac66e69cfc2fa8fbd57472d7a3c6080766ab0f02aba4c776545adad" +} diff --git a/nym-validator-rewarder/.sqlx/query-d67b6b3fc1099b3ca48eed945d9873717ba4b9d8f83bcb05f8a39094f0ff7c32.json b/nym-validator-rewarder/.sqlx/query-d67b6b3fc1099b3ca48eed945d9873717ba4b9d8f83bcb05f8a39094f0ff7c32.json new file mode 100644 index 0000000000..0413a18c5b --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-d67b6b3fc1099b3ca48eed945d9873717ba4b9d8f83bcb05f8a39094f0ff7c32.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT COUNT(*) as count FROM pre_commit\n WHERE \n validator_address == ?\n AND height >= ? 
\n AND height <= ?\n ", + "describe": { + "columns": [ + { + "name": "count", + "ordinal": 0, + "type_info": "Int" + } + ], + "parameters": { + "Right": 3 + }, + "nullable": [ + false + ] + }, + "hash": "d67b6b3fc1099b3ca48eed945d9873717ba4b9d8f83bcb05f8a39094f0ff7c32" +} diff --git a/nym-validator-rewarder/.sqlx/query-dcb00d96a003c9ad0b6213ac6974f40adb4d74539bfe36432ae62a2b5268f5fd.json b/nym-validator-rewarder/.sqlx/query-dcb00d96a003c9ad0b6213ac6974f40adb4d74539bfe36432ae62a2b5268f5fd.json new file mode 100644 index 0000000000..1c629ba0ff --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-dcb00d96a003c9ad0b6213ac6974f40adb4d74539bfe36432ae62a2b5268f5fd.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO validator (consensus_address, consensus_pubkey)\n VALUES (?, ?)\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 2 + }, + "nullable": [] + }, + "hash": "dcb00d96a003c9ad0b6213ac6974f40adb4d74539bfe36432ae62a2b5268f5fd" +} diff --git a/nym-validator-rewarder/.sqlx/query-e08a6456f6bd3cc5e8201d18dc3a989aecff01e835cb8fc04acee1b83480a970.json b/nym-validator-rewarder/.sqlx/query-e08a6456f6bd3cc5e8201d18dc3a989aecff01e835cb8fc04acee1b83480a970.json new file mode 100644 index 0000000000..a7a5ad47d1 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-e08a6456f6bd3cc5e8201d18dc3a989aecff01e835cb8fc04acee1b83480a970.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT height\n FROM block\n WHERE timestamp > ?\n ORDER BY timestamp\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "name": "height", + "ordinal": 0, + "type_info": "Int64" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + true + ] + }, + "hash": "e08a6456f6bd3cc5e8201d18dc3a989aecff01e835cb8fc04acee1b83480a970" +} diff --git a/nym-validator-rewarder/.sqlx/query-eba74b6531013fe5a83287bd50dab220797a39071754ad20bc14819fcced6c56.json 
b/nym-validator-rewarder/.sqlx/query-eba74b6531013fe5a83287bd50dab220797a39071754ad20bc14819fcced6c56.json new file mode 100644 index 0000000000..c3ce58bb1f --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-eba74b6531013fe5a83287bd50dab220797a39071754ad20bc14819fcced6c56.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n INSERT INTO pre_commit (validator_address, height, timestamp, voting_power, proposer_priority)\n VALUES (?, ?, ?, ?, ?)\n ON CONFLICT (validator_address, timestamp) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 5 + }, + "nullable": [] + }, + "hash": "eba74b6531013fe5a83287bd50dab220797a39071754ad20bc14819fcced6c56" +} diff --git a/nym-validator-rewarder/.sqlx/query-fa2ea62ed8ccb08d0ef70bc212cbb5ca4bbbb50e7a7fcaacc5361dde3157247a.json b/nym-validator-rewarder/.sqlx/query-fa2ea62ed8ccb08d0ef70bc212cbb5ca4bbbb50e7a7fcaacc5361dde3157247a.json new file mode 100644 index 0000000000..4bbca10169 --- /dev/null +++ b/nym-validator-rewarder/.sqlx/query-fa2ea62ed8ccb08d0ef70bc212cbb5ca4bbbb50e7a7fcaacc5361dde3157247a.json @@ -0,0 +1,20 @@ +{ + "db_name": "SQLite", + "query": "\n SELECT expiration_date as \"expiration_date: Date\"\n FROM ticketbook_issuance_epoch\n ORDER BY expiration_date DESC\n LIMIT 1\n ", + "describe": { + "columns": [ + { + "name": "expiration_date: Date", + "ordinal": 0, + "type_info": "Date" + } + ], + "parameters": { + "Right": 0 + }, + "nullable": [ + false + ] + }, + "hash": "fa2ea62ed8ccb08d0ef70bc212cbb5ca4bbbb50e7a7fcaacc5361dde3157247a" +} diff --git a/nyx-chain-watcher/src/db/models.rs b/nyx-chain-watcher/src/db/models.rs index 9a433ee8ec..ce99cedb89 100644 --- a/nyx-chain-watcher/src/db/models.rs +++ b/nyx-chain-watcher/src/db/models.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use utoipa::ToSchema; -#[derive(Clone, Deserialize, Debug)] +#[derive(Clone, Deserialize, Debug, ToSchema)] pub(crate) struct CurrencyPrices { pub(crate) chf: f32, pub(crate) usd: f32, diff --git 
a/nyx-chain-watcher/src/models.rs b/nyx-chain-watcher/src/models.rs index 93a9f9e592..ce1c0886f4 100644 --- a/nyx-chain-watcher/src/models.rs +++ b/nyx-chain-watcher/src/models.rs @@ -9,7 +9,18 @@ pub struct WebhookPayload { pub message_index: u64, pub sender_address: String, pub receiver_address: String, + #[schema(value_type = openapi_schema::Coin)] pub funds: CosmWasmCoin, pub height: u128, pub memo: Option<String>, } + +pub mod openapi_schema { + use super::*; + + #[derive(ToSchema)] + pub struct Coin { + pub denom: String, + pub amount: String, + } +}