From 1101eed4605f691f0cf73ab9a14617a851678501 Mon Sep 17 00:00:00 2001 From: Marcus Pousette Date: Tue, 24 Dec 2024 20:56:32 +0100 Subject: [PATCH] wip --- .github/workflows/ci.yml | 8 +- .github/workflows/release.yml | 6 + .prettierignore | 4 +- package.json | 4 +- .../clients/peerbit-proxy/proxy/src/client.ts | 6 + .../clients/peerbit-proxy/proxy/src/host.ts | 6 +- .../window/e2e/browser/child/src/App.tsx | 27 +- .../window/e2e/browser/child/src/db.ts | 2 +- .../window/e2e/browser/playwright.config.ts | 2 + .../clients/peerbit-server/node/package.json | 2 +- .../peerbit-server/node/test/api.spec.ts | 3 +- .../peerbit-server/node/test/client.spec.ts | 1 - .../peerbit-server/test-lib/package.json | 2 +- packages/clients/peerbit/package.json | 10 +- packages/clients/peerbit/src/libp2p.ts | 5 +- packages/clients/peerbit/src/peer.ts | 64 +- .../test/{dial.spec.ts => connect.spec.ts} | 35 +- packages/log/benchmark/append.ts | 36 +- packages/log/package.json | 2 +- packages/log/src/entry-index.ts | 73 +- packages/log/src/entry-v0.ts | 5 +- packages/log/src/log.ts | 50 +- packages/log/test/append.spec.ts | 18 + packages/log/test/from.spec.ts | 6 +- packages/log/test/join.spec.ts | 24 + packages/log/test/log.spec.ts | 2 +- .../test/index.spec.ts | 2 +- .../data/document/document/src/domain.ts | 5 +- .../data/document/document/src/program.ts | 11 +- .../data/document/document/src/search.ts | 15 +- .../data/document/document/test/data.ts | 4 +- .../document/document/test/domain.spec.ts | 1 + .../data/document/document/test/index.spec.ts | 63 +- .../data/document/document/test/utils.ts | 24 + .../data/shared-log/benchmark/get-samples.ts | 346 +- .../data/shared-log/benchmark/index.ts | 66 +- .../data/shared-log/benchmark/memory/child.ts | 10 +- .../data/shared-log/benchmark/partial-sync.ts | 147 + .../shared-log/benchmark/replication-prune.ts | 10 +- .../data/shared-log/benchmark/replication.ts | 6 +- .../data/shared-log/benchmark/to-rebalance.ts | 163 + .../data/shared-log/benchmark/utils.ts | 78 + .../programs/data/shared-log/package.json | 3 +- .../programs/data/shared-log/src/debounce.ts | 24 +- .../data/shared-log/src/exchange-heads.ts | 24 +- .../programs/data/shared-log/src/index.ts | 2534 ++++--- .../programs/data/shared-log/src/integers.ts | 102 + packages/programs/data/shared-log/src/pid.ts | 44 +- .../programs/data/shared-log/src/ranges.ts | 1599 +++-- .../shared-log/src/replication-domain-hash.ts | 61 +- .../shared-log/src/replication-domain-time.ts | 18 +- .../data/shared-log/src/replication-domain.ts | 52 +- .../data/shared-log/src/replication.ts | 19 +- packages/programs/data/shared-log/src/role.ts | 10 +- .../data/shared-log/src/sync/index.ts | 49 + .../data/shared-log/src/sync/rateless-iblt.ts | 664 ++ .../data/shared-log/src/sync/simple.ts | 412 ++ .../programs/data/shared-log/src/utils.ts | 14 +- .../data/shared-log/test/append.spec.ts | 4 +- .../data/shared-log/test/domain-time.spec.ts | 11 +- .../data/shared-log/test/encryption.spec.ts | 2 +- .../data/shared-log/test/events.spec.ts | 104 + .../data/shared-log/test/join.spec.ts | 68 +- .../data/shared-log/test/leader.spec.ts | 228 +- .../data/shared-log/test/lifecycle.spec.ts | 244 + .../data/shared-log/test/load.spec.ts | 27 +- .../shared-log/test/migration-8-9.spec.ts | 4 +- .../data/shared-log/test/network.spec.ts | 6 +- .../data/shared-log/test/observer.spec.ts | 8 +- .../data/shared-log/test/open-close.spec.ts | 153 - .../data/shared-log/test/ranges.spec.ts | 2953 +++++--- .../shared-log/test/rateless-iblt.spec.ts | 560 ++ 
.../data/shared-log/test/replicate.spec.ts | 99 +- .../data/shared-log/test/replication.spec.ts | 5982 ++++++++++------- .../data/shared-log/test/sharding.spec.ts | 2481 ++++--- .../programs/data/shared-log/test/utils.ts | 275 +- .../data/shared-log/test/utils/access.ts | 4 +- .../test/utils/stores/event-store.ts | 46 +- .../programs/data/string/src/string-store.ts | 4 +- packages/programs/program/src/client.ts | 9 +- packages/programs/program/src/handler.ts | 4 + packages/programs/program/src/program.ts | 10 +- .../programs/program/test/handler.spec.ts | 40 +- packages/programs/program/test/utils.ts | 1 + packages/transport/blocks/package.json | 2 +- .../transport/libp2p-test-utils/package.json | 12 +- packages/transport/pubsub/package.json | 2 +- .../e2e/browser/browser-node/package.json | 2 +- packages/transport/stream/package.json | 2 +- packages/transport/stream/src/index.ts | 18 +- packages/transport/stream/src/routes.ts | 37 +- packages/transport/stream/test/routes.spec.ts | 14 +- packages/transport/stream/test/stream.spec.ts | 4 - .../utils/indexer/interface/src/errors.ts | 5 + packages/utils/indexer/interface/src/id.ts | 2 +- .../indexer/interface/src/index-engine.ts | 1 - packages/utils/indexer/interface/src/index.ts | 1 + packages/utils/indexer/interface/src/query.ts | 8 +- packages/utils/indexer/simple/src/index.ts | 117 +- packages/utils/indexer/sqlite3/package.json | 6 +- packages/utils/indexer/sqlite3/src/engine.ts | 233 +- .../indexer/sqlite3/src/query-planner.ts | 354 + packages/utils/indexer/sqlite3/src/schema.ts | 658 +- .../sqlite3/src/sqlite3-messages.worker.ts | 5 + .../indexer/sqlite3/src/sqlite3.browser.ts | 8 + packages/utils/indexer/sqlite3/src/sqlite3.ts | 37 +- .../utils/indexer/sqlite3/src/sqlite3.wasm.ts | 12 +- .../indexer/sqlite3/src/sqlite3.worker.ts | 7 +- packages/utils/indexer/sqlite3/src/types.ts | 1 + .../utils/indexer/sqlite3/test/array.spec.ts | 322 + .../utils/indexer/sqlite3/test/basic.spec.ts | 264 + .../sqlite3/test/query-planner.spec.ts | 392 ++ .../utils/indexer/sqlite3/test/shape.spec.ts | 476 ++ .../utils/indexer/sqlite3/test/sort.spec.ts | 73 +- .../utils/indexer/sqlite3/test/u64.spec.ts | 14 + .../utils/indexer/tests/src/benchmarks.ts | 730 +- packages/utils/indexer/tests/src/tests.ts | 745 +- packages/utils/rateless-iblt/.gitignore | 2 + packages/utils/rateless-iblt/Cargo.lock | 679 ++ packages/utils/rateless-iblt/Cargo.toml | 31 + packages/utils/rateless-iblt/LICENSE | 21 + packages/utils/rateless-iblt/README.md | 101 + packages/utils/rateless-iblt/TODO | 5 + .../rateless-iblt/benches/riblt_bench.rs | 64 + packages/utils/rateless-iblt/package.json | 58 + packages/utils/rateless-iblt/src/encoding.rs | 397 ++ packages/utils/rateless-iblt/src/lib.rs | 7 + packages/utils/rateless-iblt/src/sketch.rs | 101 + packages/utils/rateless-iblt/src/testing.rs | 50 + packages/utils/rateless-iblt/src/tests.rs | 2343 +++++++ packages/utils/rateless-iblt/src/wasm.rs | 173 + .../utils/rateless-iblt/test/index.spec.ts | 65 + packages/utils/rateless-iblt/test/index.ts | 7 + yarn.lock | 2853 ++++---- 134 files changed, 23076 insertions(+), 8755 deletions(-) rename packages/clients/peerbit/test/{dial.spec.ts => connect.spec.ts} (63%) create mode 100644 packages/programs/data/document/document/test/utils.ts create mode 100644 packages/programs/data/shared-log/benchmark/partial-sync.ts create mode 100644 packages/programs/data/shared-log/benchmark/to-rebalance.ts create mode 100644 packages/programs/data/shared-log/benchmark/utils.ts create mode 100644 
packages/programs/data/shared-log/src/integers.ts create mode 100644 packages/programs/data/shared-log/src/sync/index.ts create mode 100644 packages/programs/data/shared-log/src/sync/rateless-iblt.ts create mode 100644 packages/programs/data/shared-log/src/sync/simple.ts create mode 100644 packages/programs/data/shared-log/test/lifecycle.spec.ts delete mode 100644 packages/programs/data/shared-log/test/open-close.spec.ts create mode 100644 packages/programs/data/shared-log/test/rateless-iblt.spec.ts create mode 100644 packages/utils/indexer/interface/src/errors.ts create mode 100644 packages/utils/indexer/sqlite3/src/query-planner.ts create mode 100644 packages/utils/indexer/sqlite3/test/array.spec.ts create mode 100644 packages/utils/indexer/sqlite3/test/basic.spec.ts create mode 100644 packages/utils/indexer/sqlite3/test/query-planner.spec.ts create mode 100644 packages/utils/indexer/sqlite3/test/shape.spec.ts create mode 100644 packages/utils/rateless-iblt/.gitignore create mode 100644 packages/utils/rateless-iblt/Cargo.lock create mode 100644 packages/utils/rateless-iblt/Cargo.toml create mode 100644 packages/utils/rateless-iblt/LICENSE create mode 100644 packages/utils/rateless-iblt/README.md create mode 100644 packages/utils/rateless-iblt/TODO create mode 100644 packages/utils/rateless-iblt/benches/riblt_bench.rs create mode 100644 packages/utils/rateless-iblt/package.json create mode 100644 packages/utils/rateless-iblt/src/encoding.rs create mode 100644 packages/utils/rateless-iblt/src/lib.rs create mode 100644 packages/utils/rateless-iblt/src/sketch.rs create mode 100644 packages/utils/rateless-iblt/src/testing.rs create mode 100644 packages/utils/rateless-iblt/src/tests.rs create mode 100644 packages/utils/rateless-iblt/src/wasm.rs create mode 100644 packages/utils/rateless-iblt/test/index.spec.ts create mode 100644 packages/utils/rateless-iblt/test/index.ts diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c681797f4..0aeea1da3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,7 +29,9 @@ jobs: cache: yarn - name: Install deps run: | - yarn && npx playwright install-deps + curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + yarn + npx playwright install-deps - name: Build run: | yarn build @@ -59,7 +61,9 @@ jobs: cache: yarn - name: Install deps run: | - yarn && npx playwright install-deps + curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + yarn + npx playwright install-deps - name: Build run: | yarn build diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a255c271a..39b4a3a2a 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -36,6 +36,12 @@ jobs: with: node-version: lts/* registry-url: 'https://registry.npmjs.org' + + - name: Install deps + run: | + curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh + yarn + - name: Build Packages if: ${{ steps.release.outputs.releases_created }} run: | diff --git a/.prettierignore b/.prettierignore index c692685ea..5704f13a2 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1 +1,3 @@ -**/public/peerbit/** \ No newline at end of file +**/public/peerbit/** +**/target/** +**/pkg/** diff --git a/package.json b/package.json index edfe0c638..7a07a2fb7 100644 --- a/package.json +++ b/package.json @@ -43,7 +43,8 @@ "packages/utils/cache", "packages/utils/logger", "packages/utils/keychain", - "packages/utils/indexer/*" + "packages/utils/indexer/*", + "packages/utils/rateless-iblt" ], 
"engines": { "node": ">=18" @@ -87,6 +88,7 @@ "aegir": "github:marcus-pousette/aegir#multiple-assets", "eslint-config-peerbit": "https://github.com/dao-xyz/eslint-config-peerbit", "benchmark": "^2.1.4", + "tinybench": "^3", "chai-as-promised": "^7.1.1", "dotenv": "^16.4.5", "eslint-plugin-n": "^17.10.2", diff --git a/packages/clients/peerbit-proxy/proxy/src/client.ts b/packages/clients/peerbit-proxy/proxy/src/client.ts index 9870a5023..d251728cc 100644 --- a/packages/clients/peerbit-proxy/proxy/src/client.ts +++ b/packages/clients/peerbit-proxy/proxy/src/client.ts @@ -434,6 +434,12 @@ export class PeerbitProxyClient implements ProgramClient { return response.value; } + async hangUp( + _address: PeerId | PublicSignKey | string | Multiaddr, + ): Promise { + throw new Error("Not implemented"); + } + get services(): { pubsub: PubSub; blocks: Blocks; keychain: Keychain } { return this._services; } diff --git a/packages/clients/peerbit-proxy/proxy/src/host.ts b/packages/clients/peerbit-proxy/proxy/src/host.ts index 27f0339bc..9af5d2047 100644 --- a/packages/clients/peerbit-proxy/proxy/src/host.ts +++ b/packages/clients/peerbit-proxy/proxy/src/host.ts @@ -3,7 +3,7 @@ import { type PeerId } from "@libp2p/interface"; import { type Multiaddr } from "@multiformats/multiaddr"; import { type AnyStore } from "@peerbit/any-store-interface"; import { type Blocks } from "@peerbit/blocks-interface"; -import { Ed25519Keypair } from "@peerbit/crypto"; +import { Ed25519Keypair, type PublicSignKey } from "@peerbit/crypto"; import type { Indices } from "@peerbit/indexer-interface"; import { type Keychain } from "@peerbit/keychain"; import { type ProgramClient } from "@peerbit/program"; @@ -82,6 +82,10 @@ export class PeerbitProxyHost implements ProgramClient { return this.hostClient.dial(address); } + hangUp(address: PeerId | PublicSignKey | string | Multiaddr): Promise { + return this.hostClient.hangUp(address); + } + get services(): { pubsub: PubSub; blocks: Blocks; keychain: Keychain } { return this.hostClient.services; } diff --git a/packages/clients/peerbit-proxy/window/e2e/browser/child/src/App.tsx b/packages/clients/peerbit-proxy/window/e2e/browser/child/src/App.tsx index 6f6ee5c33..054a91fdf 100644 --- a/packages/clients/peerbit-proxy/window/e2e/browser/child/src/App.tsx +++ b/packages/clients/peerbit-proxy/window/e2e/browser/child/src/App.tsx @@ -8,7 +8,7 @@ const client = await createClient("*"); export const App = () => { const mounted = useRef(false); - const dbRef = useRef(); + const dbRef = useRef>(); const [_, forceUpdate] = useReducer((x) => x + 1, 0); useEffect(() => { const queryParameters = new URLSearchParams(window.location.search); @@ -18,19 +18,22 @@ export const App = () => { } mounted.current = true; client - .open>(new SharedLog({ id: new Uint8Array(32) }), { - args: { - onChange: (change: Change) => { - forceUpdate(); - setTimeout(() => { - dbRef.current?.log.load().then(() => { - forceUpdate(); - console.log(client.messages.id, dbRef.current?.log.length); - }); - }, 1000); + .open>( + new SharedLog({ id: new Uint8Array(32) }), + { + args: { + onChange: (change: Change) => { + forceUpdate(); + setTimeout(() => { + dbRef.current?.log.load().then(() => { + forceUpdate(); + console.log(client.messages.id, dbRef.current?.log.length); + }); + }, 1000); + }, }, }, - }) + ) .then((x: any) => { dbRef.current = x; if (queryParameters.get("read") !== "true") { diff --git a/packages/clients/peerbit-proxy/window/e2e/browser/child/src/db.ts 
b/packages/clients/peerbit-proxy/window/e2e/browser/child/src/db.ts index 6f87ac0c6..a648b01d4 100644 --- a/packages/clients/peerbit-proxy/window/e2e/browser/child/src/db.ts +++ b/packages/clients/peerbit-proxy/window/e2e/browser/child/src/db.ts @@ -5,7 +5,7 @@ import { SharedLog } from "@peerbit/shared-log"; @variant("test-log") export class TestLog extends Program { @field({ type: SharedLog }) - log: SharedLog; + log: SharedLog; constructor() { super(); diff --git a/packages/clients/peerbit-proxy/window/e2e/browser/playwright.config.ts b/packages/clients/peerbit-proxy/window/e2e/browser/playwright.config.ts index 2363f695a..25452105a 100644 --- a/packages/clients/peerbit-proxy/window/e2e/browser/playwright.config.ts +++ b/packages/clients/peerbit-proxy/window/e2e/browser/playwright.config.ts @@ -74,10 +74,12 @@ export default defineConfig({ { command: "yarn --cwd ./child start", url: "http://localhost:5201", + reuseExistingServer: true, }, { command: "yarn --cwd ./parent start", url: "http://localhost:5202", + reuseExistingServer: true, }, ], }); diff --git a/packages/clients/peerbit-server/node/package.json b/packages/clients/peerbit-server/node/package.json index 6b0c42cde..ed7960b95 100644 --- a/packages/clients/peerbit-server/node/package.json +++ b/packages/clients/peerbit-server/node/package.json @@ -62,7 +62,7 @@ "build-lib": "tsc -p tsconfig.json", "build-ui": "cd ../frontend && yarn build && cd ../node", "postbuild": "cp src/nginx-template.conf dist/src/ && cp -r ../frontend/dist/. dist/ui", - "test": "aegir test", + "test": "aegir test --t node", "lint": "aegir lint" }, "devDependencies": { diff --git a/packages/clients/peerbit-server/node/test/api.spec.ts b/packages/clients/peerbit-server/node/test/api.spec.ts index 2c4f83a24..074e0b9b2 100644 --- a/packages/clients/peerbit-server/node/test/api.spec.ts +++ b/packages/clients/peerbit-server/node/test/api.spec.ts @@ -75,7 +75,9 @@ describe("server", () => { let node: Peerbit; afterEach(async () => { + // @ts-ignore await node?.stop(); + // @ts-ignore server?.close(); }); it("bootstrap on start", async () => { @@ -92,7 +94,6 @@ describe("server", () => { describe("api", () => { let session: TestSession, peer: ProgramClient, server: http.Server; let db: PermissionedString; - before(async () => {}); beforeEach(async () => { diff --git a/packages/clients/peerbit-server/node/test/client.spec.ts b/packages/clients/peerbit-server/node/test/client.spec.ts index c17d3d535..35ff1ee5c 100644 --- a/packages/clients/peerbit-server/node/test/client.spec.ts +++ b/packages/clients/peerbit-server/node/test/client.spec.ts @@ -20,6 +20,5 @@ describe("client", () => { ).to.be.instanceOf(AnyBlockStore); expect((client.services.blocks as any)["canRelayMessage"]).equal(true); expect((client.services.pubsub as any)["canRelayMessage"]).equal(true); - expect((client.services as any)["relay"]).to.exist; }); }); diff --git a/packages/clients/peerbit-server/test-lib/package.json b/packages/clients/peerbit-server/test-lib/package.json index 90e7673ad..947a27c30 100644 --- a/packages/clients/peerbit-server/test-lib/package.json +++ b/packages/clients/peerbit-server/test-lib/package.json @@ -65,7 +65,7 @@ "devDependencies": { "tty-table": "^4.2.1", "@peerbit/test-utils": "*", - "libp2p": "^2.2.1" + "libp2p": "^2.3.1" }, "dependencies": { "@peerbit/string": "*", diff --git a/packages/clients/peerbit/package.json b/packages/clients/peerbit/package.json index 85bf647ca..86c06b44b 100644 --- a/packages/clients/peerbit/package.json +++ 
b/packages/clients/peerbit/package.json @@ -73,11 +73,11 @@ "@peerbit/indexer-sqlite3": "^1.1.2", "datastore-level": "^11.0.1", "@chainsafe/libp2p-yamux": "^7.0.1", - "@libp2p/webrtc": "^5.0.16", - "@libp2p/websockets": "^9.0.11", - "@libp2p/identify": "^3.0.10", - "@libp2p/circuit-relay-v2": "^3.1.0", - "@libp2p/tcp": "^10.0.11", + "@libp2p/webrtc": "^5.0.19", + "@libp2p/websockets": "^9.0.13", + "@libp2p/identify": "^3.0.12", + "@libp2p/circuit-relay-v2": "^3.1.3", + "@libp2p/tcp": "^10.0.13", "level": "^8.0.1", "memory-level": "^1.0.0", "path-browserify": "^1.0.1" diff --git a/packages/clients/peerbit/src/libp2p.ts b/packages/clients/peerbit/src/libp2p.ts index c57a740cc..dc13ce174 100644 --- a/packages/clients/peerbit/src/libp2p.ts +++ b/packages/clients/peerbit/src/libp2p.ts @@ -45,13 +45,16 @@ export const createLibp2pExtended = ( ): Promise => { let extraServices: any = {}; - if (!opts.services?.["relay"]) { + if (opts.services?.["relay"] == null) { + delete opts.services?.["relay"]; + } else if (!opts.services?.["relay"]) { const relayComponent = relay(); if (relayComponent) { // will be null in browser extraServices["relay"] = relayComponent; } } + if (!opts.services?.["identify"]) { extraServices["identify"] = identify(); } diff --git a/packages/clients/peerbit/src/peer.ts b/packages/clients/peerbit/src/peer.ts index b8c92ab92..79f872ad9 100644 --- a/packages/clients/peerbit/src/peer.ts +++ b/packages/clients/peerbit/src/peer.ts @@ -1,4 +1,5 @@ import { privateKeyFromRaw } from "@libp2p/crypto/keys"; +import type { PeerId } from "@libp2p/interface"; import "@libp2p/peer-id"; import { type Multiaddr, @@ -10,6 +11,7 @@ import { DirectBlock } from "@peerbit/blocks"; import { Ed25519Keypair, Ed25519PublicKey, + PublicSignKey, Secp256k1Keypair, getKeypairFromPrivateKey, } from "@peerbit/crypto"; @@ -26,7 +28,6 @@ import { ProgramHandler, } from "@peerbit/program"; import { DirectSub } from "@peerbit/pubsub"; -import { waitFor } from "@peerbit/time"; import { LevelDatastore } from "datastore-level"; import type { Libp2p } from "libp2p"; import sodium from "libsodium-wrappers"; @@ -119,7 +120,7 @@ export class Peerbit implements ProgramClient { let libp2pExtended: Libp2pExtended | undefined = (options as Libp2pOptions) .libp2p as Libp2pExtended; - const asRelay = (options as SimpleLibp2pOptions).relay; + const asRelay = (options as SimpleLibp2pOptions).relay ?? true; const directory = options.directory; const hasDir = directory != null; @@ -176,19 +177,25 @@ export class Peerbit implements ProgramClient { : undefined; } + const services: any = { + keychain: (c: any) => keychain, + blocks: (c: any) => + new DirectBlock(c, { + canRelayMessage: asRelay, + directory: blocksDirectory, + }), + pubsub: (c: any) => new DirectSub(c, { canRelayMessage: asRelay }), + ...extendedOptions?.services, + }; + + if (!asRelay) { + services.relay = null; + } + libp2pExtended = await createLibp2pExtended({ ...extendedOptions, privateKey, - services: { - keychain: (c: any) => keychain, - blocks: (c: any) => - new DirectBlock(c, { - canRelayMessage: asRelay, - directory: blocksDirectory, - }), - pubsub: (c: any) => new DirectSub(c, { canRelayMessage: asRelay }), - ...extendedOptions?.services, - } as any, // TODO types are funky + services, datastore, }); } @@ -280,16 +287,37 @@ export class Peerbit implements ProgramClient { ? 
address : address.getMultiaddrs(); const connection = await this.libp2p.dial(maddress); + const publicKey = Ed25519PublicKey.fromPeerId(connection.remotePeer); // TODO, do this as a promise instead using the onPeerConnected events in pubsub and blocks - return ( - (await waitFor( - () => - this.libp2p.services.pubsub.peers.has(publicKey.hashcode()) && - this.libp2p.services.blocks.peers.has(publicKey.hashcode()), - )) || false + try { + await this.libp2p.services.pubsub.waitFor(publicKey.hashcode(), { + neighbour: true, + }); + } catch (error) { + throw new Error(`Failed to dial peer. Not available on Pubsub`); + } + + try { + await this.libp2p.services.blocks.waitFor(publicKey.hashcode(), { + neighbour: true, + }); + } catch (error) { + throw new Error(`Failed to dial peer. Not available on Blocks`); + } + return true; + } + + async hangUp(address: PeerId | PublicSignKey | string | Multiaddr) { + await this.libp2p.hangUp( + address instanceof PublicSignKey + ? address.toPeerId() + : typeof address === "string" + ? multiaddr(address) + : address, ); + // TODO wait for pubsub and blocks to disconnect? } async start() { diff --git a/packages/clients/peerbit/test/dial.spec.ts b/packages/clients/peerbit/test/connect.spec.ts similarity index 63% rename from packages/clients/peerbit/test/dial.spec.ts rename to packages/clients/peerbit/test/connect.spec.ts index 13578929d..45ac5329c 100644 --- a/packages/clients/peerbit/test/dial.spec.ts +++ b/packages/clients/peerbit/test/connect.spec.ts @@ -1,5 +1,5 @@ import { SeekDelivery } from "@peerbit/stream-interface"; -import { waitFor } from "@peerbit/time"; +import { waitFor, waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import { Peerbit } from "../src/index.js"; @@ -52,3 +52,36 @@ describe(`dial`, function () { ); }); }); + +describe(`hangup`, function () { + let clients: [Peerbit, Peerbit]; + + beforeEach(async () => { + clients = [ + await Peerbit.create({ + relay: false, // https://github.com/libp2p/js-libp2p/issues/2794 + }), + await Peerbit.create({ + relay: false, // https://github.com/libp2p/js-libp2p/issues/2794 + }), + ]; + }); + + afterEach(async () => { + await Promise.all(clients.map((c) => c.stop())); + }); + + it("pubsub subscribers clear up", async () => { + let topic = "topic"; + await clients[0].services.pubsub.subscribe(topic); + await clients[1].services.pubsub.subscribe(topic); + await clients[0].dial(clients[1].getMultiaddrs()[0]); + await waitForResolved(() => + expect(clients[0].services.pubsub.peers.size).to.eq(1), + ); + await clients[0].hangUp(clients[1].peerId); + await waitForResolved(() => + expect(clients[0].services.pubsub.peers.size).to.eq(0), + ); + }); +}); diff --git a/packages/log/benchmark/append.ts b/packages/log/benchmark/append.ts index b3e43a528..850538454 100644 --- a/packages/log/benchmark/append.ts +++ b/packages/log/benchmark/append.ts @@ -1,7 +1,7 @@ import { AnyBlockStore } from "@peerbit/blocks"; import { Ed25519Keypair } from "@peerbit/crypto"; import { create } from "@peerbit/indexer-sqlite3"; -import B from "benchmark"; +import * as B from "tinybench"; import { Log } from "../src/log.js"; // Run with "node --loader ts-node/esm ./benchmark/append.ts" @@ -10,34 +10,26 @@ let log: Log; let store: AnyBlockStore; const key = await Ed25519Keypair.create(); +const close = () => { + return log?.close(); +}; const reset = async () => { + await close(); log = new Log(); store = new AnyBlockStore(); await log.open(store, key, { indexer: await create() }); }; await reset(); -const suite
= new B.Suite({ delay: 100 }); suite - .add("chain", { - fn: async (deferred: any) => { - await log.append(new Uint8Array([1, 2, 3])); - deferred.resolve(); - }, - defer: true, - }) - .add("no-next", { - fn: async (deferred: any) => { - await log.append(new Uint8Array([1, 2, 3]), { meta: { next: [] } }); - deferred.resolve(); - }, - defer: true, +const suite = new B.Bench({ warmupIterations: 1000, setup: reset }); +await suite + .add("chain", async () => { + await log.append(new Uint8Array([1, 2, 3])); }) - .on("cycle", async (event: any) => { - console.log(String(event.target)); - await reset(); - }) - .on("error", (err: any) => { - throw err; + .add("no-next", async () => { + await log.append(new Uint8Array([1, 2, 3]), { meta: { next: [] } }); }) .run(); + +await close(); +console.table(suite.table()); diff --git a/packages/log/package.json b/packages/log/package.json index ebf57ffcf..a8acc8e83 100644 --- a/packages/log/package.json +++ b/packages/log/package.json @@ -66,7 +66,7 @@ "@peerbit/crypto": "2.3.2", "@peerbit/logger": "1.0.3", "@peerbit/time": "2.0.7", - "libp2p": "^2.2.1", + "libp2p": "^2.3.1", "p-queue": "^8.0.1", "path-browserify": "^1.0.1", "uuid": "^10.0.0", diff --git a/packages/log/src/entry-index.ts b/packages/log/src/entry-index.ts index 9b7c6e2e1..009fb0cb0 100644 --- a/packages/log/src/entry-index.ts +++ b/packages/log/src/entry-index.ts @@ -5,6 +5,7 @@ import type { PublicSignKey } from "@peerbit/crypto"; import { BoolQuery, type Index, + type IndexedResults, Not, Or, type Query, @@ -29,7 +30,6 @@ export type ResultsIterator = { }; const ENTRY_CACHE_MAX_SIZE = 10; // TODO as param for log - type ResolveFullyOptions = | true | { @@ -174,6 +174,19 @@ export class EntryIndex { amount: number, ): Promise[]> => { const results = await iterator.next(amount); + return coerce(results); + }; + + const all = async (): Promise[]> => { + const results = await iterator.all(); + return coerce(results); + }; + + const coerce = async ( + results: IndexedResults<{ + [x: string]: any; + }>, + ): Promise[]> => { if (resolveInFull) { const maybeResolved = await Promise.all( results.map((x) => this.resolve(x.value.hash, resolveInFullOptions)), @@ -194,16 +207,7 @@ export class EntryIndex { close: iterator.close, done: iterator.done, next, - all: async () => { - const results: ReturnTypeFromResolveOptions[] = []; - while (!iterator.done()) { - for (const element of await next(100)) { - results.push(element); - } - } - await iterator.close(); - return results; - }, + all, }; } @@ -264,6 +268,10 @@ export class EntryIndex { } async has(k: string) { + let mem = this.cache.get(k); + if (mem) { + return true; + } const result = await this.properties.index.get(toId(k), { shape: { hash: true }, }); @@ -301,12 +309,13 @@ export class EntryIndex { return existingPromise; } else { const fn = async () => { - this.cache.add(entry.hash, entry); - if (properties.unique === true || !(await this.has(entry.hash))) { this._length++; } + // add to the cache only after the .has check above, since .has reads from the cache + this.cache.add(entry.hash, entry); + await this.properties.index.put(entry.toShallow(properties.isHead)); // check if gids have been shadowed, by querying all nexts that have a different gid @@ -365,15 +374,15 @@ export class EntryIndex { } } - async delete(k: string, shallow: ShallowEntry | undefined = undefined) { + async delete(k: string, from?: Entry | ShallowEntry) { this.cache.del(k); - if (shallow && shallow.hash !== k) { + if (from && from.hash !== k) { throw new Error("Shallow hash doesn't match the key"); }
- shallow = shallow || (await this.getShallow(k))?.value; - if (!shallow) { + from = from || (await this.getShallow(k))?.value; + if (!from) { return; // already deleted } @@ -384,8 +393,8 @@ this._length -= deleted.length; // mark all next entries as new heads - await this.privateUpdateNextHeadProperty(shallow, true); - return shallow; + await this.privateUpdateNextHeadProperty(from, true); + return from; } } @@ -456,21 +465,21 @@ options?: ResolveFullyOptions, ): Promise<Entry<T> | undefined> { let coercedOptions = typeof options === "object" ? options : undefined; - if (await this.has(k)) { - let mem = this.cache.get(k); - if (mem === undefined) { - mem = await this.resolveFromStore(k, coercedOptions); - if (mem) { - this.properties.init(mem); - mem.hash = k; - } else if (coercedOptions?.ignoreMissing !== true) { - throw new Error("Failed to load entry from head with hash: " + k); - } - this.cache.add(k, mem ?? undefined); + /* if (await this.has(k)) { */ + let mem = this.cache.get(k); + if (mem === undefined) { + mem = await this.resolveFromStore(k, coercedOptions); + if (mem) { + this.properties.init(mem); + mem.hash = k; + } else if (coercedOptions?.ignoreMissing !== true) { + throw new Error("Failed to load entry from head with hash: " + k); } - return mem ? mem : undefined; + this.cache.add(k, mem ?? undefined); } - return undefined; + return mem ? mem : undefined; + /* } + return undefined; */ } private async resolveFromStore( diff --git a/packages/log/src/entry-v0.ts b/packages/log/src/entry-v0.ts index 67d9b50a3..17f258882 100644 --- a/packages/log/src/entry-v0.ts +++ b/packages/log/src/entry-v0.ts @@ -19,6 +19,7 @@ import { X25519PublicKey, randomBytes, sha256Base64, + toBase64, } from "@peerbit/crypto"; import { verify } from "@peerbit/crypto"; import { type Keychain } from "@peerbit/keychain"; @@ -387,8 +388,8 @@ export class EntryV0 await store.rm(this.hash); } - static createGid(seed?: Uint8Array): Promise<string> { - return sha256Base64(seed || randomBytes(32)); + static createGid(seed?: Uint8Array): Promise<string> | string { + return seed ?
sha256Base64(seed) : toBase64(randomBytes(32)); } static async create(properties: { diff --git a/packages/log/src/log.ts b/packages/log/src/log.ts index 73422255b..08319ff18 100644 --- a/packages/log/src/log.ts +++ b/packages/log/src/log.ts @@ -104,6 +104,11 @@ export const ENTRY_JOIN_SHAPE = { meta: { type: true, next: true, gid: true, clock: true }, } as const; +type PendingDelete = { + entry: ShallowOrFullEntry; + fn: () => Promise; +}; + @variant(0) export class Log { @field({ type: fixedArray("u8", 32) }) @@ -536,7 +541,10 @@ export class Log { toMultiHash: false, }); - const removed: ShallowOrFullEntry[] = await this.processEntry(entry); + const pendingDeletes: ( + | PendingDelete + | { entry: Entry; fn: undefined } + )[] = await this.processEntry(entry); entry.init({ encoding: this._encoding, keychain: this._keychain }); @@ -544,16 +552,17 @@ export class Log { if (trimmed) { for (const entry of trimmed) { - removed.push(entry); + pendingDeletes.push({ entry, fn: undefined }); } } - + const removed = pendingDeletes.map((x) => x.entry); const changes: Change = { added: [{ head: true, entry }], removed, }; await (options?.onChange || this._onChange)?.(changes); + await Promise.all(pendingDeletes.map((x) => x.fn?.())); return { entry, removed }; } @@ -792,7 +801,6 @@ export class Log { (await Entry.fromMultihash(this._storage, a, { remote: { timeout: options?.remote?.timeout }, })); - if (!nested) { throw new Error("Missing entry in joinRecursively: " + a); } @@ -823,15 +831,20 @@ export class Log { toMultiHash: true, }); - const removed: ShallowOrFullEntry[] = await this.processEntry(entry); + const pendingDeletes: ( + | PendingDelete + | { entry: Entry; fn: undefined } + )[] = await this.processEntry(entry); const trimmed = await this.trim(options?.trim); if (trimmed) { for (const entry of trimmed) { - removed.push(entry); + pendingDeletes.push({ entry, fn: undefined }); } } + const removed = pendingDeletes.map((x) => x.entry); + await options?.onChange?.({ added: [{ head: options.isHead, entry }], removed: removed, @@ -841,12 +854,18 @@ export class Log { removed: removed, }); + await Promise.all(pendingDeletes.map((x) => x.fn?.())); return true; } - private async processEntry(entry: Entry): Promise { + private async processEntry(entry: Entry): Promise< + { + entry: ShallowOrFullEntry; + fn: () => Promise; + }[] + > { if (entry.meta.type === EntryType.CUT) { - return this.deleteRecursively(entry, true); + return this.prepareDeleteRecursively(entry, true); } return []; } @@ -880,10 +899,7 @@ export class Log { const stack = Array.isArray(from) ? 
[...from] : [from]; const promises: (Promise | void)[] = []; let counter = 0; - const toDelete: { - entry: ShallowOrFullEntry; - fn: () => Promise; - }[] = []; + const toDelete: PendingDelete[] = []; while (stack.length > 0) { const entry = stack.pop()!; @@ -918,10 +934,7 @@ async prepareDelete( hash: string, - ): Promise< - | { entry: ShallowEntry; fn: () => Promise } - | { entry: undefined } - > { + ): Promise<PendingDelete | { entry: undefined }> { let entry = await this._entryIndex.getShallow(hash); if (!entry) { return { entry: undefined }; } @@ -930,7 +943,10 @@ entry: entry.value, fn: async () => { await this._trim.deleteFromCache(hash); - const removedEntry = await this._entryIndex.delete(hash, entry.value); + const removedEntry = (await this._entryIndex.delete( + hash, + entry.value, + )) as ShallowEntry; return removedEntry; }, }; diff --git a/packages/log/test/append.spec.ts b/packages/log/test/append.spec.ts index 2adbf0441..c42423693 100644 --- a/packages/log/test/append.spec.ts +++ b/packages/log/test/append.spec.ts @@ -81,6 +81,24 @@ expect(await blockExists(e2.hash)).to.be.false; expect(await blockExists(e3.hash)).to.be.true; }); + + it("can resolve the full entry from deleted", async () => { + const log = new Log(); + + let resolved: any = undefined; + await log.open(store, signKey, { + onChange: async (change) => { + if (change.removed.length > 0) { + resolved = await ( + await log.get(change.removed[0].hash) + )?.getPayloadValue(); + } + }, + }); + await log.append(new Uint8Array([1])); + await log.append(new Uint8Array([2]), { meta: { type: EntryType.CUT } }); + expect(resolved).to.deep.eq(new Uint8Array([1])); + }); }); describe("append 100 items to a log", () => { diff --git a/packages/log/test/from.spec.ts b/packages/log/test/from.spec.ts index 7dda195b8..1b964e47c 100644 --- a/packages/log/test/from.spec.ts +++ b/packages/log/test/from.spec.ts @@ -173,10 +173,8 @@ await log1.join(log2); expect(log1.length).equal(16); - assert.deepStrictEqual( - (await log1.toArray()).map((e) => e.payload.getValue()), - firstWriteExpectedData, - ); + const out = (await log1.toArray()).map((e) => e.payload.getValue()); + assert.deepStrictEqual(out, firstWriteExpectedData); }); it("respects timeout parameter", async () => { diff --git a/packages/log/test/join.spec.ts b/packages/log/test/join.spec.ts index 0a4dd7e75..e4d2910d2 100644 --- a/packages/log/test/join.spec.ts +++ b/packages/log/test/join.spec.ts @@ -299,6 +299,30 @@ Entry.fromMultihash = fromMultihash; }); + it("can resolve full entry", async () => { + const { entry: a1 } = await log1.append(new Uint8Array([1])); + const { entry: b1 } = await log1.append(new Uint8Array([2]), { + meta: { + next: [a1], + type: EntryType.CUT, + }, + }); + + let resolved: any = undefined; + + await log2.join([a1]); + await log2.join([b1], { + onChange: async (change) => { + if (change.removed.length > 0) { + resolved = await ( + await log2.get(change.removed[0].hash) + )?.getPayloadValue(); + } + }, + }); + expect(resolved).to.deep.eq(new Uint8Array([1])); + }); + it("joins cut", async () => { const { entry: a1 } = await log1.append(new Uint8Array([0, 1])); const { entry: b1 } = await log2.append(new Uint8Array([1, 0]), { diff --git a/packages/log/test/log.spec.ts b/packages/log/test/log.spec.ts index ba380151f..fd18beeb3 100644 --- a/packages/log/test/log.spec.ts +++ b/packages/log/test/log.spec.ts @@ -115,7 +115,7 @@
describe("properties", function () { ); assert.deepStrictEqual(entry, undefined); expect(fetched).to.be.true; - expect(remoteFetchOptions.remote).to.be.undefined; + expect(!remoteFetchOptions.remote).to.be.true; }); it("fetches remotes with timeout", async () => { diff --git a/packages/programs/acl/identity-access-controller/test/index.spec.ts b/packages/programs/acl/identity-access-controller/test/index.spec.ts index 10b02def5..8014e7603 100644 --- a/packages/programs/acl/identity-access-controller/test/index.spec.ts +++ b/packages/programs/acl/identity-access-controller/test/index.spec.ts @@ -82,7 +82,7 @@ describe("index", () => { it("replicates by default", async () => { const s = new TestStore({ publicKey: session.peers[0].peerId }); const l0a = await session.peers[0].open(s); - const checkRole = async (log: SharedLog) => { + const checkRole = async (log: SharedLog) => { expect(await log.isReplicating()).to.be.true; expect( (await log.getMyReplicationSegments()).reduce( diff --git a/packages/programs/data/document/document/src/domain.ts b/packages/programs/data/document/document/src/domain.ts index 7473f3e34..eeb4f26d7 100644 --- a/packages/programs/data/document/document/src/domain.ts +++ b/packages/programs/data/document/document/src/domain.ts @@ -10,14 +10,14 @@ import { } from "../src/index.js"; type RangeArgs = { from: number; to: number }; -export type CustomDomain = ReplicationDomain; +export type CustomDomain = ReplicationDomain; export const createDocumentDomain = ( db: Documents, options: { fromValue: (value: T) => number; fromMissing?: ( - entry: EntryReplicated | ShallowEntry | Entry, + entry: EntryReplicated<"u32"> | ShallowEntry | Entry, ) => number; }, ): CustomDomain => { @@ -25,6 +25,7 @@ export const createDocumentDomain = ( const fromMissing = options.fromMissing || (() => 0xffffffff); return { type: "custom", + resolution: "u32", fromArgs(args, log) { if (!args) { return { offset: log.node.identity.publicKey }; diff --git a/packages/programs/data/document/document/src/program.ts b/packages/programs/data/document/document/src/program.ts index e548386bf..10f189fbe 100644 --- a/packages/programs/data/document/document/src/program.ts +++ b/packages/programs/data/document/document/src/program.ts @@ -79,7 +79,7 @@ export type CanPerform = ( export type SetupOptions< T, I = T, - D extends ReplicationDomain = any, + D extends ReplicationDomain = any, > = { type: AbstractType; canOpen?: (program: T) => MaybePromise; @@ -97,13 +97,13 @@ export type SetupOptions< } & Exclude, "compatibility">; export type ExtractArgs = - T extends ReplicationDomain ? Args : never; + T extends ReplicationDomain ? Args : never; @variant("documents") export class Documents< T, I extends Record = T extends Record ? 
T : any, - D extends ReplicationDomain = any, + D extends ReplicationDomain = any, > extends Program, DocumentEvents & ProgramEvents> { @field({ type: SharedLog }) log: SharedLog; @@ -562,10 +562,7 @@ export class Documents< // Program specific if (value instanceof Program) { // if replicator, then open - if ( - (await this.canOpen!(value, item)) && - (await this.log.isReplicator(item)) // TODO types, throw runtime error if replicator is not provided - ) { + if (await this.canOpen!(value, item)) { value = (await this.node.open(value, { parent: this as Program, existing: "reuse", diff --git a/packages/programs/data/document/document/src/search.ts b/packages/programs/data/document/document/src/search.ts index 2426025cd..03898be29 100644 --- a/packages/programs/data/document/document/src/search.ts +++ b/packages/programs/data/document/document/src/search.ts @@ -192,7 +192,11 @@ const isTransformerWithFunction = ( return (options as TransformerAsFunction).transform != null; }; -export type OpenOptions> = { +export type OpenOptions< + T, + I, + D extends ReplicationDomain, +> = { documentType: AbstractType; dbType: AbstractType>; log: SharedLog; @@ -212,7 +216,7 @@ type IndexableClass = new ( export class DocumentIndex< T, I extends Record, - D extends ReplicationDomain, + D extends ReplicationDomain, > extends Program> { @field({ type: RPC }) _query: RPC>; @@ -435,6 +439,13 @@ export class DocumentIndex< )?.[0]?.results[0]?.value; } + public async getFromGid(gid: string) { + const iterator = this.index.iterate({ query: { gid } }); + const one = await iterator.next(1); + await iterator.close(); + return one[0]; + } + public async put(value: T, entry: Entry, id: indexerTypes.IdKey) { const idString = id.primitive; if (this._isProgramValues) { diff --git a/packages/programs/data/document/document/test/data.ts b/packages/programs/data/document/document/test/data.ts index 1d88d4c0d..bd6ffc098 100644 --- a/packages/programs/data/document/document/test/data.ts +++ b/packages/programs/data/document/document/test/data.ts @@ -40,7 +40,9 @@ export class Document { @variant("test_documents") export class TestStore< - D extends ReplicationDomain = ReplicationDomainHash, + D extends ReplicationDomain = ReplicationDomainHash< + "u32" | "u64" + >, > extends Program>> { @field({ type: Uint8Array }) id: Uint8Array; diff --git a/packages/programs/data/document/document/test/domain.spec.ts b/packages/programs/data/document/document/test/domain.spec.ts index 647c54ba3..874242bf5 100644 --- a/packages/programs/data/document/document/test/domain.spec.ts +++ b/packages/programs/data/document/document/test/domain.spec.ts @@ -98,6 +98,7 @@ describe("domain", () => { from: 2, to: 3, }, + eager: true, }, }, ); diff --git a/packages/programs/data/document/document/test/index.spec.ts b/packages/programs/data/document/document/test/index.spec.ts index 96c027396..43f32c6fc 100644 --- a/packages/programs/data/document/document/test/index.spec.ts +++ b/packages/programs/data/document/document/test/index.spec.ts @@ -61,15 +61,13 @@ describe("index", () => { describe("operations", () => { describe("basic", () => { - let store: TestStore; - let store2: TestStore; + let store: TestStore | undefined = undefined; before(async () => { session = await TestSession.connected(2); }); afterEach(async () => { await store?.close(); - await store2?.close(); }); after(async () => { @@ -618,14 +616,12 @@ describe("index", () => { describe("index", () => { let store: TestStore; - let store2: TestStore; before(async () => { session = await 
TestSession.connected(2); }); afterEach(async () => { await store?.close(); - await store2?.close(); }); after(async () => { @@ -1247,6 +1243,7 @@ describe("index", () => { replicas: { min: 1, }, + timeUntilRoleMaturity: 0, }, }); @@ -1258,6 +1255,7 @@ describe("index", () => { replicas: { min: 1, }, + timeUntilRoleMaturity: 0, }, }); @@ -1269,6 +1267,7 @@ describe("index", () => { replicas: { min: 1, }, + timeUntilRoleMaturity: 0, }, }); @@ -1319,13 +1318,13 @@ describe("index", () => { describe("concurrency", () => { before(() => {}); - let abortController: AbortController, + /* let abortController: AbortController, interval: ReturnType; afterEach(() => { - clearTimeout(interval); - abortController.abort(); + interval && clearTimeout(interval); + abortController && abortController.abort(); }); - + */ after(async () => { await session.stop(); }); @@ -1600,7 +1599,7 @@ describe("index", () => { await waitForResolved( () => expect( - stores[i].docs.log.syncInFlight + stores[i].docs.log.syncronizer.syncInFlight .get(stores[storeIndex].node.identity.publicKey.hashcode()) ?.has(resp.entry.hash), ).to.be.true, @@ -2032,7 +2031,7 @@ describe("index", () => { const request = new SearchRequest({ query: [], }); - const iterator = await stores[1].docs.index.iterate(request); + const iterator = stores[1].docs.index.iterate(request); await iterator.next(2); await iterator.next(1); expect(iterator.done()).to.be.true; @@ -2247,15 +2246,18 @@ describe("index", () => { await store1.docs.put(doc1); await store2.docs.put(new Document({ id: doc1.id, number: 2n })); - await waitForResolved(async () => - expect(await store1.docs.index.getSize()).equal(1), - ); - await waitForResolved(async () => - expect(await store2.docs.index.getSize()).equal(1), - ); - - await waitForResolved(() => expect(remoteQueries1).equal(1)); - await waitForResolved(() => expect(remoteQueries2).equal(1)); + /* TODO force test env to make sure remote queries are performed + + await waitForResolved(async () => + expect(await store1.docs.index.getSize()).equal(1), + ); + await waitForResolved(async () => + expect(await store2.docs.index.getSize()).equal(1), + ); + + await waitForResolved(() => expect(remoteQueries1).equal(1)); + await waitForResolved(() => expect(remoteQueries2).equal(1)); + */ // expect doc1 to be the "truth" @@ -2470,23 +2472,25 @@ describe("index", () => { await subProgram.close(); expect(subProgram.closed).to.be.true; }); - + /* TID + it("non-replicator will not open by default", async () => { const subProgram = new SubProgram(); await stores[1].store.docs.put(subProgram); expect(subProgram.closed).to.be.true; - }); + }); */ it("can open program when sync", async () => { const subProgram = new SubProgram(); await stores[1].store.docs.put(subProgram); - expect(subProgram.closed).to.be.true; // Because observer? Not open by default? + + expect(subProgram.closed).to.be.false; // TODO is this expected because stores[1] is only observer? await stores[0].store.docs.log.log.join( [...(await stores[1].store.docs.log.log.toArray()).values()].map((x) => deserialize(serialize(x), Entry), ), ); - expect(subProgram.closed).to.be.true; // Because observer? Not open by default? + expect(subProgram.closed).to.be.false; // TODO is this expected because stores[1] is only observer? 
}); it("will drop on delete", async () => { @@ -2561,12 +2565,12 @@ store2 = await session.peers[1].open(store.clone()); const subProgram = new SubProgram(); await store.docs.put(subProgram); - expect(subProgram.closed).to.be.false; + await waitForResolved(() => expect(subProgram.closed).to.be.false); await waitForResolved(async () => expect(await store2.docs.index.getSize()).equal(1), ); const stores = [store, store2]; - for (const s of stores) { + for (const [i, s] of stores.entries()) { const results = await s.docs.index.search( new SearchRequest({ query: [ @@ -2576,7 +2580,12 @@ ); expect(results).to.have.length(1); expect(results[0].id).to.deep.equal(subProgram.id); - expect(results[0].closed).to.be.false; + try { + await waitForResolved(() => expect(results[0].closed).to.be.false); + } catch (error) { + console.error("Substore was never opened: " + i); + throw error; + } } }); }); diff --git a/packages/programs/data/document/document/test/utils.ts b/packages/programs/data/document/document/test/utils.ts new file mode 100644 index 000000000..49b0ded8d --- /dev/null +++ b/packages/programs/data/document/document/test/utils.ts @@ -0,0 +1,24 @@ +import type { ProgramClient } from "@peerbit/program"; +import type { DirectSub } from "@peerbit/pubsub"; +import { delay } from "@peerbit/time"; + +export const slowDownSend = ( + from: ProgramClient, + to: ProgramClient, + ms = 3000, +) => { + const directsub = from.services.pubsub as DirectSub; + for (const [_key, peer] of directsub.peers) { + if (peer.publicKey.equals(to.identity.publicKey)) { + const writeFn = peer.write.bind(peer); + peer.write = async (msg, priority) => { + await delay(ms); + if (peer.outboundStream) { + return writeFn(msg, priority); + } + }; + return; + } + } + throw new Error("Could not find peer"); +}; diff --git a/packages/programs/data/shared-log/benchmark/get-samples.ts b/packages/programs/data/shared-log/benchmark/get-samples.ts index 913938fdb..12c11f1f5 100644 --- a/packages/programs/data/shared-log/benchmark/get-samples.ts +++ b/packages/programs/data/shared-log/benchmark/get-samples.ts @@ -1,81 +1,287 @@ -import { Ed25519Keypair } from "@peerbit/crypto"; +import { Ed25519Keypair, PublicSignKey } from "@peerbit/crypto"; import type { Index } from "@peerbit/indexer-interface"; import { create as createIndex } from "@peerbit/indexer-sqlite3"; -import B from "benchmark"; +import * as B from "tinybench"; +import { createNumbers, denormalizer } from "../src/integers.js"; import { - ReplicationRangeIndexable, - getEvenlySpacedU32, + ReplicationIntent, + ReplicationRangeIndexableU32, + ReplicationRangeIndexableU64, getSamples, } from "../src/ranges.js"; // Run with "node --loader ts-node/esm ./benchmark/get-samples.ts" -let create = async ( - ...rects: ReplicationRangeIndexable[] -): Promise<[Index, any]> => { - const indices = await createIndex(); - const index = await indices.init({ schema: ReplicationRangeIndexable }); - await indices.start(); - for (const rect of rects) { - await index.put(rect); - } - return [index, indices]; -}; - -let a = (await Ed25519Keypair.create()).publicKey; -let b = (await Ed25519Keypair.create()).publicKey; -let c = (await Ed25519Keypair.create()).publicKey; - -let ranges: ReplicationRangeIndexable[] = []; -let rangeCount = 1000; -for (let i = 0; i < rangeCount; i++) { - ranges.push( - ...[ - new ReplicationRangeIndexable({ - publicKey: a, - length: 0.2 / rangeCount, - offset: (0 + rangeCount / i) % 1, - timestamp: 0n, - }), - new
ReplicationRangeIndexable({ - publicKey: b, - length: 0.4 / rangeCount, - offset: (0.333 + rangeCount / i) % 1, - timestamp: 0n, - }), - new ReplicationRangeIndexable({ - publicKey: c, - length: 0.6 / rangeCount, - offset: (0.666 + rangeCount / i) % 1, - timestamp: 0n, - }), - new ReplicationRangeIndexable({ - publicKey: c, - length: 0.6 / rangeCount, - offset: (0.666 + rangeCount / i) % 1, - timestamp: 0n, - }), - ], +const resolutions: ("u32" | "u64")[] = ["u32", "u64"]; +for (const resolution of resolutions) { + const rangeClass = + resolution === "u32" + ? ReplicationRangeIndexableU32 + : ReplicationRangeIndexableU64; + const numbers = createNumbers(resolution); + const denormalizeFn = denormalizer(resolution); + + const createReplicationRangeFromNormalized = (properties: { + id?: Uint8Array; + publicKey: PublicSignKey; + length: number; + offset: number; + timestamp?: bigint; + mode?: ReplicationIntent; + }) => { + return new rangeClass({ + id: properties.id, + publicKey: properties.publicKey, + mode: properties.mode, + // @ts-ignore + length: denormalizeFn(properties.length), + // @ts-ignore + offset: denormalizeFn(properties.offset), + timestamp: properties.timestamp, + }); + }; + + let create = async (...rects: any[]): Promise<[Index, any]> => { + const indices = await createIndex(); + const index = await indices.init({ schema: rangeClass as any }); + await indices.start(); + for (const rect of rects) { + await index.put(rect); + } + return [index, indices]; + }; + + let a = (await Ed25519Keypair.create()).publicKey; + let b = (await Ed25519Keypair.create()).publicKey; + let c = (await Ed25519Keypair.create()).publicKey; + + const suite = new B.Bench({ name: resolution, warmupIterations: 1000 }); + + let index: Index | undefined; + let indices: any = undefined; + + let sampleSize = 2; + let rangeCount = 1e4; + + // this bench tests that the getSamples function can handle overlapping ranges in a more performant way than the sparse ranges + suite.add( + "get samples one range - " + resolution, + async () => { + const samples = await getSamples( + numbers.getGrid(numbers.denormalize(Math.random()), sampleSize), + index!, + 0, + numbers, + ); + + if (samples.size !== 1) { + throw new Error( + "Expected at least " + 1 + " samples, got " + samples.size, + ); + } + }, + { + beforeAll: async () => { + const out = await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: Math.random(), + timestamp: 0n, + }), + ); + index = out[0]; + indices = out[1]; + }, + afterAll: async () => { + await indices.stop(); + }, + }, + ); + + suite.add( + "get samples one range unique replicators provided - " + resolution, + async () => { + const samples = await getSamples( + numbers.getGrid(numbers.denormalize(Math.random()), sampleSize), + index!, + 0, + numbers, + { + uniqueReplicators: new Set([a.hashcode()]), + }, + ); + + if (samples.size !== 1) { + throw new Error( + "Expected at least " + 1 + " samples, got " + samples.size, + ); + } + }, + { + beforeAll: async () => { + const out = await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: Math.random(), + timestamp: 0n, + }), + ); + index = out[0]; + indices = out[1]; + }, + afterAll: async () => { + await indices.stop(); + }, + }, ); -} -const [index, indices] = await create(...ranges); -const suite = new B.Suite(); -suite - .add("getSamples", { - fn: async (deferred: any) => { - await getSamples(getEvenlySpacedU32(Math.random(), 2), index, 0); - deferred.resolve(); + suite.add( 
+ "get samples overlapping - " + resolution, + async () => { + const point = numbers.denormalize(Math.random()); + const samples = await getSamples( + numbers.getGrid(point, sampleSize), + index!, + 0, + numbers, + { + onlyIntersecting: true, + }, + ); + if (samples.size < sampleSize) { + throw new Error( + "Expected at least " + sampleSize + " samples, got " + samples.size, + ); + } }, - defer: true, - }) - .on("cycle", (event: any) => { - console.log(String(event.target)); - }) - .on("error", (err: any) => { - throw err; - }) - .on("complete", async function (this: any) { - await indices.drop(); - }) - .run(); + { + beforeAll: async () => { + let ranges: any[] = []; + + // add 2 overlapping ranges + ranges.push( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: 0.1 % 1, + timestamp: 0n, + }), + ); + ranges.push( + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: 0.7 % 1, + timestamp: 0n, + }), + ); + + // add sparse ranges + for (let i = 0; i < rangeCount; i++) { + ranges.push( + ...[ + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.4 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.6 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.6 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + ], + ); + } + + const out = await create(...ranges); + index = out[0]; + indices = out[1]; + }, + afterAll: async () => { + await indices.stop(); + }, + }, + ); + + suite.add( + "get samples sparse - " + resolution, + async () => { + const samples = await getSamples( + numbers.getGrid(numbers.denormalize(Math.random()), sampleSize), + index!, + 0, + numbers, + ); + + if (samples.size < sampleSize) { + throw new Error( + "Expected at least " + sampleSize + " samples, got " + samples.size, + ); + } + }, + { + beforeAll: async () => { + let ranges: any[] = []; + + for (let i = 0; i < rangeCount; i++) { + ranges.push( + ...[ + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.4 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.6 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.6 / rangeCount, + offset: Math.random(), + timestamp: 0n, + }), + ], + ); + } + + const out = await create(...ranges); + index = out[0]; + indices = out[1]; + }, + afterAll: async () => { + await indices.stop(); + }, + }, + ); + + await suite.run(); + + console.table(suite.table()); +} diff --git a/packages/programs/data/shared-log/benchmark/index.ts b/packages/programs/data/shared-log/benchmark/index.ts index fb90beaae..da8e4b55f 100644 --- a/packages/programs/data/shared-log/benchmark/index.ts +++ b/packages/programs/data/shared-log/benchmark/index.ts @@ -2,8 +2,8 @@ import { deserialize, field, option, serialize, variant } from "@dao-xyz/borsh"; import { type ProgramClient } from "@peerbit/program"; import { Program } from "@peerbit/program"; import { TestSession } from "@peerbit/test-utils"; -import B from "benchmark"; import crypto from 
"crypto"; +import { Bench } from "tinybench"; import { v4 as uuid } from "uuid"; import { type Args, SharedLog } from "../src/index.js"; @@ -33,16 +33,16 @@ class Document { } @variant("test_shared_log") -class TestStore extends Program> { +class TestStore extends Program> { @field({ type: SharedLog }) - logs: SharedLog; + logs: SharedLog; - constructor(properties?: { logs: SharedLog }) { + constructor(properties?: { logs: SharedLog }) { super(); this.logs = properties?.logs || new SharedLog(); } - async open(options?: Args): Promise { + async open(options?: Args): Promise { await this.logs.open({ ...options, encoding: { @@ -57,7 +57,7 @@ const peersCount = 1; const session = await TestSession.connected(peersCount); const store = new TestStore({ - logs: new SharedLog({ + logs: new SharedLog({ id: new Uint8Array(32), }), }); @@ -69,44 +69,24 @@ await client.open(store, { factor: 1, }, trim: { type: "length" as const, to: 100 }, - onChange: (change) => { - change.added.forEach(async (added) => { - const doc = await added.entry.getPayloadValue(); - resolver.get(doc.id)!(); - resolver.delete(doc.id); - }); - }, }, }); -const resolver: Map void> = new Map(); -const suite = new B.Suite(); -suite - .add("put", { - fn: async (deferred: any) => { - const doc = new Document({ - id: uuid(), - name: "hello", - number: 1n, - bytes: crypto.randomBytes(1200), - }); - resolver.set(doc.id, () => { - deferred.resolve(); - }); - await store.logs.append(doc, { meta: { next: [] } }); - }, +const suite = new Bench({ name: "put" }); + +const bytes = crypto.randomBytes(1200); + +suite.add("put", async () => { + const doc = new Document({ + id: uuid(), + name: "hello", + number: 1n, + bytes, + }); + await store.logs.append(doc, { meta: { next: [] } }); +}); - minSamples: 300, - defer: true, - }) - .on("cycle", (event: any) => { - console.log(String(event.target)); - }) - .on("error", (err: any) => { - throw err; - }) - .on("complete", async function (this: any, ...args: any[]) { - await store.drop(); - await session.stop(); - }) - .run(); +await suite.run(); +console.table(suite.table()); +await store.drop(); +await session.stop(); diff --git a/packages/programs/data/shared-log/benchmark/memory/child.ts b/packages/programs/data/shared-log/benchmark/memory/child.ts index 771b065b6..2fd3cb380 100644 --- a/packages/programs/data/shared-log/benchmark/memory/child.ts +++ b/packages/programs/data/shared-log/benchmark/memory/child.ts @@ -37,15 +37,15 @@ class Document { } @variant("test_documents") -class TestStore extends Program>> { +class TestStore extends Program>> { @field({ type: SharedLog }) - docs: SharedLog; + docs: SharedLog; - constructor(properties: { docs: SharedLog }) { + constructor(properties: { docs: SharedLog }) { super(); this.docs = properties.docs; } - async open(options?: Partial>): Promise { + async open(options?: Partial>): Promise { await this.docs.open({ ...options, encoding: BORSH_ENCODING(Document) }); } } @@ -77,7 +77,7 @@ try { ]); store1 = new TestStore({ - docs: new SharedLog(), + docs: new SharedLog(), }); const client: ProgramClient = session.peers[0]; diff --git a/packages/programs/data/shared-log/benchmark/partial-sync.ts b/packages/programs/data/shared-log/benchmark/partial-sync.ts new file mode 100644 index 000000000..02e20c6aa --- /dev/null +++ b/packages/programs/data/shared-log/benchmark/partial-sync.ts @@ -0,0 +1,147 @@ +// this benchmark test the time it takes for two nodes with almost the same data to sync up +import { privateKeyFromRaw } from "@libp2p/crypto/keys"; +import { 
TestSession } from "@peerbit/test-utils"; +import { waitForResolved } from "@peerbit/time"; +import { expect } from "chai"; +import { v4 as uuid } from "uuid"; +import { createReplicationDomainHash } from "../src/replication-domain-hash.js"; +import { RatelessIBLTSynchronizer } from "../src/sync/rateless-iblt.js"; +import { SimpleSyncronizer } from "../src/sync/simple.js"; +import type { TestSetupConfig } from "../test/utils.js"; +import { EventStore } from "../test/utils/stores/event-store.js"; + +// Run with "node --loader ts-node/esm ./benchmark/partial-sync.ts" +let db1: EventStore = undefined as any; +let db2: EventStore = undefined as any; + +const store = new EventStore(); + +let syncedCount = 20e3; +let unsyncedCount = 1; +let totalCount = syncedCount + unsyncedCount * 2; + +const reset = async (session: TestSession, setup: TestSetupConfig) => { + db1 = await session.peers[0].open(store.clone(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); + + db2 = await session.peers[1].open(store.clone(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); + + for (let i = 0; i < syncedCount; i++) { + const entry = await db1.add(uuid(), { meta: { next: [] } }); + await db2.log.join([entry.entry]); + } + + expect(db1.log.log.length).to.equal(syncedCount); + expect(db2.log.log.length).to.equal(syncedCount); + + for (let i = 0; i < unsyncedCount; i++) { + await db1.add(uuid(), { meta: { next: [] } }); + await db2.add(uuid(), { meta: { next: [] } }); + } + + expect(db1.log.log.length).to.equal(syncedCount + unsyncedCount); + expect(db2.log.log.length).to.equal(syncedCount + unsyncedCount); +}; + +export const testSetups: TestSetupConfig[] = [ + { + domain: createReplicationDomainHash("u32"), + type: "u32", + syncronizer: SimpleSyncronizer, + name: "u32-simple", + } /* + { + domain: createReplicationDomainHash("u64"), + type: "u64", + syncronizer: SimpleSyncronizer, + name: "u64-simple", + }, */, + { + domain: createReplicationDomainHash("u64"), + type: "u64", + syncronizer: RatelessIBLTSynchronizer, + name: "u64-iblt", + }, +]; + +for (const setup of testSetups) { + let session: TestSession = await TestSession.disconnected(2, [ + { + // TODO dialing fails with this? 
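+ // a fixed, hardcoded private key gives this peer a deterministic identity across benchmark runs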
+ libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 204, 234, 187, 172, 226, 232, 70, 175, 62, 211, 147, 91, 229, 157, + 168, 15, 45, 242, 144, 98, 75, 58, 208, 9, 223, 143, 251, 52, 252, + 159, 64, 83, 52, 197, 24, 246, 24, 234, 141, 183, 151, 82, 53, 142, + 57, 25, 148, 150, 26, 209, 223, 22, 212, 40, 201, 6, 191, 72, 148, + 82, 66, 138, 199, 185, + ]), + ), + }, + }, + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 237, 55, 205, 86, 40, 44, 73, 169, 196, 118, 36, 69, 214, 122, 28, + 157, 208, 163, 15, 215, 104, 193, 151, 177, 62, 231, 253, 120, 122, + 222, 174, 242, 120, 50, 165, 97, 8, 235, 97, 186, 148, 251, 100, + 168, 49, 10, 119, 71, 246, 246, 174, 163, 198, 54, 224, 6, 174, 212, + 159, 187, 2, 137, 47, 192, + ]), + ), + }, + }, + ]); + + console.log("Resetting..."); + await reset(session, setup); + console.log("Reset"); + + if (!db1 || !db2) { + throw new Error("db1 or db2 is undefined"); + } + + console.log("Starting sync..."); + const timeLabel = + setup.name + + ": " + + "Entries " + + totalCount + + " of which " + + unsyncedCount * 2 + + " are unsynced"; + + console.log("Dialing..."); + await waitForResolved(() => db1.node.dial(db2.node.getMultiaddrs())); + console.time(timeLabel); + console.log("Waiting for sync..."); + + await waitForResolved( + () => { + expect(db1.log.log.length).to.equal(totalCount); + expect(db2.log.log.length).to.equal(totalCount); + }, + { + timeout: 3e4, + }, + ); + + console.timeEnd(timeLabel); + + await session.stop(); +} diff --git a/packages/programs/data/shared-log/benchmark/replication-prune.ts b/packages/programs/data/shared-log/benchmark/replication-prune.ts index 2a8798d49..1fa1b58b9 100644 --- a/packages/programs/data/shared-log/benchmark/replication-prune.ts +++ b/packages/programs/data/shared-log/benchmark/replication-prune.ts @@ -50,10 +50,12 @@ let session: TestSession = await TestSession.connected(3, [ }, }, ]); -let db1: EventStore, db2: EventStore, db3: EventStore; +let db1: EventStore, + db2: EventStore, + db3: EventStore; const init = async (min: number, max?: number) => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min, @@ -62,7 +64,7 @@ const init = async (min: number, max?: number) => { replicate: false, }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -75,7 +77,7 @@ const init = async (min: number, max?: number) => { }, ))!; - db3 = (await EventStore.open>( + db3 = (await EventStore.open>( db1.address!, session.peers[2], { diff --git a/packages/programs/data/shared-log/benchmark/replication.ts b/packages/programs/data/shared-log/benchmark/replication.ts index ab0716c92..fb174d754 100644 --- a/packages/programs/data/shared-log/benchmark/replication.ts +++ b/packages/programs/data/shared-log/benchmark/replication.ts @@ -39,13 +39,13 @@ let session: TestSession = await TestSession.connected(2, [ }, ]); -let db1: EventStore, db2: EventStore; +let db1: EventStore, db2: EventStore; let abortController = new AbortController(); let resolvers: Map void }> = new Map(); -db1 = await session.peers[0].open(new EventStore(), { +db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -53,7 +53,7 @@ db1 = await session.peers[0].open(new EventStore(), { }, }); -db2 = (await EventStore.open>( +db2 = (await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/benchmark/to-rebalance.ts 
b/packages/programs/data/shared-log/benchmark/to-rebalance.ts new file mode 100644 index 000000000..2edf4342a --- /dev/null +++ b/packages/programs/data/shared-log/benchmark/to-rebalance.ts @@ -0,0 +1,163 @@ +// @ts-nocheck +import { Ed25519Keypair, randomBytes } from "@peerbit/crypto"; +import type { Index, Indices } from "@peerbit/indexer-interface"; +import { create as createIndex } from "@peerbit/indexer-sqlite3"; +import * as B from "tinybench"; +import { + type EntryReplicated, + ReplicationIntent, + toRebalance, +} from "../src/ranges.js"; +import { getEntryAndRangeConstructors } from "./utils.js"; + +// Run with "node --loader ts-node/esm ./benchmark/to-rebalance.ts" + +const resolutions: ("u32" | "u64")[] = ["u32", "u64"]; + +for (const resolution of resolutions) { + const { createEntry, createRange, entryClass, numbers } = + getEntryAndRangeConstructors(resolution); + + let create = async (...rects: EntryReplicated[]) => { + const indices = await createIndex(); + await indices.start(); + index = await indices.init({ schema: entryClass as any }); + for (const rect of rects) { + await index!.put(rect); + } + return [index, indices] as [Index>, Indices]; + }; + + let a = (await Ed25519Keypair.create()).publicKey; + + const suite = new B.Bench({ name: resolution }); + + let index: Index; + let indices: any = undefined; + let entryCount = 1e3; + let rangeboundaryAssigned = 10; + // this bench measures how toRebalance performs for a full-range change, a boundary-only change, and an updated range + + const consumeAllFromAsyncIterator = async ( + iter: AsyncIterable>, + ): Promise[]> => { + const result = []; + for await (const entry of iter) { + result.push(entry); + } + return result; + }; + + const fullRange = createRange({ + id: randomBytes(32), + mode: ReplicationIntent.Strict, + publicKey: a, + length: 1, + offset: 0, + }); + + const noRange = createRange({ + id: randomBytes(32), + mode: ReplicationIntent.Strict, + publicKey: a, + length: 0, + offset: 0, + }); + + const smallRange = createRange({ + id: randomBytes(32), + mode: ReplicationIntent.Strict, + publicKey: a, + length: 0.001, + offset: 0, + }); + + const anotherSmallRange = createRange({ + id: randomBytes(32), + mode: ReplicationIntent.Strict, + publicKey: a, + length: 0.001, + offset: 0.5, + }); + + let entries: EntryReplicated[] = []; + for (let i = 0; i < entryCount; i++) { + entries.push( + createEntry({ + coordinate: numbers.denormalize(Math.random()), + hash: String(i), + assignedToRangeBoundary: i < rangeboundaryAssigned, + }), + ); + } + + const out = await create(...entries); + index = out[0]; + indices = out[1]; + + suite.add("to rebalance all - " + resolution, async () => { + const samples = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + range: fullRange, + type: "added", + }, + ], + index, + ), + ); + if (samples.length === 0) { + throw new Error("Expecting samples"); + } + }); + + suite.add("range boundary - " + resolution, async () => { + const samples = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + range: noRange, + type: "added", + }, + ], + index, + ), + ); + if (samples.length !== rangeboundaryAssigned) { + throw new Error( + "Expecting samples: " + + rangeboundaryAssigned + + " got " + + samples.length, + ); + } + }); + + suite.add("updated - " + resolution, async () => { + const samples = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + prev: smallRange, + range: anotherSmallRange, + type: "updated", + }, + ], + index, + ), + ); + if (samples.length === 0)
{ + throw new Error("Expecting samples, got " + samples.length); + } + }); + + await suite.run(); + console.table(suite.table()); + await indices.stop(); +} diff --git a/packages/programs/data/shared-log/benchmark/utils.ts b/packages/programs/data/shared-log/benchmark/utils.ts new file mode 100644 index 000000000..7951cacac --- /dev/null +++ b/packages/programs/data/shared-log/benchmark/utils.ts @@ -0,0 +1,78 @@ +import { type PublicSignKey, randomBytes } from "@peerbit/crypto"; +import { LamportClock, Meta } from "@peerbit/log"; +import { + type NumberFromType, + createNumbers, + denormalizer, +} from "../src/integers.js"; +import { + EntryReplicatedU32, + EntryReplicatedU64, + ReplicationIntent, + ReplicationRangeIndexableU32, + ReplicationRangeIndexableU64, +} from "../src/ranges.js"; + +export const getEntryAndRangeConstructors = ( + resolution: R, +) => { + const numbers = createNumbers(resolution); + const rangeClass = + resolution === "u32" + ? ReplicationRangeIndexableU32 + : ReplicationRangeIndexableU64; + const denormalizeFn = denormalizer(resolution); + + const entryClass = + resolution === "u32" ? EntryReplicatedU32 : EntryReplicatedU64; + + const createEntryReplicated = (properties: { + coordinate: NumberFromType; + hash: string; + meta?: Meta; + assignedToRangeBoundary: boolean; + }) => { + return new entryClass({ + coordinates: [properties.coordinate], + assignedToRangeBoundary: properties.assignedToRangeBoundary, + hash: properties.hash, + meta: + properties.meta || + new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + } as any); + }; + + const createReplicationRangeFromNormalized = (properties: { + id?: Uint8Array; + publicKey: PublicSignKey; + length: number; + offset: number; + timestamp?: bigint; + mode?: ReplicationIntent; + }) => { + return new rangeClass({ + id: properties.id, + publicKey: properties.publicKey, + mode: properties.mode, + // @ts-ignore + length: denormalizeFn(properties.length), + // @ts-ignore + offset: denormalizeFn(properties.offset), + timestamp: properties.timestamp, + }); + }; + + return { + createEntry: createEntryReplicated, + createRange: createReplicationRangeFromNormalized, + entryClass, + rangeClass, + numbers, + }; +}; diff --git a/packages/programs/data/shared-log/package.json b/packages/programs/data/shared-log/package.json index 380e21406..3fe321c42 100644 --- a/packages/programs/data/shared-log/package.json +++ b/packages/programs/data/shared-log/package.json @@ -63,7 +63,8 @@ "@peerbit/program": "5.0.8", "@peerbit/log": "4.0.19", "@peerbit/rpc": "5.0.17", - "@peerbit/time": "2.0.7" + "@peerbit/time": "2.0.7", + "@peerbit/riblt": "0.0.1" }, "devDependencies": { "@peerbit/test-utils": "^2.1.7" diff --git a/packages/programs/data/shared-log/src/debounce.ts b/packages/programs/data/shared-log/src/debounce.ts index 13664499c..ca6c318f7 100644 --- a/packages/programs/data/shared-log/src/debounce.ts +++ b/packages/programs/data/shared-log/src/debounce.ts @@ -109,7 +109,7 @@ export const debounceFixedInterval = < return debounced as T; }; -export const debounceAcculmulator = ( +export const debounceAccumulator = ( fn: (args: V) => any, create: () => { delete: (string: K) => void; @@ -145,18 +145,26 @@ export const debounceAcculmulator = ( export const debouncedAccumulatorMap = ( fn: (args: Map) => any, delay: number, + merge?: (into: T, from: T) => void, ) => { - return debounceAcculmulator< - string, - { key: string; value: T
}, - Map - >( + return debounceAccumulator>( fn, () => { const map = new Map(); + let add = merge + ? (props: { key: string; value: T }) => { + let prev = map.get(props.key); + if (prev != null) { + merge(prev, props.value); + } else { + map.set(props.key, props.value); + } + } + : (props: { key: string; value: T }) => { + map.set(props.key, props.value); + }; return { - add: (props: { key: string; value: T }) => - map.set(props.key, props.value), + add, delete: (key: string) => map.delete(key), size: () => map.size, value: map, diff --git a/packages/programs/data/shared-log/src/exchange-heads.ts b/packages/programs/data/shared-log/src/exchange-heads.ts index 9918e53cc..7867ad0a4 100644 --- a/packages/programs/data/shared-log/src/exchange-heads.ts +++ b/packages/programs/data/shared-log/src/exchange-heads.ts @@ -39,28 +39,6 @@ export class ExchangeHeadsMessage extends TransportMessage { } } -@variant([0, 1]) -export class RequestMaybeSync extends TransportMessage { - @field({ type: vec("string") }) - hashes: string[]; - - constructor(props: { hashes: string[] }) { - super(); - this.hashes = props.hashes; - } -} - -@variant([0, 2]) -export class ResponseMaybeSync extends TransportMessage { - @field({ type: vec("string") }) - hashes: string[]; - - constructor(props: { hashes: string[] }) { - super(); - this.hashes = props.hashes; - } -} - @variant([0, 3]) export class RequestIPrune extends TransportMessage { // Hashes which I want to prune @@ -89,7 +67,7 @@ const MAX_EXCHANGE_MESSAGE_SIZE = 1e5; // 100kb. Too large size might not be fas export const createExchangeHeadsMessages = async function* ( log: Log, - heads: Entry[] | string[], + heads: Entry[] | string[] | Set, ): AsyncGenerator, void, void> { let size = 0; let current: EntryWithRefs[] = []; diff --git a/packages/programs/data/shared-log/src/index.ts b/packages/programs/data/shared-log/src/index.ts index 61e2dc826..cbd0771e6 100644 --- a/packages/programs/data/shared-log/src/index.ts +++ b/packages/programs/data/shared-log/src/index.ts @@ -1,5 +1,6 @@ import { BorshError, field, variant } from "@dao-xyz/borsh"; import { AnyBlockStore, RemoteBlocks } from "@peerbit/blocks"; +import { Cache } from "@peerbit/cache"; import { AccessError, PublicSignKey, @@ -10,9 +11,11 @@ import { And, ByteMatchQuery, type Index, + NotStartedError as IndexNotStartedError, Or, Sort, StringMatch, + toId, } from "@peerbit/indexer-interface"; import { type AppendOptions, @@ -21,6 +24,7 @@ import { Log, type LogEvents, type LogProperties, + Meta, ShallowEntry, type ShallowOrFullEntry, } from "@peerbit/log"; @@ -35,13 +39,10 @@ import { AcknowledgeDelivery, DeliveryMode, NotStartedError, + SeekDelivery, SilentDelivery, } from "@peerbit/stream-interface"; -import { - AbortError, - /* delay, */ - waitFor, -} from "@peerbit/time"; +import { AbortError, waitFor } from "@peerbit/time"; import pDefer, { type DeferredPromise } from "p-defer"; import PQueue from "p-queue"; import { concat } from "uint8arrays"; @@ -49,7 +50,7 @@ import { BlocksMessage } from "./blocks.js"; import { type CPUUsage, CPUUsageIntervalLag } from "./cpu.js"; import { type DebouncedAccumulatorMap, - debounceAcculmulator, + debounceAccumulator, debounceFixedInterval, debouncedAccumulatorMap, } from "./debounce.js"; @@ -57,31 +58,43 @@ import { EntryWithRefs, ExchangeHeadsMessage, RequestIPrune, - RequestMaybeSync, ResponseIPrune, - ResponseMaybeSync, createExchangeHeadsMessages, } from "./exchange-heads.js"; +import { + MAX_U32, + type NumberFromType, + type Numbers, + bytesToNumber, + 
createNumbers, + denormalizer, +} from "./integers.js"; import { TransportMessage } from "./message.js"; import { PIDReplicationController } from "./pid.js"; import { - EntryReplicated, + type EntryReplicated, + EntryReplicatedU32, + EntryReplicatedU64, ReplicationIntent, - ReplicationRange, - ReplicationRangeIndexable, + type ReplicationRangeIndexable, + ReplicationRangeIndexableU32, + ReplicationRangeIndexableU64, + ReplicationRangeMessage, + SyncStatus, + appromixateCoverage, getCoverSet, - getEvenlySpacedU32, getSamples, - hasCoveringRange, + iHaveCoveringRange, isMatured, + isReplicationRangeMessage, + mergeRanges, minimumWidthToCover, - shouldAssigneToRangeBoundary, + shouldAssigneToRangeBoundary as shouldAssignToRangeBoundary, toRebalance, } from "./ranges.js"; import { type ReplicationDomainHash, createReplicationDomainHash, - hashToU32, } from "./replication-domain-hash.js"; import { type ReplicationDomainTime, @@ -94,12 +107,12 @@ import { type ReplicationDomain, debounceAggregationChanges, mergeReplicationChanges, - type u32, } from "./replication-domain.js"; import { AbsoluteReplicas, AddedReplicationSegmentMessage, AllReplicatingSegmentsMessage, + MinReplicas, ReplicationError, type ReplicationLimits, RequestReplicationInfoMessage, @@ -109,7 +122,9 @@ import { encodeReplicas, maxReplicas, } from "./replication.js"; -import { MAX_U32, Observer, Replicator, scaleToU32 } from "./role.js"; +import { Observer, Replicator } from "./role.js"; +import type { SynchronizerConstructor, Syncronizer } from "./sync/index.js"; +import { SimpleSyncronizer } from "./sync/simple.js"; import { groupByGid } from "./utils.js"; export { @@ -121,7 +136,12 @@ export { }; export { type CPUUsage, CPUUsageIntervalLag }; export * from "./replication.js"; -export { EntryReplicated, ReplicationRangeIndexable }; +export { + type ReplicationRangeIndexable, + type EntryReplicated, + EntryReplicatedU32, + EntryReplicatedU64, +}; export const logger = loggerFn({ module: "shared-log" }); const getLatestEntry = ( @@ -144,32 +164,37 @@ export type ReplicationLimitsOptions = | Partial | { min?: number; max?: number }; -export type DynamicReplicationOptions = { +export type DynamicReplicationOptions = { limits?: { interval?: number; storage?: number; cpu?: number | { max: number; monitor?: CPUUsage }; }; -}; +} & ( + | { offset: number; normalized?: true | undefined } + | { offset: NumberFromType; normalized: false } + | { offset?: undefined; normalized?: undefined } +); export type FixedReplicationOptions = { id?: Uint8Array; normalized?: boolean; - factor: number | "all" | "right"; + factor: number | bigint | "all" | "right"; strict?: boolean; // if true, only this range will be replicated - offset?: number; + offset?: number | bigint; + syncStatus?: SyncStatus; }; -export type ReplicationOptions = - | DynamicReplicationOptions +export type ReplicationOptions = + | DynamicReplicationOptions | FixedReplicationOptions | FixedReplicationOptions[] | number | boolean; const isAdaptiveReplicatorOption = ( - options: ReplicationOptions, -): options is DynamicReplicationOptions => { + options: ReplicationOptions, +): options is DynamicReplicationOptions => { if (typeof options === "number") { return false; } @@ -185,14 +210,14 @@ const isAdaptiveReplicatorOption = ( return true; }; -const isUnreplicationOptions = (options?: ReplicationOptions): boolean => +const isUnreplicationOptions = (options?: ReplicationOptions): boolean => options === false || options === 0 || ((options as FixedReplicationOptions)?.offset === 
undefined && (options as FixedReplicationOptions)?.factor === 0); const isReplicationOptionsDependentOnPreviousState = ( - options?: ReplicationOptions, + options?: ReplicationOptions, ): boolean => { if (options === true) { return true; @@ -211,14 +236,66 @@ const isReplicationOptionsDependentOnPreviousState = ( return false; }; -export type SharedLogOptions> = { - replicate?: ReplicationOptions; +interface IndexableDomain { + numbers: Numbers; + constructorEntry: new (properties: { + coordinates: NumberFromType[]; + hash: string; + meta: Meta; + assignedToRangeBoundary: boolean; + }) => EntryReplicated; + constructorRange: new ( + properties: { + id?: Uint8Array; + offset: NumberFromType; + length: NumberFromType; + mode?: ReplicationIntent; + timestamp?: bigint; + } & ({ publicKeyHash: string } | { publicKey: PublicSignKey }), + ) => ReplicationRangeIndexable; +} + +const createIndexableDomainFromResolution = ( + resolution: R, +): IndexableDomain => { + const denormalizerFn = denormalizer(resolution); + const byteToNumberFn = bytesToNumber(resolution); + if (resolution === "u32") { + return { + constructorEntry: EntryReplicatedU32, + constructorRange: ReplicationRangeIndexableU32, + denormalize: denormalizerFn, + bytesToNumber: byteToNumberFn, + numbers: createNumbers(resolution), + } as any as IndexableDomain; + } else if (resolution === "u64") { + return { + constructorEntry: EntryReplicatedU64, + constructorRange: ReplicationRangeIndexableU64, + denormalize: denormalizerFn, + bytesToNumber: byteToNumberFn, + numbers: createNumbers(resolution), + } as any as IndexableDomain; + } + throw new Error("Unsupported resolution"); +}; + +export type SharedLogOptions< + T, + D extends ReplicationDomain, + R extends "u32" | "u64" = D extends ReplicationDomain + ? I + : "u32", +> = { + replicate?: ReplicationOptions; replicas?: ReplicationLimitsOptions; respondToIHaveTimeout?: number; canReplicate?: (publicKey: PublicSignKey) => Promise | boolean; - sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; + sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; + syncronizer?: SynchronizerConstructor; timeUntilRoleMaturity?: number; waitForReplicatorTimeout?: number; + waitForPruneDelay?: number; distributionDebounceTime?: number; compatibility?: number; domain?: D; @@ -227,6 +304,7 @@ export type SharedLogOptions> = { export const DEFAULT_MIN_REPLICAS = 2; export const WAIT_FOR_REPLICATOR_TIMEOUT = 9000; export const WAIT_FOR_ROLE_MATURITY = 5000; +export const WAIT_FOR_PRUNE_DELAY = 5000; const PRUNE_DEBOUNCE_INTERVAL = 500; // DONT SET THIS ANY LOWER, because it will make the pid controller unstable as the system responses are not fast enough to updates from the pid controller @@ -250,8 +328,11 @@ const checkMinReplicasLimit = (minReplicas: number) => { export type Args< T, - D extends ReplicationDomain = ReplicationDomainHash, -> = LogProperties & LogEvents & SharedLogOptions; + D extends ReplicationDomain, + R extends "u32" | "u64" = D extends ReplicationDomain + ? I + : "u32", +> = LogProperties & LogEvents & SharedLogOptions; export type SharedAppendOptions = AppendOptions & { replicas?: AbsoluteReplicas | number; @@ -273,9 +354,12 @@ export interface SharedLogEvents extends ProgramEvents { @variant("shared_log") export class SharedLog< - T = Uint8Array, - D extends ReplicationDomain = ReplicationDomainHash, -> extends Program, SharedLogEvents> { + T, + D extends ReplicationDomain, + R extends "u32" | "u64" = D extends ReplicationDomain + ? 
I + : "u32", +> extends Program, SharedLogEvents> { @field({ type: Log }) log: Log; @@ -286,11 +370,15 @@ export class SharedLog< private _isReplicating!: boolean; private _isAdaptiveReplicating!: boolean; - private _replicationRangeIndex!: Index; - private _entryCoordinatesIndex!: Index; + private _replicationRangeIndex!: Index>; + private _entryCoordinatesIndex!: Index>; + private coordinateToHash!: Cache; + private uniqueReplicators!: Set; /* private _totalParticipation!: number; */ - private _gidPeersHistory!: Map>; + + // gid -> coordinate -> publicKeyHash list (of owners) + _gidPeersHistory!: Map>; private _onSubscriptionFn!: (arg: any) => any; private _onUnsubscriptionFn!: (arg: any) => any; @@ -301,7 +389,7 @@ export class SharedLog< private _logProperties?: LogProperties & LogEvents & - SharedLogOptions; + SharedLogOptions; private _closeController!: AbortController; private _respondToIHaveTimeout!: any; private _pendingDeletes!: Map< @@ -324,13 +412,16 @@ export class SharedLog< } >; - private pendingMaturity!: Map< + // public key hash to range id to range + pendingMaturity!: Map< string, - { - timestamp: bigint; - ranges: Map; - timeout: ReturnType; - } + Map< + string, + { + range: ReplicationChange; + timeout: ReturnType; + } + > >; // map of peerId to timeout private latestReplicationInfoMessage!: Map; @@ -340,7 +431,7 @@ export class SharedLog< private openTime!: number; private oldestOpenTime!: number; - private sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; + private sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; // A fn that we can call many times that recalculates the participation role private rebalanceParticipationDebounced: @@ -348,11 +439,12 @@ export class SharedLog< | undefined; // A fn for debouncing the calls for pruning - pruneDebouncedFn!: DebouncedAccumulatorMap< - Entry | ShallowEntry | EntryReplicated - >; + pruneDebouncedFn!: DebouncedAccumulatorMap<{ + entry: Entry | ShallowEntry | EntryReplicated; + leaders: Map; + }>; private responseToPruneDebouncedFn!: ReturnType< - typeof debounceAcculmulator< + typeof debounceAccumulator< string, { hashes: string[]; @@ -361,6 +453,10 @@ export class SharedLog< Map> > >; + + private _requestIPruneSent!: Map>; // tracks entry hash to peer hash for requesting I prune messages + private _requestIPruneResponseReplicatorSet!: Map>; // tracks entry hash to peer hash + private replicationChangeDebounceFn!: ReturnType< typeof debounceAggregationChanges >; @@ -368,15 +464,7 @@ export class SharedLog< // regular distribution checks private distributeQueue?: PQueue; - // Syncing and dedeplucation work - private syncMoreInterval?: ReturnType; - - // map of hash to public keys that we can ask for entries - private syncInFlightQueue!: Map; - private syncInFlightQueueInverted!: Map>; - - // map of hash to public keys that we have asked for entries - syncInFlight!: Map>; + syncronizer!: Syncronizer; replicas!: ReplicationLimits; @@ -384,11 +472,13 @@ export class SharedLog< timeUntilRoleMaturity!: number; waitForReplicatorTimeout!: number; + waitForPruneDelay!: number; distributionDebounceTime!: number; replicationController!: PIDReplicationController; history!: { usedMemory: number; factor: number }[]; domain!: D; + indexableDomain!: IndexableDomain; interval: any; constructor(properties?: { id?: Uint8Array }) { @@ -417,8 +507,9 @@ export class SharedLog< if (segments.length > 0) { const segment = segments[0].toReplicationRange(); return new Replicator({ - factor: segment.factor / MAX_U32, - 
offset: segment.offset / MAX_U32, + // TODO types + factor: (segment.factor as number) / MAX_U32, + offset: (segment.offset as number) / MAX_U32, }); } @@ -430,38 +521,9 @@ export class SharedLog< if (!this._isReplicating) { return false; } - - /* - if (isAdaptiveReplicatorOption(this._replicationSettings)) { - return true; - } - - if ((this.replicationSettings as FixedReplicationOptions).factor !== 0) { - return true; - } */ - return (await this.countReplicationSegments()) > 0; } - /* get totalParticipation(): number { - return this._totalParticipation; - } */ - - async calculateTotalParticipation() { - const sum = await this.replicationIndex.sum({ key: "width" }); - return Number(sum) / MAX_U32; - } - - async countReplicationSegments() { - const count = await this.replicationIndex.count({ - query: new StringMatch({ - key: "hash", - value: this.node.identity.publicKey.hashcode(), - }), - }); - return count; - } - private setupRebalanceDebounceFunction( interval = RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL, ) { @@ -490,14 +552,18 @@ export class SharedLog< } private async _replicate( - options?: ReplicationOptions, + options?: ReplicationOptions, { reset, checkDuplicates, + syncStatus, announce, + mergeSegments, }: { reset?: boolean; checkDuplicates?: boolean; + syncStatus?: SyncStatus; + mergeSegments?: boolean; announce?: ( msg: AddedReplicationSegmentMessage | AllReplicatingSegmentsMessage, ) => void; @@ -507,7 +573,7 @@ export class SharedLog< if (isUnreplicationOptions(options)) { await this.unreplicate(); } else { - let ranges: ReplicationRangeIndexable[] = []; + let ranges: ReplicationRangeIndexable[] = []; if (options == null) { options = {}; @@ -531,7 +597,7 @@ export class SharedLog< ranges = [maybeRange]; offsetWasProvided = true; - } else if (options instanceof ReplicationRange) { + } else if (isReplicationRangeMessage(options)) { ranges = [ options.toReplicationRangeIndexable(this.node.identity.publicKey), ]; @@ -557,15 +623,57 @@ export class SharedLog< } for (const rangeArg of rangeArgs) { + let timestamp: bigint | undefined = undefined; + if (rangeArg.id != null) { + // fetch the previous timestamp if it exists + const indexed = await this.replicationIndex.get(toId(rangeArg.id), { + shape: { id: true, timestamp: true }, + }); + if (indexed) { + timestamp = indexed.value.timestamp; + } + } const normalized = rangeArg.normalized ?? true; offsetWasProvided = rangeArg.offset != null; const offset = - rangeArg.offset ?? - (normalized ? Math.random() : scaleToU32(Math.random())); + rangeArg.offset != null + ? normalized + ? this.indexableDomain.numbers.denormalize( + rangeArg.offset as number, + ) + : rangeArg.offset + : this.indexableDomain.numbers.random(); let factor = rangeArg.factor; - let width = normalized ? 1 : scaleToU32(1); + let fullWidth = this.indexableDomain.numbers.maxValue; + + let factorDenormalized = !normalized + ? factor + : this.indexableDomain.numbers.denormalize(factor as number); ranges.push( - new ReplicationRangeIndexable({ + new this.indexableDomain.constructorRange({ + id: rangeArg.id, + // @ts-ignore + offset: offset, + // @ts-ignore + length: (factor === "all" + ? fullWidth + : factor === "right" + ? // @ts-ignore + fullWidth - offset + : factorDenormalized) as NumberFromType, + /* typeof factor === "number" + ? factor + : factor === "all" + ? width + // @ts-ignore + : width - offset, */ + publicKeyHash: this.node.identity.publicKey.hashcode(), + mode: rangeArg.strict + ? 
ReplicationIntent.Strict + : ReplicationIntent.NonStrict, // automatic means that this range might be reused later for dynamic replication behaviour + timestamp: timestamp ?? BigInt(+new Date()), + }), + /* new ReplicationRangeIndexable({ id: rangeArg.id, normalized, offset: offset, @@ -574,14 +682,23 @@ export class SharedLog< ? factor : factor === "all" ? width + // @ts-ignore : width - offset, publicKeyHash: this.node.identity.publicKey.hashcode(), mode: rangeArg.strict ? ReplicationIntent.Strict : ReplicationIntent.NonStrict, // automatic means that this range might be reused later for dynamic replication behaviour timestamp: BigInt(+new Date()), - }), + }), */ + ); + } + + if (mergeSegments && ranges.length > 1) { + const mergedSegment = mergeRanges( + ranges, + this.indexableDomain.numbers, ); + ranges = [mergedSegment]; } } @@ -603,13 +720,14 @@ export class SharedLog< reset: resetRanges ?? false, checkDuplicates, announce, + syncStatus, }); return ranges; } } - setupDebouncedRebalancing(options?: DynamicReplicationOptions) { + setupDebouncedRebalancing(options?: DynamicReplicationOptions) { this.cpuUsage?.stop?.(); this.replicationController = new PIDReplicationController( @@ -640,18 +758,23 @@ export class SharedLog< } async replicate( - rangeOrEntry?: ReplicationOptions | Entry | Entry[], + rangeOrEntry?: ReplicationOptions | Entry | Entry[], options?: { reset?: boolean; checkDuplicates?: boolean; + mergeSegments?: boolean; announce?: ( msg: AllReplicatingSegmentsMessage | AddedReplicationSegmentMessage, ) => void; }, ) { - let range: ReplicationRange[] | ReplicationOptions | undefined = undefined; + let range: + | ReplicationRangeMessage[] + | ReplicationOptions + | undefined = undefined; + let syncStatus = SyncStatus.Unsynced; - if (rangeOrEntry instanceof ReplicationRange) { + if (rangeOrEntry instanceof ReplicationRangeMessage) { range = rangeOrEntry; } else if (rangeOrEntry instanceof Entry) { range = { @@ -659,8 +782,10 @@ export class SharedLog< offset: await this.domain.fromEntry(rangeOrEntry), normalized: false, }; + syncStatus = SyncStatus.Synced; /// we already have the entries } else if (Array.isArray(rangeOrEntry)) { - let ranges: (ReplicationRange | FixedReplicationOptions)[] = []; + let ranges: (ReplicationRangeMessage | FixedReplicationOptions)[] = + []; for (const entry of rangeOrEntry) { if (entry instanceof Entry) { ranges.push({ @@ -668,6 +793,8 @@ export class SharedLog< offset: await this.domain.fromEntry(entry), normalized: false, }); + + syncStatus = SyncStatus.Synced; /// we already have the entries } else { ranges.push(entry); } @@ -677,17 +804,17 @@ export class SharedLog< range = rangeOrEntry ?? true; } - return this._replicate(range, options); + return this._replicate(range, { ...options, syncStatus }); } - async unreplicate(rangeOrEntry?: Entry | ReplicationRange) { + async unreplicate(rangeOrEntry?: Entry | ReplicationRangeMessage) { let range: FixedReplicationOptions; if (rangeOrEntry instanceof Entry) { range = { factor: 1, offset: await this.domain.fromEntry(rangeOrEntry), }; - } else if (rangeOrEntry instanceof ReplicationRange) { + } else if (rangeOrEntry instanceof ReplicationRangeMessage) { range = rangeOrEntry; } else { this._isReplicating = false; @@ -720,59 +847,57 @@ export class SharedLog< key: PublicSignKey | string, options?: { noEvent?: boolean }, ) { - const fn = async () => { - const keyHash = typeof key === "string" ? 
key : key.hashcode(); - const deleted = await this.replicationIndex - .iterate({ - query: { hash: keyHash }, - }) - .all(); + const keyHash = typeof key === "string" ? key : key.hashcode(); + const deleted = await this.replicationIndex + .iterate({ + query: { hash: keyHash }, + }) + .all(); - await this.replicationIndex.del({ query: { hash: keyHash } }); + this.uniqueReplicators.delete(keyHash); + await this.replicationIndex.del({ query: { hash: keyHash } }); - await this.updateOldestTimestampFromIndex(); + await this.updateOldestTimestampFromIndex(); - const isMe = this.node.identity.publicKey.hashcode() === keyHash; - if (isMe) { - // announce that we are no longer replicating + const isMe = this.node.identity.publicKey.hashcode() === keyHash; + if (isMe) { + // announce that we are no longer replicating - await this.rpc.send( - new AllReplicatingSegmentsMessage({ segments: [] }), - { priority: 1 }, - ); - } + await this.rpc.send(new AllReplicatingSegmentsMessage({ segments: [] }), { + priority: 1, + }); + } - if (options?.noEvent !== true) { - if (key instanceof PublicSignKey) { - this.events.dispatchEvent( - new CustomEvent("replication:change", { - detail: { publicKey: key }, - }), - ); - } else { - throw new Error("Key was not a PublicSignKey"); - } + if (options?.noEvent !== true) { + if (key instanceof PublicSignKey) { + this.events.dispatchEvent( + new CustomEvent("replication:change", { + detail: { publicKey: key }, + }), + ); + } else { + throw new Error("Key was not a PublicSignKey"); } + } - deleted.forEach((x) => { - return this.replicationChangeDebounceFn.add({ - range: x.value, - type: "removed", - }); + for (const x of deleted) { + this.replicationChangeDebounceFn.add({ + range: x.value, + type: "removed", }); + } - const pendingMaturity = this.pendingMaturity.get(keyHash); - if (pendingMaturity) { - clearTimeout(pendingMaturity.timeout); - this.pendingMaturity.delete(keyHash); - } - - if (!isMe) { - this.rebalanceParticipationDebounced?.(); + const pendingMaturity = this.pendingMaturity.get(keyHash); + if (pendingMaturity) { + for (const [_k, v] of pendingMaturity) { + clearTimeout(v.timeout); } - }; + this.pendingMaturity.delete(keyHash); + } - return fn(); + if (!isMe) { + this.rebalanceParticipationDebounced?.(); + } } private async updateOldestTimestampFromIndex() { @@ -792,266 +917,275 @@ export class SharedLog< } private async removeReplicationRange(ids: Uint8Array[], from: PublicSignKey) { - const fn = async () => { - let idMatcher = new Or( - ids.map((x) => new ByteMatchQuery({ key: "id", value: x })), - ); + let idMatcher = new Or( + ids.map((x) => new ByteMatchQuery({ key: "id", value: x })), + ); - // make sure we are not removing something that is owned by the replicator - let identityMatcher = new StringMatch({ - key: "hash", - value: from.hashcode(), - }); + // make sure we are not removing something that is owned by the replicator + let identityMatcher = new StringMatch({ + key: "hash", + value: from.hashcode(), + }); - let query = new And([idMatcher, identityMatcher]); + let query = new And([idMatcher, identityMatcher]); - const pendingMaturity = this.pendingMaturity.get(from.hashcode()); - if (pendingMaturity) { - for (const id of ids) { - pendingMaturity.ranges.delete(id.toString()); - } - if (pendingMaturity.ranges.size === 0) { - clearTimeout(pendingMaturity.timeout); - this.pendingMaturity.delete(from.hashcode()); + const pendingMaturity = this.pendingMaturity.get(from.hashcode()); + if (pendingMaturity) { + for (const id of ids) { + const info = 
pendingMaturity.get(id.toString()); + if (info) { + clearTimeout(info.timeout); + pendingMaturity.delete(id.toString()); } } + if (pendingMaturity.size === 0) { + this.pendingMaturity.delete(from.hashcode()); + } + } - await this.replicationIndex.del({ query }); + await this.replicationIndex.del({ query }); - await this.updateOldestTimestampFromIndex(); + const otherSegmentsIterator = this.replicationIndex.iterate( + { query: { hash: from.hashcode() } }, + { shape: { id: true } }, + ); + if ((await otherSegmentsIterator.next(1)).length === 0) { + this.uniqueReplicators.delete(from.hashcode()); + } + await otherSegmentsIterator.close(); - this.events.dispatchEvent( - new CustomEvent("replication:change", { - detail: { publicKey: from }, - }), - ); + await this.updateOldestTimestampFromIndex(); - if (!from.equals(this.node.identity.publicKey)) { - this.rebalanceParticipationDebounced?.(); - } - }; + this.events.dispatchEvent( + new CustomEvent("replication:change", { + detail: { publicKey: from }, + }), + ); - return fn(); + if (!from.equals(this.node.identity.publicKey)) { + this.rebalanceParticipationDebounced?.(); + } } + addedReplciationRangesFrom: Set = new Set(); private async addReplicationRange( - ranges: ReplicationRangeIndexable[], + ranges: ReplicationRangeIndexable[], from: PublicSignKey, { reset, checkDuplicates, }: { reset?: boolean; checkDuplicates?: boolean } = {}, ) { - const fn = async () => { - if ( - this._isTrustedReplicator && - !(await this._isTrustedReplicator(from)) - ) { - return undefined; - } - - let isNewReplicator = false; - - let diffs: ReplicationChanges; - let deleted: ReplicationRangeIndexable[] | undefined = undefined; - if (reset) { - deleted = ( - await this.replicationIndex - .iterate({ - query: { hash: from.hashcode() }, - }) - .all() - ).map((x) => x.value); + if (this._isTrustedReplicator && !(await this._isTrustedReplicator(from))) { + return undefined; + } + ( + this.addedReplciationRangesFrom || + (this.addedReplciationRangesFrom = new Set()) + ).add(from.hashcode()); + + let isNewReplicator = false; + + let diffs: ReplicationChanges; + let deleted: ReplicationRangeIndexable[] | undefined = undefined; + if (reset) { + deleted = ( + await this.replicationIndex + .iterate({ + query: { hash: from.hashcode() }, + }) + .all() + ).map((x) => x.value); - let prevCount = deleted.length; + let prevCount = deleted.length; - await this.replicationIndex.del({ query: { hash: from.hashcode() } }); + await this.replicationIndex.del({ query: { hash: from.hashcode() } }); - diffs = [ - ...deleted.map((x) => { - return { range: x, type: "removed" as const }; - }), - ...ranges.map((x) => { - return { range: x, type: "added" as const }; - }), - ]; + diffs = [ + ...deleted.map((x) => { + return { range: x, type: "removed" as const }; + }), + ...ranges.map((x) => { + return { range: x, type: "added" as const }; + }), + ]; - isNewReplicator = prevCount === 0 && ranges.length > 0; + isNewReplicator = prevCount === 0 && ranges.length > 0; + } else { + let existing = await this.replicationIndex + .iterate( + { + query: ranges.map( + (x) => new ByteMatchQuery({ key: "id", value: x.id }), + ), + }, + { reference: true }, + ) + .all(); + if (existing.length === 0) { + let prevCount = await this.replicationIndex.count({ + query: new StringMatch({ key: "hash", value: from.hashcode() }), + }); + isNewReplicator = prevCount === 0; } else { - let existing = await this.replicationIndex - .iterate( - { - query: ranges.map( - (x) => new ByteMatchQuery({ key: "id", value: x.id }), - ), 
- }, - { reference: true }, - ) - .all(); - if (existing.length === 0) { - let prevCount = await this.replicationIndex.count({ - query: new StringMatch({ key: "hash", value: from.hashcode() }), - }); - isNewReplicator = prevCount === 0; - } else { - isNewReplicator = false; - } + isNewReplicator = false; + } - if (checkDuplicates) { - let deduplicated: ReplicationRangeIndexable[] = []; + if (checkDuplicates) { + let deduplicated: ReplicationRangeIndexable[] = []; - // TODO also deduplicate/de-overlap among the ranges that ought to be inserted? - for (const range of ranges) { - if (!(await hasCoveringRange(this.replicationIndex, range))) { - deduplicated.push(range); - } + // TODO also deduplicate/de-overlap among the ranges that ought to be inserted? + for (const range of ranges) { + if (!(await iHaveCoveringRange(this.replicationIndex, range))) { + deduplicated.push(range); } - ranges = deduplicated; - } - let existingMap = new Map(); - for (const result of existing) { - existingMap.set(result.value.idString, result.value); } + ranges = deduplicated; + } + let existingMap = new Map>(); + for (const result of existing) { + existingMap.set(result.value.idString, result.value); + } - let changes: ReplicationChanges = ranges - .map((x) => { - const prev = existingMap.get(x.idString); - if (prev) { - if (prev.equalRange(x)) { - return undefined; - } - return { range: x, prev, type: "updated" }; - } else { - return { range: x, type: "added" }; + let changes: ReplicationChanges = ranges + .map((x) => { + const prev = existingMap.get(x.idString); + if (prev) { + if (prev.equalRange(x)) { + return undefined; } - }) - .filter((x) => x != null) as ReplicationChanges; - diffs = changes; - } + return { range: x, prev, type: "updated" }; + } else { + return { range: x, type: "added" }; + } + }) + .filter((x) => x != null) as ReplicationChanges; + diffs = changes; + } - let now = +new Date(); - let minRoleAge = await this.getDefaultMinRoleAge(); - let isAllMature = true; + this.uniqueReplicators.add(from.hashcode()); - for (const diff of diffs) { - if (diff.type === "added" || diff.type === "updated") { - await this.replicationIndex.put(diff.range); - if (!reset) { - this.oldestOpenTime = Math.min( - Number(diff.range.timestamp), - this.oldestOpenTime, - ); + let now = +new Date(); + let minRoleAge = await this.getDefaultMinRoleAge(); + let isAllMature = true; + + for (const diff of diffs) { + if (diff.type === "added" || diff.type === "updated") { + /* if (this.closed) { + return; + } */ + await this.replicationIndex.put(diff.range); + + if (!reset) { + this.oldestOpenTime = Math.min( + Number(diff.range.timestamp), + this.oldestOpenTime, + ); + } + + const isMature = isMatured(diff.range, now, minRoleAge); + + if ( + !isMature /* && diff.range.hash !== this.node.identity.publicKey.hashcode() */ + ) { + // second condition is to avoid the case where we are adding a range that we own + isAllMature = false; + let pendingRanges = this.pendingMaturity.get(diff.range.hash); + if (!pendingRanges) { + pendingRanges = new Map(); + this.pendingMaturity.set(diff.range.hash, pendingRanges); } - const isMature = isMatured(diff.range, now, minRoleAge); + let waitForMaturityTime = Math.max( + minRoleAge - (now - Number(diff.range.timestamp)), + 0, + ); - if ( - !isMature /* && diff.range.hash !== this.node.identity.publicKey.hashcode() */ - ) { - // second condition is to avoid the case where we are adding a range that we own - isAllMature = false; - let prevPendingMaturity = 
this.pendingMaturity.get(diff.range.hash); - let map: Map; - let waitForMaturityTime = Math.max( - minRoleAge - (now - Number(diff.range.timestamp)), - 0, - ); + const setupTimeout = () => + setTimeout(async () => { + this.events.dispatchEvent( + new CustomEvent("replicator:mature", { + detail: { publicKey: from }, + }), + ); + + this.replicationChangeDebounceFn.add(diff); // we need to call this here because the outcome of findLeaders will be different when some ranges become mature, i.e. some of the data we own might be prunable! + pendingRanges.delete(diff.range.idString); + if (pendingRanges.size === 0) { + this.pendingMaturity.delete(diff.range.hash); + } + }, waitForMaturityTime); - if (prevPendingMaturity) { - map = prevPendingMaturity.ranges; - if (prevPendingMaturity.timestamp < diff.range.timestamp) { - // something has changed so we need to reset the timeout - clearTimeout(prevPendingMaturity.timeout); - prevPendingMaturity.timestamp = diff.range.timestamp; - prevPendingMaturity.timeout = setTimeout(() => { - this.events.dispatchEvent( - new CustomEvent( - "replicator:mature", - { - detail: { publicKey: from }, - }, - ), - ); - for (const value of map.values()) { - this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable! - }, waitForMaturityTime); - } - } else { - map = new Map(); - this.pendingMaturity.set(diff.range.hash, { - timestamp: diff.range.timestamp, - ranges: map, - timeout: setTimeout(() => { - this.events.dispatchEvent( - new CustomEvent( - "replicator:mature", - { - detail: { publicKey: from }, - }, - ), - ); - for (const value of map.values()) { - this.replicationChangeDebounceFn.add(value); // we need to call this here because the outcom of findLeaders will be different when some ranges become mature, i.e. some of data we own might be prunable! - } - }, waitForMaturityTime), - }); - } + let prevPendingMaturity = pendingRanges.get(diff.range.idString); + if (prevPendingMaturity) { + // only reset the timer if the new range is older than the previous one, which means that waitForMaturityTime is less than the previous one + clearTimeout(prevPendingMaturity.timeout); + prevPendingMaturity.timeout = setupTimeout(); + } else { + pendingRanges.set(diff.range.idString, { + range: diff, + timeout: setupTimeout(), + }); } - map.set(diff.range.idString, diff); } } else { - const prev = this.pendingMaturity.get(diff.range.hash); + const pendingFromPeer = this.pendingMaturity.get(diff.range.hash); + if (pendingFromPeer) { + const prev = pendingFromPeer.get(diff.range.idString); if (prev) { - prev.ranges.delete(diff.range.idString); + clearTimeout(prev.timeout); + pendingFromPeer.delete(diff.range.idString); + } + if (pendingFromPeer.size === 0) { + this.pendingMaturity.delete(diff.range.hash); } } } } - if (reset) { - await this.updateOldestTimestampFromIndex(); - } + if (reset) { + await this.updateOldestTimestampFromIndex(); + } + + this.events.dispatchEvent( + new CustomEvent("replication:change", { + detail: { publicKey: from }, + }), + ); + if (isNewReplicator) { this.events.dispatchEvent( - new CustomEvent("replication:change", { + new CustomEvent("replicator:join", { detail: { publicKey: from }, }), ); - if (isNewReplicator) { + if (isAllMature) { this.events.dispatchEvent( - new CustomEvent("replicator:join", { + new CustomEvent("replicator:mature", { detail: { publicKey: from }, }), ); - - if (isAllMature) { - this.events.dispatchEvent( - new CustomEvent("replicator:mature", { - detail: { publicKey: from }, - }), - ); - } } + } - diffs.length > 0 && - diffs.map((x) => this.replicationChangeDebounceFn.add(x)); - - if (!from.equals(this.node.identity.publicKey)) { - this.rebalanceParticipationDebounced?.(); + if (diffs.length > 0) { + for (const diff of diffs) { + this.replicationChangeDebounceFn.add(diff); } + } - return diffs; - }; + if (!from.equals(this.node.identity.publicKey)) { + this.rebalanceParticipationDebounced?.(); + } - // we sequialize this because we are going to queries to check wether to add or not - // if two processes do the same this both process might add a range while only one in practice should - return fn(); + return diffs; } async startAnnounceReplicating( - range: ReplicationRangeIndexable[], + range: ReplicationRangeIndexable[], options: { + syncStatus?: SyncStatus; reset?: boolean; checkDuplicates?: boolean; announce?: ( @@ -1081,7 +1215,6 @@ segments: range.map((x) => x.toReplicationRange()), }); } - if (options.announce) { return options.announce(message); } else { @@ -1092,6 +1225,44 @@ } } + private removePeerFromGidPeerHistory(publicKeyHash: string, gid?: string) { + if (gid) { + const gidMap = this._gidPeersHistory.get(gid); + if (gidMap) { + gidMap.delete(publicKeyHash); + + if (gidMap.size === 0) { + this._gidPeersHistory.delete(gid); + } + } + } else { + for (const key of this._gidPeersHistory.keys()) { + this.removePeerFromGidPeerHistory(publicKeyHash, key); + } + } + } + + private addPeersToGidPeerHistory( + gid: string, + publicKeys: Iterable, + reset?: boolean, + ) { + let set = this._gidPeersHistory.get(gid); + if (!set) { + set = new Set(); + this._gidPeersHistory.set(gid, set); + } else { + if (reset) { + set.clear(); + } + } + + for (const key of publicKeys) { + set.add(key); + } + return set; + } + 
async append( data: T, options?: SharedAppendOptions | undefined, ): Promise { removed: ShallowOrFullEntry[]; }> { const appendOptions: AppendOptions = { ...options }; - const minReplicas = options?.replicas - ? typeof options.replicas === "number" - ? new AbsoluteReplicas(options.replicas) - : options.replicas - : this.replicas.min; - const minReplicasValue = minReplicas.getValue(this); + const minReplicas = this.getClampedReplicas( + options?.replicas + ? typeof options.replicas === "number" + ? new AbsoluteReplicas(options.replicas) + : options.replicas + : undefined, + ); const minReplicasData = encodeReplicas(minReplicas); - + const minReplicasValue = minReplicas.getValue(this); checkMinReplicasLimit(minReplicasValue); if (!appendOptions.meta) { @@ -1134,21 +1306,26 @@ } const result = await this.log.append(data, appendOptions); + let mode: DeliveryMode | undefined = undefined; if (options?.replicate) { await this.replicate(result.entry, { checkDuplicates: true }); } - let { leaders, isLeader } = await this.findLeadersPersist( - { - entry: result.entry, - minReplicas: minReplicas.getValue(this), - }, + const coordinates = await this.createCoordinates( result.entry, - { persist: {} }, + minReplicasValue, ); + let isLeader = false; + let leaders = await this.findLeaders(coordinates, result.entry, { + persist: {}, + onLeader: (key) => { + isLeader = isLeader || this.node.identity.publicKey.hashcode() === key; + }, + }); + // -------------- if (options?.target !== "none") { for await (const message of createExchangeHeadsMessages(this.log, [ result.entry, ])) { if (options?.target === "replicators" || !options?.target) { if (message.heads[0].gidRefrences.length > 0) { - const newAndOldLeaders = new Map(leaders); for (const ref of message.heads[0].gidRefrences) { const entryFromGid = this.log.entryIndex.getHeads(ref, false); for (const entry of await entryFromGid.all()) { - let coordinate = await this.getCoordinates(entry); - if (coordinate == null) { - coordinate = await this.createCoordinates( + let coordinates = await this.getCoordinates(entry); + if (coordinates == null) { + coordinates = await this.createCoordinates( entry, minReplicasValue, ); // TODO are we ever to come here? } - for (const [hash, features] of await this.findLeaders( - coordinate, - )) { - newAndOldLeaders.set(hash, features); + + const result = await this._findLeaders(coordinates); + for (const [k, v] of result) { + leaders.set(k, v); } } } - leaders = newAndOldLeaders; - } - - let set = this._gidPeersHistory.get(result.entry.meta.gid); - if (!set) { - set = new Set(leaders.keys()); - this._gidPeersHistory.set(result.entry.meta.gid, set); - } else { - for (const [receiver, _features] of leaders) { - set.add(receiver); - } } + const set = this.addPeersToGidPeerHistory( + result.entry.meta.gid, + leaders.keys(), + ); mode = isLeader - ? new SilentDelivery({ redundancy: 1, to: leaders.keys() }) - : new AcknowledgeDelivery({ redundancy: 1, to: leaders.keys() }); + ? new SilentDelivery({ redundancy: 1, to: set }) + : new AcknowledgeDelivery({ redundancy: 1, to: set }); } // TODO add options for waiting ?
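For context before the next hunk: append now queues prune candidates as { entry, leaders } pairs, and the optional merge callback added to debouncedAccumulatorMap in debounce.ts above unions the leader maps when the same entry is queued again within one debounce window. A minimal usage sketch, assuming only the debounce.ts API shown in this patch (the key names, value type, and interval are illustrative):

import { debouncedAccumulatorMap } from "../src/debounce.js";

// Values queued under the same key within the debounce window are merged
// instead of overwritten, so a batched prune request accumulates the union
// of candidate leaders for an entry.
const queue = debouncedAccumulatorMap<{ leaders: Map<string, number> }>(
	async (batch) => {
		// batch maps entry hash -> merged value, flushed at most once per interval
		for (const [hash, { leaders }] of batch) {
			console.log(hash, [...leaders.keys()]);
		}
	},
	500, // debounce interval in ms (illustrative)
	(into, from) => {
		// same merge shape as the prune queue set up in open() below
		for (const [k, v] of from.leaders) {
			if (!into.leaders.has(k)) {
				into.leaders.set(k, v);
			}
		}
	},
);

queue.add({ key: "entry-a", value: { leaders: new Map([["peer-1", 1]]) } });
queue.add({ key: "entry-a", value: { leaders: new Map([["peer-2", 1]]) } });
// after the interval, the flush callback sees "entry-a" once, with both peers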
@@ -1204,7 +1373,7 @@ export class SharedLog< if (!isLeader) { this.pruneDebouncedFn.add({ key: result.entry.hash, - value: result.entry, + value: { entry: result.entry, leaders }, }); } this.rebalanceParticipationDebounced?.(); @@ -1212,35 +1381,50 @@ export class SharedLog< return result; } - async open(options?: Args): Promise { + async open(options?: Args): Promise { this.replicas = { - min: options?.replicas?.min - ? typeof options?.replicas?.min === "number" - ? new AbsoluteReplicas(options?.replicas?.min) - : options?.replicas?.min - : new AbsoluteReplicas(DEFAULT_MIN_REPLICAS), + min: + options?.replicas?.min != null + ? typeof options?.replicas?.min === "number" + ? new AbsoluteReplicas(options?.replicas?.min) + : options?.replicas?.min + : new AbsoluteReplicas(DEFAULT_MIN_REPLICAS), max: options?.replicas?.max ? typeof options?.replicas?.max === "number" ? new AbsoluteReplicas(options?.replicas?.max) : options.replicas.max : undefined, }; - this.domain = options?.domain ?? (createReplicationDomainHash() as D); + // TODO types + this.domain = options?.domain + ? (options.domain as any as D) + : (createReplicationDomainHash("u32") as D); + this.indexableDomain = createIndexableDomainFromResolution( + this.domain.resolution, + ); this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 2e4; this._pendingDeletes = new Map(); this._pendingIHave = new Map(); this.latestReplicationInfoMessage = new Map(); - this.syncInFlightQueue = new Map(); - this.syncInFlightQueueInverted = new Map(); - this.syncInFlight = new Map(); + this.coordinateToHash = new Cache({ max: 1e6, ttl: 1e4 }); + + this.uniqueReplicators = new Set(); + this.openTime = +new Date(); this.oldestOpenTime = this.openTime; this.distributionDebounceTime = options?.distributionDebounceTime || DEFAULT_DISTRIBUTION_DEBOUNCE_TIME; // expect > 0 + this.timeUntilRoleMaturity = options?.timeUntilRoleMaturity ?? 
WAIT_FOR_ROLE_MATURITY; this.waitForReplicatorTimeout = options?.waitForReplicatorTimeout || WAIT_FOR_REPLICATOR_TIMEOUT; + this.waitForPruneDelay = options?.waitForPruneDelay || WAIT_FOR_PRUNE_DELAY; + + if (this.waitForReplicatorTimeout < this.timeUntilRoleMaturity) { + this.waitForReplicatorTimeout = this.timeUntilRoleMaturity; // does not make sense to expect a replicator to mature faster than it is reachable + } + this._closeController = new AbortController(); this._isTrustedReplicator = options?.canReplicate; this.sync = options?.sync; @@ -1266,15 +1450,14 @@ await this.remoteBlocks.start(); - /* this._totalParticipation = 0; */ const logScope = await this.node.indexer.scope(id); const replicationIndex = await logScope.scope("replication"); this._replicationRangeIndex = await replicationIndex.init({ - schema: ReplicationRangeIndexable, + schema: this.indexableDomain.constructorRange, }); this._entryCoordinatesIndex = await replicationIndex.init({ - schema: EntryReplicated, + schema: this.indexableDomain.constructorEntry, }); const logIndex = await logScope.scope("log"); @@ -1291,9 +1474,9 @@ ], })) > 0; - /* this._totalParticipation = await this.calculateTotalParticipation(); */ - this._gidPeersHistory = new Map(); + this._requestIPruneSent = new Map(); + this._requestIPruneResponseReplicatorSet = new Map(); this.replicationChangeDebounceFn = debounceAggregationChanges( (change) => @@ -1308,9 +1491,16 @@ this.prune(map); }, PRUNE_DEBOUNCE_INTERVAL, // TODO make this dynamic on the number of replicators + (into, from) => { + for (const [k, v] of from.leaders) { + if (!into.leaders.has(k)) { + into.leaders.set(k, v); + } + } + }, ); - this.responseToPruneDebouncedFn = debounceAcculmulator< + this.responseToPruneDebouncedFn = debounceAccumulator< string, { hashes: string[]; @@ -1327,6 +1517,7 @@ } hashes.push(hash); } + hashes.length > 0 && this.rpc.send(new ResponseIPrune({ hashes }), { mode: new SilentDelivery({ @@ -1384,6 +1575,21 @@ indexer: logIndex, }); + this.syncronizer = options?.syncronizer + ? new options.syncronizer({ + entryIndex: this.entryCoordinatesIndex, + log: this.log, + rangeIndex: this._replicationRangeIndex, + rpc: this.rpc, + coordinateToHash: this.coordinateToHash, + }) + : new SimpleSyncronizer({ + log: this.log, + rpc: this.rpc, + entryIndex: this.entryCoordinatesIndex, + coordinateToHash: this.coordinateToHash, + }); + // Open for communcation await this.rpc.open({ queryType: TransportMessage, @@ -1408,61 +1614,6 @@ await this.rpc.subscribe(); - const requestSync = async () => { - /** - * This method fetches entries that we potentially want.
- * In a case in which we become replicator of a segment, - * multiple remote peers might want to send us entries - * This method makes sure that we only request on entry from the remotes at a time - * so we don't get flooded with the same entry - */ - const requestHashes: string[] = []; - const from: Set = new Set(); - for (const [key, value] of this.syncInFlightQueue) { - if (!(await this.log.has(key))) { - // TODO test that this if statement actually does anymeaningfull - if (value.length > 0) { - requestHashes.push(key); - const publicKeyHash = value.shift()!.hashcode(); - from.add(publicKeyHash); - const invertedSet = - this.syncInFlightQueueInverted.get(publicKeyHash); - if (invertedSet) { - if (invertedSet.delete(key)) { - if (invertedSet.size === 0) { - this.syncInFlightQueueInverted.delete(publicKeyHash); - } - } - } - } - if (value.length === 0) { - this.syncInFlightQueue.delete(key); // no-one more to ask for this entry - } - } else { - this.syncInFlightQueue.delete(key); - } - } - - const nowMin10s = +new Date() - 1e4; - for (const [key, map] of this.syncInFlight) { - // cleanup "old" missing syncs - for (const [hash, { timestamp }] of map) { - if (timestamp < nowMin10s) { - map.delete(hash); - } - } - if (map.size === 0) { - this.syncInFlight.delete(key); - } - } - this.requestSync(requestHashes, from).finally(() => { - if (this.closed) { - return; - } - this.syncMoreInterval = setTimeout(requestSync, 3e3); - }); - }; - // if we had a previous session with replication info, and new replication info dictates that we unreplicate // we should do that. Otherwise if options is a unreplication we dont need to do anything because // we are already unreplicated (as we are just opening) @@ -1483,8 +1634,7 @@ export class SharedLog< reset: true, }); } - - requestSync(); + await this.syncronizer.open(); this.interval = setInterval(() => { this.rebalanceParticipationDebounced?.(); @@ -1519,17 +1669,17 @@ export class SharedLog< const promises: Promise[] = []; const iterator = this.replicationIndex.iterate(); - let checked = new Set(); + let checkedIsAlive = new Set(); while (!iterator.done()) { for (const segment of await iterator.next(1000)) { if ( - checked.has(segment.value.hash) || + checkedIsAlive.has(segment.value.hash) || this.node.identity.publicKey.hashcode() === segment.value.hash ) { continue; } - checked.add(segment.value.hash); + checkedIsAlive.add(segment.value.hash); promises.push( this.waitFor(segment.value.hash, { @@ -1541,6 +1691,7 @@ export class SharedLog< const key = await this.node.services.pubsub.getPublicKey( segment.value.hash, ); + if (!key) { throw new Error( "Failed to resolve public key from hash: " + @@ -1631,19 +1782,22 @@ export class SharedLog< let eager = options?.eager ?? false; const range = await this.domain.fromArgs(args, this); - const set = await getCoverSet({ + const set = await getCoverSet({ peers: this.replicationIndex, start: range.offset, widthToCoverScaled: range.length ?? 
- (await minimumWidthToCover(this.replicas.min.getValue(this))), + (await minimumWidthToCover( + this.replicas.min.getValue(this), + this.indexableDomain.numbers, + )), roleAge, eager, - intervalWidth: MAX_U32, + numbers: this.indexableDomain.numbers, }); // add all in flight - for (const [key, _] of this.syncInFlight) { + for (const [key, _] of this.syncronizer.syncInFlight) { set.add(key); } @@ -1651,14 +1805,19 @@ export class SharedLog< } private async _close() { - clearTimeout(this.syncMoreInterval); + await this.syncronizer.close(); - for (const [_key, value] of this.pendingMaturity) { - clearTimeout(value.timeout); + for (const [_key, peerMap] of this.pendingMaturity) { + for (const [_key2, info] of peerMap) { + clearTimeout(info.timeout); + } } + this.pendingMaturity.clear(); this.distributeQueue?.clear(); + this.coordinateToHash.clear(); + this.uniqueReplicators.clear(); this._closeController.abort(); @@ -1685,13 +1844,14 @@ export class SharedLog< await this.remoteBlocks.stop(); this._pendingDeletes.clear(); this._pendingIHave.clear(); - this.syncInFlightQueue.clear(); - this.syncInFlightQueueInverted.clear(); - this.syncInFlight.clear(); this.latestReplicationInfoMessage.clear(); this._gidPeersHistory.clear(); + this._requestIPruneSent.clear(); + this._requestIPruneResponseReplicatorSet.clear(); this.pruneDebouncedFn = undefined as any; this.rebalanceParticipationDebounced = undefined; + this._replicationRangeIndex.stop(); + this._entryCoordinatesIndex.stop(); this._replicationRangeIndex = undefined as any; this._entryCoordinatesIndex = undefined as any; @@ -1713,6 +1873,8 @@ export class SharedLog< if (!superDropped) { return superDropped; } + await this._entryCoordinatesIndex.drop(); + await this._replicationRangeIndex.drop(); await this.log.drop(); await this._close(); return true; @@ -1772,9 +1934,16 @@ export class SharedLog< for (const [gid, entries] of groupedByGid) { const fn = async () => { + /// we clear sync in flight here because we want to join before that, so that entries are totally accounted for + await this.syncronizer.onReceivedEntries({ + entries, + from: context.from!, + }); + const headsWithGid = await this.log.entryIndex .getHeads(gid) .all(); + const latestEntry = getLatestEntry(entries)!; const maxReplicasFromHead = @@ -1797,28 +1966,44 @@ export class SharedLog< maxMaxReplicas, ); - const isReplicating = await this.isReplicating(); - - let isLeader: - | Map< - string, - { - intersecting: boolean; - } - > - | false; + const isReplicating = this._isReplicating; + let isLeader = false; + let fromIsLeader = false; + let leaders: Map | false; if (isReplicating) { - isLeader = await this.waitForIsLeader( + leaders = await this._waitForReplicators( cursor, - this.node.identity.publicKey.hashcode(), + latestEntry, + [ + { + key: this.node.identity.publicKey.hashcode(), + replicator: true, + }, + ], + { + // we do this here so that we quickly assume leader role (and also so that 'from' is also assumed to be leader) + // TODO potential side effects? + roleAge: 0, + onLeader: (key) => { + isLeader = + isLeader || + this.node.identity.publicKey.hashcode() === key; + fromIsLeader = + fromIsLeader || context.from!.hashcode() === key; + }, + }, ); } else { - isLeader = await this.findLeaders(cursor); - - isLeader = isLeader.has(this.node.identity.publicKey.hashcode()) - ? 
isLeader - : false; + leaders = await this.findLeaders(cursor, latestEntry, { + onLeader: (key) => { + fromIsLeader = + fromIsLeader || context.from!.hashcode() === key; + isLeader = + isLeader || + this.node.identity.publicKey.hashcode() === key; + }, + }); } if (this.closed) { @@ -1831,24 +2016,16 @@ export class SharedLog< if (isLeader) { for (const entry of entries) { this.pruneDebouncedFn.delete(entry.entry.hash); - } - - for (const entry of entries) { - await this.persistCoordinate({ - leaders: isLeader, - coordinates: cursor, - entry: entry.entry, - }); - } + this._requestIPruneSent.delete(entry.entry.hash); + this._requestIPruneResponseReplicatorSet.delete( + entry.entry.hash, + ); - const fromIsLeader = isLeader.get(context.from!.hashcode()); - if (fromIsLeader) { - let peerSet = this._gidPeersHistory.get(gid); - if (!peerSet) { - peerSet = new Set(); - this._gidPeersHistory.set(gid, peerSet); + if (fromIsLeader) { + this.addPeersToGidPeerHistory(gid, [ + context.from!.hashcode(), + ]); } - peerSet.add(context.from!.hashcode()); } if (maxReplicasFromNewEntries < maxReplicasFromHead) { @@ -1885,22 +2062,15 @@ export class SharedLog< await this.log.join(toMerge); toDelete?.map((x) => - this.pruneDebouncedFn.add({ key: x.hash, value: x }), + // TODO types + this.pruneDebouncedFn.add({ + key: x.hash, + value: { entry: x, leaders: leaders as Map }, + }), ); this.rebalanceParticipationDebounced?.(); } - /// we clear sync in flight here because we want to join before that, so that entries are totally accounted for - for (const entry of entries) { - const set = this.syncInFlight.get(context.from!.hashcode()); - if (set) { - set.delete(entry.entry.hash); - if (set?.size === 0) { - this.syncInFlight.delete(context.from!.hashcode()); - } - } - } - if (maybeDelete) { for (const entries of maybeDelete as EntryWithRefs[][]) { const headsWithGid = await this.log.entryIndex @@ -1918,12 +2088,16 @@ export class SharedLog< }); if (!isLeader) { - entries.map((x) => + for (const x of entries) { this.pruneDebouncedFn.add({ key: x.entry.hash, - value: x.entry, - }), - ); + // TODO types + value: { + entry: x.entry, + leaders: leaders as Map, + }, + }); + } } } } @@ -1937,24 +2111,45 @@ export class SharedLog< const hasAndIsLeader: string[] = []; // await delay(3000) for (const hash of msg.hashes) { + // if we expect the remote to be owner of this entry because we are to prune ourselves, then we need to remove the remote + // this is due to that the remote has previously indicated to be a replicator to help us prune but now has changed their mind + const outGoingPrunes = + this._requestIPruneResponseReplicatorSet.get(hash); + if (outGoingPrunes) { + outGoingPrunes.delete(context.from.hashcode()); + } + const indexedEntry = await this.log.entryIndex.getShallow(hash); - if ( - indexedEntry && - ( - await this.findLeadersPersist( + let isLeader = false; + + if (indexedEntry) { + this.removePeerFromGidPeerHistory( + context.from!.hashcode(), + indexedEntry!.value.meta.gid, + ); + + await this._waitForReplicators( + await this.createCoordinates( + indexedEntry.value, + decodeReplicas(indexedEntry.value).getValue(this), + ), + indexedEntry.value, + [ { - entry: indexedEntry.value, - minReplicas: decodeReplicas(indexedEntry.value).getValue( - this, - ), + key: this.node.identity.publicKey.hashcode(), + replicator: true, }, - indexedEntry.value, - ) - ).isLeader - ) { - this._gidPeersHistory - .get(indexedEntry.value.meta.gid) - ?.delete(context.from.hashcode()); + ], + { + onLeader: (key) => { + isLeader = + 
isLeader || key === this.node.identity.publicKey.hashcode(); + }, + }, + ); + } + + if (isLeader) { hasAndIsLeader.push(hash); hasAndIsLeader.length > 0 && @@ -1986,21 +2181,26 @@ export class SharedLog< clearTimeout(timeout); }, callback: async (entry: Entry) => { - if ( - ( - await this.findLeadersPersist( - { - entry, - minReplicas: decodeReplicas(entry).getValue(this), - }, - entry, - ) - ).isLeader - ) { - for (const peer of requesting) { - this._gidPeersHistory.get(entry.meta.gid)?.delete(peer); - } - + this.removePeerFromGidPeerHistory( + context.from!.hashcode(), + entry.meta.gid, + ); + let isLeader = false; + await this.findLeaders( + await this.createCoordinates( + entry, + decodeReplicas(entry).getValue(this), + ), + entry, + { + onLeader: (key) => { + isLeader = + isLeader || + key === this.node.identity.publicKey.hashcode(); + }, + }, + ); + if (isLeader) { this.responseToPruneDebouncedFn.add({ hashes: [entry.hash], peers: requesting, @@ -2018,7 +2218,9 @@ export class SharedLog< for (const hash of msg.hashes) { this._pendingDeletes.get(hash)?.resolve(context.from.hashcode()); } - } else if (msg instanceof RequestMaybeSync) { + } else if (await this.syncronizer.onMessage(msg, context)) { + return; // the syncronizer has handled the message + } /* else if (msg instanceof RequestMaybeSync) { const requestHashes: string[] = []; for (const hash of msg.hashes) { @@ -2058,7 +2260,7 @@ export class SharedLog< mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }), }); } - } else if (msg instanceof BlocksMessage) { + } */ else if (msg instanceof BlocksMessage) { await this.remoteBlocks.onMessage(msg.message); } else if (msg instanceof RequestReplicationInfoMessage) { // TODO this message type is never used, should we remove it? @@ -2134,6 +2336,10 @@ export class SharedLog< let reset = msg instanceof AllReplicatingSegmentsMessage; + if (this.closed) { + return; + } + await this.addReplicationRange( replicationInfoMessage.segments.map((x) => x.toReplicationRangeIndexable(context.from!), @@ -2151,6 +2357,9 @@ export class SharedLog< if (e instanceof NotStartedError) { return; } + if (e instanceof IndexNotStartedError) { + return; + } logger.error( "Failed to find peer who updated replication settings: " + e?.message, @@ -2166,7 +2375,11 @@ export class SharedLog< throw new Error("Unexpected message"); } } catch (e: any) { - if (e instanceof AbortError) { + if ( + e instanceof AbortError || + e instanceof NotStartedError || + e instanceof IndexNotStartedError + ) { return; } @@ -2191,6 +2404,42 @@ export class SharedLog< } } + async calculateTotalParticipation(options?: { sum?: boolean }) { + if (options?.sum) { + const ranges = await this.replicationIndex.iterate().all(); + let sum = 0; + for (const range of ranges) { + sum += range.value.widthNormalized; + } + return sum; + } + return appromixateCoverage({ + peers: this._replicationRangeIndex, + numbers: this.indexableDomain.numbers, + samples: 25, + }); + } + + /* async calculateTotalParticipation() { + const sum = await this.replicationIndex.sum({ key: "width" }); + return Number(sum) / MAX_U32; + } + */ + async countReplicationSegments() { + const count = await this.replicationIndex.count({ + query: new StringMatch({ + key: "hash", + value: this.node.identity.publicKey.hashcode(), + }), + }); + return count; + } + + async getAllReplicationSegments() { + const ranges = await this.replicationIndex.iterate().all(); + return ranges.map((x) => x.value); + } + async getMyReplicationSegments() { const ranges = await 
this.replicationIndex .iterate({ @@ -2203,7 +2452,7 @@ export class SharedLog< return ranges.map((x) => x.value); } - async getMyTotalParticipation() { + async calculateMyTotalParticipation() { // sum all of my replicator rects return (await this.getMyReplicationSegments()).reduce( (acc, { widthNormalized }) => acc + widthNormalized, @@ -2211,14 +2460,14 @@ export class SharedLog< ); } - get replicationIndex(): Index { + get replicationIndex(): Index> { if (!this._replicationRangeIndex) { throw new ClosedError(); } return this._replicationRangeIndex; } - get entryCoordinatesIndex(): Index { + get entryCoordinatesIndex(): Index> { if (!this._entryCoordinatesIndex) { throw new ClosedError(); } @@ -2243,12 +2492,12 @@ export class SharedLog< async waitForReplicator(...keys: PublicSignKey[]) { const check = async () => { for (const k of keys) { - const rects = await this.replicationIndex - ?.iterate( - { query: new StringMatch({ key: "hash", value: k.hashcode() }) }, - { reference: true }, - ) - .all(); + const iterator = this.replicationIndex?.iterate( + { query: new StringMatch({ key: "hash", value: k.hashcode() }) }, + { reference: true }, + ); + const rects = await iterator?.next(1); + await iterator.close(); const rect = rects[0]?.value; if ( !rect || @@ -2259,6 +2508,8 @@ export class SharedLog< } return true; }; + + // TODO do event based return waitFor(() => check(), { signal: this._closeController.signal, }).catch((e) => { @@ -2275,175 +2526,173 @@ export class SharedLog< options?: { verifySignatures?: boolean; timeout?: number; - replicate?: boolean; + replicate?: + | boolean + | { + mergeSegments?: boolean; + }; }, ): Promise { - let messageToSend: AddedReplicationSegmentMessage | undefined = undefined; - + let entriesToReplicate: Entry[] = []; if (options?.replicate) { // TODO this block should perhaps be called from a callback on the this.log.join method for all the ignored elements that were already joined, like "onAlreadyJoined" // check which entries we already have but are not replicating, and replicate them - let alreadyJoined: Entry[] = []; + // we cannot just do the 'join' call because it will ignore the already joined entries for (const element of entries) { if (typeof element === "string") { const entry = await this.log.get(element); if (entry) { - alreadyJoined.push(entry); + entriesToReplicate.push(entry); } } else if (element instanceof Entry) { if (await this.log.has(element.hash)) { - alreadyJoined.push(element); + entriesToReplicate.push(element); } } else { const entry = await this.log.get(element.hash); if (entry) { - alreadyJoined.push(entry); + entriesToReplicate.push(entry); } } } - - // assume is heads - await this.replicate(alreadyJoined, { - checkDuplicates: true, - announce: (msg) => { - messageToSend = msg; - }, - }); } - let joinOptions = options?.replicate - ? { - ...options, - onChange: async (change: Change) => { - if (change.added) { - for (const entry of change.added) { - if (entry.head) { - await this.replicate(entry.entry, { - checkDuplicates: true, - - // we override the announce step here to make sure we announce all new replication info - // in one large message instead - announce: (msg) => { - if (msg instanceof AllReplicatingSegmentsMessage) { - throw new Error("Unexpected"); - } - - if (messageToSend) { - // merge segments to make it into one messages - for (const segment of msg.segments) { - messageToSend.segments.push(segment); - } - } else { - messageToSend = msg; - } - }, - }); - } + const onChangeForReplication = options?.replicate + ?
async (change: Change) => { + if (change.added) { + for (const entry of change.added) { + if (entry.head) { + entriesToReplicate.push(entry.entry); } } - }, + } + } + : undefined; + + const persistCoordinate = async (entry: Entry) => { + const minReplicas = decodeReplicas(entry).getValue(this); + await this.findLeaders( + await this.createCoordinates(entry, minReplicas), + entry, + { persist: {} }, + ); + }; + let entriesToPersist: Entry[] = []; + let joinOptions = { + ...options, + onChange: async (change: Change) => { + await onChangeForReplication?.(change); + for (const entry of change.added) { + if (!entry.head) { + continue; + } + + if (!options?.replicate) { + // we persist coordinates for all added entries here + + await persistCoordinate(entry.entry); + } else { + // else we persist after replication range update has been done so that + // the indexed info becomes up to date + entriesToPersist.push(entry.entry); + } } - : options; + }, + }; await this.log.join(entries, joinOptions); - if (messageToSend) { - await this.rpc.send(messageToSend, { - priority: 1, - }); - } - } + if (options?.replicate) { + let messageToSend: AddedReplicationSegmentMessage | undefined = undefined; + await this.replicate(entriesToReplicate, { + checkDuplicates: true, + mergeSegments: + typeof options.replicate !== "boolean" && options.replicate + ? options.replicate.mergeSegments + : false, - private async findLeadersPersist( - cursor: - | number[] - | { - entry: ShallowOrFullEntry | EntryReplicated; - minReplicas: number; - }, - entry: ShallowOrFullEntry | EntryReplicated, - options?: { - roleAge?: number; - // persist even if not leader - persist?: { - prev?: EntryReplicated[]; - }; - }, - ): Promise<{ - leaders: Map; - isLeader: boolean; - }> { - const coordinates = Array.isArray(cursor) - ? 
cursor - : await this.createCoordinates(cursor.entry, cursor.minReplicas); - const minReplicas = coordinates.length; - const leaders = await this.findLeaders(coordinates, options); - const isLeader = leaders.has(this.node.identity.publicKey.hashcode()); - - if (isLeader || options?.persist) { - let assignToRangeBoundary: boolean | undefined = undefined; - if (options?.persist?.prev) { - assignToRangeBoundary = shouldAssigneToRangeBoundary( - leaders, - minReplicas, - ); - const prev = options.persist.prev; - // dont do anthing if nothing has changed - if (prev.length > 0) { - let allTheSame = true; - - for (const element of prev) { - if (element.assignedToRangeBoundary !== assignToRangeBoundary) { - allTheSame = false; - break; - } + // we override the announce step here to make sure we announce all new replication info + // in one large message instead + announce: (msg) => { + if (msg instanceof AllReplicatingSegmentsMessage) { + throw new Error("Unexpected"); } - if (allTheSame) { - return { leaders, isLeader }; + if (messageToSend) { + // merge segments to make it into one message + for (const segment of msg.segments) { + messageToSend.segments.push(segment); + } + } else { + messageToSend = msg; } - } + }, + }); + + for (const entry of entriesToPersist) { + await persistCoordinate(entry); } !this.closed && - (await this.persistCoordinate( - { - leaders, - coordinates, - entry, - }, - { - assignToRangeBoundary, - }, - )); + if (messageToSend) { + await this.rpc.send(messageToSend, { + priority: 1, + }); + } } - - return { leaders, isLeader }; - } - - async isLeader( - cursor: - | number[] - | { - entry: ShallowOrFullEntry | EntryReplicated; - replicas: number; - }, - options?: { - roleAge?: number; - }, - ): Promise { - const leaders = await this.findLeaders(cursor, options); - return leaders.has(this.node.identity.publicKey.hashcode()); - } + /* + private async updateLeaders( + cursor: NumberFromType, + prev: EntryReplicated, + options?: { + roleAge?: number; + }, + ): Promise<{ + isLeader: boolean; + leaders: Map; + }> { + // we consume a list of coordinates in this method since if we are leader of one coordinate we want to persist all of them + const leaders = await this._findLeaders(cursor, options); + const isLeader = leaders.has(this.node.identity.publicKey.hashcode()); + const isAtRangeBoundary = shouldAssignToRangeBoundary(leaders, 1); + + // don't do anything if nothing has changed + if (prev.assignedToRangeBoundary !== isAtRangeBoundary) { + return { isLeader, leaders }; + } + + await this.entryCoordinatesIndex.put( + new this.indexableDomain.constructorEntry({ + assignedToRangeBoundary: isAtRangeBoundary, + coordinate: cursor, + meta: prev.meta, + hash: prev.hash, + }), + ); + + return { isLeader, leaders }; + } + */ - private async waitForIsLeader( - cursor: number[], - hash: string, + private async _waitForReplicators( + cursors: NumberFromType[], + entry: Entry | EntryReplicated | ShallowEntry, + waitFor: { key: string; replicator: boolean }[], options: { - timeout: number; + timeout?: number; + roleAge?: number; + onLeader?: (key: string) => void; + // persist even if not leader + persist?: + | { + prev?: EntryReplicated; + } + | false; } = { timeout: this.waitForReplicatorTimeout }, ): Promise | false> { + const timeout = options.timeout ??
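// NOTE (editorial comment, descriptive, not part of the original patch): this method
// re-runs findLeaders on every "replication:change" / "replicator:mature" event until
// every entry in `waitFor` matches its expected status (keys with replicator: true must
// appear in the leader set, keys with replicator: false must not). It resolves with the
// leader map on a match, and with false on timeout or when the log is closed.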
this.waitForReplicatorTimeout; + return new Promise((resolve, reject) => { const removeListeners = () => { this.events.removeEventListener("replication:change", roleListener); @@ -2459,21 +2708,37 @@ export class SharedLog< resolve(false); }; - const timer = setTimeout(() => { + const timer = setTimeout(async () => { removeListeners(); resolve(false); - }, options.timeout); + }, timeout); - const check = () => - this.findLeaders(cursor).then((leaders) => { - const isLeader = leaders.has(hash); - if (isLeader) { - removeListeners(); - clearTimeout(timer); - resolve(leaders); - } + const check = async () => { + let leaderKeys = new Set(); + const leaders = await this.findLeaders(cursors, entry, { + ...options, + onLeader: (key) => { + options?.onLeader && options.onLeader(key); + leaderKeys.add(key); + }, }); + for (const waitForKey of waitFor) { + if (waitForKey.replicator && !leaderKeys!.has(waitForKey.key)) { + return; + } + + if (!waitForKey.replicator && leaderKeys!.has(waitForKey.key)) { + return; + } + } + options?.onLeader && leaderKeys.forEach(options.onLeader); + + removeListeners(); + clearTimeout(timer); + resolve(leaders); + }; + const roleListener = () => { check(); }; @@ -2485,13 +2750,62 @@ export class SharedLog< }); } - async findLeaders( + /* + private async waitForIsLeader( + cursors: NumberFromType[], + hash: string, + options: { + timeout: number; + } = { timeout: this.waitForReplicatorTimeout }, + ): Promise | false> { + return new Promise((resolve, reject) => { + const removeListeners = () => { + this.events.removeEventListener("replication:change", roleListener); + this.events.removeEventListener("replicator:mature", roleListener); // TODO replication:change event ? + this._closeController.signal.removeEventListener( + "abort", + abortListener, + ); + }; + const abortListener = () => { + removeListeners(); + clearTimeout(timer); + resolve(false); + }; + + const timer = setTimeout(() => { + removeListeners(); + resolve(false); + }, options.timeout); + + const check = async () => { + const leaders = await this.mergeLeadersMap(await Promise.all(cursors.map(x => this.findLeaders(x)))); + const isLeader = leaders.has(hash); + if (isLeader) { + removeListeners(); + clearTimeout(timer); + resolve(leaders); + } + } + + const roleListener = () => { + check(); + }; + + this.events.addEventListener("replication:change", roleListener); // TODO replication:change event ? + this.events.addEventListener("replicator:mature", roleListener); // TODO replication:change event ? + this._closeController.signal.addEventListener("abort", abortListener); + check(); + }); + } */ + + /* async findLeaders( cursor: - | number[] + | NumberFromType[] | { - entry: ShallowOrFullEntry | EntryReplicated; - replicas: number; - }, + entry: ShallowOrFullEntry | EntryReplicated; + replicas: number; + }, options?: { roleAge?: number; }, @@ -2505,44 +2819,51 @@ export class SharedLog< const coordinates = Array.isArray(cursor) ? 
cursor : await this.createCoordinates(cursor.entry, cursor.replicas); - const leaders = await this.findLeadersFromU32(coordinates, options); + const leaders = await this.findLeadersFromN(coordinates, options); return leaders; - } + } */ - private async groupByLeaders( - cursors: ( - | number[] - | { - entry: ShallowOrFullEntry | EntryReplicated; - replicas: number; - } - )[], + /* private async groupByLeaders( + entries: (ShallowOrFullEntry | EntryReplicated)[], options?: { roleAge?: number; }, ) { - const leaders = await Promise.all( - cursors.map((x) => this.findLeaders(x, options)), - ); - const map = new Map(); - leaders.forEach((leader, i) => { - for (const [hash] of leader) { - const arr = map.get(hash) ?? []; - arr.push(i); - map.set(hash, arr); + try { + const leaders = await Promise.all( + entries.map(async (x) => { + return this.findLeadersFromEntry(x, decodeReplicas(x).getValue(this), options); + }), + ); + const map = new Map(); + leaders.forEach((leader, i) => { + for (const [hash] of leader) { + const arr = map.get(hash) ?? []; + arr.push(i); + map.set(hash, arr); + } + }); + return map; + } catch (error) { + if (error instanceof NotStartedError || error instanceof IndexNotStartedError) { + // ignore because we are shutting down + return new Map(); + } else { + throw error; } - }); - - return map; - } + } + } */ - private async createCoordinates( - entry: ShallowOrFullEntry | EntryReplicated, + async createCoordinates( + entry: ShallowOrFullEntry | EntryReplicated | NumberFromType, minReplicas: number, ) { - const cursor = await this.domain.fromEntry(entry); - const out = getEvenlySpacedU32(cursor, minReplicas); + const cursor = + typeof entry === "number" || typeof entry === "bigint" + ? entry + : await this.domain.fromEntry(entry); + const out = this.indexableDomain.numbers.getGrid(cursor, minReplicas); return out; } @@ -2550,42 +2871,46 @@ export class SharedLog< const result = await this.entryCoordinatesIndex .iterate({ query: { hash: entry.hash } }) .all(); - return result.map((x) => x.value.coordinate); + return result[0].value.coordinates; } - private async persistCoordinate( - properties: { - coordinates: number[]; - entry: ShallowOrFullEntry | EntryReplicated; - leaders: - | Map< - string, - { - intersecting: boolean; - } - > - | false; - }, - options?: { - assignToRangeBoundary?: boolean; - }, - ) { - let assignedToRangeBoundary = - options?.assignToRangeBoundary ?? 
- shouldAssigneToRangeBoundary( - properties.leaders, - properties.coordinates.length, - ); + private async persistCoordinate(properties: { + coordinates: NumberFromType[]; + entry: ShallowOrFullEntry | EntryReplicated; + leaders: + | Map< + string, + { + intersecting: boolean; + } + > + | false; + replicas: number; + prev?: EntryReplicated; + }) { + let assignedToRangeBoundary = shouldAssignToRangeBoundary( + properties.leaders, + properties.replicas, + ); + + if ( + properties.prev && + properties.prev.assignedToRangeBoundary === assignedToRangeBoundary + ) { + return; // no change + } + + await this.entryCoordinatesIndex.put( + new this.indexableDomain.constructorEntry({ + assignedToRangeBoundary, + coordinates: properties.coordinates, + meta: properties.entry.meta, + hash: properties.entry.hash, + }), + ); for (const coordinate of properties.coordinates) { - await this.entryCoordinatesIndex.put( - new EntryReplicated({ - assignedToRangeBoundary, - coordinate, - meta: properties.entry.meta, - hash: properties.entry.hash, - }), - ); + this.coordinateToHash.add(coordinate, properties.entry.hash); } if (properties.entry.meta.next.length > 0) { @@ -2599,39 +2924,134 @@ export class SharedLog< } } - private async deleteCoordinates( - properties: { gid: string } | { hash: string }, - ) { + private async deleteCoordinates(properties: { hash: string }) { await this.entryCoordinatesIndex.del({ query: properties }); } async getDefaultMinRoleAge(): Promise { - if ((await this.isReplicating()) === false) { + if (this._isReplicating === false) { return 0; } const now = +new Date(); - const replLength = await this.replicationIndex.getSize(); + const subscribers = + (await this.node.services.pubsub.getSubscribers(this.rpc.topic)) + ?.length ?? 1; const diffToOldest = - replLength > 1 ? now - this.oldestOpenTime - 1 : Number.MAX_SAFE_INTEGER; - return Math.min( + subscribers > 1 ? 
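// NOTE (editorial worked example of the expression below, assuming
// timeUntilRoleMaturity = 1e4 and subscribers = 2):
//   diffToOldest = now - oldestOpenTime - 1
//   Math.max(diffToOldest, 1e4) >= 1e4
//   Math.max(Math.round((1e4 * Math.log(3)) / 3), 1e4) = Math.max(3662, 1e4) = 1e4
// Both Math.max terms are bounded below by timeUntilRoleMaturity while the first
// Math.min argument equals it, so as written the result is always timeUntilRoleMaturity.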
now - this.oldestOpenTime - 1 : Number.MAX_SAFE_INTEGER; + + const result = Math.min( this.timeUntilRoleMaturity, Math.max(diffToOldest, this.timeUntilRoleMaturity), Math.max( - Math.round((this.timeUntilRoleMaturity * Math.log(replLength + 1)) / 3), + Math.round( + (this.timeUntilRoleMaturity * Math.log(subscribers + 1)) / 3, + ), this.timeUntilRoleMaturity, ), ); // / 3 so that if 2 replicators and timeUntilRoleMaturity = 1e4 the result will be 1 + + return result; + /* return Math.min(1e3, this.timeUntilRoleMaturity); */ + } + + async findLeaders( + cursors: NumberFromType[], + entry: Entry | EntryReplicated | ShallowEntry, + options?: { + roleAge?: number; + onLeader?: (key: string) => void; + // persist even if not leader + persist?: + | { + prev?: EntryReplicated; + } + | false; + }, + ): Promise> { + // we consume a list of coordinates in this method since if we are leader of one coordinate we want to persist all of them + let isLeader = false; + + const set = await this._findLeaders(cursors, options); + for (const key of set.keys()) { + if (options?.onLeader) { + options.onLeader(key); + isLeader = isLeader || key === this.node.identity.publicKey.hashcode(); + } + } + + if (options?.persist !== false) { + if (isLeader || options?.persist) { + !this.closed && + (await this.persistCoordinate({ + leaders: set, + coordinates: cursors, + replicas: cursors.length, + entry, + prev: options?.persist?.prev, + })); + } + } + + return set; + } + + async isLeader( + properties: { + entry: ShallowOrFullEntry | EntryReplicated; + replicas: number; + }, + options?: { + roleAge?: number; + onLeader?: (key: string) => void; + // persist even if not leader + persist?: + | { + prev?: EntryReplicated; + } + | false; + }, + ): Promise { + let cursors: NumberFromType[] = await this.createCoordinates( + properties.entry, + properties.replicas, + ); + + const leaders = await this.findLeaders(cursors, properties.entry, options); + if (leaders.has(this.node.identity.publicKey.hashcode())) { + return true; + } + return false; } - private async findLeadersFromU32( - cursor: u32[], + private async _findLeaders( + cursors: NumberFromType[], options?: { roleAge?: number; }, ): Promise> { const roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); // TODO -500 as is added so that i f someone else is just as new as us, then we treat them as mature as us. 
without -500 we might be slower syncing if two nodes start almost at the same time - return getSamples(cursor, this.replicationIndex, roleAge); + return getSamples( + cursors, + this.replicationIndex, + roleAge, + this.indexableDomain.numbers, + { + uniqueReplicators: this.uniqueReplicators, + }, + ); + } + + async findLeadersFromEntry( + entry: ShallowOrFullEntry | EntryReplicated, + replicas: number, + options?: { + roleAge?: number; + }, + ): Promise> { + const coordinates = await this.createCoordinates(entry, replicas); + const result = await this._findLeaders(coordinates, options); + return result; + } async isReplicator( @@ -2642,7 +3062,10 @@ }, ) { return this.isLeader( - { entry, replicas: decodeReplicas(entry).getValue(this) }, + { + entry, + replicas: maxReplicas(this, [entry]), + }, options, ); } @@ -2657,10 +3080,23 @@ } if (!subscribed) { - for (const [_a, b] of this._gidPeersHistory) { - b.delete(publicKey.hashcode()); + this.removePeerFromGidPeerHistory(publicKey.hashcode()); + + for (const [k, v] of this._requestIPruneSent) { + v.delete(publicKey.hashcode()); + if (v.size === 0) { + this._requestIPruneSent.delete(k); + } + } + + for (const [k, v] of this._requestIPruneResponseReplicatorSet) { + v.delete(publicKey.hashcode()); + if (v.size === 0) { + this._requestIPruneResponseReplicatorSet.delete(k); + } } - this.clearSyncProcessPublicKey(publicKey); + + this.syncronizer.onPeerDisconnected(publicKey); (await this.replicationIndex.count({ query: { hash: publicKey.hashcode() }, @@ -2681,7 +3117,7 @@ segments: replicationSegments.map((x) => x.toReplicationRange()), }), { - mode: new SilentDelivery({ redundancy: 1, to: [publicKey] }), + mode: new SeekDelivery({ redundancy: 1, to: [publicKey] }), }, ) .catch((e) => logger.error(e.toString())); @@ -2690,7 +3126,7 @@ // for backwards compatibility this.rpc .send(new ResponseRoleMessage({ role: await this.getRole() }), { - mode: new SilentDelivery({ redundancy: 1, to: [publicKey] }), + mode: new SeekDelivery({ redundancy: 1, to: [publicKey] }), }) .catch((e) => logger.error(e.toString())); } @@ -2700,16 +3136,37 @@ } } + private getClampedReplicas(customValue?: MinReplicas) { + if (!customValue) { + return this.replicas.min; + } + const min = customValue.getValue(this); + const maxValue = Math.max(this.replicas.min.getValue(this), min); + + if (this.replicas.max) { + return new AbsoluteReplicas( + Math.min(maxValue, this.replicas.max.getValue(this)), + ); + } + return new AbsoluteReplicas(maxValue); + } + prune( - entries: - | (EntryReplicated | ShallowOrFullEntry)[] - | Map>, + entries: Map< + string, + { + entry: EntryReplicated | ShallowOrFullEntry; + leaders: Map | Set; + } + >, options?: { timeout?: number; unchecked?: boolean }, ): Promise[] { if (options?.unchecked) { return [...entries.values()].map((x) => { - this._gidPeersHistory.delete(x.meta.gid); - return this.log.remove(x, { + this._gidPeersHistory.delete(x.entry.meta.gid); + this._requestIPruneSent.delete(x.entry.hash); + this._requestIPruneResponseReplicatorSet.delete(x.entry.hash); + return this.log.remove(x.entry, { recursively: true, }); }); } // - Peers join and leave, which means we might not be a replicator anymore const promises: Promise[] = []; - const filteredEntries: (EntryReplicated | ShallowOrFullEntry)[] = []; - const deleted = new Set(); - for (const entry of entries.values()) { + let peerToEntries: Map =
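// NOTE (editorial comment, descriptive, not part of the original patch): the map built
// below groups the hashes of entries to prune by the leaders expected to store them, so
// that a single RequestIPrune message per peer can be emitted further down instead of
// one message per entry.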
new Map(); + let cleanupTimer: ReturnType[] = []; + + for (const { entry, leaders } of entries.values()) { + for (const leader of leaders.keys()) { + let set = peerToEntries.get(leader); + if (!set) { + set = []; + peerToEntries.set(leader, set); + } + + set.push(entry.hash); + } + const pendingPrev = this._pendingDeletes.get(entry.hash); if (pendingPrev) { promises.push(pendingPrev.promise.promise); continue; } - filteredEntries.push(entry); - const existCounter = new Set(); const minReplicas = decodeReplicas(entry); const deferredPromise: DeferredPromise = pDefer(); @@ -2748,17 +3214,52 @@ export class SharedLog< } clearTimeout(timeout); }; + const resolve = () => { clear(); - deferredPromise.resolve(); + cleanupTimer.push( + setTimeout(async () => { + if ( + await this.isLeader({ + entry, + replicas: minReplicas.getValue(this), + }) + ) { + return; + } + + this._gidPeersHistory.delete(entry.meta.gid); + this._requestIPruneSent.delete(entry.hash); + this._requestIPruneResponseReplicatorSet.delete(entry.hash); + + return this.log + .remove(entry, { + recursively: true, + }) + .then(() => { + deferredPromise.resolve(); + }) + .catch((e) => { + deferredPromise.reject(e); + }) + .finally(async () => { + this._gidPeersHistory.delete(entry.meta.gid); + this._requestIPruneSent.delete(entry.hash); + this._requestIPruneResponseReplicatorSet.delete(entry.hash); + // TODO in the case we become leader again here we need to re-add the entry + }); + }, this.waitForPruneDelay), + ); }; const reject = (e: any) => { clear(); + this._requestIPruneSent.delete(entry.hash); + this._requestIPruneResponseReplicatorSet.delete(entry.hash); deferredPromise.reject(e); }; - let cursor: number[] | undefined = undefined; + let cursor: NumberFromType[] | undefined = undefined; const timeout = setTimeout(async () => { reject( @@ -2773,41 +3274,48 @@ export class SharedLog< }, reject, resolve: async (publicKeyHash: string) => { - const minReplicasValue = minReplicas.getValue(this); - const minMinReplicasValue = this.replicas.max - ? Math.min(minReplicasValue, this.replicas.max.getValue(this)) - : minReplicasValue; - - const leaders = await this.waitForIsLeader( - cursor ?? - (cursor = await this.createCoordinates( - entry, - minMinReplicasValue, - )), - publicKeyHash, + const minReplicasObj = this.getClampedReplicas(minReplicas); + const minReplicasValue = minReplicasObj.getValue(this); + + // TODO is this check necessary + + if ( + !(await this._waitForReplicators( + cursor ?? 
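// NOTE (editorial comment, descriptive, not part of the original patch): the entry's
// coordinates are computed lazily once and cached in `cursor`; the wait below only lets
// the ack count once the acking peer is observed as a leader for the entry while this
// node is not, and the actual delete is further delayed by waitForPruneDelay in
// resolve() above.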
+ (cursor = await this.createCoordinates( + entry, + minReplicasValue, + )), + entry, + [ + { key: publicKeyHash, replicator: true }, + { + key: this.node.identity.publicKey.hashcode(), + replicator: false, + }, + ], + { + persist: false, + }, + )) + ) { + return; + } + + let existCounter = this._requestIPruneResponseReplicatorSet.get( + entry.hash, ); - if (leaders) { - if (leaders.has(this.node.identity.publicKey.hashcode())) { - reject(new Error("Failed to delete, is leader")); - return; - } + if (!existCounter) { + existCounter = new Set(); + this._requestIPruneResponseReplicatorSet.set( + entry.hash, + existCounter, + ); + } + existCounter.add(publicKeyHash); - existCounter.add(publicKeyHash); - if (minMinReplicasValue <= existCounter.size) { - clear(); - this._gidPeersHistory.delete(entry.meta.gid); - this.log - .remove(entry, { - recursively: true, - }) - .then(() => { - deleted.add(entry.hash); - return resolve(); - }) - .catch((e: any) => { - reject(new Error("Failed to delete entry: " + e.toString())); - }); - } + if (minReplicasValue <= existCounter.size) { + resolve(); } }, }); @@ -2815,82 +3323,118 @@ export class SharedLog< promises.push(deferredPromise.promise); } - if (filteredEntries.length === 0) { - return promises; + const emitMessages = async (entries: string[], to: string) => { + const filteredSet: string[] = []; + for (const entry of entries) { + let set = this._requestIPruneSent.get(entry); + if (!set) { + set = new Set(); + this._requestIPruneSent.set(entry, set); + } + + /* if (set.has(to)) { + continue; + } */ + set.add(to); + filteredSet.push(entry); + } + if (filteredSet.length > 0) { + return this.rpc.send( + new RequestIPrune({ + hashes: filteredSet, + }), + { + mode: new SilentDelivery({ + to: [to], // TODO group by peers? + redundancy: 1, + }), + priority: 1, + }, + ); + } + }; + + for (const [k, v] of peerToEntries) { + emitMessages(v, k); } - const emitMessages = (entries: string[], to: string) => { + /* const fn = async () => { this.rpc.send( new RequestIPrune({ - hashes: entries, + hashes: filteredEntries.map(x => x.hash), }), { mode: new SilentDelivery({ - to: [to], // TODO group by peers? + to: [...await this.getReplicators()], redundancy: 1, }), priority: 1, }, - ); + ) }; + fn() */ - const maxReplicasValue = maxReplicas(this, filteredEntries); - this.groupByLeaders( - filteredEntries.map((x) => { - return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined? - }), - ).then((map) => { - for (const [peer, idx] of map) { - emitMessages( - idx.map((i) => filteredEntries[i].hash), - peer, + /* const onPeersChange = async ( + e?: CustomEvent, + reason?: string, + ) => { + if ( + true // e.detail.publicKey.equals(this.node.identity.publicKey) === false // TODO proper condition + ) { + + const peerToEntryMap = await this.groupByLeaders( + filteredEntries + .filter((x) => !readyToDelete.has(x.hash)) + .map((x) => { + return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined? + }), ); - } - }); - - const onPeersChange = async (e: CustomEvent) => { - if (e.detail.publicKey.equals(this.node.identity.publicKey) === false) { - const peerEntries = ( - await this.groupByLeaders( - filteredEntries - .filter((x) => !deleted.has(x.hash)) - .map((x) => { - return { entry: x, replicas: maxReplicasValue }; // TODO choose right maxReplicasValue, should it really be for all entries combined? 
- }), - ) - ).get(e.detail.publicKey.hashcode()); - if (peerEntries && peerEntries.length > 0) { - emitMessages( - peerEntries.map((x) => filteredEntries[x].hash), - e.detail.publicKey.hashcode(), - ); + for (const receiver of peerToEntryMap.keys()) { + if (receiver === this.node.identity.publicKey.hashcode()) { + continue; + } + const peerEntries = peerToEntryMap.get(receiver); + if (peerEntries && peerEntries.length > 0) { + emitMessages( + peerEntries.map((x) => filteredEntries[x].hash), + receiver, + ); + } } } - }; + }; */ // check joining peers + /* this.events.addEventListener("replication:change", onPeersChange); this.events.addEventListener("replicator:mature", onPeersChange); - this.events.addEventListener("replicator:join", onPeersChange); - Promise.allSettled(promises).finally(() => { + this.events.addEventListener("replicator:join", onPeersChange); */ + + let cleanup = () => { + for (const timer of cleanupTimer) { + clearTimeout(timer); + } + /* this.events.removeEventListener("replication:change", onPeersChange); this.events.removeEventListener("replicator:mature", onPeersChange); - this.events.removeEventListener("replicator:join", onPeersChange); - }); + this.events.removeEventListener("replicator:join", onPeersChange); */ + this._closeController.signal.removeEventListener("abort", cleanup); + }; + Promise.allSettled(promises).finally(cleanup); + this._closeController.signal.addEventListener("abort", cleanup); return promises; } /** * For debugging */ - async getPrunable() { + async getPrunable(roleAge?: number) { const heads = await this.log.getHeads(true).all(); let prunable: Entry[] = []; for (const head of heads) { - const isLeader = await this.isLeader({ - entry: head, - replicas: maxReplicas(this, [head]), - }); - + const isLeader = await this.isLeader( + { entry: head, replicas: maxReplicas(this, [head]) }, + { roleAge }, + ); if (!isLeader) { prunable.push(head); } @@ -2898,15 +3442,14 @@ export class SharedLog< return prunable; } - async getNonPrunable() { + async getNonPrunable(roleAge?: number) { const heads = await this.log.getHeads(true).all(); let nonPrunable: Entry[] = []; for (const head of heads) { - const isLeader = await this.isLeader({ - entry: head, - replicas: maxReplicas(this, [head]), - }); - + const isLeader = await this.isLeader( + { entry: head, replicas: maxReplicas(this, [head]) }, + { roleAge }, + ); if (isLeader) { nonPrunable.push(head); } @@ -2920,7 +3463,7 @@ export class SharedLog< } this.onReplicationChange( - (await this.getMyReplicationSegments()).map((x) => { + (await this.getAllReplicationSegments()).map((x) => { return { range: x, type: "added" }; }), ); @@ -2942,104 +3485,100 @@ export class SharedLog< return; } + await this.log.trim(); + const change = mergeReplicationChanges(changeOrChanges); const changed = false; try { - await this.log.trim(); + const uncheckedDeliver: Map< + string, + Map> + > = new Map(); - const uncheckedDeliver: Map> = new Map(); - - const allEntriesToDelete: EntryReplicated[] = []; - - for await (const { gid, entries: coordinates } of toRebalance( + for await (const entryReplicated of toRebalance( change, this.entryCoordinatesIndex, )) { if (this.closed) { break; } - const oldPeersSet = this._gidPeersHistory.get(gid); - if (this.closed) { - return; - } + let oldPeersSet = this._gidPeersHistory.get(entryReplicated.gid); + let isLeader = false; - let { isLeader, leaders: currentPeers } = await this.findLeadersPersist( - coordinates.map((x) => x.coordinate), - coordinates[0], + let currentPeers = await 
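// NOTE (editorial comment, descriptive, not part of the original patch): for each entry
// returned by toRebalance, the leader set is recomputed below; entries this node no
// longer leads are queued for pruning (unless options.sync retains them), while entries
// it leads again get their pending prunes and outgoing prune requests cancelled.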
this.findLeaders( + entryReplicated.coordinates, + entryReplicated, { + // we do this to make sure new replicators get data even though they are not mature so they can figure out if they want to replicate more or less + // TODO make this smarter because if a new replicator is not mature and wants to replicate too much data the syncing overhead can be bad roleAge: 0, - persist: { - prev: coordinates, - }, }, ); - if (isLeader) { - for (const entry of coordinates) { - this.pruneDebouncedFn.delete(entry.hash); - } - } - - const currentPeersSet = new Set(currentPeers.keys()); - this._gidPeersHistory.set(gid, currentPeersSet); - for (const [currentPeer] of currentPeers) { if (currentPeer === this.node.identity.publicKey.hashcode()) { + isLeader = true; continue; } if (!oldPeersSet?.has(currentPeer)) { let set = uncheckedDeliver.get(currentPeer); if (!set) { - set = new Set(); + set = new Map(); uncheckedDeliver.set(currentPeer, set); } - for (const entry of coordinates) { - set.add(entry.hash); + if (!set.has(entryReplicated.hash)) { + set.set(entryReplicated.hash, entryReplicated); } + + /* for (const entry of coordinates) { let arr = set.get(entry.hash); if (!arr) { arr = []; set.set(entry.hash, arr); } arr.push(entry); } */ } } + this.addPeersToGidPeerHistory( + entryReplicated.gid, + currentPeers.keys(), + true, + ); + if (!isLeader) { - if (currentPeers.size > 0) { - // If we are observer, never prune locally created entries, since we dont really know who can store them - // if we are replicator, we will always persist entries that we need to so filtering on createdLocally will not make a difference - let entriesToDelete = coordinates; - - if (this.sync) { - entriesToDelete = entriesToDelete.filter( - (entry) => this.sync!(entry) === false, - ); - } - allEntriesToDelete.push(...entriesToDelete); + if (!this.sync || this.sync(entryReplicated) === false) { + this.pruneDebouncedFn.add({ + key: entryReplicated.hash, + value: { entry: entryReplicated, leaders: currentPeers }, + }); } + + this.responseToPruneDebouncedFn.delete(entryReplicated.hash); // don't allow others to prune on the expectation that I will replicate this entry } else { - for (const entry of coordinates) { - await this._pendingDeletes - .get(entry.hash) - ?.reject( - new Error( - "Failed to delete, is leader again. Closed: " + this.closed, - ), - ); - } + this.pruneDebouncedFn.delete(entryReplicated.hash); + await this._pendingDeletes + .get(entryReplicated.hash) + ?.reject( + new Error( + "Failed to delete, is leader again.
Closed: " + this.closed, + ), + ); + this._requestIPruneSent.delete(entryReplicated.hash); } } - for (const [target, entries] of uncheckedDeliver) { - this.rpc.send(new RequestMaybeSync({ hashes: [...entries] }), { - mode: new SilentDelivery({ to: [target], redundancy: 1 }), + this.syncronizer.onMaybeMissingEntries({ + entries, + targets: [target], }); } - if (allEntriesToDelete.length > 0) { - allEntriesToDelete.map((x) => - this.pruneDebouncedFn.add({ key: x.hash, value: x }), - ); - } return changed; } catch (error: any) { logger.error(error.toString()); @@ -3047,31 +3586,8 @@ export class SharedLog< } } - private async requestSync(hashes: string[], to: Set | string[]) { - const now = +new Date(); - for (const node of to) { - let map = this.syncInFlight.get(node); - if (!map) { - map = new Map(); - this.syncInFlight.set(node, map); - } - for (const hash of hashes) { - map.set(hash, { timestamp: now }); - } - } - - await this.rpc.send( - new ResponseMaybeSync({ - hashes: hashes, - }), - { - mode: new SilentDelivery({ to, redundancy: 1 }), - }, - ); - } - async _onUnsubscription(evt: CustomEvent) { - logger.debug( + logger.trace( `Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify( evt.detail.unsubscriptions.map((x) => x), )} '`, @@ -3086,7 +3602,7 @@ export class SharedLog< } async _onSubscription(evt: CustomEvent) { - logger.debug( + logger.trace( `New peer '${evt.detail.from.hashcode()}' connected to '${JSON.stringify( evt.detail.subscriptions.map((x) => x), )}'`, @@ -3100,37 +3616,6 @@ export class SharedLog< ); } - async addToHistory(usedMemory: number, factor: number) { - (this.history || (this.history = [])).push({ usedMemory, factor }); - - // Keep only the last N entries in the history array (you can adjust N based on your needs) - const maxHistoryLength = 10; - if (this.history.length > maxHistoryLength) { - this.history.shift(); - } - } - - async calculateTrend() { - // Calculate the average change in factor per unit change in memory usage - const factorChanges = this.history.map((entry, index) => { - if (index > 0) { - const memoryChange = - entry.usedMemory - this.history[index - 1].usedMemory; - if (memoryChange !== 0) { - const factorChange = entry.factor - this.history[index - 1].factor; - return factorChange / memoryChange; - } - } - return 0; - }); - - // Return the average factor change per unit memory change - return ( - factorChanges.reduce((sum, change) => sum + change, 0) / - factorChanges.length - ); - } - async rebalanceParticipation() { // update more participation rate to converge to the average expected rate or bounded by // resources such as memory and or cpu @@ -3171,9 +3656,9 @@ export class SharedLog< if (relativeDifference > 0.0001) { // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects - dynamicRange = new ReplicationRangeIndexable({ - offset: hashToU32(this.node.identity.publicKey.bytes), - length: scaleToU32(newFactor), + dynamicRange = new this.indexableDomain.constructorRange({ + offset: dynamicRange.start1, + length: this.indexableDomain.numbers.denormalize(newFactor), publicKeyHash: dynamicRange.hash, id: dynamicRange.id, mode: dynamicRange.mode, @@ -3208,6 +3693,23 @@ export class SharedLog< return resp; } + + private getDynamicRangeOffset(): NumberFromType { + const options = this._logProperties + ?.replicate as DynamicReplicationOptions; + if (options?.offset != null) { + const normalized = options.normalized ?? true; + return ( + normalized + ? 
this.indexableDomain.numbers.denormalize(Number(options.offset)) + : options.offset + ) as NumberFromType; + } + + return this.indexableDomain.numbers.bytesToNumber( + this.node.identity.publicKey.bytes, + ); + } async getDynamicRange() { let dynamicRangeId = getIdForDynamicRange(this.node.identity.publicKey); let range = ( @@ -3223,10 +3725,9 @@ export class SharedLog< .all() )?.[0]?.value; if (!range) { - range = new ReplicationRangeIndexable({ - normalized: true, - offset: Math.random(), - length: 0, + range = new this.indexableDomain.constructorRange({ + offset: this.getDynamicRangeOffset(), + length: this.indexableDomain.numbers.zero, publicKeyHash: this.node.identity.publicKey.hashcode(), mode: ReplicationIntent.NonStrict, timestamp: BigInt(+new Date()), @@ -3245,53 +3746,18 @@ export class SharedLog< return range; } - private clearSyncProcess(hash: string) { - const inflight = this.syncInFlightQueue.get(hash); - if (inflight) { - for (const key of inflight) { - const map = this.syncInFlightQueueInverted.get(key.hashcode()); - if (map) { - map.delete(hash); - if (map.size === 0) { - this.syncInFlightQueueInverted.delete(key.hashcode()); - } - } - } - - this.syncInFlightQueue.delete(hash); - } - } - - private clearSyncProcessPublicKey(publicKey: PublicSignKey) { - this.syncInFlight.delete(publicKey.hashcode()); - const map = this.syncInFlightQueueInverted.get(publicKey.hashcode()); - if (map) { - for (const hash of map) { - const arr = this.syncInFlightQueue.get(hash); - if (arr) { - const filtered = arr.filter((x) => !x.equals(publicKey)); - if (filtered.length > 0) { - this.syncInFlightQueue.set(hash, filtered); - } else { - this.syncInFlightQueue.delete(hash); - } - } - } - this.syncInFlightQueueInverted.delete(publicKey.hashcode()); - } - } - private async onEntryAdded(entry: Entry) { const ih = this._pendingIHave.get(entry.hash); + if (ih) { ih.clear(); ih.callback(entry); } - this.clearSyncProcess(entry.hash); + this.syncronizer.onEntryAdded(entry); } onEntryRemoved(hash: string) { - this.clearSyncProcess(hash); + this.syncronizer.onEntryRemoved(hash); } } diff --git a/packages/programs/data/shared-log/src/integers.ts b/packages/programs/data/shared-log/src/integers.ts new file mode 100644 index 000000000..1db269b99 --- /dev/null +++ b/packages/programs/data/shared-log/src/integers.ts @@ -0,0 +1,102 @@ +import { BinaryReader } from "@dao-xyz/borsh"; + +export type u32 = number; +export type u64 = bigint; +export type NumberFromType = U extends "u32" + ? number + : bigint; +export const MAX_U32 = 4294967295; +export const MAX_U64 = 18446744073709551615n; +export const HALF_MAX_U32 = 2147483647; // rounded down +export const HALF_MAX_U64 = 9223372036854775807n; // rounded down + +export const denormalizer = ( + resolution: R, +): ((number: number) => NumberFromType) => { + if (resolution === "u32") { + return ((value: number) => { + const result = Math.round(value * MAX_U32); + return result > MAX_U32 ? MAX_U32 : result; + }) as (number: number) => NumberFromType; + } + return ((value: number) => { + let result = BigInt(Math.round(value * Number(MAX_U64))); + return result > MAX_U64 ? 
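// NOTE (editorial observation, hedged, not part of the original patch): Number(MAX_U64)
// rounds up to 2^64, so for values near 1 the product can exceed MAX_U64 before the
// clamp below applies; doubles also carry only 53 bits of precision, so u64 results are
// approximate in their low bits.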
MAX_U64 : result; + }) as (number: number) => NumberFromType; +}; + +export const bytesToNumber = ( + resolution: R, +): ((arr: Uint8Array) => NumberFromType) => { + if (resolution === "u32") { + return ((arr: Uint8Array): number => { + const seedNumber = new BinaryReader(arr).u32(); + return seedNumber; + }) as (arr: Uint8Array) => NumberFromType; + } + return ((arr: Uint8Array): bigint => { + const seedNumber = new BinaryReader(arr).u64(); + return seedNumber; + }) as (arr: Uint8Array) => NumberFromType; +}; + +export interface Numbers { + zero: NumberFromType; + maxValue: NumberFromType; + random: () => NumberFromType; + getGrid: (from: NumberFromType, count: number) => NumberFromType[]; + divRound: (a: NumberFromType, b: number | bigint) => NumberFromType; + abs: (a: NumberFromType) => NumberFromType; + min: (a: NumberFromType, b: NumberFromType) => NumberFromType; + denormalize: (value: number) => NumberFromType; + bytesToNumber: (bytes: Uint8Array) => NumberFromType; +} + +const getEvenlySpacedU32 = (from: number, count: number): number[] => { + let ret: number[] = new Array(count); + for (let i = 0; i < count; i++) { + ret[i] = Math.round(from + (i * MAX_U32) / count) % MAX_U32; + } + return ret; +}; + +const getEvenlySpacedU64 = (from: bigint, count: number): bigint[] => { + let ret: bigint[] = new Array(count); + for (let i = 0; i < count; i++) { + ret[i] = (from + (BigInt(i) * MAX_U64) / BigInt(count)) % MAX_U64; + } + return ret; +}; + +export const createNumbers = ( + resolution: N, +): Numbers => { + const denormalizerFn = denormalizer(resolution); + if (resolution === "u32") { + return { + random: () => denormalizerFn(Math.random()), + zero: 0, + maxValue: MAX_U32, + getGrid: getEvenlySpacedU32 as any, // TODO fix this, + divRound: (a, b) => Math.round(a / Number(b)) as any, + abs: (a) => Math.abs(a as number), + min: (a, b) => Math.min(a as number, b as number), + denormalize: denormalizerFn, + bytesToNumber: bytesToNumber(resolution), + } as Numbers; + } else if (resolution === "u64") { + return { + random: () => denormalizerFn(Math.random()), + zero: 0n, + maxValue: MAX_U64, + getGrid: getEvenlySpacedU64 as any, // TODO fix this + divRound: (a, b) => (a as bigint) / BigInt(b), + abs: (a) => (a < 0n ? -a : a), + min: (a, b) => (a < b ? 
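// NOTE (editorial usage sketch, hedged, using only the helpers defined above): getGrid
// (getEvenlySpacedU32/U64) spreads `count` coordinates evenly around the ring starting
// at `from`, e.g.
//   createNumbers("u32").getGrid(0, 4) // -> [0, 1073741824, 2147483648, 3221225471]
// which is how createCoordinates in index.ts places one coordinate per desired replica.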
a : b), + denormalize: denormalizerFn, + bytesToNumber: bytesToNumber(resolution), + } as Numbers; + } else { + throw new Error("Unsupported resolution"); + } +}; diff --git a/packages/programs/data/shared-log/src/pid.ts b/packages/programs/data/shared-log/src/pid.ts index 76d4fde5b..20c3ce554 100644 --- a/packages/programs/data/shared-log/src/pid.ts +++ b/packages/programs/data/shared-log/src/pid.ts @@ -37,8 +37,9 @@ export class PIDReplicationController { peerCount: number; cpuUsage: number | undefined; }) { - const { memoryUsage, totalFactor, peerCount, cpuUsage, currentFactor } = + let { memoryUsage, totalFactor, peerCount, cpuUsage, currentFactor } = properties; + this.prevTotalFactor = totalFactor; this.prevMemoryUsage = memoryUsage; @@ -134,26 +135,27 @@ export class PIDReplicationController { this.integral = 0; } - /* console.log({ - id: this.id, - currentFactor, - newFactor, - factorDiff: newFactor - currentFactor, - pTerm, - dTerm, - iTerm, - totalError, - errorFromEven, - errorTarget: errorBalance, - errorCoverage, - errorMemory, - errorCPU, - peerCount, - totalFactor, - targetScaler: balanceErrorScaler, - memoryUsage, - estimatedTotalSize, - }); */ + /* if (this.id === "3YUU2tgXPB1v7NMdPob37WDcixg4vi7qF1PkbSJFNc4=") + console.log({ + id: this.id, + currentFactor, + newFactor, + factorDiff: newFactor - currentFactor, + pTerm, + dTerm, + iTerm, + totalError, + errorFromEven, + errorTarget: errorBalance, + errorCoverage, + errorMemory, + errorCPU, + peerCount, + totalFactor, + targetScaler: balanceErrorScaler, + memoryUsage, + estimatedTotalSize, + }); */ return Math.max(Math.min(newFactor, 1), 0); } diff --git a/packages/programs/data/shared-log/src/ranges.ts b/packages/programs/data/shared-log/src/ranges.ts index abe2fc9dd..0432e1b2e 100644 --- a/packages/programs/data/shared-log/src/ranges.ts +++ b/packages/programs/data/shared-log/src/ranges.ts @@ -1,5 +1,11 @@ -import { deserialize, field, serialize, variant } from "@dao-xyz/borsh"; -import { PublicSignKey, equals, randomBytes, toBase64 } from "@peerbit/crypto"; +import { deserialize, field, serialize, variant, vec } from "@dao-xyz/borsh"; +import { + PublicSignKey, + equals, + randomBytes, + sha256Base64Sync, + toBase64, +} from "@peerbit/crypto"; import { And, BoolQuery, @@ -18,30 +24,48 @@ import { Sort, SortDirection, StringMatch, - iteratorInSeries, + /* iteratorInSeries, */ } from "@peerbit/indexer-interface"; import { id } from "@peerbit/indexer-interface"; import { Meta, ShallowMeta } from "@peerbit/log"; -import { type ReplicationChanges, type u32 } from "./replication-domain.js"; -import { MAX_U32, scaleToU32 } from "./role.js"; -import { groupByGidSync } from "./utils.js"; +import { + MAX_U32, + MAX_U64, + type NumberFromType, + type Numbers, +} from "./integers.js"; +import { type ReplicationChanges } from "./replication-domain.js"; export enum ReplicationIntent { NonStrict = 0, // indicates that the segment will be replicated and nearby data might be replicated as well Strict = 1, // only replicate data in the segment to the specified replicator, not any other data } -export const getSegmentsFromOffsetAndRange = ( - offset: number, - factor: number, -): [[number, number], [number, number]] => { +export enum SyncStatus { + Unsynced = 0, + Synced = 1, +} + +const min = (a: number | bigint, b: number | bigint) => (a < b ? 
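// NOTE (editorial worked example for getSegmentsFromOffsetAndRange below, not part of
// the original patch): a range that overflows the ring wraps into two segments; with
// max = MAX_U32, offset = 0.75 * max and factor = 0.5 * max the result is
//   [[0.75 * max, max], [0, 0.25 * max]]
// while a non-overflowing range returns the same segment twice.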
a : b); + +const getSegmentsFromOffsetAndRange = ( + offset: T, + factor: T, + zero: T, + max: T, +): [[T, T], [T, T]] => { let start1 = offset; + // @ts-ignore let end1Unscaled = offset + factor; // only add factor if it is not 1 to prevent numerical issues (like (0.9 + 1) % 1 => 0.8999999) - let end1 = Math.min(end1Unscaled, MAX_U32); + let end1: T = min(end1Unscaled, max) as T; return [ [start1, end1], - end1Unscaled > MAX_U32 - ? [0, (factor !== MAX_U32 ? offset + factor : offset) % MAX_U32] + /* eslint-disable no-irregular-whitespace */ + // @ts-ignore + end1Unscaled > max + ? /* eslint-disable no-irregular-whitespace */ + // @ts-ignore + [zero, (factor !== max ? offset + factor : offset) % max] : [start1, end1], ]; }; @@ -59,8 +83,8 @@ export const shouldAssigneToRangeBoundary = ( ) => { let assignedToRangeBoundary = leaders === false || leaders.size < minReplicas; if (!assignedToRangeBoundary && leaders) { - for (const [_, { intersecting }] of leaders) { - if (!intersecting) { + for (const [_, value] of leaders) { + if (!value.intersecting) { assignedToRangeBoundary = true; break; } @@ -68,18 +92,77 @@ export const shouldAssigneToRangeBoundary = ( } return assignedToRangeBoundary; }; -export class EntryReplicated { +export interface EntryReplicated { + hash: string; // id of the entry + gid: string; + coordinates: NumberFromType[]; + wallTime: bigint; + assignedToRangeBoundary: boolean; + get meta(): ShallowMeta; +} + +export const isEntryReplicated = (x: any): x is EntryReplicated => { + return x instanceof EntryReplicatedU32 || x instanceof EntryReplicatedU64; +}; + +export class EntryReplicatedU32 implements EntryReplicated<"u32"> { @id({ type: "string" }) - id: string; // hash + coordinate + hash: string; @field({ type: "string" }) + gid: string; + + @field({ type: vec("u32") }) + coordinates: number[]; + + @field({ type: "u64" }) + wallTime: bigint; + + @field({ type: "bool" }) + assignedToRangeBoundary: boolean; + + @field({ type: Uint8Array }) + private _meta: Uint8Array; + + private _metaResolved: ShallowMeta; + + constructor(properties: { + coordinates: number[]; + hash: string; + meta: Meta; + assignedToRangeBoundary: boolean; + }) { + this.coordinates = properties.coordinates; + this.hash = properties.hash; + this.gid = properties.meta.gid; + this.wallTime = properties.meta.clock.timestamp.wallTime; + const shallow = + properties.meta instanceof Meta + ? 
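/* Compared with the old EntryReplicated (single `coordinate`, row id =
   hash + coordinate), a row is now keyed by the entry hash alone and stores a
   vector of coordinates, presumably one per point at which the entry should be
   replicated; judging by matchEntriesInRangeQuery further down, a row matches
   a range query when any element of "coordinates" falls inside the range. */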
new ShallowMeta(properties.meta) + : properties.meta; + this._meta = serialize(shallow); + this._metaResolved = deserialize(this._meta, ShallowMeta); + this._metaResolved = properties.meta; + this.assignedToRangeBoundary = properties.assignedToRangeBoundary; + } + + get meta(): ShallowMeta { + if (!this._metaResolved) { + this._metaResolved = deserialize(this._meta, ShallowMeta); + } + return this._metaResolved; + } +} + +export class EntryReplicatedU64 implements EntryReplicated<"u64"> { + @id({ type: "string" }) hash: string; @field({ type: "string" }) gid: string; - @field({ type: "u32" }) - coordinate: number; + @field({ type: vec("u64") }) + coordinates: bigint[]; @field({ type: "u64" }) wallTime: bigint; @@ -93,15 +176,14 @@ export class EntryReplicated { private _metaResolved: ShallowMeta; constructor(properties: { - coordinate: number; + coordinates: bigint[]; hash: string; meta: Meta; assignedToRangeBoundary: boolean; }) { - this.coordinate = properties.coordinate; + this.coordinates = properties.coordinates; this.hash = properties.hash; this.gid = properties.meta.gid; - this.id = this.hash + "-" + this.coordinate; this.wallTime = properties.meta.clock.timestamp.wallTime; const shallow = properties.meta instanceof Meta @@ -121,8 +203,25 @@ export class EntryReplicated { } } +export interface ReplicationRangeMessage { + id: Uint8Array; + timestamp: bigint; + get offset(): NumberFromType; + get factor(): NumberFromType; + mode: ReplicationIntent; + toReplicationRangeIndexable(key: PublicSignKey): ReplicationRangeIndexable; +} + +export const isReplicationRangeMessage = ( + x: any, +): x is ReplicationRangeMessage => { + return x instanceof ReplicationRangeMessage; +}; + +export abstract class ReplicationRangeMessage {} + @variant(0) -export class ReplicationRange { +export class ReplicationRangeMessageU32 extends ReplicationRangeMessage<"u32"> { @field({ type: Uint8Array }) id: Uint8Array; @@ -145,6 +244,7 @@ export class ReplicationRange { timestamp: bigint; mode: ReplicationIntent; }) { + super(); const { id, offset, factor, timestamp, mode } = properties; this.id = id; this._offset = offset; @@ -161,8 +261,10 @@ export class ReplicationRange { return this._offset; } - toReplicationRangeIndexable(key: PublicSignKey): ReplicationRangeIndexable { - return new ReplicationRangeIndexable({ + toReplicationRangeIndexable( + key: PublicSignKey, + ): ReplicationRangeIndexableU32 { + return new ReplicationRangeIndexableU32({ id: this.id, publicKeyHash: key.hashcode(), offset: this.offset, @@ -173,7 +275,146 @@ export class ReplicationRange { } } -export class ReplicationRangeIndexable { +@variant(1) +export class ReplicationRangeMessageU64 extends ReplicationRangeMessage<"u64"> { + @field({ type: Uint8Array }) + id: Uint8Array; + + @field({ type: "u64" }) + timestamp: bigint; + + @field({ type: "u64" }) + private _offset: bigint; + + @field({ type: "u64" }) + private _factor: bigint; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor(properties: { + id: Uint8Array; + offset: bigint; + factor: bigint; + timestamp: bigint; + mode: ReplicationIntent; + }) { + super(); + const { id, offset, factor, timestamp, mode } = properties; + this.id = id; + this._offset = offset; + this._factor = factor; + this.timestamp = timestamp; + this.mode = mode; + } + + get factor(): bigint { + return this._factor; + } + + get offset(): bigint { + return this._offset; + } + + toReplicationRangeIndexable( + key: PublicSignKey, + ): ReplicationRangeIndexableU64 { + return new 
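/* Both resolutions share the abstract ReplicationRangeMessage envelope; the
   borsh @variant(0) / @variant(1) tags select the u32 or u64 payload on
   decode, after which the message is bound to its sender. A decoding sketch,
   assuming the usual @dao-xyz/borsh variant dispatch:

     const msg = deserialize(bytes, ReplicationRangeMessage);
     const range = msg.toReplicationRangeIndexable(senderPublicKey);
*/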
ReplicationRangeIndexableU64({ + id: this.id, + publicKeyHash: key.hashcode(), + offset: this.offset, + length: this.factor, + timestamp: this.timestamp, + mode: this.mode, + }); + } +} + +class HashableSegmentU32 { + @field({ type: "u32" }) + start1!: number; + + @field({ type: "u32" }) + end1!: number; + + @field({ type: "u32" }) + start2!: number; + + @field({ type: "u32" }) + end2!: number; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor(properties: { + start1: number; + start2: number; + end1: number; + end2: number; + mode: ReplicationIntent; + }) { + this.start1 = properties.start1; + this.end1 = properties.end1; + this.start2 = properties.start2; + this.end2 = properties.end2; + this.mode = properties.mode; + } +} + +class HashableSegmentU64 { + @field({ type: "u64" }) + start1!: bigint; + + @field({ type: "u64" }) + end1!: bigint; + + @field({ type: "u64" }) + start2!: bigint; + + @field({ type: "u64" }) + end2!: bigint; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor(properties: { + start1: bigint; + start2: bigint; + end1: bigint; + end2: bigint; + mode: ReplicationIntent; + }) { + this.start1 = properties.start1; + this.end1 = properties.end1; + this.start2 = properties.start2; + this.end2 = properties.end2; + this.mode = properties.mode; + } +} + +export interface ReplicationRangeIndexable { + id: Uint8Array; + idString: string; + hash: string; + timestamp: bigint; + start1: NumberFromType; + end1: NumberFromType; + start2: NumberFromType; + end2: NumberFromType; + width: NumberFromType; + widthNormalized: number; + mode: ReplicationIntent; + wrapped: boolean; + toUniqueSegmentId(): string; + toReplicationRange(): ReplicationRangeMessage; + contains(point: NumberFromType): boolean; + equalRange(other: ReplicationRangeIndexable): boolean; + overlaps(other: ReplicationRangeIndexable): boolean; +} + +export class ReplicationRangeIndexableU32 + implements ReplicationRangeIndexable<"u32"> +{ @id({ type: Uint8Array }) id: Uint8Array; @@ -204,7 +445,6 @@ export class ReplicationRangeIndexable { constructor( properties: { id?: Uint8Array; - normalized?: boolean; offset: number; length: number; mode?: ReplicationIntent; @@ -215,14 +455,7 @@ export class ReplicationRangeIndexable { this.hash = (properties as { publicKeyHash: string }).publicKeyHash || (properties as { publicKey: PublicSignKey }).publicKey.hashcode(); - if (!properties.normalized) { - this.transform({ length: properties.length, offset: properties.offset }); - } else { - this.transform({ - length: scaleToU32(properties.length), - offset: scaleToU32(properties.offset), - }); - } + this.transform({ length: properties.length, offset: properties.offset }); this.mode = properties.mode ?? ReplicationIntent.NonStrict; this.timestamp = properties.timestamp || BigInt(0); @@ -232,6 +465,8 @@ export class ReplicationRangeIndexable { const ranges = getSegmentsFromOffsetAndRange( properties.offset, properties.length, + 0, + MAX_U32, ); this.start1 = Math.round(ranges[0][0]); this.end1 = Math.round(ranges[0][1]); @@ -244,11 +479,11 @@ export class ReplicationRangeIndexable { (this.end2 < this.end1 ? 
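/* width sums both arcs: (end1 - start1), plus (end2 - start2) when the range
   wraps (end2 < end1). For the wrapped example [[0.9 * MAX, MAX], [0, 0.1 * MAX]]
   that is 0.1 * MAX + 0.1 * MAX = 0.2 * MAX, recovering the original factor. */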
this.end2 - this.start2 : 0); if ( - this.start1 > 0xffffffff || - this.end1 > 0xffffffff || - this.start2 > 0xffffffff || - this.end2 > 0xffffffff || - this.width > 0xffffffff || + this.start1 > MAX_U32 || + this.end1 > MAX_U32 || + this.start2 > MAX_U32 || + this.end2 > MAX_U32 || + this.width > MAX_U32 || this.width < 0 ) { throw new Error("Segment coordinate out of bounds"); @@ -266,7 +501,7 @@ export class ReplicationRangeIndexable { ); } - overlaps(other: ReplicationRangeIndexable, checkOther = true): boolean { + overlaps(other: ReplicationRangeIndexableU32, checkOther = true): boolean { if ( this.contains(other.start1) || this.contains(other.start2) || @@ -282,7 +517,7 @@ export class ReplicationRangeIndexable { return false; } toReplicationRange() { - return new ReplicationRange({ + return new ReplicationRangeMessageU32({ id: this.id, offset: this.start1, factor: this.width, @@ -291,15 +526,6 @@ export class ReplicationRangeIndexable { }); } - distanceTo(point: number) { - let wrappedPoint = MAX_U32 - point; - return Math.min( - Math.abs(this.start1 - point), - Math.abs(this.end2 - point), - Math.abs(this.start1 - wrappedPoint), - Math.abs(this.end2 - wrappedPoint), - ); - } get wrapped() { return this.end2 < this.end1; } @@ -308,7 +534,7 @@ export class ReplicationRangeIndexable { return this.width / MAX_U32; } - equals(other: ReplicationRangeIndexable) { + equals(other: ReplicationRangeIndexableU32) { if ( equals(this.id, other.id) && this.hash === other.hash && @@ -326,7 +552,7 @@ export class ReplicationRangeIndexable { return false; } - equalRange(other: ReplicationRangeIndexable) { + equalRange(other: ReplicationRangeIndexableU32) { return ( this.start1 === other.start1 && this.end1 === other.end1 && @@ -348,137 +574,494 @@ export class ReplicationRangeIndexable { return `(hash ${this.hash} range: ${this.toString()})`; } - /* removeRange(other: ReplicationRangeIndexable): ReplicationRangeIndexable | ReplicationRangeIndexable[] { - if (!this.overlaps(other)) { - return this + toUniqueSegmentId() { + // return a unique id as a function of the segments location and the replication intent + const hashable = new HashableSegmentU32(this); + return sha256Base64Sync(serialize(hashable)); + } +} + +export class ReplicationRangeIndexableU64 + implements ReplicationRangeIndexable<"u64"> +{ + @id({ type: Uint8Array }) + id: Uint8Array; + + @field({ type: "string" }) + hash: string; + + @field({ type: "u64" }) + timestamp: bigint; + + @field({ type: "u64" }) + start1!: bigint; + + @field({ type: "u64" }) + end1!: bigint; + + @field({ type: "u64" }) + start2!: bigint; + + @field({ type: "u64" }) + end2!: bigint; + + @field({ type: "u64" }) + width!: bigint; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor( + properties: { + id?: Uint8Array; + offset: bigint | number; + length: bigint | number; + mode?: ReplicationIntent; + timestamp?: bigint; + } & ({ publicKeyHash: string } | { publicKey: PublicSignKey }), + ) { + this.id = properties.id ?? randomBytes(32); + this.hash = + (properties as { publicKeyHash: string }).publicKeyHash || + (properties as { publicKey: PublicSignKey }).publicKey.hashcode(); + this.transform({ length: properties.length, offset: properties.offset }); + + this.mode = properties.mode ?? 
ReplicationIntent.NonStrict; + this.timestamp = properties.timestamp || BigInt(0); + } + + private transform(properties: { + offset: bigint | number; + length: bigint | number; + }) { + const ranges = getSegmentsFromOffsetAndRange( + BigInt(properties.offset), + BigInt(properties.length), + 0n, + MAX_U64, + ); + this.start1 = ranges[0][0]; + this.end1 = ranges[0][1]; + this.start2 = ranges[1][0]; + this.end2 = ranges[1][1]; + + this.width = + this.end1 - + this.start1 + + (this.end2 < this.end1 ? this.end2 - this.start2 : 0n); + + if ( + this.start1 > MAX_U64 || + this.end1 > MAX_U64 || + this.start2 > MAX_U64 || + this.end2 > MAX_U64 || + this.width > MAX_U64 || + this.width < 0n + ) { + throw new Error("Segment coordinate out of bounds"); } + } + + get idString() { + return toBase64(this.id); + } - if (this.equalRange(other)) { - return [] + contains(point: bigint) { + return ( + (point >= this.start1 && point < this.end1) || + (point >= this.start2 && point < this.end2) + ); + } + + overlaps(other: ReplicationRangeIndexableU64, checkOther = true): boolean { + if ( + this.contains(other.start1) || + this.contains(other.start2) || + this.contains(other.end1 - 1n) || + this.contains(other.end2 - 1n) + ) { + return true; } - let diff: ReplicationRangeIndexable[] = []; - let start1 = this.start1; - if (other.start1 > start1) { - diff.push(new ReplicationRangeIndexable({ - id: this.id, - offset: this.start1, - length: other.start1 - this.start1, - mode: this.mode, - publicKeyHash: this.hash, - timestamp: this.timestamp, - normalized: false - })); - - start1 = other.end2 + if (checkOther) { + return other.overlaps(this, false); } + return false; + } + toReplicationRange() { + return new ReplicationRangeMessageU64({ + id: this.id, + offset: this.start1, + factor: this.width, + timestamp: this.timestamp, + mode: this.mode, + }); + } + + get wrapped() { + return this.end2 < this.end1; + } + + get widthNormalized() { + return Number(this.width) / Number(MAX_U64); + } + + equals(other: ReplicationRangeIndexableU64) { + if ( + equals(this.id, other.id) && + this.hash === other.hash && + this.timestamp === other.timestamp && + this.mode === other.mode && + this.start1 === other.start1 && + this.end1 === other.end1 && + this.start2 === other.start2 && + this.end2 === other.end2 && + this.width === other.width + ) { + return true; + } + + return false; + } + + equalRange(other: ReplicationRangeIndexableU64) { + return ( + this.start1 === other.start1 && + this.end1 === other.end1 && + this.start2 === other.start2 && + this.end2 === other.end2 + ); + } - if (other.end1 < this.end1) { - diff.push(new ReplicationRangeIndexable({ - id: this.id, - offset: other.end1, - length: this.end1 - other.end1, - mode: this.mode, - publicKeyHash: this.hash, - timestamp: this.timestamp, - normalized: false - })); + toString() { + let roundToTwoDecimals = (num: number) => Math.round(num * 100) / 100; + + if (Math.abs(Number(this.start1 - this.start2)) < 0.0001) { + return `([${roundToTwoDecimals(Number(this.start1) / Number(MAX_U64))}, ${roundToTwoDecimals(Number(this.start1) / Number(MAX_U64))}])`; } + return `([${roundToTwoDecimals(Number(this.start1) / Number(MAX_U64))}, ${roundToTwoDecimals(Number(this.start1) / Number(MAX_U64))}] [${roundToTwoDecimals(Number(this.start2) / Number(MAX_U64))}, ${roundToTwoDecimals(Number(this.end2) / Number(MAX_U64))}])`; + } + + toStringDetailed() { + return `(hash ${this.hash} range: ${this.toString()})`; + } + + toUniqueSegmentId() { + // return a unique id as a function of the 
segments location and the replication intent + const hashable = new HashableSegmentU64(this); + return sha256Base64Sync(serialize(hashable)); + } +} + +export const mergeRanges = ( + segments: ReplicationRangeIndexable[], + numbers: { zero: NumberFromType; maxValue: NumberFromType }, +) => { + if (segments.length === 0) { + throw new Error("No segments to merge"); + } + if (segments.length === 1) { + return segments[0]; + } + + // only allow merging from same publicKeyHash + const sameHash = segments.every((x) => x.hash === segments[0].hash); + if (!sameHash) { + throw new Error("Segments have different publicKeyHash"); + } + + // only allow merging segments with length 1 (trivial) + const sameLength = segments.every((x) => x.width === 1 || x.width === 1n); + if (!sameLength) { + throw new Error( + "Segments have different length, only merging of segments length 1 is supported", + ); + } + + const sorted = segments.sort((a, b) => Number(a.start1 - b.start1)); + + let calculateLargeGap = (): [NumberFromType, number] => { + let last = sorted[sorted.length - 1]; + let largestArc = numbers.zero; + let largestArcIndex = -1; + for (let i = 0; i < sorted.length; i++) { + const current = sorted[i]; + if (current.start1 !== last.start1) { + let arc = numbers.zero; + if (current.start1 < last.end2) { + arc += ((numbers.maxValue as any) - last.end2) as any; + + arc += (current.start1 - numbers.zero) as any; + } else { + arc += (current.start1 - last.end2) as any; + } - if (other.start2 > this.start2) { - diff.push(new ReplicationRangeIndexable({ - id: this.id, - offset: this.start2, - length: other.start2 - this.start2, - mode: this.mode, - publicKeyHash: this.hash, - timestamp: this.timestamp, - normalized: false - })); + if (arc > largestArc) { + largestArc = arc; + largestArcIndex = i; + } + } + last = current; } - if (other.end2 < this.end2) { - diff.push(new ReplicationRangeIndexable({ - id: this.id, - offset: other.end2, - length: this.end2 - other.end2, - mode: this.mode, - publicKeyHash: this.hash, - timestamp: this.timestamp, - normalized: false - })); - } + return [largestArc, largestArcIndex]; + }; + const [largestArc, largestArcIndex] = calculateLargeGap(); + + let totalLengthFinal: number = numbers.maxValue - largestArc; + + if (largestArcIndex === -1) { + return segments[0]; // all ranges are the same + } + // use segments[0] constructor to create a new object + + const proto = segments[0].constructor; + return new (proto as any)({ + length: totalLengthFinal, + offset: segments[largestArcIndex].start1, + publicKeyHash: segments[0].hash, + }); +}; + +const createContainingPointQuery = ( + points: NumberFromType[] | NumberFromType, + options?: { + time?: { + roleAgeLimit: number; + matured: boolean; + now: number; + }; + }, +) => { + const or: Query[] = []; + for (const point of Array.isArray(points) ? points : [points]) { + or.push( + new And([ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.Greater, + value: point, + }), + ]), + ); + or.push( + new And([ + new IntegerCompare({ + key: "start2", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "end2", + compare: Compare.Greater, + value: point, + }), + ]), + ); + } + if (options?.time) { + let queries = [ + new Or(or), + new IntegerCompare({ + key: "timestamp", + compare: options.time.matured ? 
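/* This mirrors isMatured (defined further down): a range has matured once
   now - timestamp >= roleAgeLimit, i.e. timestamp <= now - roleAgeLimit, hence
   LessOrEqual when matured ranges are requested; matured: false selects the
   complement, ranges still inside their warm-up window. */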
Compare.LessOrEqual : Compare.Greater, + value: BigInt(options.time.now - options.time.roleAgeLimit), + }), + ]; + return queries; + } else { + return new Or(or); + } +}; + +const createContainingPartialPointQuery = ( + point: NumberFromType, + first: boolean, + options?: { + time?: { + roleAgeLimit: number; + matured: boolean; + now: number; + }; + }, +) => { + let query: Query[]; + if (first) { + query = [ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.Greater, + value: point, + }), + ]; + } else { + query = [ + new IntegerCompare({ + key: "start2", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "end2", + compare: Compare.Greater, + value: point, + }), + ]; + } + + if (options?.time) { + query.push( + new IntegerCompare({ + key: "timestamp", + compare: options.time.matured ? Compare.LessOrEqual : Compare.Greater, + value: BigInt(options.time.now - options.time.roleAgeLimit), + }), + ); + } + + return query; +}; - return diff; - } */ -} +const iterateRangesContainingPoint = < + S extends Shape | undefined, + R extends "u32" | "u64", +>( + rects: Index>, + points: NumberFromType[] | NumberFromType, -const containingPoint = ( - rects: Index, - point: number, - roleAgeLimit: number, - matured: boolean, - now: number, options?: { shape?: S; sort?: Sort[]; + time?: { + roleAgeLimit: number; + matured: boolean; + now: number; + }; }, -): IndexIterator => { +): IndexIterator, S> => { // point is between 0 and 1, and the range can start at any offset between 0 and 1 and have length between 0 and 1 - let queries = [ - new Or([ - new And([ - new IntegerCompare({ - key: "start1", - compare: Compare.LessOrEqual, - value: point, - }), - new IntegerCompare({ - key: "end1", - compare: Compare.Greater, - value: point, - }), - ]), - new And([ - new IntegerCompare({ - key: "start2", - compare: Compare.LessOrEqual, - value: point, - }), - new IntegerCompare({ - key: "end2", - compare: Compare.Greater, - value: point, - }), - ]), - ]), - new IntegerCompare({ - key: "timestamp", - compare: matured ? Compare.LessOrEqual : Compare.Greater, - value: BigInt(now - roleAgeLimit), - }), - ]; return rects.iterate( { - query: queries, + query: createContainingPointQuery(points, { + time: options?.time, + }), // new Or(points.map(point => new And(createContainingPointQuery(point, roleAgeLimit, matured, now))) sort: options?.sort, }, options, ); }; -const getClosest = ( +const allRangesContainingPoint = async < + S extends Shape | undefined, + R extends "u32" | "u64", +>( + rects: Index>, + points: NumberFromType[] | NumberFromType, + options?: { + shape?: S; + sort?: Sort[]; + time?: { + roleAgeLimit: number; + matured: boolean; + now: number; + }; + }, +) => { + // point is between 0 and 1, and the range can start at any offset between 0 and 1 and have length between 0 and 1 + + let allResults: IndexedResult< + ReturnTypeFromShape, S> + >[] = []; + for (const point of Array.isArray(points) ? 
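/* A point p is contained when start1 <= p < end1 or start2 <= p < end2.
   allRangesContainingPoint issues the two segment halves as two separate
   iterators below instead of one OR query; presumably this keeps each query
   simple enough for the indexer's query planner, with the commented-out
   iterateRangesContainingPoint call kept as the single-query variant. */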
points : [points]) { + const firstIterator = rects.iterate( + { + query: createContainingPartialPointQuery(point, false, options), + sort: options?.sort, + }, + options, + ); + + const secondIterator = rects.iterate( + { + query: createContainingPartialPointQuery(point, true, options), + sort: options?.sort, + }, + options, + ); + + [...(await firstIterator.all()), ...(await secondIterator.all())].forEach( + (x) => allResults.push(x), + ); + } + return allResults; + /* return [...await iterateRangesContainingPoint(rects, points, options).all()]; */ +}; + +const countRangesContainingPoint = async ( + rects: Index>, + point: NumberFromType, + options?: { + time?: { + roleAgeLimit: number; + matured: boolean; + now: number; + }; + }, +) => { + return rects.count({ + query: createContainingPointQuery(point, options), + }); +}; + +export const appromixateCoverage = async (properties: { + peers: Index>; + samples: number; + numbers: Numbers; + roleAge?: number; + normalized?: boolean; // if true, we dont care about the actual number of ranges, only if there is a range, hence the output will be between 0 and 1 +}) => { + const grid = properties.numbers.getGrid( + properties.numbers.zero, + properties.samples, + ); + /* const now = +new Date(); */ + let hits = 0; + for (const point of grid) { + const count = await countRangesContainingPoint( + properties.peers, + point, + /* properties?.roleAge ?? 0, + true, + now, */ + ); + hits += properties.normalized ? (count > 0 ? 1 : 0) : count; + } + return hits / properties.samples; +}; + +const getClosest = ( direction: "above" | "below", - rects: Index, - point: number, + rects: Index>, + point: NumberFromType, roleAgeLimit: number, matured: boolean, now: number, includeStrict: boolean, + numbers: Numbers, options?: { shape?: S }, -): IndexIterator => { - const createQueries = (p: number, equality: boolean) => { +): IndexIterator, S> => { + const createQueries = (p: NumberFromType, equality: boolean) => { let queries: Query[]; if (direction === "below") { queries = [ @@ -507,6 +1090,7 @@ const getClosest = ( }), ]; } + queries.push( new IntegerCompare({ key: "width", compare: Compare.Greater, value: 0 }), ); @@ -542,7 +1126,10 @@ const getClosest = ( const iteratorWrapped = rects.iterate( { - query: createQueries(direction === "below" ? MAX_U32 : 0, true), + query: createQueries( + direction === "below" ? numbers.maxValue : numbers.zero, + true, + ), sort: [ direction === "below" ? 
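/* getClosest pairs the plain directional iterator with a wrapped iterator
   seeded from the far end of the ring (maxValue when searching below, zero
   when searching above); joinIterator then merges the two streams ordered by
   ring distance, so "closest below" correctly wraps past zero. */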
new Sort({ key: ["end2"], direction: "desc" }) @@ -554,68 +1141,83 @@ const getClosest = ( options, ); - return joinIterator([iterator, iteratorWrapped], point, direction); + return joinIterator( + [iterator, iteratorWrapped], + point, + direction, + numbers, + ); }; -export const hasCoveringRange = async ( - rects: Index, - range: ReplicationRangeIndexable, +export const getCoveringRangeQuery = (range: { + start1: number | bigint; + end1: number | bigint; + start2: number | bigint; + end2: number | bigint; +}) => { + return [ + new Or([ + new And([ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: range.start1, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.GreaterOrEqual, + value: range.end1, + }), + ]), + new And([ + new IntegerCompare({ + key: "start2", + compare: Compare.LessOrEqual, + value: range.start1, + }), + new IntegerCompare({ + key: "end2", + compare: Compare.GreaterOrEqual, + value: range.end1, + }), + ]), + ]), + new Or([ + new And([ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: range.start2, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.GreaterOrEqual, + value: range.end2, + }), + ]), + new And([ + new IntegerCompare({ + key: "start2", + compare: Compare.LessOrEqual, + value: range.start2, + }), + new IntegerCompare({ + key: "end2", + compare: Compare.GreaterOrEqual, + value: range.end2, + }), + ]), + ]), + ]; +}; +export const iHaveCoveringRange = async ( + rects: Index>, + range: ReplicationRangeIndexable, ) => { return ( (await rects.count({ query: [ - new Or([ - new And([ - new IntegerCompare({ - key: "start1", - compare: Compare.LessOrEqual, - value: range.start1, - }), - new IntegerCompare({ - key: "end1", - compare: Compare.GreaterOrEqual, - value: range.end1, - }), - ]), - new And([ - new IntegerCompare({ - key: "start2", - compare: Compare.LessOrEqual, - value: range.start1, - }), - new IntegerCompare({ - key: "end2", - compare: Compare.GreaterOrEqual, - value: range.end1, - }), - ]), - ]), - new Or([ - new And([ - new IntegerCompare({ - key: "start1", - compare: Compare.LessOrEqual, - value: range.start2, - }), - new IntegerCompare({ - key: "end1", - compare: Compare.GreaterOrEqual, - value: range.end2, - }), - ]), - new And([ - new IntegerCompare({ - key: "start2", - compare: Compare.LessOrEqual, - value: range.start2, - }), - new IntegerCompare({ - key: "end2", - compare: Compare.GreaterOrEqual, - value: range.end2, - }), - ]), - ]), + ...getCoveringRangeQuery(range), new StringMatch({ key: "hash", value: range.hash, @@ -632,54 +1234,55 @@ export const hasCoveringRange = async ( ); }; -export const getDistance = ( - from: number, - to: number, +// TODO +export function getDistance( + from: any, + to: any, direction: "above" | "below" | "closest", - end = MAX_U32, -) => { - // if direction is 'above' only measure distance from 'from to 'to' from above. - // i.e if from < to, then from needs to wrap around 0 to 1 and then to to - // if direction is 'below' and from > to, then from needs to wrap around 1 to 0 and then to to - // if direction is 'closest' then the shortest distance is the distance - - // also from is 0.1 and to is 0.9, then distance should be 0.2 not 0.8 - // same as for if from is 0.9 and to is 0.1, then distance should be 0.2 not 0.8 + end: any, +): any { + const abs = (value: number | bigint): number | bigint => + value < 0 ? 
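/* Ring distance in getDistance: for "closest", d = |from - to| and the result
   is min(d, end - d), accounting for wrap-around. E.g. with end = MAX_U32,
   from = 0.9 * MAX_U32 and to = 0.1 * MAX_U32, d = 0.8 * MAX_U32 but the
   wrapped path is 0.2 * MAX_U32, which wins. "above"/"below" measure one way
   around the ring, wrapping only when from and to sit on the wrong sides of
   each other. */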
-value : value; + const diff = (a: T, b: T): T => abs(a - b) as T; if (direction === "closest") { if (from === to) { - return 0; + return typeof from === "number" ? 0 : 0n; // returns 0 of the correct type } - - return Math.min(Math.abs(from - to), Math.abs(end - Math.abs(from - to))); + return diff(from, to) < diff(end, diff(from, to)) + ? diff(from, to) + : diff(end, diff(from, to)); } if (direction === "above") { if (from <= to) { - return Math.abs(end - to) + from; + return end - to + from; } return from - to; } if (direction === "below") { if (from >= to) { - return Math.abs(end - from) + to; + return end - from + to; } return to - from; } throw new Error("Invalid direction"); -}; +} -const joinIterator = ( - iterators: IndexIterator[], - point: number, +const joinIterator = ( + iterators: IndexIterator, S>[], + point: NumberFromType, direction: "above" | "below" | "closest", -): IndexIterator => { + numbers: Numbers, +): IndexIterator, S> => { let queues: { elements: { - result: IndexedResult>; - dist: number; + result: IndexedResult< + ReturnTypeFromShape, S> + >; + dist: NumberFromType; }[]; }[] = []; @@ -687,10 +1290,10 @@ const joinIterator = ( next: async ( count: number, ): Promise< - IndexedResults> + IndexedResults, S>> > => { let results: IndexedResults< - ReturnTypeFromShape + ReturnTypeFromShape, S> > = []; for (let i = 0; i < iterators.length; i++) { let queue = queues[i]; @@ -705,16 +1308,36 @@ const joinIterator = ( for (const el of res) { const closest = el.value; - let dist: number; + let dist: NumberFromType; if (direction === "closest") { - dist = Math.min( - getDistance(closest.start1, point, direction), - getDistance(closest.end2, point, direction), + dist = numbers.min( + getDistance( + closest.start1, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType, + getDistance( + closest.end2, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType, ); } else if (direction === "above") { - dist = getDistance(closest.start1, point, direction); + dist = getDistance( + closest.start1, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType; } else if (direction === "below") { - dist = getDistance(closest.end2, point, direction); + dist = getDistance( + closest.end2, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType; } else { throw new Error("Invalid direction"); } @@ -728,7 +1351,7 @@ const joinIterator = ( for (let i = 0; i < count; i++) { let closestQueue = -1; - let closestDist = Number.MAX_SAFE_INTEGER; + let closestDist: bigint | number = Number.MAX_VALUE; for (let j = 0; j < queues.length; j++) { let queue = queues[j]; if (queue && queue.elements.length > 0) { @@ -763,7 +1386,7 @@ const joinIterator = ( }, all: async () => { let results: IndexedResult< - ReturnTypeFromShape + ReturnTypeFromShape, S> >[] = []; for (const iterator of iterators) { let res = await iterator.all(); @@ -775,17 +1398,19 @@ const joinIterator = ( }; const getClosestAround = < - S extends (Shape & { timestamp: true }) | undefined = undefined, + S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( - peers: Index, - point: number, + peers: Index>, + point: NumberFromType, roleAge: number, now: number, includeStrictBelow: boolean, includeStrictAbove: boolean, + numbers: Numbers, options?: { shape?: S }, ) => { - const closestBelow = getClosest( + const closestBelow = getClosest( "below", peers, point, @@ -793,9 +1418,10 @@ const getClosestAround = < true, now, 
includeStrictBelow, + numbers, options, ); - const closestAbove = getClosest( + const closestAbove = getClosest( "above", peers, point, @@ -803,43 +1429,59 @@ const getClosestAround = < true, now, includeStrictAbove, + numbers, options, ); - const containing = containingPoint( + /* const containing = iterateRangesContainingPoint( peers, point, - roleAge, - true, - now, - options, + { + time: { + roleAgeLimit: roleAge, + matured: true, + now, + } + } ); return iteratorInSeries( containing, - joinIterator([closestBelow, closestAbove], point, "closest"), + joinIterator([closestBelow, closestAbove], point, "closest", numbers), + ); */ + return joinIterator( + [closestBelow, closestAbove], + point, + "closest", + numbers, ); }; -const collectNodesAroundPoint = async ( +export const isMatured = ( + segment: { timestamp: bigint }, + now: number, + minAge: number, +) => { + return now - Number(segment.timestamp) >= minAge; +}; +/* + +const collectNodesAroundPoint = async ( roleAge: number, - peers: Index, + peers: Index>, collector: ( rect: { hash: string }, matured: boolean, - interescting: boolean, + intersecting: boolean, ) => void, - point: u32, + point: NumberFromType, now: number, + numbers: Numbers, done: () => boolean = () => true, ) => { - /* let shape = { timestamp: true, hash: true } as const */ - const containing = containingPoint( - peers, - point, - 0, - true, - now /* , { shape } */, - ); + const containing = iterateRangesContainingPoint< + { timestamp: true, hash: true }, + R + >(peers, point, 0, true, now, { shape: { timestamp: true, hash: true } as const }); const allContaining = await containing.all(); for (const rect of allContaining) { collector(rect.value, isMatured(rect.value, now, roleAge), true); @@ -849,28 +1491,31 @@ const collectNodesAroundPoint = async ( return; } - const closestBelow = getClosest( + const closestBelow = getClosest( "below", peers, point, 0, true, now, - false /* , { shape } */, + false, + numbers ); - const closestAbove = getClosest( + const closestAbove = getClosest( "above", peers, point, 0, true, now, - false /* , { shape } */, + false, + numbers ); - const aroundIterator = joinIterator( + const aroundIterator = joinIterator( [closestBelow, closestAbove], point, "closest", + numbers, ); while (aroundIterator.done() !== true && done() !== true) { const res = await aroundIterator.next(1); @@ -882,30 +1527,70 @@ const collectNodesAroundPoint = async ( } } }; + */ -export const getEvenlySpacedU32 = (from: number, count: number) => { - let ret: number[] = new Array(count); - for (let i = 0; i < count; i++) { - ret[i] = Math.round(from + (i * MAX_U32) / count) % MAX_U32; - } - return ret; -}; - -export const isMatured = ( - segment: { timestamp: bigint }, +const collectClosestAround = async ( + roleAge: number, + peers: Index>, + collector: (rect: { hash: string }, matured: boolean) => void, + point: NumberFromType, now: number, - minAge: number, + numbers: Numbers, + done: () => boolean = () => true, ) => { - return now - Number(segment.timestamp) >= minAge; + const closestBelow = getClosest( + "below", + peers, + point, + 0, + true, + now, + false, + numbers, + ); + const closestAbove = getClosest( + "above", + peers, + point, + 0, + true, + now, + false, + numbers, + ); + const aroundIterator = joinIterator( + [closestBelow, closestAbove], + point, + "closest", + numbers, + ); + + let visited = new Set(); + while (aroundIterator.done() !== true && done() !== true) { + const res = await aroundIterator.next(1); + for (const rect of res) { + 
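/* collectClosestAround streams ranges outward from the point until the
   caller's done() predicate fires. getSamples (below) builds on it per cursor
   point: every range directly containing the point becomes an intersecting
   leader, and the closest-around walk only runs while the matured-leader tally
   lags the number of cursor points handled so far, pulling in the nearest
   non-intersecting replicators. */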
visited.add(rect.value.idString); + collector(rect.value, isMatured(rect.value, now, roleAge)); + if (done()) { + return; + } + } + } }; + // get peer sample that are responsible for the cursor point // will return a list of peers that want to replicate the data, // but also if necessary a list of peers that are responsible for the data // but have not explicitly replicating a range that cover the cursor point -export const getSamples = async ( - cursor: u32[], - peers: Index, +export const getSamples = async ( + cursor: NumberFromType[], + peers: Index>, roleAge: number, + numbers: Numbers, + options?: { + onlyIntersecting?: boolean; + uniqueReplicators?: Set; + }, ): Promise> => { const leaders: Map = new Map(); if (!peers) { @@ -913,50 +1598,72 @@ export const getSamples = async ( } const now = +new Date(); + let matured = 0; - const maturedLeaders = new Set(); for (let i = 0; i < cursor.length; i++) { - // evenly distributed + let point = cursor[i]; - // aquire at least one unique node for each point - await collectNodesAroundPoint( - roleAge, + const allContaining = await allRangesContainingPoint( peers, - (rect, m, intersecting) => { - if (m) { - maturedLeaders.add(rect.hash); + point, + ); + + for (const rect of allContaining) { + let prev = leaders.get(rect.value.hash); + if (!prev) { + if (isMatured(rect.value, now, roleAge)) { + matured++; } + leaders.set(rect.value.hash, { intersecting: true }); + } else { + prev.intersecting = true; + } + } - const prev = leaders.get(rect.hash); + if (options?.uniqueReplicators?.size === leaders.size) { + break; // nothing ore to find + } + + if (options?.onlyIntersecting || matured > i) { + continue; + } - if (!prev || (intersecting && !prev.intersecting)) { - leaders.set(rect.hash, { intersecting }); + let foundOneUniqueMatured = false; + await collectClosestAround( + roleAge, + peers, + (rect, m) => { + const prev = leaders.get(rect.hash); + if (m) { + if (!prev) { + matured++; + leaders.set(rect.hash, { intersecting: false }); + } + if (matured > i) { + foundOneUniqueMatured = true; + } } }, - cursor[i], + point, now, - () => { - if (maturedLeaders.size > i) { - return true; - } - return false; - }, + numbers, + () => foundOneUniqueMatured, ); } - return leaders; }; -const fetchOne = async ( - iterator: IndexIterator, +const fetchOne = async ( + iterator: IndexIterator, S>, ) => { const value = await iterator.next(1); await iterator.close(); return value[0]?.value; }; -export const minimumWidthToCover = async ( +export const minimumWidthToCover = async ( minReplicas: number /* , replicatorCount: number */, + numbers: Numbers, ) => { /* minReplicas = Math.min(minReplicas, replicatorCount); */ // TODO do we need this? 
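/* Cover-width heuristic: with minReplicas copies spread over the ring,
   scanning a window of maxValue / minReplicas is expected to intersect at
   least one replica of any entry. For minReplicas = 2 on the u32 ring,
   divRound gives a window of about 2147483648, half the key space. */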
@@ -965,34 +1672,29 @@ export const minimumWidthToCover = async ( // to make sure we reach sufficient amount of nodes such that at least one one has // the entry we are looking for - let widthToCoverScaled = Math.round(MAX_U32 / minReplicas); + let widthToCoverScaled = numbers.divRound(numbers.maxValue, minReplicas); return widthToCoverScaled; }; -export const getCoverSet = async (properties: { - peers: Index; - start: number | PublicSignKey | undefined; - widthToCoverScaled: number; +export const getCoverSet = async (properties: { + peers: Index>; + start: NumberFromType | PublicSignKey | undefined; + widthToCoverScaled: NumberFromType; roleAge: number; - intervalWidth?: number; + numbers: Numbers; eager?: | { unmaturedFetchCoverSize?: number; } | boolean; }): Promise> => { - let intervalWidth: number = properties.intervalWidth ?? MAX_U32; const { peers, start, widthToCoverScaled, roleAge } = properties; const now = Date.now(); - const { startNode, startLocation, endLocation } = await getStartAndEnd( - peers, - start, - widthToCoverScaled, - roleAge, - now, - intervalWidth, - ); + const { startNode, startLocation, endLocation } = await getStartAndEnd< + undefined, + R + >(peers, start, widthToCoverScaled, roleAge, now, properties.numbers); let ret = new Set(); @@ -1034,29 +1736,46 @@ export const getCoverSet = async (properties: { ret.add(current.hash); const resolveNextContaining = async ( - nextLocation: number, + nextLocation: NumberFromType, roleAge: number, ) => { let next = await fetchOne( - containingPoint(peers, nextLocation, roleAge, true, now, { + iterateRangesContainingPoint(peers, nextLocation, { sort: [new Sort({ key: "end2", direction: SortDirection.DESC })], + time: { + matured: true, + roleAgeLimit: roleAge, + now, + }, }), ); // get entersecting sort by largest end2 return next; }; - const resolveNextAbove = async (nextLocation: number, roleAge: number) => { + const resolveNextAbove = async ( + nextLocation: NumberFromType, + roleAge: number, + ) => { // if not get closest from above - let next = await fetchOne( - getClosest("above", peers, nextLocation, roleAge, true, now, true), + let next = await fetchOne( + getClosest( + "above", + peers, + nextLocation, + roleAge, + true, + now, + true, + properties.numbers, + ), ); return next; }; const resolveNext = async ( - nextLocation: number, + nextLocation: NumberFromType, roleAge: number, - ): Promise<[ReplicationRangeIndexable, boolean]> => { + ): Promise<[ReplicationRangeIndexable, boolean]> => { const containing = await resolveNextContaining(nextLocation, roleAge); if (containing) { return [containing, true]; @@ -1067,13 +1786,16 @@ export const getCoverSet = async (properties: { // fill the middle let wrappedOnce = current.end2 < current.end1; - let coveredLength = 0; - const addLength = (from: number) => { + let coveredLength = properties.numbers.zero; + const addLength = (from: NumberFromType) => { if (current.end2 < from || current.wrapped) { wrappedOnce = true; - coveredLength += MAX_U32 - from; + // @ts-ignore + coveredLength += properties.numbers.maxValue - from; + // @ts-ignore coveredLength += current.end2; } else { + // @ts-ignore coveredLength += current.end1 - from; } }; @@ -1085,7 +1807,7 @@ export const getCoverSet = async (properties: { while ( maturedCoveredLength < widthToCoverScaled && // eslint-disable-line no-unmodified-loop-condition - coveredLength <= MAX_U32 // eslint-disable-line no-unmodified-loop-condition + coveredLength <= properties.numbers.maxValue // eslint-disable-line 
no-unmodified-loop-condition ) { let nextCandidate = await resolveNext(nextLocation, roleAge); /* let fromAbove = false; */ @@ -1119,13 +1841,33 @@ export const getCoverSet = async (properties: { if ( !isLast || nextCandidate[1] || - Math.min( - getDistance(last.start1, endLocation, "closest"), - getDistance(last.end2, endLocation, "closest"), + properties.numbers.min( + getDistance( + last.start1, + endLocation, + "closest", + properties.numbers.maxValue, + ), + getDistance( + last.end2, + endLocation, + "closest", + properties.numbers.maxValue, + ), ) > - Math.min( - getDistance(current.start1, endLocation, "closest"), - getDistance(current.end2, endLocation, "closest"), + properties.numbers.min( + getDistance( + current.start1, + endLocation, + "closest", + properties.numbers.maxValue, + ), + getDistance( + current.end2, + endLocation, + "closest", + properties.numbers.maxValue, + ), ) ) { ret.add(current.hash); @@ -1141,9 +1883,9 @@ export const getCoverSet = async (properties: { nextLocation = endIsWrapped ? wrappedOnce - ? Math.min(current.end2, endLocation) + ? properties.numbers.min(current.end2, endLocation) : current.end2 - : Math.min(current.end2, endLocation); + : properties.numbers.min(current.end2, endLocation); } start instanceof PublicSignKey && ret.add(start.hashcode()); @@ -1153,63 +1895,69 @@ export const getCoverSet = async (properties: { // reduce the change set to only regions that are changed for each peer // i.e. subtract removed regions from added regions, and vice versa const result = new Map(); - + for (const addedChange of changes.added ?? []) { let prev = result.get(addedChange.hash) ?? []; for (const [_hash, ranges] of result.entries()) { for (const r of ranges) { - + } } } } */ -const matchRangeQuery = (range: ReplicationRangeIndexable) => { - let ors = []; - ors.push( - new And([ - new IntegerCompare({ - key: "coordinate", - compare: "gte", - value: range.start1, - }), - new IntegerCompare({ - key: "coordinate", - compare: "lt", - value: range.end1, - }), - ]), - ); +export const matchEntriesInRangeQuery = (range: { + start1: number | bigint; + end1: number | bigint; + start2: number | bigint; + end2: number | bigint; +}) => { + const c1 = new And([ + new IntegerCompare({ + key: "coordinates", + compare: "gte", + value: range.start1, + }), + new IntegerCompare({ + key: "coordinates", + compare: "lt", + value: range.end1, + }), + ]); + + if (range.start2 === range.end2) { + return c1; + } - ors.push( + let ors = [ + c1, new And([ new IntegerCompare({ - key: "coordinate", + key: "coordinates", compare: "gte", value: range.start2, }), new IntegerCompare({ - key: "coordinate", + key: "coordinates", compare: "lt", value: range.end2, }), ]), - ); - + ]; return new Or(ors); }; -export const toRebalance = ( +export const toRebalance = ( changes: ReplicationChanges, - index: Index, -): AsyncIterable<{ gid: string; entries: EntryReplicated[] }> => { + index: Index>, +): AsyncIterable> => { const assignedRangesQuery = (changes: ReplicationChanges) => { let ors: Query[] = []; for (const change of changes) { - const matchRange = matchRangeQuery(change.range); + const matchRange = matchEntriesInRangeQuery(change.range); if (change.type === "updated") { // assuming a range is to be removed, is this entry still enoughly replicated - const prevMatchRange = matchRangeQuery(change.prev); + const prevMatchRange = matchEntriesInRangeQuery(change.prev); ors.push(prevMatchRange); ors.push(matchRange); } else { @@ -1235,13 +1983,15 @@ export const toRebalance = ( }); while 
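/* toRebalance streams every indexed entry whose coordinates fall inside a
   changed range; for an "updated" change both the previous and the new range
   are queried, so entries that just left (or just entered) a segment are
   re-evaluated rather than silently dropped. */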
(iterator.done() !== true) { - const entries = await iterator.next(1000); // TODO choose right batch sizes here for optimal memory usage / speed - - // TODO do we need this - const grouped = await groupByGidSync(entries.map((x) => x.value)); + const entries = await iterator.all(); // TODO choose right batch sizes here for optimal memory usage / speed + /* const grouped = await groupByGidSync(entries.map((x) => x.value)); for (const [gid, entries] of grouped.entries()) { yield { gid, entries }; + } */ + + for (const entry of entries) { + yield entry.value; } } }, @@ -1249,12 +1999,14 @@ export const toRebalance = ( }; export const fetchOneFromPublicKey = async < - S extends (Shape & { timestamp: true }) | undefined = undefined, + S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( publicKey: PublicSignKey, - index: Index, + index: Index>, roleAge: number, now: number, + numbers: Numbers, options?: { shape: S; }, @@ -1271,13 +2023,14 @@ export const fetchOneFromPublicKey = async < if (node) { if (!isMatured(node, now, roleAge)) { const matured = await fetchOne( - getClosestAround( + getClosestAround( index, node.start1, roleAge, now, false, false, + numbers, options, ), ); @@ -1291,33 +2044,36 @@ export const fetchOneFromPublicKey = async < export const getStartAndEnd = async < S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( - peers: Index, - start: number | PublicSignKey | undefined | undefined, - widthToCoverScaled: number, + peers: Index>, + start: NumberFromType | PublicSignKey | undefined | undefined, + widthToCoverScaled: NumberFromType, roleAge: number, now: number, - intervalWidth: number, + numbers: Numbers, options?: { shape: S }, ): Promise<{ - startNode: ReturnTypeFromShape | undefined; - startLocation: number; - endLocation: number; + startNode: ReturnTypeFromShape, S> | undefined; + startLocation: NumberFromType; + endLocation: NumberFromType; }> => { // find a good starting point - let startNode: ReturnTypeFromShape | undefined = - undefined; - let startLocation: number | undefined = undefined; + let startNode: + | ReturnTypeFromShape, S> + | undefined = undefined; + let startLocation: NumberFromType | undefined = undefined; - const nodeFromPoint = async (point = scaleToU32(Math.random())) => { + const nodeFromPoint = async (point = numbers.random()) => { startLocation = point; - startNode = await fetchOneClosest( + startNode = await fetchOneClosest( peers, startLocation, roleAge, now, false, true, + numbers, options, ); }; @@ -1329,6 +2085,7 @@ export const getStartAndEnd = async < peers, roleAge, now, + numbers, options, ); if (!startNode) { @@ -1337,62 +2094,76 @@ export const getStartAndEnd = async < } else { startLocation = startNode.start1; } - } else if (typeof start === "number") { + } else if (typeof start === "number" || typeof start === "bigint") { await nodeFromPoint(start); } else { await nodeFromPoint(); } if (!startNode || startLocation == null) { - return { startNode: undefined, startLocation: 0, endLocation: 0 }; + return { + startNode: undefined, + startLocation: numbers.zero, + endLocation: numbers.zero, + }; } - let endLocation = startLocation + widthToCoverScaled; - if (intervalWidth != null) { - endLocation = endLocation % intervalWidth; - } + // @ts-ignore + let endLocation: T = (startLocation + widthToCoverScaled) % numbers.maxValue; - // if start location is after endLocation and startNode is strict then return undefined because this is not a node we want to choose - let 
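/* getStartAndEnd picks the walk origin: the given public key's own range, an
   explicit cursor, or a random point. If the chosen start range does not
   itself contain the start point, the wrap-aware gap computed below decides
   whether a Strict-mode range is still usable; a gap wider than
   widthToCoverScaled means no acceptable start exists. */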
coveredDistanceToStart = 0; - if (startNode.start1 < startLocation) { - coveredDistanceToStart += intervalWidth - startLocation + startNode.start1; - } else { - coveredDistanceToStart += startNode.start1 - startLocation; - } + // if the start node range is not containing the start point, then figure out if the startNode is ideal + if (!startNode.contains(startLocation)) { + let coveredDistanceToStart = numbers.zero; + if (startNode.start1 < startLocation) { + coveredDistanceToStart += + numbers.maxValue - startLocation + startNode.start1; + } else { + coveredDistanceToStart += ((startNode.start1 as any) - + startLocation) as any; + } - if ( - startNode.mode === ReplicationIntent.Strict && - coveredDistanceToStart > widthToCoverScaled - ) { - return { startNode: undefined, startLocation: 0, endLocation: 0 }; + // in this case, the gap to the start point is larger than the width we want to cover. Assume there are no good points + if ( + startNode.mode === ReplicationIntent.Strict && + coveredDistanceToStart > widthToCoverScaled + ) { + return { + startNode: undefined, + startLocation: numbers.zero, + endLocation: numbers.zero, + }; + } } return { startNode, - startLocation: Math.round(startLocation), - endLocation: Math.round(endLocation), + startLocation, + endLocation, }; }; export const fetchOneClosest = < - S extends (Shape & { timestamp: true }) | undefined = undefined, + S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( - peers: Index, - point: number, + peers: Index>, + point: NumberFromType, roleAge: number, now: number, includeStrictBelow: boolean, includeStrictAbove: boolean, + numbers: Numbers, options?: { shape?: S }, ) => { - return fetchOne( - getClosestAround( + return fetchOne( + getClosestAround( peers, point, roleAge, now, includeStrictBelow, includeStrictAbove, + numbers, options, ), ); diff --git a/packages/programs/data/shared-log/src/replication-domain-hash.ts b/packages/programs/data/shared-log/src/replication-domain-hash.ts index a11975ce2..498550e06 100644 --- a/packages/programs/data/shared-log/src/replication-domain-hash.ts +++ b/packages/programs/data/shared-log/src/replication-domain-hash.ts @@ -1,41 +1,66 @@ -import { BinaryReader, BinaryWriter } from "@dao-xyz/borsh"; +import { BinaryWriter } from "@dao-xyz/borsh"; import { sha256 } from "@peerbit/crypto"; import type { ShallowOrFullEntry } from "@peerbit/log"; -import type { EntryReplicated } from "./ranges.js"; +import { bytesToNumber } from "./integers.js"; +import { type EntryReplicated } from "./ranges.js"; import { type Log, type ReplicationDomain, type ReplicationDomainMapper, } from "./replication-domain.js"; -export const hashToU32 = (hash: Uint8Array) => { +/* const hashToU32 = (hash: Uint8Array) => { const seedNumber = new BinaryReader( hash.subarray(hash.length - 4, hash.length), ).u32(); return seedNumber; }; -const hashTransformer: ReplicationDomainMapper = async ( - entry: ShallowOrFullEntry | EntryReplicated, -) => { - // For a fixed set or members, the choosen leaders will always be the same (address invariant) - // This allows for that same content is always chosen to be distributed to same peers, to remove unecessary copies - // Convert this thing we wan't to distribute to 8 bytes so we get can convert it into a u64 - // modulus into an index - const utf8writer = new BinaryWriter(); - utf8writer.string(entry.meta.gid); - const seed = await sha256(utf8writer.finalize()); +const hashToU64 = (hash: Uint8Array): bigint => { + const seedNumber = new BinaryReader( + 
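// The hash domain derives a deterministic cursor from the entry's gid:
// sha256(gid) read back as a u32 or u64. Determinism is the point: for a fixed
// replicator set, the same content always elects the same leaders, avoiding
// redundant copies. A sketch of the u64 path using the helpers above:
//
//   const writer = new BinaryWriter();
//   writer.string(entry.meta.gid);
//   const cursor = bytesToNumber("u64")(await sha256(writer.finalize()));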
hash.subarray(hash.length - 4, hash.length), // + ).u64(); + return seedNumber; +}; + */ - // convert hash of slot to a number - return hashToU32(seed); +const hashTransformer = ( + resolution: R, +): ReplicationDomainMapper => { + const numberConverter = bytesToNumber(resolution); + if (resolution === "u32") { + return (async (entry: ShallowOrFullEntry | EntryReplicated) => { + const utf8writer = new BinaryWriter(); + utf8writer.string(entry.meta.gid); + const seed = await sha256(utf8writer.finalize()); + return numberConverter(seed); + }) as ReplicationDomainMapper; + } else if (resolution === "u64") { + return (async (entry: ShallowOrFullEntry | EntryReplicated) => { + const utf8writer = new BinaryWriter(); + utf8writer.string(entry.meta.gid); + const seed = await sha256(utf8writer.finalize()); + return numberConverter(seed); + }) as ReplicationDomainMapper; + } else { + throw new Error("Unsupported resolution"); + } }; -export type ReplicationDomainHash = ReplicationDomain; -export const createReplicationDomainHash: () => ReplicationDomainHash = () => { +export type ReplicationDomainHash = ReplicationDomain< + undefined, + any, + R +>; + +export const createReplicationDomainHash = ( + resolution: R, +): ReplicationDomainHash => { return { + resolution, type: "hash", - fromEntry: hashTransformer, + fromEntry: hashTransformer(resolution), fromArgs: async (args: undefined, log: Log) => { return { offset: log.node.identity.publicKey, diff --git a/packages/programs/data/shared-log/src/replication-domain-time.ts b/packages/programs/data/shared-log/src/replication-domain-time.ts index 96254839d..37d28406f 100644 --- a/packages/programs/data/shared-log/src/replication-domain-time.ts +++ b/packages/programs/data/shared-log/src/replication-domain-time.ts @@ -1,9 +1,8 @@ import type { ShallowOrFullEntry } from "@peerbit/log"; -import type { EntryReplicated } from "./ranges.js"; +import { type EntryReplicated } from "./ranges.js"; import { type ReplicationDomain, type ReplicationDomainMapper, - type u32, } from "./replication-domain.js"; type TimeUnit = "seconds" | "milliseconds" | "microseconds" | "nanoseconds"; @@ -24,11 +23,11 @@ const scalarMilliToUnit = { export const fromEntry = ( origin: Date, unit: TimeUnit = "milliseconds", -): ReplicationDomainMapper => { +): ReplicationDomainMapper => { const scalar = scalarNanoToUnit[unit]; const originTime = +origin / scalarMilliToUnit[unit]; - const fn = (entry: ShallowOrFullEntry | EntryReplicated) => { + const fn = (entry: ShallowOrFullEntry | EntryReplicated<"u32">) => { const cursor = entry.meta.clock.timestamp.wallTime / scalar; return Math.round(Number(cursor) - originTime); }; @@ -37,9 +36,9 @@ export const fromEntry = ( type TimeRange = { from: number; to: number }; -export type ReplicationDomainTime = ReplicationDomain & { - fromTime: (time: number | Date) => u32; - fromDuration: (duration: number) => u32; +export type ReplicationDomainTime = ReplicationDomain & { + fromTime: (time: number | Date) => number; + fromDuration: (duration: number) => number; }; export const createReplicationDomainTime = ( @@ -48,16 +47,17 @@ export const createReplicationDomainTime = ( ): ReplicationDomainTime => { const originScaled = +origin * scalarMilliToUnit[unit]; const fromMilliToUnit = scalarMilliToUnit[unit]; - const fromTime = (time: number | Date): u32 => { + const fromTime = (time: number | Date): number => { return ( (typeof time === "number" ? 
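/* The time domain pins cursors to wall-clock offsets from `origin` in the
   chosen unit, at u32 resolution. A sketch, assuming the default milliseconds:

     const domain = createReplicationDomainTime(new Date("2024-01-01"));
     domain.fromTime(Date.now()); // ms elapsed since origin
*/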
time : +time * fromMilliToUnit) - originScaled ); }; - const fromDuration = (duration: number): u32 => { + const fromDuration = (duration: number): number => { return duration; }; return { + resolution: "u32", type: "time", fromTime, fromDuration, diff --git a/packages/programs/data/shared-log/src/replication-domain.ts b/packages/programs/data/shared-log/src/replication-domain.ts index c22c6abfb..47ecef395 100644 --- a/packages/programs/data/shared-log/src/replication-domain.ts +++ b/packages/programs/data/shared-log/src/replication-domain.ts @@ -1,15 +1,15 @@ import type { PublicSignKey } from "@peerbit/crypto"; import { type Index } from "@peerbit/indexer-interface"; import type { Entry, ShallowEntry } from "@peerbit/log"; -import { debounceAcculmulator } from "./debounce.js"; -import type { EntryReplicated, ReplicationRangeIndexable } from "./ranges.js"; +import { debounceAccumulator } from "./debounce.js"; +import type { ReplicationRangeIndexable } from "./index.js"; +import type { NumberFromType } from "./integers.js"; +import type { EntryReplicated } from "./ranges.js"; import type { ReplicationLimits } from "./replication.js"; -import { MAX_U32 } from "./role.js"; -export type u32 = number; -export type ReplicationDomainMapper = ( - entry: Entry | ShallowEntry | EntryReplicated, -) => Promise | u32; +export type ReplicationDomainMapper = ( + entry: Entry | ShallowEntry | EntryReplicated, +) => Promise> | NumberFromType; export type Log = { replicas: ReplicationLimits; @@ -18,8 +18,7 @@ export type Log = { publicKey: PublicSignKey; }; }; - syncInFlight: Map>; - replicationIndex: Index; + replicationIndex: Index>; getDefaultMinRoleAge: () => Promise; }; export type ReplicationDomainCoverSet = ( @@ -28,24 +27,24 @@ export type ReplicationDomainCoverSet = ( args: Args, ) => Promise | string[]; // minimum set of peers that covers all the data -type CoverRange = { - offset: number | PublicSignKey; - length?: number; +type CoverRange = { + offset: T | PublicSignKey; + length?: T; }; export type ReplicationChanges = ReplicationChange[]; export type ReplicationChange = | { type: "added"; - range: ReplicationRangeIndexable; + range: ReplicationRangeIndexable; } | { type: "removed"; - range: ReplicationRangeIndexable; + range: ReplicationRangeIndexable; } | { type: "updated"; - range: ReplicationRangeIndexable; - prev: ReplicationRangeIndexable; + range: ReplicationRangeIndexable; + prev: ReplicationRangeIndexable; }; export const mergeReplicationChanges = ( @@ -62,7 +61,7 @@ export const debounceAggregationChanges = ( fn: (changeOrChanges: ReplicationChange[]) => void, delay: number, ) => { - return debounceAcculmulator( + return debounceAccumulator( (result) => { return fn([...result.values()]); }, @@ -90,24 +89,15 @@ export const debounceAggregationChanges = ( ); }; -export type ReplicationDomain = { +export type ReplicationDomain = { + resolution: R; type: string; - fromEntry: ReplicationDomainMapper; + fromEntry: ReplicationDomainMapper; fromArgs: ( args: Args | undefined, log: Log, - ) => Promise | CoverRange; - - // to rebalance will return an async iterator of objects that will be added to the log - /* toRebalance( - change: ReplicationChange, - index: Index - ): AsyncIterable<{ gid: string, entries: { coordinate: number, hash: string }[] }> | Promise>; */ -}; - -export const uniformToU32 = (cursor: number) => { - return cursor * MAX_U32; + ) => Promise>> | CoverRange>; }; export type ExtractDomainArgs = - T extends ReplicationDomain ? Args : never; + T extends ReplicationDomain ? 
Args : never; diff --git a/packages/programs/data/shared-log/src/replication.ts b/packages/programs/data/shared-log/src/replication.ts index e0c539912..a35b398c9 100644 --- a/packages/programs/data/shared-log/src/replication.ts +++ b/packages/programs/data/shared-log/src/replication.ts @@ -11,8 +11,9 @@ import { type Index } from "@peerbit/indexer-interface"; import { TransportMessage } from "./message.js"; import { ReplicationIntent, - ReplicationRange, type ReplicationRangeIndexable, + ReplicationRangeMessage, + ReplicationRangeMessageU32, } from "./ranges.js"; import { Observer, Replicator, Role } from "./role.js"; @@ -20,7 +21,7 @@ export type ReplicationLimits = { min: MinReplicas; max?: MinReplicas }; interface SharedLog { replicas: Partial; - replicationIndex: Index | undefined; + replicationIndex: Index> | undefined; } export class MinReplicas { @@ -67,7 +68,7 @@ export class ResponseRoleMessage extends TransportMessage { segments: this.role instanceof Replicator ? this.role.segments.map((x) => { - return new ReplicationRange({ + return new ReplicationRangeMessageU32({ id: randomBytes(32), offset: x.offset, factor: x.factor, @@ -82,10 +83,10 @@ export class ResponseRoleMessage extends TransportMessage { @variant([1, 2]) export class AllReplicatingSegmentsMessage extends TransportMessage { - @field({ type: vec(ReplicationRange) }) - segments: ReplicationRange[]; + @field({ type: vec(ReplicationRangeMessage) }) + segments: ReplicationRangeMessage[]; - constructor(properties: { segments: ReplicationRange[] }) { + constructor(properties: { segments: ReplicationRangeMessage[] }) { super(); this.segments = properties.segments; } @@ -93,10 +94,10 @@ export class AllReplicatingSegmentsMessage extends TransportMessage { @variant([1, 3]) export class AddedReplicationSegmentMessage extends TransportMessage { - @field({ type: vec(ReplicationRange) }) - segments: ReplicationRange[]; + @field({ type: vec(ReplicationRangeMessage) }) + segments: ReplicationRangeMessage[]; - constructor(properties: { segments: ReplicationRange[] }) { + constructor(properties: { segments: ReplicationRangeMessage[] }) { super(); this.segments = properties.segments; } diff --git a/packages/programs/data/shared-log/src/role.ts b/packages/programs/data/shared-log/src/role.ts index 085af6be2..4c06cdee2 100644 --- a/packages/programs/data/shared-log/src/role.ts +++ b/packages/programs/data/shared-log/src/role.ts @@ -4,10 +4,7 @@ * Roles have been replaces with just replication segments. */ import { field, variant, vec } from "@dao-xyz/borsh"; - -export const MAX_U32 = 4294967295; -export const HALF_MAX_U32 = 2147483647; // rounded down -export const scaleToU32 = (value: number) => Math.round(MAX_U32 * value); +import { MAX_U32, denormalizer } from "./integers.js"; export const overlaps = (x1: number, x2: number, y1: number, y2: number) => { if (x1 <= y2 && y1 <= x2) { @@ -40,6 +37,7 @@ export class Observer extends Role { export const REPLICATOR_TYPE_VARIANT = new Uint8Array([2]); +const denormalizeru32 = denormalizer("u32"); export class RoleReplicationSegment { @field({ type: "u64" }) timestamp: bigint; @@ -61,12 +59,12 @@ export class RoleReplicationSegment { } this.timestamp = timestamp ?? 
BigInt(+new Date());
- this.factorNominator = Math.round(MAX_U32 * factor);
+ this.factorNominator = denormalizeru32(factor);
 if (offset > 1 || offset < 0) { throw new Error("Expecting offset to be between 0 and 1, got: " + offset); }
- this.offsetNominator = Math.round(MAX_U32 * offset);
+ this.offsetNominator = denormalizeru32(offset);
 } get factor(): number {
diff --git a/packages/programs/data/shared-log/src/sync/index.ts b/packages/programs/data/shared-log/src/sync/index.ts
new file mode 100644
index 000000000..37dfbe099
--- /dev/null
+++ b/packages/programs/data/shared-log/src/sync/index.ts
@@ -0,0 +1,49 @@
+import type { Cache } from "@peerbit/cache";
+import type { PublicSignKey } from "@peerbit/crypto";
+import type { Index } from "@peerbit/indexer-interface";
+import type { Entry, Log } from "@peerbit/log";
+import type { RPC, RequestContext } from "@peerbit/rpc";
+import type { EntryWithRefs } from "../exchange-heads.js";
+import type { TransportMessage } from "../message.js";
+import type { EntryReplicated, ReplicationRangeIndexable } from "../ranges.js";
+
+export type SynchronizerComponents = {
+ rpc: RPC;
+ rangeIndex: Index, any>;
+ entryIndex: Index, any>;
+ log: Log;
+ coordinateToHash: Cache;
+};
+export type SynchronizerConstructor = new (
+ properties: SynchronizerComponents,
+) => Syncronizer;
+
+export type SyncableKey = string | bigint; // hash or coordinate
+
+export interface Syncronizer {
+ onMaybeMissingEntries(properties: {
+ entries: Map>;
+ targets: string[];
+ }): Promise | void;
+
+ onMessage(
+ message: TransportMessage,
+ context: RequestContext,
+ ): Promise | boolean;
+
+ onReceivedEntries(properties: {
+ entries: EntryWithRefs[];
+ from: PublicSignKey;
+ }): Promise | void;
+
+ onEntryAdded(entry: Entry): void;
+ onEntryRemoved(hash: string): void;
+ onPeerDisconnected(key: PublicSignKey): void;
+
+ open(): Promise | void;
+ close(): Promise | void;
+
+ get pending(): number;
+
+ get syncInFlight(): Map>;
+}
diff --git a/packages/programs/data/shared-log/src/sync/rateless-iblt.ts b/packages/programs/data/shared-log/src/sync/rateless-iblt.ts
new file mode 100644
index 000000000..cb4165745
--- /dev/null
+++ b/packages/programs/data/shared-log/src/sync/rateless-iblt.ts
@@ -0,0 +1,664 @@
+import { field, variant, vec } from "@dao-xyz/borsh";
+import { Cache } from "@peerbit/cache";
+import { type PublicSignKey, randomBytes, toBase64 } from "@peerbit/crypto";
+import { type Index } from "@peerbit/indexer-interface";
+import type { Entry, Log } from "@peerbit/log";
+import init, { DecoderWrapper, EncoderWrapper } from "@peerbit/riblt";
+import type { RPC, RequestContext } from "@peerbit/rpc";
+import { SilentDelivery } from "@peerbit/stream-interface";
+import type { SyncableKey, Syncronizer } from ".";
+import { type EntryWithRefs } from "../exchange-heads.js";
+import { MAX_U64 } from "../integers.js";
+import { TransportMessage } from "../message.js";
+import {
+ type EntryReplicated,
+ type ReplicationRangeIndexable,
+ matchEntriesInRangeQuery,
+} from "../ranges.js";
+import { SimpleSyncronizer } from "./simple.js";
+
+const wasmFetch = async (input: any) =>
+ (await (await import("node:fs/promises")).readFile(input)) as any; // TODO fix types.
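// Editor's note: illustrative sketch only, not part of the patch. The role.ts
// hunk above swaps the inline `Math.round(MAX_U32 * x)` scaling for the shared
// `denormalizer` helper from integers.js. Assuming `denormalizer("u32")` keeps
// the semantics of the replaced lines, the normalized <-> u32 round trip looks
// like this:
//
//   import { MAX_U32, denormalizer } from "./integers.js";
//
//   const toU32 = denormalizer("u32");
//   toU32(0.5); // 2147483648 === Math.round(0.5 * 4294967295)
//   toU32(1); // 4294967295 (MAX_U32)
//   // dividing a stored nominator by MAX_U32 recovers the normalized value:
//   toU32(0.25) / MAX_U32; // ~0.25
//
// This is also why the offsetNominator assignment above must use `offset`,
// not `factor`: each nominator is the u32 image of its own normalized input.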
+globalThis.fetch = wasmFetch; // wasm-pack build --target web generated load with 'fetch' but node fetch can not load wasm yet, so we need to do this +await init(); + +class SymbolSerialized implements SSymbol { + @field({ type: "u64" }) + count: bigint; + + @field({ type: "u64" }) + hash: bigint; + + @field({ type: "u64" }) + symbol: bigint; + + constructor(props: { count: bigint; hash: bigint; symbol: bigint }) { + this.count = props.count; + this.hash = props.hash; + this.symbol = props.symbol; + } +} + +const getSyncIdString = (message: { syncId: Uint8Array }) => { + return toBase64(message.syncId); +}; + +@variant([3, 0]) +export class StartSync extends TransportMessage { + @field({ type: Uint8Array }) + syncId: Uint8Array; + + @field({ type: "u64" }) + start: bigint; + + @field({ type: "u64" }) + end: bigint; + + @field({ type: vec(SymbolSerialized) }) + symbols: SymbolSerialized[]; + + constructor(props: { + from: bigint; + to: bigint; + symbols: SymbolSerialized[]; + }) { + super(); + this.syncId = randomBytes(32); + this.start = props.from; + this.end = props.to; + this.symbols = props.symbols; + } +} + +@variant([3, 1]) +export class MoreSymbols extends TransportMessage { + @field({ type: Uint8Array }) + syncId: Uint8Array; + + @field({ type: "u64" }) + seqNo: bigint; + + @field({ type: vec(SymbolSerialized) }) + symbols: SymbolSerialized[]; + + constructor(props: { + syncId: Uint8Array; + lastSeqNo: bigint; + symbols: SymbolSerialized[]; + }) { + super(); + this.syncId = props.syncId; + this.seqNo = props.lastSeqNo + 1n; + this.symbols = props.symbols; + } +} + +@variant([3, 2]) +export class RequestMoreSymbols extends TransportMessage { + @field({ type: Uint8Array }) + syncId: Uint8Array; + + @field({ type: "u64" }) + lastSeqNo: bigint; + + constructor(props: { syncId: Uint8Array; lastSeqNo: bigint }) { + super(); + this.syncId = props.syncId; + this.lastSeqNo = props.lastSeqNo; + } +} + +@variant([3, 3]) +export class RequestAll extends TransportMessage { + @field({ type: Uint8Array }) + syncId: Uint8Array; + + constructor(props: { syncId: Uint8Array }) { + super(); + this.syncId = props.syncId; + } +} + +export interface SSymbol { + count: bigint; + hash: bigint; + symbol: bigint; +} + +const buildEncoderOrDecoderFromRange = async < + T extends "encoder" | "decoder", + E = T extends "encoder" ? EncoderWrapper : DecoderWrapper, +>( + ranges: { + start1: bigint; + end1: bigint; + start2: bigint; + end2: bigint; + }, + entryIndex: Index>, + type: T, +): Promise => { + const encoder = + type === "encoder" ? new EncoderWrapper() : new DecoderWrapper(); + + /* const buildDecoderStart = +new Date(); */ + let symbolCount = 0; + const hashes = new Set(); + + const entries = await entryIndex + .iterate( + { + query: matchEntriesInRangeQuery({ + end1: ranges.end1, + start1: ranges.start1, + end2: ranges.end2, + start2: ranges.start2, + }), + }, + { + shape: { + coordinates: true, + }, + }, + ) + .all(); + + if (entries.length === 0) { + return false; + } + + for (const entry of entries) { + symbolCount++; + for (const coordinate of entry.value.coordinates) { + encoder.add_symbol(BigInt(coordinate)); + } + hashes.add(entry.value); + } + + /* console.log( + (type === "encoder" ? 
"Encoder" : "Decoder") + " build time (s): ", + (+new Date() - buildDecoderStart) / 1000, + "Symbols: ", + symbolCount, + ", Hashes size: ", + +hashes.size, + ", Range: ", + ranges, + ); */ + return encoder as E; +}; +/* +class RangeToEncoders { + encoders: Map; + + constructor( + readonly me: PublicSignKey, + readonly rangeIndex: Index>, + readonly entryIndex: Index>, + ) { + this.encoders = new Map(); + } + + async build() { + // for all ranges in rangeIndex that belong to me + // fetch all cursors from entryIndex and build encoder with key from rangeId + for (const range of await this.rangeIndex + .iterate({ query: { hash: this.me.hashcode() } }) + .all()) { + const encoder = await buildEncoderOrDecoderFromRange( + range.value, + this.entryIndex, + "encoder", + ); + this.encoders.set(range.value.toUniqueSegmentId(), encoder); + } + } + + createSymbolGenerator(range: ReplicationRangeIndexable<"u64">): { + next: () => Symbol; + free: () => void; + } { + let encoder = this.encoders.get(range.toUniqueSegmentId()); + if (!encoder) { + throw new Error("No encoder found for range"); + } + const cloned = encoder.clone(); + return { + next: (): Symbol => { + return cloned.produce_next_coded_symbol(); + }, + free: () => { + // TODO? + }, + }; + } +} + + + +const getAllOverlappingRanges = async (properties: { + range: { + // To match + start1: bigint | number; + end1: bigint | number; + start2: bigint | number; + end2: bigint | number; + }; + publicKey: PublicSignKey; + rangeIndex: Index, any>; +}): Promise>> => { + const ranges = await properties.rangeIndex + .iterate({ + query: [ + ...getCoveringRangeQuery(properties.range), + new StringMatch({ + key: "hash", + value: properties.publicKey.hashcode(), + }), + ], + }) + .all(); + return ranges; +}; */ + +/* const getMissingValuesInRemote = async (properties: { + myEncoder: RangeToEncoders; + remoteRange: { + start1: bigint; + end1: bigint; + start2: bigint; + end2: bigint; + }; +}) => { + const findOverlappingRangesIOwn = await getAllOverlappingRanges({ + range: properties.remoteRange, + publicKey: properties.myEncoder.me, + rangeIndex: properties.myEncoder.rangeIndex, + }); + + const decoders: Map = new Map(); + for (const range of findOverlappingRangesIOwn) { + const segmentId = range.value.toUniqueSegmentId(); + const encoder: EncoderWrapper | undefined = + properties.myEncoder.encoders.get(segmentId); + if (encoder) { + decoders.set(segmentId, encoder.to_decoder()); + } + } + + return { + process: (encodedSymbol: any) => { + let allMissingSymbols: any[] = []; + for (const [k, decoder] of decoders) { + decoder.add_coded_symbol(encodedSymbol); + decoder.try_decode(); + if (decoder.decoded()) { + for (const missingSymbol of decoder.get_local_symbols()) { + allMissingSymbols.push(missingSymbol); + } + decoders.delete(k); + } + } + return { + missing: allMissingSymbols, + done: decoders.size === 0, + }; + }, + }; +}; + +export { RangeToEncoders, getMissingValuesInRemote }; */ + +export class RatelessIBLTSynchronizer implements Syncronizer<"u64"> { + simple: SimpleSyncronizer<"u64">; + + ingoingSyncProcesses: Map< + string, + { + decoder: DecoderWrapper; + timeout: ReturnType; + refresh: () => void; + process: (symbol: SSymbol) => Promise; + free: () => void; + } + >; + + outgoingSyncProcesses: Map< + string, + { + outgoing: Map>; + encoder: EncoderWrapper; + timeout: ReturnType; + refresh: () => void; + next: () => SSymbol; + free: () => void; + } + >; + + constructor( + readonly properties: { + rpc: RPC; + rangeIndex: Index, any>; + entryIndex: 
Index, any>; + log: Log; + coordinateToHash: Cache; + }, + ) { + this.simple = new SimpleSyncronizer(properties); + this.outgoingSyncProcesses = new Map(); + this.ingoingSyncProcesses = new Map(); + } + + onMaybeMissingEntries(properties: { + entries: Map>; + targets: string[]; + }): Promise | void { + // calculate the smallest range that covers all the entries + // calculate the largest gap and the smallest range will be the one that starts at the end of it + + // assume sorted, and find the largest gap + let largestGap = 0n; + let largestGapIndex = 0; + const sortedEntries = Array.from(properties.entries.values()) + .map((x) => x.coordinates) + .flat() + .sort((a, b) => { + if (a > b) { + return 1; + } else if (a < b) { + return -1; + } else { + return 0; + } + }); + + for (let i = 0; i < sortedEntries.length - 1; i++) { + const current = sortedEntries[i]; + const next = sortedEntries[i + 1]; + const gap = next >= current ? next - current : MAX_U64 - current + next; + if (gap > largestGap) { + largestGap = gap; + largestGapIndex = i; + } + } + + const smallestRangeStartIndex = + (largestGapIndex + 1) % sortedEntries.length; + const smallestRangeEndIndex = largestGapIndex; /// === (smallRangeStartIndex + 1) % sortedEntries.length + let smallestRangeStart = sortedEntries[smallestRangeStartIndex]; + let smallestRangeEnd = sortedEntries[smallestRangeEndIndex]; + let start: bigint, end: bigint; + if (smallestRangeEnd === smallestRangeStart) { + start = smallestRangeEnd; + end = smallestRangeEnd + 1n; + if (end > MAX_U64) { + end = 0n; + } + } else { + start = smallestRangeStart; + end = smallestRangeEnd; + } + + const startSync = new StartSync({ from: start, to: end, symbols: [] }); + const encoder = new EncoderWrapper(); + for (const entry of sortedEntries) { + encoder.add_symbol(BigInt(entry)); + } + + let initialSymbols = 10; // TODO arg + for (let i = 0; i < initialSymbols; i++) { + startSync.symbols.push( + new SymbolSerialized(encoder.produce_next_coded_symbol()), + ); + } + + const createTimeout = () => { + return setTimeout(() => { + // encoder.free(); TODO? + this.outgoingSyncProcesses.delete(getSyncIdString(startSync)); + }, 2e4); // TODO arg + }; + + const obj = { + encoder, + timeout: createTimeout(), + refresh: () => { + let prevTimeout = obj.timeout; + if (prevTimeout) { + clearTimeout(prevTimeout); + } + obj.timeout = createTimeout(); + }, + next: (): SSymbol => { + obj.refresh(); // TODO use timestamp instead and collective pruning/refresh + return encoder.produce_next_coded_symbol(); + }, + free: () => { + // encoder.free(); TODO? + clearTimeout( + this.outgoingSyncProcesses.get(getSyncIdString(startSync))?.timeout, + ); + this.outgoingSyncProcesses.delete(getSyncIdString(startSync)); + }, + outgoing: properties.entries, + }; + + this.outgoingSyncProcesses.set(getSyncIdString(startSync), obj); + this.simple.rpc.send(startSync, { + mode: new SilentDelivery({ to: properties.targets, redundancy: 1 }), + priority: 1, + }); + } + + async onMessage( + message: TransportMessage, + context: RequestContext, + ): Promise { + if (message instanceof StartSync) { + const wrapped = message.end < message.start; + const decoder = await buildEncoderOrDecoderFromRange( + { + start1: message.start, + end1: wrapped ? MAX_U64 : message.end, + start2: 0n, + end2: wrapped ? 
message.end : 0n, + }, + this.properties.entryIndex, + "decoder", + ); + + if (!decoder) { + await this.simple.rpc.send( + new RequestAll({ + syncId: message.syncId, + }), + { + mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }), + priority: 1, + }, + ); + return true; + } + + /* console.log( + "ALREADY HAVE ENTRIES", + await this.properties.entryIndex.count(), + "but log ?", + this.properties.log.length, + ); */ + + const syncId = getSyncIdString(message); + const createTimeout = () => { + return setTimeout(() => { + // decoder.free(); TODO? + this.ingoingSyncProcesses.delete(syncId); + }, 2e4); // TODO arg + }; + + let count = 0; + /* let t0 = +new Date(); */ + const obj = { + decoder, + timeout: createTimeout(), + refresh: () => { + let prevTimeout = obj.timeout; + if (prevTimeout) { + clearTimeout(prevTimeout); + } + obj.timeout = createTimeout(); + }, + process: async ( + symbol: SSymbol | SymbolSerialized, + ): Promise => { + obj.refresh(); // TODO use timestamp instead and collective pruning/refresh + decoder.add_coded_symbol(symbol); + decoder.try_decode(); + count++; + if (decoder.decoded()) { + let allMissingSymbolsInRemote: bigint[] = []; + for (const missingSymbol of decoder.get_remote_symbols()) { + allMissingSymbolsInRemote.push(missingSymbol); + } + + /* let t1 = +new Date(); + console.log("Done decoding after", count, "symbols", "allMissingSymbolsInRemote: ", allMissingSymbolsInRemote.length, "time: ", (t1 - t0) / 1000, "s"); */ + + // now we want to resolve the hashes from the symbols + this.simple.queueSync(allMissingSymbolsInRemote, context.from!, { + skipCheck: true, + }); + obj.free(); + return true; + } + return false; + }, + free: () => { + // decoder.free(); TODO? + clearTimeout(this.ingoingSyncProcesses.get(syncId)?.timeout); + this.ingoingSyncProcesses.delete(syncId); + }, + }; + + this.ingoingSyncProcesses.set(syncId, obj); + + for (const symbol of message.symbols) { + if (await obj.process(symbol)) { + return true; // DONE + } + } + + // not done, request more symbols + await this.simple.rpc.send( + new RequestMoreSymbols({ + lastSeqNo: 0n, + syncId: message.syncId, + }), + { + mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }), + priority: 1, + }, + ); + + return true; + } else if (message instanceof MoreSymbols) { + const obj = this.ingoingSyncProcesses.get(getSyncIdString(message)); + if (!obj) { + return true; + } + + for (const symbol of message.symbols) { + if (await obj.process(symbol)) { + return true; // DONE + } + } + + this.simple.rpc.send( + new RequestMoreSymbols({ + lastSeqNo: message.seqNo, + syncId: message.syncId, + }), + { + mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }), + priority: 1, + }, + ); + + return true; + } else if (message instanceof RequestMoreSymbols) { + const obj = this.outgoingSyncProcesses.get(getSyncIdString(message)); + if (!obj) { + return true; + } + + const symbols = []; + let batch = 100; // TODO arg + for (let i = 0; i < batch; i++) { + symbols.push(new SymbolSerialized(obj.next())); + } + await this.properties.rpc.send( + new MoreSymbols({ + lastSeqNo: message.lastSeqNo + 1n, + syncId: message.syncId, + symbols, + }), + { + mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }), + priority: 1, + }, + ); + return true; + } else if (message instanceof RequestAll) { + const p = this.outgoingSyncProcesses.get(getSyncIdString(message)); + if (!p) { + return true; + } + await this.simple.onMaybeMissingEntries({ + entries: p.outgoing, + targets: 
[context.from!.hashcode()], + }); + return true; + } + return this.simple.onMessage(message, context); + } + + onReceivedEntries(properties: { + entries: EntryWithRefs[]; + from: PublicSignKey; + }): Promise | void { + return this.simple.onReceivedEntries(properties); + } + + onEntryAdded(entry: Entry): void { + return this.simple.onEntryAdded(entry); + } + + onEntryRemoved(hash: string) { + return this.simple.onEntryRemoved(hash); + } + + onPeerDisconnected(key: PublicSignKey) { + return this.simple.onPeerDisconnected(key); + } + + open(): Promise | void { + return this.simple.open(); + } + + close(): Promise | void { + for (const [, obj] of this.ingoingSyncProcesses) { + obj.free(); + } + for (const [, obj] of this.outgoingSyncProcesses) { + obj.free(); + } + return this.simple.close(); + } + + get syncInFlight(): Map> { + return this.simple.syncInFlight; + } + + get pending(): number { + return this.simple.pending; + } +} diff --git a/packages/programs/data/shared-log/src/sync/simple.ts b/packages/programs/data/shared-log/src/sync/simple.ts new file mode 100644 index 000000000..9b71ae5b7 --- /dev/null +++ b/packages/programs/data/shared-log/src/sync/simple.ts @@ -0,0 +1,412 @@ +import { field, variant, vec } from "@dao-xyz/borsh"; +import { Cache } from "@peerbit/cache"; +import type { PublicSignKey } from "@peerbit/crypto"; +import { + Compare, + type Index, + IntegerCompare, + Or, +} from "@peerbit/indexer-interface"; +import { Entry, Log } from "@peerbit/log"; +import type { RPC, RequestContext } from "@peerbit/rpc"; +import { SilentDelivery } from "@peerbit/stream-interface"; +import type { SyncableKey, Syncronizer } from "."; +import { + EntryWithRefs, + createExchangeHeadsMessages, +} from "../exchange-heads.js"; +import { TransportMessage } from "../message.js"; +import type { EntryReplicated } from "../ranges"; + +@variant([0, 1]) +export class RequestMaybeSync extends TransportMessage { + @field({ type: vec("string") }) + hashes: string[]; + + constructor(props: { hashes: string[] }) { + super(); + this.hashes = props.hashes; + } +} + +@variant([0, 2]) +export class ResponseMaybeSync extends TransportMessage { + @field({ type: vec("string") }) + hashes: string[]; + + constructor(props: { hashes: string[] }) { + super(); + this.hashes = props.hashes; + } +} + +@variant([0, 5]) +export class RequestMaybeSyncCoordinate extends TransportMessage { + @field({ type: vec("u64") }) + coordinates: bigint[]; + + constructor(props: { coordinates: bigint[] }) { + super(); + this.coordinates = props.coordinates; + } +} + +const getHashesFromSymbols = async ( + symbols: bigint[], + entryIndex: Index, any>, + coordinateToHash: Cache, +) => { + let queries: IntegerCompare[] = []; + let batchSize = 1; // TODO arg + let results = new Set(); + const handleBatch = async (end = false) => { + if (queries.length >= batchSize || (end && queries.length > 0)) { + const entries = await entryIndex + .iterate( + { query: queries.length > 1 ? 
new Or(queries) : queries },
+ { shape: { hash: true, coordinates: true } },
+ )
+ .all();
+ queries = [];
+ for (const entry of entries) {
+ results.add(entry.value.hash);
+ for (const coordinate of entry.value.coordinates) {
+ coordinateToHash.add(coordinate, entry.value.hash);
+ }
+ }
+ }
+ };
+ for (let i = 0; i < symbols.length; i++) {
+ const fromCache = coordinateToHash.get(symbols[i]);
+ if (fromCache) {
+ results.add(fromCache);
+ continue;
+ }
+
+ const matchQuery = new IntegerCompare({
+ key: "coordinates",
+ compare: Compare.Equal,
+ value: symbols[i],
+ });
+
+ queries.push(matchQuery);
+ await handleBatch();
+ }
+ await handleBatch(true);
+ return results;
+};
+
+export class SimpleSyncronizer
+ implements Syncronizer
+{
+ // map of hash to public keys that we can ask for entries
+ syncInFlightQueue: Map;
+ syncInFlightQueueInverted: Map>;
+
+ // map of hash to public keys that we have asked for entries
+ syncInFlight!: Map>;
+
+ rpc: RPC;
+ log: Log;
+ entryIndex: Index, any>;
+ coordinateToHash: Cache;
+
+ // Syncing and deduplication work
+ syncMoreInterval?: ReturnType;
+
+ closed!: boolean;
+
+ constructor(properties: {
+ rpc: RPC;
+ entryIndex: Index, any>;
+ log: Log;
+ coordinateToHash: Cache;
+ }) {
+ this.syncInFlightQueue = new Map();
+ this.syncInFlightQueueInverted = new Map();
+ this.syncInFlight = new Map();
+ this.rpc = properties.rpc;
+ this.log = properties.log;
+ this.entryIndex = properties.entryIndex;
+ this.coordinateToHash = properties.coordinateToHash;
+ }
+
+ onMaybeMissingEntries(properties: {
+ entries: Map>;
+ targets: string[];
+ }): Promise {
+ return this.rpc.send(
+ new RequestMaybeSync({ hashes: [...properties.entries.keys()] }),
+ {
+ priority: 1,
+ mode: new SilentDelivery({ to: properties.targets, redundancy: 1 }),
+ },
+ );
+ }
+
+ async onMessage(
+ msg: TransportMessage,
+ context: RequestContext,
+ ): Promise {
+ const from = context.from!;
+ if (msg instanceof RequestMaybeSync) {
+ await this.queueSync(msg.hashes, from);
+ return true;
+ } else if (msg instanceof ResponseMaybeSync) {
+ // TODO perhaps send fewer messages to more receivers for performance reasons?
+ // TODO wait for previous send to target before trying to send more?
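// Editor's note (recap, not part of the patch): the simple synchronizer's
// handshake runs in three steps:
// 1. A peer that may have entries for us announces them with
//    RequestMaybeSync { hashes }.
// 2. The receiver runs those hashes through queueSync(): hashes it already
//    has are dropped, hashes already in flight just record the sender as an
//    alternative provider, and the rest are answered with
//    ResponseMaybeSync { hashes } listing what it actually wants.
// 3. The original announcer (below) streams the requested entries back via
//    createExchangeHeadsMessages.
// RequestMaybeSyncCoordinate plays the role of step 2 when the rateless-IBLT
// synchronizer has decoded missing u64 coordinates instead of hashes; the
// remote then resolves coordinates to hashes with getHashesFromSymbols above.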
+ + for await (const message of createExchangeHeadsMessages( + this.log, + msg.hashes, + )) { + await this.rpc.send(message, { + mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }), + }); + } + return true; + } else if (msg instanceof RequestMaybeSyncCoordinate) { + const hashes = await getHashesFromSymbols( + msg.coordinates, + this.entryIndex, + this.coordinateToHash, + ); + + for await (const message of createExchangeHeadsMessages( + this.log, + hashes, + )) { + await this.rpc.send(message, { + mode: new SilentDelivery({ to: [context.from!], redundancy: 1 }), + priority: 1, + }); + } + + return true; + } else { + return false; // no message was consumed + } + } + + onReceivedEntries(properties: { + entries: EntryWithRefs[]; + from: PublicSignKey; + }): Promise | void { + for (const entry of properties.entries) { + const set = this.syncInFlight.get(properties.from.hashcode()); + if (set) { + set.delete(entry.entry.hash); + if (set?.size === 0) { + this.syncInFlight.delete(properties.from.hashcode()); + } + } + } + } + + async queueSync( + keys: string[] | bigint[], + from: PublicSignKey, + options?: { skipCheck?: boolean }, + ) { + const requestHashes: SyncableKey[] = []; + + for (const coordinateOrHash of keys) { + const inFlight = this.syncInFlightQueue.get(coordinateOrHash); + if (inFlight) { + if (!inFlight.find((x) => x.hashcode() === from.hashcode())) { + inFlight.push(from); + let inverted = this.syncInFlightQueueInverted.get(from.hashcode()); + if (!inverted) { + inverted = new Set(); + this.syncInFlightQueueInverted.set(from.hashcode(), inverted); + } + inverted.add(coordinateOrHash); + } + } else if ( + options?.skipCheck || + !(await this.checkHasCoordinateOrHash(coordinateOrHash)) + ) { + /* if ((this.dbg)) { + console.log("I NEED TO SYNC!", coordinateOrHash) + } */ + + this.syncInFlightQueue.set(coordinateOrHash, []); + requestHashes.push(coordinateOrHash); // request immediately (first time we have seen this hash) + } else { + /* if ((this.dbg)) { + console.log("ALREAD HAVE SYNC?", coordinateOrHash) + + } */ + } + } + + requestHashes.length > 0 && + (await this.requestSync(requestHashes as string[] | bigint[], [ + from!.hashcode(), + ])); + } + + private async requestSync( + hashes: string[] | bigint[], + to: Set | string[], + ) { + if (hashes.length === 0) { + return; + } + + const now = +new Date(); + for (const node of to) { + let map = this.syncInFlight.get(node); + if (!map) { + map = new Map(); + this.syncInFlight.set(node, map); + } + for (const hash of hashes) { + map.set(hash, { timestamp: now }); + } + } + + const isBigInt = typeof hashes[0] === "bigint"; + + await this.rpc.send( + isBigInt + ? new RequestMaybeSyncCoordinate({ coordinates: hashes as bigint[] }) + : new ResponseMaybeSync({ hashes: hashes as string[] }), + { + mode: new SilentDelivery({ to, redundancy: 1 }), + priority: 1, + }, + ); + } + private async checkHasCoordinateOrHash(key: string | bigint) { + return typeof key === "bigint" + ? (await this.entryIndex.count({ query: { coordinates: key } })) > 0 + : this.log.has(key); + } + async open() { + this.closed = false; + const requestSyncLoop = async () => { + /** + * This method fetches entries that we potentially want. 
* In a case in which we become replicator of a segment,
+ * multiple remote peers might want to send us entries.
+ * This method makes sure that we only request one entry from the remotes at a time
+ * so we don't get flooded with the same entry
+ */
+
+ const requestHashes: SyncableKey[] = [];
+ const from: Set = new Set();
+ for (const [key, value] of this.syncInFlightQueue) {
+ if (this.closed) {
+ return;
+ }
+
+ const has = await this.checkHasCoordinateOrHash(key);
+
+ if (!has) {
+ // TODO test that this if statement actually does anything meaningful
+ if (value.length > 0) {
+ requestHashes.push(key);
+ const publicKeyHash = value.shift()!.hashcode();
+ from.add(publicKeyHash);
+ const invertedSet =
+ this.syncInFlightQueueInverted.get(publicKeyHash);
+ if (invertedSet) {
+ if (invertedSet.delete(key)) {
+ if (invertedSet.size === 0) {
+ this.syncInFlightQueueInverted.delete(publicKeyHash);
+ }
+ }
+ }
+ }
+ if (value.length === 0) {
+ this.syncInFlightQueue.delete(key); // no one left to ask for this entry
+ }
+ } else {
+ this.syncInFlightQueue.delete(key);
+ }
+ }
+
+ const nowMin10s = +new Date() - 1e4;
+ for (const [key, map] of this.syncInFlight) {
+ // cleanup "old" missing syncs
+ for (const [hash, { timestamp }] of map) {
+ if (timestamp < nowMin10s) {
+ map.delete(hash);
+ }
+ }
+ if (map.size === 0) {
+ this.syncInFlight.delete(key);
+ }
+ }
+ this.requestSync(requestHashes as string[] | bigint[], from).finally(
+ () => {
+ if (this.closed) {
+ return;
+ }
+ this.syncMoreInterval = setTimeout(requestSyncLoop, 3e3);
+ },
+ );
+ };
+
+ requestSyncLoop();
+ }
+
+ async close() {
+ this.closed = true;
+ this.syncInFlightQueue.clear();
+ this.syncInFlightQueueInverted.clear();
+ this.syncInFlight.clear();
+ clearTimeout(this.syncMoreInterval);
+ }
+ onEntryAdded(entry: Entry): void {
+ return this.clearSyncProcess(entry.hash);
+ }
+
+ onEntryRemoved(hash: string): void {
+ return this.clearSyncProcess(hash);
+ }
+
+ private clearSyncProcess(hash: string) {
+ const inflight = this.syncInFlightQueue.get(hash);
+ if (inflight) {
+ for (const key of inflight) {
+ const map = this.syncInFlightQueueInverted.get(key.hashcode());
+ if (map) {
+ map.delete(hash);
+ if (map.size === 0) {
+ this.syncInFlightQueueInverted.delete(key.hashcode());
+ }
+ }
+ }
+
+ this.syncInFlightQueue.delete(hash);
+ }
+ }
+
+ onPeerDisconnected(key: PublicSignKey): Promise | void {
+ return this.clearSyncProcessPublicKey(key);
+ }
+ private clearSyncProcessPublicKey(publicKey: PublicSignKey) {
+ this.syncInFlight.delete(publicKey.hashcode());
+ const map = this.syncInFlightQueueInverted.get(publicKey.hashcode());
+ if (map) {
+ for (const hash of map) {
+ const arr = this.syncInFlightQueue.get(hash);
+ if (arr) {
+ const filtered = arr.filter((x) => !x.equals(publicKey));
+ if (filtered.length > 0) {
+ this.syncInFlightQueue.set(hash, filtered);
+ } else {
+ this.syncInFlightQueue.delete(hash);
+ }
+ }
+ }
+ this.syncInFlightQueueInverted.delete(publicKey.hashcode());
+ }
+ }
+
+ get pending() {
+ return this.syncInFlightQueue.size;
+ }
+}
diff --git a/packages/programs/data/shared-log/src/utils.ts b/packages/programs/data/shared-log/src/utils.ts
index f6cf74961..e0f4c7016 100644
--- a/packages/programs/data/shared-log/src/utils.ts
+++ b/packages/programs/data/shared-log/src/utils.ts
@@ -1,9 +1,13 @@
 import { Entry, ShallowEntry } from "@peerbit/log";
 import type { EntryWithRefs } from "./exchange-heads.js";
-import { EntryReplicated } from "./ranges.js";
+import { type EntryReplicated, isEntryReplicated }
from "./ranges.js"; export const groupByGid = async < - T extends ShallowEntry | Entry | EntryWithRefs | EntryReplicated, + T extends + | ShallowEntry + | Entry + | EntryWithRefs + | EntryReplicated, >( entries: T[], ): Promise> => { @@ -14,7 +18,7 @@ export const groupByGid = async < ? (await head.getMeta()).gid : head instanceof ShallowEntry ? head.meta.gid - : head instanceof EntryReplicated + : isEntryReplicated(head) ? head.gid : (await head.entry.getMeta()).gid; let value = groupByGid.get(gid); @@ -27,7 +31,9 @@ export const groupByGid = async < return groupByGid; }; -export const groupByGidSync = async ( +export const groupByGidSync = async < + T extends ShallowEntry | EntryReplicated, +>( entries: T[], ): Promise> => { const groupByGid: Map = new Map(); diff --git a/packages/programs/data/shared-log/test/append.spec.ts b/packages/programs/data/shared-log/test/append.spec.ts index 22a72a2e8..1d769368a 100644 --- a/packages/programs/data/shared-log/test/append.spec.ts +++ b/packages/programs/data/shared-log/test/append.spec.ts @@ -15,7 +15,7 @@ describe("append", () => { it("canAppend checked once", async () => { session = await TestSession.disconnected(1); - const store = await session.peers[0].open(new EventStore()); + const store = await session.peers[0].open(new EventStore()); const canAppend = sinon.spy(store.log.canAppend); store.log.canAppend = canAppend; await store.add("a"); @@ -26,7 +26,7 @@ describe("append", () => { it("override option canAppend checked once", async () => { session = await TestSession.disconnected(1); - const store = await session.peers[0].open(new EventStore()); + const store = await session.peers[0].open(new EventStore()); const canAppend = sinon.spy(store.log.canAppend); store.log.canAppend = canAppend; diff --git a/packages/programs/data/shared-log/test/domain-time.spec.ts b/packages/programs/data/shared-log/test/domain-time.spec.ts index d01296a42..02dfbdf76 100644 --- a/packages/programs/data/shared-log/test/domain-time.spec.ts +++ b/packages/programs/data/shared-log/test/domain-time.spec.ts @@ -3,11 +3,11 @@ import type { Entry } from "@peerbit/log"; import { TestSession } from "@peerbit/test-utils"; import { waitForResolved } from "@peerbit/time"; import { expect } from "chai"; +import { denormalizer } from "../src/integers.js"; import { type ReplicationDomainTime, createReplicationDomainTime, } from "../src/replication-domain-time.js"; -import { scaleToU32 } from "../src/role.js"; import { EventStore } from "./utils/stores/event-store.js"; /** @@ -128,9 +128,12 @@ describe("ReplicationDomainTime", function () { factor: factor, strict: true, }); + await waitForResolved(async () => expect( - scaleToU32(await db2.log.calculateTotalParticipation()), + denormalizer("u32")( + await db2.log.calculateTotalParticipation({ sum: true }), + ), ).to.be.closeTo(factor, 1), ); @@ -162,8 +165,7 @@ describe("ReplicationDomainTime", function () { describe(`e2e`, function () { let session: TestSession; let db1: EventStore, - db2: EventStore, - db3: EventStore; + db2: EventStore; const options = { args: { @@ -227,7 +229,6 @@ describe(`e2e`, function () { afterEach(async () => { if (db1 && db1.closed === false) await db1.drop(); if (db2 && db2.closed === false) await db2.drop(); - if (db3 && db3.closed === false) await db3.drop(); }); it("select leaders for one or two peers", async () => { diff --git a/packages/programs/data/shared-log/test/encryption.spec.ts b/packages/programs/data/shared-log/test/encryption.spec.ts index c95f04bfc..c8907b6c7 100644 --- 
a/packages/programs/data/shared-log/test/encryption.spec.ts
+++ b/packages/programs/data/shared-log/test/encryption.spec.ts
@@ -9,7 +9,7 @@ import { SharedLog } from "../src/index.js";
 @variant("encrypt_store")
 class SimpleStore extends Program {
 @field({ type: SharedLog })
- log: SharedLog; // Documents provide document store functionality around your Posts
+ log: SharedLog; // Documents provide document store functionality around your Posts
 constructor() {
 super();
diff --git a/packages/programs/data/shared-log/test/events.spec.ts b/packages/programs/data/shared-log/test/events.spec.ts
index 93e575b89..fd6940fb8 100644
--- a/packages/programs/data/shared-log/test/events.spec.ts
+++ b/packages/programs/data/shared-log/test/events.spec.ts
@@ -1,3 +1,4 @@
+import { randomBytes } from "@peerbit/crypto";
 import { TestSession } from "@peerbit/test-utils";
 import { delay, waitForResolved } from "@peerbit/time";
 import { expect } from "chai";
@@ -93,4 +94,107 @@ describe("events", () => {
 session.peers[1].identity.publicKey.hashcode(),
 ]); // no new join events
 });
+
+ it("replicator:mature not emitted more than once on update with same range id", async () => {
+ session = await TestSession.connected(2);
+
+ const store = new EventStore();
+ let db1JoinEvents: string[] = [];
+ let timeUntilRoleMaturity = 1e3;
+ const store1 = await session.peers[0].open(store, {
+ args: {
+ replicate: { factor: 1 },
+ timeUntilRoleMaturity,
+ },
+ });
+ store1.log.events.addEventListener("replicator:mature", (event) => {
+ db1JoinEvents.push(event.detail.publicKey.hashcode());
+ });
+
+ let rangeId = randomBytes(32);
+ const store2 = await session.peers[1].open(store.clone(), {
+ args: {
+ replicate: { id: rangeId, factor: 1 },
+ timeUntilRoleMaturity,
+ },
+ });
+ await waitForResolved(() =>
+ expect(db1JoinEvents).to.have.members([
+ session.peers[0].identity.publicKey.hashcode(),
+ session.peers[1].identity.publicKey.hashcode(),
+ ]),
+ );
+
+ // reset: true re-initializes, but with the same range id no new maturity event is expected
+ await store2.log.replicate({ id: rangeId, factor: 0.5 }, { reset: true });
+
+ await waitForResolved(async () => {
+ const store2Role = await store1.log.replicationIndex
+ .iterate({ query: { hash: store2.node.identity.publicKey.hashcode() } })
+ .all();
+ expect(store2Role).to.have.length(1);
+ expect(store2Role[0].value.widthNormalized).to.be.closeTo(0.5, 0.01);
+ });
+ expect(store.log.pendingMaturity.size).to.be.eq(0);
+
+ await delay(timeUntilRoleMaturity * 2); // wait a little bit more
+ expect(db1JoinEvents).to.have.members([
+ session.peers[0].identity.publicKey.hashcode(),
+ session.peers[1].identity.publicKey.hashcode(),
+ ]); // no new join events
+
+ expect(store.log.pendingMaturity.size).to.eq(0);
+ });
+
+ it("replicator:mature emitted twice on update reset", async () => {
+ session = await TestSession.connected(2);
+
+ const store = new EventStore();
+ let db1JoinEvents: string[] = [];
+ let timeUntilRoleMaturity = 1e3;
+ const store1 = await session.peers[0].open(store, {
+ args: {
+ replicate: { factor: 1 },
+ timeUntilRoleMaturity,
+ },
+ });
+ store1.log.events.addEventListener("replicator:mature", (event) => {
+ db1JoinEvents.push(event.detail.publicKey.hashcode());
+ });
+
+ const store2 = await session.peers[1].open(store.clone(), {
+ args: {
+ replicate: { factor: 1 },
+ timeUntilRoleMaturity,
+ },
+ });
+ await waitForResolved(() =>
+ expect(db1JoinEvents).to.have.members([
+ session.peers[0].identity.publicKey.hashcode(),
+ session.peers[1].identity.publicKey.hashcode(),
]),
+ );
+
+ // reset: true means we will re-initialize, hence we expect a maturity event
+ await store2.log.replicate({ factor: 0.5 }, { reset: true });
+
+ await waitForResolved(async () => {
+ const store2Role = await store1.log.replicationIndex
+ .iterate({ query: { hash: store2.node.identity.publicKey.hashcode() } })
+ .all();
+ expect(store2Role).to.have.length(1);
+ expect(store2Role[0].value.widthNormalized).to.be.closeTo(0.5, 0.01);
+ });
+ expect(store.log.pendingMaturity.size).to.be.greaterThan(0);
+
+ await waitForResolved(() =>
+ expect(db1JoinEvents).to.have.members([
+ session.peers[0].identity.publicKey.hashcode(),
+ session.peers[1].identity.publicKey.hashcode(),
+ session.peers[1].identity.publicKey.hashcode(),
+ ]),
+ );
+
+ expect(store.log.pendingMaturity.size).to.eq(0);
+ });
+});
diff --git a/packages/programs/data/shared-log/test/join.spec.ts b/packages/programs/data/shared-log/test/join.spec.ts
index dac7a1c43..5dbc6285a 100644
--- a/packages/programs/data/shared-log/test/join.spec.ts
+++ b/packages/programs/data/shared-log/test/join.spec.ts
@@ -10,7 +10,7 @@ import { EventStore } from "./utils/stores/event-store.js";
 describe("join", () => {
 let session: TestSession;
- let db1: EventStore, db2: EventStore;
+ let db1: EventStore, db2: EventStore;
 before(async () => {
 session = await TestSession.disconnected(3, [
@@ -78,9 +78,15 @@
 });
 it("can join replicate", async () => {
- db1 = await session.peers[0].open(new EventStore());
+ db1 = await session.peers[0].open(new EventStore(), {
+ args: {
+ replicate: {
+ factor: 1,
+ },
+ },
+ });
- db2 = (await EventStore.open>(
+ db2 = (await EventStore.open>(
 db1.address!,
 session.peers[1],
 {
@@ -89,15 +95,53 @@
 ))!;
 await db1.waitFor(session.peers[1].peerId);
+ await db2.log.waitForReplicator(db1.node.identity.publicKey); // wait here so that the join operation indexes replicator info correctly
+
 const e1 = await db1.add("hello");
 expect(await db2.log.getMyReplicationSegments()).to.have.length(0);
 await db2.log.join([e1.entry], { replicate: true });
- expect(await db2.log.getMyReplicationSegments()).to.have.length(1);
+ expect(
+ (await db2.log.getMyReplicationSegments()).map((x) => x.width),
+ ).to.deep.eq([1]); // a single pin
 expect(db2.log.log.length).to.equal(1);
+
+ // expect entry to be indexed min replicas times
+ expect(await db2.log.entryCoordinatesIndex.count()).to.eq(1);
+
+ const indexedEntry = await db2.log.entryCoordinatesIndex.iterate().all();
+
+ expect(indexedEntry[0].value.assignedToRangeBoundary).to.be.false; // since there should be 2 overlapping segments
+ });
+
+ it("can join replicate and merge segments", async () => {
+ db1 = await session.peers[0].open(new EventStore());
+
+ db2 = (await EventStore.open>(
+ db1.address!,
+ session.peers[1],
+ {
+ args: { replicate: false },
+ },
+ ))!;
+
+ await db1.waitFor(session.peers[1].peerId);
+
+ const e1 = await db1.add("hello", { meta: { next: [] } });
+ const e2 = await db1.add("hello again", { meta: { next: [] } });
+
+ expect(await db2.log.getMyReplicationSegments()).to.have.length(0);
+ await db2.log.join([e1.entry, e2.entry], {
+ replicate: { mergeSegments: true },
+ });
+ expect(await db2.log.getMyReplicationSegments()).to.have.length(1);
+ expect(
+ (await db2.log.getMyReplicationSegments())[0].width,
+ ).to.be.greaterThan(1); // a segment covering more than one entry
+ expect(db2.log.log.length).to.equal(2);
 });
 it("will emit one message when replicating multiple entries", async ()
=> { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: false }, }); db2 = db1.clone(); @@ -143,7 +187,7 @@ describe("join", () => { }); it("will emit one message when replicating new and already joined entries", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: false }, }); db2 = db1.clone(); @@ -194,9 +238,9 @@ describe("join", () => { describe("already but not replicated", () => { it("entry", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -216,9 +260,9 @@ describe("join", () => { }); it("hash", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -237,9 +281,9 @@ describe("join", () => { expect(db2.log.log.length).to.equal(1); }); it("shallow entry", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/test/leader.spec.ts b/packages/programs/data/shared-log/test/leader.spec.ts index 171a0fd20..f98749956 100644 --- a/packages/programs/data/shared-log/test/leader.spec.ts +++ b/packages/programs/data/shared-log/test/leader.spec.ts @@ -1,11 +1,17 @@ import { privateKeyFromRaw } from "@libp2p/crypto/keys"; -import { getPublicKeyFromPeerId } from "@peerbit/crypto"; -import type { Entry } from "@peerbit/log"; +import { randomBytes, toBase64 } from "@peerbit/crypto"; +import { + type Entry, + EntryType, + LamportClock, + Meta, + Timestamp, +} from "@peerbit/log"; import { TestSession } from "@peerbit/test-utils"; import { delay, waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import { ExchangeHeadsMessage } from "../src/exchange-heads.js"; -import { slowDownSend } from "./utils.js"; +import { slowDownMessage } from "./utils.js"; import { EventStore } from "./utils/stores/event-store.js"; /** @@ -13,12 +19,27 @@ import { EventStore } from "./utils/stores/event-store.js"; */ const toEntry = (gid: string | number) => { - return { meta: { gid: String(gid) } } as Entry; + return { + hash: toBase64(randomBytes(32)), + meta: new Meta({ + next: [], + type: EntryType.APPEND, + gid: String(gid), + clock: new LamportClock({ + id: randomBytes(32), + timestamp: new Timestamp({ + wallTime: BigInt(Math.round(Math.random() * 1e6)), + }), + }), + }), + } as Entry; }; describe(`isLeader`, function () { let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; + let db1: EventStore, + db2: EventStore, + db3: EventStore; const options = { args: { @@ -90,23 +111,23 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { ...options.args, replicate: { offset: 0, factor: 0.5 } }, }); const isLeaderAOneLeader = await db1.log.isLeader({ entry: toEntry(123), replicas: 1, }); - 
expect(isLeaderAOneLeader); + expect(isLeaderAOneLeader).to.be.true; const isLeaderATwoLeader = await db1.log.isLeader({ entry: toEntry(123), replicas: 2, }); - expect(isLeaderATwoLeader); + expect(isLeaderATwoLeader).to.be.true; db2 = (await EventStore.open(db1.address!, session.peers[1], { args: { ...options.args, replicate: { offset: 0.5, factor: 0.5 } }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(2), @@ -129,6 +150,7 @@ describe(`isLeader`, function () { entry: toEntry(slot), replicas: 1, }); + expect([isLeaderAOneLeader, isLeaderBOneLeader]).to.have.members([ false, true, @@ -155,7 +177,7 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - const store = await new EventStore(); + const store = await new EventStore(); db1 = await session.peers[0].open(store, { args: { ...options.args }, }); @@ -163,9 +185,9 @@ describe(`isLeader`, function () { db1.address!, session.peers[1], options, - )) as EventStore; + )) as EventStore; - await delay(5000); // some delay so that if peers are to replicate, they would have had time to notify each other + await delay(2500); // some delay so that if peers are to replicate, they would have had time to notify each other // One leader const slot = 0; @@ -188,18 +210,18 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - const store = await new EventStore(); + const store = await new EventStore(); db1 = await session.peers[0].open(store, { args: { ...options.args, replicate: false }, }); db2 = (await EventStore.open(db1.address!, session.peers[1], { args: { ...options.args, replicate: { factor: 0.5 } }, - })) as EventStore; + })) as EventStore; db3 = (await EventStore.open(db1.address!, session.peers[2], { args: { ...options.args, replicate: { factor: 0.5 } }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await db2.log.getReplicators()).size).to.equal(2), @@ -235,7 +257,7 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -246,11 +268,11 @@ describe(`isLeader`, function () { db2 = (await EventStore.open(db1.address!, session.peers[1], { args: { replicate: { - offset: 0.333, + offset: 0.3333, factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; db3 = (await EventStore.open(db1.address!, session.peers[2], { args: { replicate: { @@ -258,7 +280,7 @@ describe(`isLeader`, function () { factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(3), @@ -358,35 +380,36 @@ describe(`isLeader`, function () { }); it("evenly distributed", async () => { - db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open( - db1.address!, - session.peers[1], - options, - )) as EventStore; - db3 = (await EventStore.open( - db1.address!, - session.peers[2], - options, - )) as EventStore; - - let allowedError = 0.03; - - await waitForResolved(async () => - expect( - Math.abs((await 
db1.log.getMyTotalParticipation()) - 0.33), - ).lessThan(allowedError), - ); - await waitForResolved(async () => - expect( - Math.abs((await db2.log.getMyTotalParticipation()) - 0.33), - ).lessThan(allowedError), - ); - await waitForResolved(async () => - expect( - Math.abs((await db3.log.getMyTotalParticipation()) - 0.33), - ).lessThan(allowedError), - ); + db1 = await session.peers[0].open(new EventStore(), { + ...options, + args: { + ...options.args, + replicate: { + factor: 0.333333, + offset: 0, + }, + }, + }); + db2 = (await EventStore.open(db1.address!, session.peers[1], { + ...options, + args: { + ...options.args, + replicate: { + factor: 0.333333, + offset: 0.333333, + }, + }, + })) as EventStore; + db3 = (await EventStore.open(db1.address!, session.peers[2], { + ...options, + args: { + ...options.args, + replicate: { + factor: 0.333333, + offset: 0.66666, + }, + }, + })) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(3), @@ -438,7 +461,7 @@ describe(`isLeader`, function () { describe("union", () => { it("local first", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { replicate: { @@ -451,7 +474,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -487,7 +510,7 @@ describe(`isLeader`, function () { }); it("will consider in flight", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store.clone(), { args: { @@ -503,7 +526,12 @@ describe(`isLeader`, function () { const abortController = new AbortController(); await db1.add("hello!"); - slowDownSend(db1.log, ExchangeHeadsMessage, 1e5, abortController.signal); + slowDownMessage( + db1.log, + ExchangeHeadsMessage, + 1e5, + abortController.signal, + ); db2 = await session.peers[1].open(store.clone(), { args: { @@ -528,7 +556,9 @@ describe(`isLeader`, function () { await waitForResolved( () => expect( - db2.log["syncInFlight"].has(db1.node.identity.publicKey.hashcode()), + db2.log.syncronizer.syncInFlight.has( + db1.node.identity.publicKey.hashcode(), + ), ).to.be.true, ); @@ -544,7 +574,9 @@ describe(`isLeader`, function () { abortController.abort("Start sending now"); await waitForResolved(() => { expect( - db2.log["syncInFlight"].has(db1.node.identity.publicKey.hashcode()), + db2.log.syncronizer.syncInFlight.has( + db1.node.identity.publicKey.hashcode(), + ), ).to.be.false; }); @@ -555,7 +587,7 @@ describe(`isLeader`, function () { }); it("sets replicators groups correctly", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -568,7 +600,7 @@ describe(`isLeader`, function () { }, }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -584,7 +616,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -626,7 +658,7 @@ describe(`isLeader`, function () { describe("eager", () => { it("eager, me not-mature, all included", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -639,7 +671,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -654,7 
+686,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -696,7 +728,7 @@ describe(`isLeader`, function () { }); it("all non-mature, only me included", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -709,7 +741,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -724,7 +756,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -763,7 +795,7 @@ describe(`isLeader`, function () { describe("maturity", () => { it("one mature, all included", async () => { - const store = new EventStore(); + const store = new EventStore(); const MATURE_TIME = 2000; db1 = await session.peers[0].open(store, { @@ -780,7 +812,7 @@ describe(`isLeader`, function () { await delay(MATURE_TIME); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -796,7 +828,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -856,7 +888,7 @@ describe(`isLeader`, function () { describe("balance", () => { it("small fractions means little replication", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -865,7 +897,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -932,7 +964,7 @@ describe(`isLeader`, function () { }); it("leader always defined", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { ...options.args, @@ -948,7 +980,7 @@ describe(`isLeader`, function () { factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; db3 = (await EventStore.open(db1.address!, session.peers[2], { args: { ...options.args, @@ -956,7 +988,7 @@ describe(`isLeader`, function () { factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(3), @@ -971,14 +1003,18 @@ describe(`isLeader`, function () { ); for (let i = 0; i < 100; i++) { - const leaders: Set = new Set([ - ...( - await db1.log.findLeaders( - { entry: toEntry(String(i)), replicas: 3 }, - { roleAge: 0 }, - ) - ).keys(), - ]); + const leaders: Set = new Set(); + const entry = toEntry(String(i)); + await db1.log.findLeaders( + await db1.log.createCoordinates(entry, 3), + entry, + { + roleAge: 0, + onLeader: (key) => { + leaders.add(key); + }, + }, + ); expect(leaders.has(undefined)).to.be.false; expect(leaders.size).equal(3); } @@ -986,12 +1022,12 @@ describe(`isLeader`, function () { describe("get replicators sorted", () => { it("can handle peers leaving and joining", async () => { - db1 = await session.peers[0].open(new EventStore(), options); + db1 = await session.peers[0].open(new EventStore(), options); db2 = (await EventStore.open( db1.address!, session.peers[1], options, - )) as EventStore; + )) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(2), @@ -1005,7 +1041,7 @@ describe(`isLeader`, function () { db1.address!, session.peers[2], 
options, - )) as EventStore; + )) as EventStore; await waitForResolved(async () => expect((await db3.log.getReplicators()).size).to.equal(3), @@ -1019,15 +1055,15 @@ describe(`isLeader`, function () { await waitForResolved(async () => expect([...(await db1.log.getReplicators())]).to.have.members([ - getPublicKeyFromPeerId(session.peers[0].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[2].peerId).hashcode(), + session.peers[0].identity.publicKey.hashcode(), + session.peers[2].identity.publicKey.hashcode(), ]), ); await waitForResolved(async () => expect([...(await db3.log.getReplicators())]).to.have.members([ - getPublicKeyFromPeerId(session.peers[0].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[2].peerId).hashcode(), + session.peers[0].identity.publicKey.hashcode(), + session.peers[2].identity.publicKey.hashcode(), ]), ); @@ -1037,7 +1073,7 @@ describe(`isLeader`, function () { db1.address!, session.peers[1], options, - )) as EventStore; + )) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(3), @@ -1052,19 +1088,19 @@ describe(`isLeader`, function () { ); expect([...(await db1.log.getReplicators())]).to.have.members([ - getPublicKeyFromPeerId(session.peers[0].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[1].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[2].peerId).hashcode(), + session.peers[0].identity.publicKey.hashcode(), + session.peers[1].identity.publicKey.hashcode(), + session.peers[2].identity.publicKey.hashcode(), ]); expect([...(await db2.log.getReplicators())]).to.have.members([ - getPublicKeyFromPeerId(session.peers[0].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[1].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[2].peerId).hashcode(), + session.peers[0].identity.publicKey.hashcode(), + session.peers[1].identity.publicKey.hashcode(), + session.peers[2].identity.publicKey.hashcode(), ]); expect([...(await db3.log.getReplicators())]).to.have.members([ - getPublicKeyFromPeerId(session.peers[0].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[1].peerId).hashcode(), - getPublicKeyFromPeerId(session.peers[2].peerId).hashcode(), + session.peers[0].identity.publicKey.hashcode(), + session.peers[1].identity.publicKey.hashcode(), + session.peers[2].identity.publicKey.hashcode(), ]); }); }); diff --git a/packages/programs/data/shared-log/test/lifecycle.spec.ts b/packages/programs/data/shared-log/test/lifecycle.spec.ts new file mode 100644 index 000000000..859a5b28f --- /dev/null +++ b/packages/programs/data/shared-log/test/lifecycle.spec.ts @@ -0,0 +1,244 @@ +// Include test utilities +import { TestSession } from "@peerbit/test-utils"; +import { delay, waitForResolved } from "@peerbit/time"; +import { expect } from "chai"; +import sinon from "sinon"; +import { ExchangeHeadsMessage } from "../src/exchange-heads.js"; +import { + RequestMaybeSync, + RequestMaybeSyncCoordinate, + type SimpleSyncronizer, +} from "../src/sync/simple.js"; +import { slowDownMessage } from "./utils.js"; +import { EventStore } from "./utils/stores/index.js"; + +describe("lifecycle", () => { + let session: TestSession; + + afterEach(async () => { + await session.stop(); + }); + + describe("close", () => { + it("will close all indices", async () => { + session = await TestSession.connected(1); + const store = new EventStore(); + const db = await session.peers[0].open(store); + const stopEntryCoordinatesIndex = sinon.spy( + db.log.entryCoordinatesIndex, + "stop", + ); + const 
stopReplicationIndex = sinon.spy(db.log.replicationIndex, "stop"); + const closeLog = sinon.spy(db.log, "close"); + await db.close(); + expect(stopEntryCoordinatesIndex.called).to.be.true; + expect(stopReplicationIndex.called).to.be.true; + expect(closeLog.called).to.be.true; + }); + + it("closing does not affect other instances", async () => { + session = await TestSession.connected(1); + const db = await session.peers[0].open(new EventStore()); + const db2 = await session.peers[0].open(new EventStore()); + await db2.add("hello"); + + await db.close(); + expect((await db2.iterator({ limit: -1 })).collect()).to.have.length(1); + }); + }); + describe("drop", () => { + it("will drop all data", async () => { + session = await TestSession.connected(1); + const store = new EventStore(); + const db = await session.peers[0].open(store); + await db.log.replicate({ factor: 0.3, offset: 0.3 }); + await db.log.replicate({ factor: 0.6, offset: 0.6 }); + await db.add("hello"); + await db.drop(); + + const reopen = await session.peers[0].open(store); + expect((await reopen.iterator({ limit: -1 })).collect()).to.have.length( + 0, + ); + expect(await reopen.log.entryCoordinatesIndex.count()).to.equal(0); + expect(await reopen.log.replicationIndex.count()).to.equal(1); + }); + + it("open cloned after dropped", async () => { + session = await TestSession.connected(1); + const store = new EventStore(); + const db = await session.peers[0].open(store); + await db.add("hello"); + await db.drop(); + + const reopen = await session.peers[0].open(store.clone()); + expect((await reopen.iterator({ limit: -1 })).collect()).to.have.length( + 0, + ); + }); + }); + + describe("replicators", () => { + it("uses existing subscription", async () => { + session = await TestSession.connected(2); + + const store = new EventStore(); + const db1 = await session.peers[0].open(store); + await session.peers[1].services.pubsub.requestSubscribers(db1.log.topic); + await waitForResolved(async () => + expect( + (await session.peers[1].services.pubsub.getSubscribers( + db1.log.topic, + ))!.find((x) => x.equals(session.peers[0].identity.publicKey)), + ), + ); + + // Adding a delay is necessary so that old subscription messages are not flowing around + // so that we are sure that we are "really" using existing subscriptions on start to build the replicator set + await delay(1000); + + const db2 = await session.peers[1].open(store.clone()); + await waitForResolved(async () => + expect([...(await db1.log.getReplicators())]).to.have.members( + session.peers.map((x) => x.identity.publicKey.hashcode()), + ), + ); + await waitForResolved(async () => + expect([...(await db2.log.getReplicators())]).to.have.members( + session.peers.map((x) => x.identity.publicKey.hashcode()), + ), + ); + }); + + it("clears in flight info when leaving", async () => { + const store = new EventStore(); + + session = await TestSession.connected(3); + + const db1 = await session.peers[0].open(store.clone(), { + args: { + replicate: { + factor: 1, + }, + replicas: { + min: 3, + }, + }, + }); + const db2 = await session.peers[1].open(store.clone(), { + args: { + replicate: { + factor: 1, + }, + replicas: { + min: 3, + }, + }, + }); + + const abortController = new AbortController(); + const { entry } = await db1.add("hello!"); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); + + slowDownMessage( + db1.log, + ExchangeHeadsMessage, + 1e4, + abortController.signal, + ); + slowDownMessage( + db2.log, + ExchangeHeadsMessage, + 1e4, + abortController.signal, + ); +
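// both db1 and db2 have their outgoing ExchangeHeadsMessage delayed by 10s above, + // so db3's in-flight sync bookkeeping can be inspected below before any head exchange completes +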
slowDownMessage(db2.log, RequestMaybeSync, 2e3, abortController.signal); // make db2 a bit slower so the assertions below become deterministic (more easily) + slowDownMessage( + db2.log, + RequestMaybeSyncCoordinate, + 2e3, + abortController.signal, + ); // make db2 a bit slower so the assertions below become deterministic (more easily) + + const db3 = await session.peers[2].open(store, { + args: { + replicate: { + factor: 1, + }, + replicas: { + min: 3, + }, + }, + }); + + await waitForResolved(async () => { + expect((await db3.log.getReplicators()).size).equal(3); + }); + + await waitForResolved( + () => + expect( + db3.log.syncronizer.syncInFlight.has( + db1.node.identity.publicKey.hashcode(), + ), + ).to.be.true, + ); + await waitForResolved( + () => + expect( + !!(db3.log.syncronizer as SimpleSyncronizer)[ + "syncInFlightQueue" + ] + .get(entry.hash) + ?.find((x) => x.equals(db2.node.identity.publicKey)), + ).to.be.true, + ); + await waitForResolved( + () => + expect( + (db3.log.syncronizer as SimpleSyncronizer)[ + "syncInFlightQueueInverted" + ].has(db2.node.identity.publicKey.hashcode()), + ).to.be.true, + ); // because db2 is slower + await waitForResolved( + () => + expect( + (db3.log.syncronizer as SimpleSyncronizer)[ + "syncInFlightQueueInverted" + ].has(db1.node.identity.publicKey.hashcode()), + ).to.be.false, + ); + + await db1.close(); + await db2.close(); + + await waitForResolved( + () => + expect( + db3.log.syncronizer.syncInFlight.has( + db1.node.identity.publicKey.hashcode(), + ), + ).to.be.false, + ); + await waitForResolved( + () => + expect( + (db3.log.syncronizer as SimpleSyncronizer)[ + "syncInFlightQueue" + ].has(entry.hash), + ).to.be.false, + ); + await waitForResolved( + () => + expect( + (db3.log.syncronizer as SimpleSyncronizer)[ + "syncInFlightQueueInverted" + ].has(db2.node.identity.publicKey.hashcode()), + ).to.be.false, + ); + + abortController.abort("Done"); + }); + }); +}); diff --git a/packages/programs/data/shared-log/test/load.spec.ts b/packages/programs/data/shared-log/test/load.spec.ts index 1db3dbb60..407e3649a 100644 --- a/packages/programs/data/shared-log/test/load.spec.ts +++ b/packages/programs/data/shared-log/test/load.spec.ts @@ -5,11 +5,11 @@ import { waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import mapSeries from "p-each-series"; import { v4 as uuid } from "uuid"; -import { waitForConverged } from "./utils.js"; +import { dbgLogs, waitForConverged } from "./utils.js"; import { EventStore } from "./utils/stores/event-store.js"; describe("load", function () { - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; let session: TestSession; @@ -28,8 +28,8 @@ describe("load", function () { it("load after replicate", async () => { session = await TestSession.connected(2); - db1 = await session.peers[0].open(new EventStore()); - db2 = await EventStore.open>( + db1 = await session.peers[0].open(new EventStore()); + db2 = await EventStore.open>( db1.address!, session.peers[1], ); @@ -83,9 +83,9 @@ describe("load", function () { }, ]); - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { - replicate: { factor: 0.5 }, + replicate: { offset: 0, factor: 0.5 }, replicas: { min: 1, } /* @@ -99,12 +99,12 @@ }, ); for (let i = 0; i < count; i++) { await db1.add("hello" + i, { meta: { next: [] } }); } - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { args: { - replicate: { factor: 0.5 }, + replicate: { offset:
0.3, factor: 0.5 }, replicas: { min: 1, } /* @@ -113,14 +113,19 @@ }, ); - await waitForResolved(() => expect(db1.log.log.length).lessThan(count)); // pruning started + try { + await waitForResolved(() => expect(db1.log.log.length).lessThan(count)); // pruning started + } catch (error) { + await dbgLogs([db1.log, db2.log]); + throw error; + } await waitForConverged(() => db1.log.log.length); // pruning done const lengthBeforeClose = db1.log.log.length; await waitForConverged(() => db2.log.log.length); await session.peers[1].stop(); await db1.close(); - db1 = await EventStore.open>( + db1 = await EventStore.open>( db1.address!, session.peers[0], { @@ -142,7 +147,7 @@ describe("load", function () { { directory: "./tmp/shared-log/load-events/" + uuid() }, ]); - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1 }, replicas: { diff --git a/packages/programs/data/shared-log/test/migration-8-9.spec.ts b/packages/programs/data/shared-log/test/migration-8-9.spec.ts index 9e63ed9b6..5a0ba5d6c 100644 --- a/packages/programs/data/shared-log/test/migration-8-9.spec.ts +++ b/packages/programs/data/shared-log/test/migration-8-9.spec.ts @@ -14,7 +14,7 @@ import { EventStore } from "./utils/stores/event-store.js"; describe(`migration-8-9`, function () { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; const setup = async (compatibility?: number, order: boolean = false) => { session = await TestSession.connected(2, [ @@ -46,7 +46,7 @@ describe(`migration-8-9`, function () { }, ]); - const db = new EventStore(); + const db = new EventStore(); const createV8 = () => { const db1 = db.clone(); diff --git a/packages/programs/data/shared-log/test/network.spec.ts b/packages/programs/data/shared-log/test/network.spec.ts index 211d5ec26..076858cd0 100644 --- a/packages/programs/data/shared-log/test/network.spec.ts +++ b/packages/programs/data/shared-log/test/network.spec.ts @@ -10,7 +10,7 @@ import { EventStore } from "./utils/stores/event-store.js"; describe(`network`, () => { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; after(async () => {}); @@ -32,7 +32,7 @@ describe(`network`, () => { await session.peers[0].services.blocks.waitFor(session.peers[2].peerId); await session.peers[1].services.blocks.waitFor(session.peers[2].peerId); - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -40,7 +40,7 @@ describe(`network`, () => { }, }); - db2 = await await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/test/observer.spec.ts b/packages/programs/data/shared-log/test/observer.spec.ts index 8e67beca5..f24f50520 100644 --- a/packages/programs/data/shared-log/test/observer.spec.ts +++ b/packages/programs/data/shared-log/test/observer.spec.ts @@ -20,8 +20,8 @@ describe("observer", () => { [session.peers[1], session.peers[2]], ]); - let stores: EventStore[] = []; - const s = new EventStore(); + let stores: EventStore[] = []; + const s = new EventStore(); const createStore = () => deserialize(serialize(s), EventStore); let replicatorEndIndex = 1; @@ -70,7 +70,7 @@ describe("observer", () => { session = await TestSession.connected(2); await session.connect([[session.peers[0], session.peers[1]]]); - const s = new
EventStore(); + const s = new EventStore(); const createStore = () => deserialize(serialize(s), EventStore); const replicator = await session.peers[0].open(createStore(), { @@ -108,7 +108,7 @@ describe("observer", () => { [session.peers[1], session.peers[2]], ]); - const s = new EventStore(); + const s = new EventStore(); const createStore = () => deserialize(serialize(s), EventStore); const replicator = await session.peers[0].open(createStore(), { args: { diff --git a/packages/programs/data/shared-log/test/open-close.spec.ts b/packages/programs/data/shared-log/test/open-close.spec.ts deleted file mode 100644 index 3ca72d369..000000000 --- a/packages/programs/data/shared-log/test/open-close.spec.ts +++ /dev/null @@ -1,153 +0,0 @@ -// Include test utilities -import { TestSession } from "@peerbit/test-utils"; -import { delay, waitForResolved } from "@peerbit/time"; -import { expect } from "chai"; -import { - ExchangeHeadsMessage, - RequestMaybeSync, -} from "../src/exchange-heads.js"; -import { slowDownSend } from "./utils.js"; -import { EventStore } from "./utils/stores/index.js"; - -describe("replicators", () => { - let session: TestSession; - - afterEach(async () => { - await session.stop(); - }); - - it("uses existing subsription", async () => { - session = await TestSession.connected(2); - - const store = new EventStore(); - const db1 = await session.peers[0].open(store); - await session.peers[1].services.pubsub.requestSubscribers(db1.log.topic); - await waitForResolved(async () => - expect( - (await session.peers[1].services.pubsub.getSubscribers( - db1.log.topic, - ))!.find((x) => x.equals(session.peers[0].identity.publicKey)), - ), - ); - - // Adding a delay is necessary so that old subscription messages are not flowing around - // so that we are sure the we are "really" using existing subscriptions on start to build replicator set - await delay(1000); - - const db2 = await session.peers[1].open(store.clone()); - await waitForResolved(async () => - expect([...(await db1.log.getReplicators())]).to.have.members( - session.peers.map((x) => x.identity.publicKey.hashcode()), - ), - ); - await waitForResolved(async () => - expect([...(await db2.log.getReplicators())]).to.have.members( - session.peers.map((x) => x.identity.publicKey.hashcode()), - ), - ); - }); - - it("clears in flight info when leaving", async () => { - const store = new EventStore(); - - session = await TestSession.connected(3); - - const db1 = await session.peers[0].open(store.clone(), { - args: { - replicate: { - factor: 1, - }, - replicas: { - min: 3, - }, - }, - }); - const db2 = await session.peers[1].open(store.clone(), { - args: { - replicate: { - factor: 1, - }, - replicas: { - min: 3, - }, - }, - }); - - const abortController = new AbortController(); - const { entry } = await db1.add("hello!"); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - - slowDownSend(db1.log, ExchangeHeadsMessage, 1e4, abortController.signal); - slowDownSend(db2.log, ExchangeHeadsMessage, 1e4, abortController.signal); - slowDownSend(db2.log, RequestMaybeSync, 2e3, abortController.signal); // make db2 a bit slower so the assertions below become deterministic (easily) - - const db3 = await session.peers[2].open(store, { - args: { - replicate: { - factor: 1, - }, - replicas: { - min: 3, - }, - }, - }); - - await waitForResolved(async () => { - expect((await db3.log.getReplicators()).size).equal(3); - }); - - await waitForResolved( - () => - expect( - db3.log["syncInFlight"].has(db1.node.identity.publicKey.hashcode()), - 
).to.be.true, - ); - await waitForResolved( - () => - expect( - !!db3.log["syncInFlightQueue"] - .get(entry.hash) - ?.find((x) => x.equals(db2.node.identity.publicKey)), - ).to.be.true, - ); - await waitForResolved( - () => - expect( - db3.log["syncInFlightQueueInverted"].has( - db2.node.identity.publicKey.hashcode(), - ), - ).to.be.true, - ); // because db2 is slower - await waitForResolved( - () => - expect( - db3.log["syncInFlightQueueInverted"].has( - db1.node.identity.publicKey.hashcode(), - ), - ).to.be.false, - ); - - await db1.close(); - await db2.close(); - - await waitForResolved( - () => - expect( - db3.log["syncInFlight"].has(db1.node.identity.publicKey.hashcode()), - ).to.be.false, - ); - await waitForResolved( - () => expect(db3.log["syncInFlightQueue"].has(entry.hash)).to.be.false, - ); - await waitForResolved( - () => - expect( - db3.log["syncInFlightQueueInverted"].has( - db2.node.identity.publicKey.hashcode(), - ), - ).to.be.false, - ); - - abortController.abort("Done"); - }); -}); diff --git a/packages/programs/data/shared-log/test/ranges.spec.ts b/packages/programs/data/shared-log/test/ranges.spec.ts index a17d5c70a..1dc6dd802 100644 --- a/packages/programs/data/shared-log/test/ranges.spec.ts +++ b/packages/programs/data/shared-log/test/ranges.spec.ts @@ -1,6 +1,7 @@ import { Ed25519Keypair, type Ed25519PublicKey, + type PublicSignKey, randomBytes, } from "@peerbit/crypto"; import type { Index } from "@peerbit/indexer-interface"; @@ -8,921 +9,2157 @@ import { create as createIndices } from "@peerbit/indexer-sqlite3"; import { LamportClock, Meta } from "@peerbit/log"; import { expect } from "chai"; import { - EntryReplicated, + type NumberFromType, + createNumbers, + denormalizer, +} from "../src/integers.js"; +import { + type EntryReplicated, + EntryReplicatedU32, + EntryReplicatedU64, ReplicationIntent, - ReplicationRangeIndexable, - getCoverSet, + type ReplicationRangeIndexable, + ReplicationRangeIndexableU32, + ReplicationRangeIndexableU64, + appromixateCoverage, + getCoverSet as getCoverSetGeneric, getDistance, - getEvenlySpacedU32, getSamples as getSamplesMap, - hasCoveringRange, + iHaveCoveringRange, + mergeRanges, toRebalance, } from "../src/ranges.js"; -import { HALF_MAX_U32, MAX_U32, scaleToU32 } from "../src/role.js"; - -const getSamples = async ( - offset: number, - peers: Index, - count: number, - roleAge: number, -) => { - const map = await getSamplesMap( - getEvenlySpacedU32(offset, count), - peers, - roleAge, - ); - return [...map.keys()]; -}; // prettier-ignore -describe("ranges", () => { - let peers: Index - let a: Ed25519PublicKey, b: Ed25519PublicKey, c: Ed25519PublicKey; - - let create = async (...rects: ReplicationRangeIndexable[]) => { - const indices = (await createIndices()) - await indices.start() - const index = await indices.init({ schema: ReplicationRangeIndexable }) - for (const rect of rects) { - await index.put(rect) - } - peers = index - } - before(async () => { - a = (await Ed25519Keypair.create()).publicKey; - b = (await Ed25519Keypair.create()).publicKey; - c = (await Ed25519Keypair.create()).publicKey; - - // sort keys by hash to make test assertions easier - if (a.hashcode() > b.hashcode()) { - const tmp = a; - a = b; - b = tmp; - } - if (b.hashcode() > c.hashcode()) { - const tmp = b; - b = c; - c = tmp; - } - if (a.hashcode() > b.hashcode()) { - const tmp = a; - a = b; - b = tmp; - } - - }) - beforeEach(() => { - peers = undefined!; - }) - - describe('getCover', () => { - - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 
0.8, 0.9, 1] - rotations.forEach((rotation) => { - describe('rotation: ' + String(rotation), () => { - - describe('underflow', () => { - it('includes all', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.1, offset: (0.333 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.666 + rotation) % 1, timestamp: 0n }) - ); - - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - - }) - }) - - describe("overflow", () => { - it("local first", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: 0n })) - - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode()]) - }) - }) - - describe("unmature", () => { - - it('all unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); - - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - - }) - - - it('full width all unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); - - // special case, assume we only look into selef - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode()]) - - }) - - it('two unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new 
ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); - - - // should not be included. TODO is this always expected behaviour? - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode()]) - - }) - - - }) +type R = 'u32' | 'u64' +const resolutions: [R, R] = ["u32", "u64"]; + +resolutions.forEach((resolution) => { + describe("ranges: " + resolution, () => { + const rangeClass = + resolution === "u32" + ? ReplicationRangeIndexableU32 + : ReplicationRangeIndexableU64; + const coerceNumber = (number: number | bigint): NumberFromType => + resolution === "u32" ? number : BigInt(number); + const numbers = createNumbers(resolution); + const denormalizeFn = denormalizer(resolution); + const getCoverSet = async (properties: { + peers: Index>; + start: NumberFromType | PublicSignKey | undefined; + widthToCoverScaled: NumberFromType; + roleAge: number; + eager?: + | { + unmaturedFetchCoverSize?: number; + } + | boolean; + }): Promise> => { + return getCoverSetGeneric({ ...properties, numbers }); + }; + const getSamples = async ( + offset: NumberFromType, + peers: Index>, + count: number, + roleAge: number, + ) => { + const map = await getSamplesMap( + numbers.getGrid(offset, count), + peers, + roleAge, + numbers, + ); + return [...map.keys()]; + }; - describe('eager', () => { - it('all unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); + const createReplicationRangeFromNormalized = (properties: { + id?: Uint8Array; + publicKey: PublicSignKey; + length: number; + offset: number; + timestamp?: bigint; + mode?: ReplicationIntent; + }) => { + return new rangeClass({ + id: properties.id, + publicKey: properties.publicKey, + mode: properties.mode, + // @ts-ignore + length: denormalizeFn(properties.length), + // @ts-ignore + offset: denormalizeFn(properties.offset), + timestamp: properties.timestamp, + }); + }; - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - }) - it('full width all mature', async () => { + const createReplicationRange = (properties: { + id?: Uint8Array; + publicKey: PublicSignKey; + length: number | bigint; + offset: number | bigint; + timestamp?: bigint; + mode?: ReplicationIntent; + }) => { + // @ts-ignore + return new rangeClass({ + id: properties.id, + publicKey: properties.publicKey, + mode: properties.mode, + // @ts-ignore + length: coerceNumber(properties.length), + // @ts-ignore + offset: coerceNumber(properties.offset), + timestamp: properties.timestamp, + }); + }; - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: 0n }), - new 
ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: 0n }) - ); + describe("ReplicationRangeIndexable", () => { + let peers: Index>; + let a: Ed25519PublicKey, b: Ed25519PublicKey, c: Ed25519PublicKey; + + let create = async (...rects: ReplicationRangeIndexable[]) => { + const indices = await createIndices(); + await indices.start(); + const index = await indices.init({ schema: rangeClass as any }); + for (const rect of rects) { + await index.put(rect); + } + peers = index as Index>; + }; + + before(async () => { + a = (await Ed25519Keypair.create()).publicKey; + b = (await Ed25519Keypair.create()).publicKey; + c = (await Ed25519Keypair.create()).publicKey; + + // sort keys by hash to make test assertions easier + if (a.hashcode() > b.hashcode()) { + const tmp = a; + a = b; + b = tmp; + } + if (b.hashcode() > c.hashcode()) { + const tmp = b; + b = c; + c = tmp; + } + if (a.hashcode() > b.hashcode()) { + const tmp = a; + a = b; + b = tmp; + } + }); + beforeEach(() => { + peers = undefined!; + }); - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode()]) - }) + describe("getCover", () => { + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + rotations.forEach((rotation) => { + describe("rotation: " + String(rotation), () => { + describe("underflow", () => { + it("includes all", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.1, + offset: (0.333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.666 + rotation) % 1, + timestamp: 0n, + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + }); + describe("overflow", () => { + it("local first", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: 0n, + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode()]); + }); + }); - it('full width all unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); + describe("unmature", 
() => { + it("all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + + it("full width all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // special case, assume we only look into self + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode()]); + }); + + it("two unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // should not be included. TODO is this always expected behaviour?
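+ // (b and c registered just now, so with roleAge 1e5 only a counts as mature and the cover set stays local)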
+ expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode()]); + }); + }); + + describe("eager", () => { + it("all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + it("full width all mature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: 0n, + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode()]); + }); + + it("full width all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // special case, assume we only look into self + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + + it("two unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // should not be included. TODO is this always expected behaviour?
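+ // (with eager fetching the unmature ranges of b and c are still included, hence all three members below)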
+ expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: b, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: c, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + }); - }) - - it('two unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); - - - // should not be included. TODO is this always expected behaviour? - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - expect([...await getCoverSet({ peers, roleAge: 1e5, start: b, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - expect([...await getCoverSet({ peers, roleAge: 1e5, start: c, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - - }) - }) - - - describe("skip", () => { - it('next', async () => { - - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.41, offset: (0.1 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.5, offset: (0.3 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); - - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 / 2 })]).to.have.members([a.hashcode(), b.hashcode()]) - }) - it('between', async () => { - - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.5, offset: (0.2 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.3 + rotation) % 1, timestamp: 0n }) - ); - - - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 / 2 })]).to.have.members([a.hashcode(), b.hashcode()]) - - }) - }) - - describe("boundary", () => { - - it('exact', async () => { - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.5, offset: (0.2 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: 
(0.5 + rotation) % 1, timestamp: 0n }) - ); - - // because of rounding errors, a cover width of 0.5 might yield unecessary results - expect([...await getCoverSet({ peers, roleAge: 0, start: a, widthToCoverScaled: 0.499 * MAX_U32 })]).to.have.members([a.hashcode()]) - }) - - it('after', async () => { - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.21 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n }) - ); - - expect([...await getCoverSet({ peers, roleAge: 0, start: b, widthToCoverScaled: scaleToU32(0.6) })]).to.have.members([b.hashcode()]) - }) - - it('skip matured', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n }) - ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: scaleToU32(0.5) })]).to.have.members([a.hashcode(), c.hashcode()]) - }) - - it('include start node identity', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n }) - ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: b, widthToCoverScaled: scaleToU32(0.5) })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - }) - - describe('strict', () => { - it('no boundary', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }) - ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: b, widthToCoverScaled: scaleToU32(0.51) })]).to.have.members([b.hashcode()]) - }) - - it('empty set boundary', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }) - ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ 
peers, roleAge: 1e5, start: scaleToU32((0.5 + rotation) % 1), widthToCoverScaled: scaleToU32(0.3) })]).to.have.members([]) - }) - - it('overlapping', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: scaleToU32((0 + rotation) % 1), widthToCoverScaled: scaleToU32(0.6) })]).to.have.members([a.hashcode()]) - }) - }) - }) - - - }) - - }) - }) - - describe("getSamples", () => { - const rotations = [0, 0.333, 0.5, 0.8] - rotations.forEach((rotation) => { - describe('samples correctly: ' + rotation, () => { - it("1 and less than 1", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2625, offset: (0.367 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.847 + rotation) % 1, timestamp: 0n })) - expect(await getSamples(scaleToU32(0.78), peers, 2, 0)).to.have.length(2) - }) - - it("1 sample but overlapping yield two matches", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0.367 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.847 + rotation) % 1, timestamp: 0n })) - expect(await getSamples(scaleToU32(0.78), peers, 1, 0)).to.have.length(2) - }) - - it("closest to", async () => { - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.847 + rotation) % 1), timestamp: 0n })) - expect(await getSamples(scaleToU32((0.78 + rotation) % 1), peers, 1, 0)).to.deep.eq([b.hashcode()]) - }) - - it("closest to oldest", async () => { - - // two exactly the same, but one is older - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 1n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n })) - - expect(await getSamples(scaleToU32((0.78 + rotation) % 1), peers, 1, 0)).to.deep.eq([b.hashcode()]) - }) - - it("closest to hash", async () => { - - // two exactly the same, but one is older - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n })) - - expect(a.hashcode() < b.hashcode()).to.be.true - expect(await getSamples(scaleToU32((0.78 + rotation) % 1), peers, 1, 0)).to.deep.eq([a.hashcode()]) - }) - - it("interescting", async () => { - - // two exactly the same, but one is older - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: HALF_MAX_U32, offset: scaleToU32((0 + rotation) % 1), timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.5 + rotation) % 1), timestamp: 0n })) - - const samples1 = await getSamplesMap(getEvenlySpacedU32(scaleToU32((0.25 + rotation) % 
1), 1), peers, 0) - expect([...samples1.values()].filter(x => x.intersecting).length).to.eq(1) - expect(samples1.size).to.eq(1) - - const samples2 = await getSamplesMap(getEvenlySpacedU32(scaleToU32((0.75 + rotation) % 1), 2), peers, 0) - expect([...samples2.values()].filter(x => x.intersecting).length).to.eq(1) - expect(samples2.size).to.eq(2) - - }) - - - // TODO add breakeven test to make sure it is sorted by hash - - }) - - }) - - - - it("factor 0 ", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0, offset: (0.367) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.567) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.847) % 1, timestamp: 0n }) - ); - expect(await getSamples(scaleToU32(0.3701), peers, 2, 0)).to.have.members([b, c].map(x => x.hashcode())) - }) - - - it("factor 0 with 3 peers factor 1", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: 0.145, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0, offset: 0.367, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: 0.8473, timestamp: 0n }) - ); - expect(await getSamples(scaleToU32(0.937), peers, 2, 0)).to.have.members([a, c].map(x => x.hashcode())) - }) - - it("factor 0 with 3 peers short", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2, offset: 0.145, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0, offset: 0.367, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.2, offset: 0.8473, timestamp: 0n }) - ); - expect(await getSamples(scaleToU32(0.937), peers, 2, 0)).to.have.members([a, c].map(x => x.hashcode())) - }) - - rotations.forEach((rotation) => { - - it("evenly distributed: " + rotation, async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2, offset: (0.2333 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.2, offset: (0.56666 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.2, offset: (0.9 + rotation) % 1, timestamp: 0n }) - ); - - - let ac = 0, bc = 0, cc = 0; - let count = 1000; - for (let i = 0; i < count; i++) { - const leaders = await getSamplesMap([scaleToU32(i / count)], peers, 0) - if (leaders.has(a.hashcode())) { ac++; } - if (leaders.has(b.hashcode())) { bc++; } - if (leaders.has(c.hashcode())) { cc++; } - } - - // check ac, bc and cc are all close to 1/3 - expect(ac / count).to.be.closeTo(1 / 3, 0.1) - expect(bc / count).to.be.closeTo(1 / 3, 0.1) - expect(cc / count).to.be.closeTo(1 / 3, 0.1) - }) - }) - - describe('maturity', () => { - it("starting at unmatured", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.333, offset: (0.333) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.333, offset: (0.666) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.3333, offset: (0.999) % 1, timestamp: 0n }), - ); - expect(await getSamples(scaleToU32(0.7), peers, 2, 1e5)).to.have.members([a, b, c].map(x => 
x.hashcode())) - }) - - it("starting at matured", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.333, offset: (0.333) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.333, offset: (0.666) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.3333, offset: (0.999) % 1, timestamp: 0n }) - ); - // the offset jump will be 0.5 (a) and 0.5 + 0.5 = 1 which will intersect (c) - expect(await getSamples(scaleToU32(0.5), peers, 2, 1e5)).to.have.members([a, c].map(x => x.hashcode())) - }) + describe("skip", () => { + it("next", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.41, + offset: (0.1 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.5, + offset: (0.3 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.divRound(numbers.maxValue, 2), + })), + ]).to.have.members([a.hashcode(), b.hashcode()]); + }); + it("between", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.5, + offset: (0.2 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.3 + rotation) % 1, + timestamp: 0n, + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.divRound(numbers.maxValue, 2), + })), + ]).to.have.members([a.hashcode(), b.hashcode()]); + }); + }); - it("starting at matured-2", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.333, offset: (0.333) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.333, offset: (0.666) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.3333, offset: (0.999) % 1, timestamp: 0n }) - ); - // the offset jump will be 0.2 (a) and 0.2 + 0.5 = 0.7 which will intersect (b) (unmatured) - expect(await getSamples(0, peers, 2, 1e5)).to.have.members([a, c].map(x => x.hashcode())) - }) - }) - - - describe('strict', async () => { - - rotations.forEach((rotation) => { - - it("only includes strict segments when intersecting: " + rotation, async () => { - - const offsetNonStrict = (0 + rotation) % 1 - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2, offset: offsetNonStrict, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.2, offset: (0.3 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - ); - - const leaders = await getSamples(scaleToU32(offsetNonStrict + 0.001), peers, 2, 0) - 
expect(leaders).to.have.members([a].map(x => x.hashcode())) - }) - }) - - - }) - }) - - describe("getDistance", () => { - - describe('above', () => { - it("immediate", () => { - expect(getDistance(0.5, 0.4, 'above', 1)).to.be.closeTo(0.1, 0.0001) - }) - - it('wrap', () => { - expect(getDistance(0.1, 0.9, 'above', 1)).to.be.closeTo(0.2, 0.0001) - }) - }) - - describe('below', () => { - - it("immediate", () => { - expect(getDistance(0.5, 0.6, 'below', 1)).to.be.closeTo(0.1, 0.0001) - }) - - it('wrap', () => { - expect(getDistance(0.9, 0.1, 'below', 1)).to.be.closeTo(0.2, 0.0001) - }) - - }) - - describe('closest', () => { - it('immediate', () => { - expect(getDistance(0.5, 0.6, 'closest', 1)).to.be.closeTo(0.1, 0.0001) - }) - - it('wrap', () => { - expect(getDistance(0.9, 0.1, 'closest', 1)).to.be.closeTo(0.2, 0.0001) - }) - - it('wrap 2', () => { - expect(getDistance(0.1, 0.9, 'closest', 1)).to.be.closeTo(0.2, 0.0001) - }) - }) - }) - - describe("hasOneOverlapping", () => { - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] - rotations.forEach((rotation) => { - describe('rotation: ' + String(rotation), () => { - - it('includes all', async () => { - const cmp = new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.5, offset: (0 + rotation) % 1, timestamp: 0n }) - await create(cmp); - - const inside = new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.4, offset: (0.05 + rotation) % 1, timestamp: 0n }); - expect(await hasCoveringRange(peers, inside)).to.be.true - - const outside1 = new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.4, offset: (0.2 + rotation) % 1, timestamp: 0n }); - expect(await hasCoveringRange(peers, outside1)).to.be.false - - const outside2 = new ReplicationRangeIndexable({ - normalized: true, publicKey: a, length: 0.51, offset: (0.1 + rotation) % 1, timestamp: 0n - }); - expect(await hasCoveringRange(peers, outside2)).to.be.false - - }) - }) - }) - - }) - - - /* describe("removeRange", () => { - - - it('remove outside', () => { - const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) - const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 0, length: 1, timestamp: 0n }) - const result = from.removeRange(toRemove) - expect(result).to.equal(from) - - }) - - it('remove all', () => { - const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) - const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) - const result = from.removeRange(toRemove) - expect(result).to.have.length(0) - }) - - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] - rotations.forEach((rotation) => { - describe('rotation: ' + String(rotation), () => { - - it('removes end', () => { - const from = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation, length: 0.3, timestamp: 0n }) - const toRemove = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation + 0.2, length: 0.2, timestamp: 0n }) - const result = from.removeRange(toRemove) - expect(result).to.have.length(2) - const arr = result as ReplicationRangeIndexable[] - expect(arr[0].start1).to.equal(from.start1) - expect(arr[0].end1).to.equal(toRemove.start1) - expect(arr[1].start2).to.equal(toRemove.start2) - expect(arr[1].end2).to.equal(toRemove.end2) - }) - }) - }) - - }) */ -}) 
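A note on the getDistance semantics exercised by the deleted cases above: distances are measured on a circular domain that wraps at `width`. A minimal TypeScript sketch of what those assertions encode (`getDistanceSketch` is an illustrative name, not the library's implementation):

const getDistanceSketch = (
	from: number,
	to: number,
	direction: "above" | "below" | "closest",
	width: number,
): number => {
	// distance travelled going "up" from `to` to reach `from`, wrapping at `width`
	const above = (from - to + width) % width;
	// the mirror image: distance going "down"
	const below = (to - from + width) % width;
	if (direction === "above") return above;
	if (direction === "below") return below;
	// "closest" takes whichever direction is shorter
	return Math.min(above, below);
};

// e.g. getDistanceSketch(0.1, 0.9, "above", 1) wraps around and yields 0.2,
// matching the deleted assertion getDistance(0.1, 0.9, 'above', 1) ≈ 0.2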
-describe("entry replicated", () => { - let index: Index; - - let create = async (...rects: EntryReplicated[]) => { - const indices = await createIndices(); - await indices.start(); - index = await indices.init({ schema: EntryReplicated }); - for (const rect of rects) { - await index.put(rect); - } - }; - let a: Ed25519PublicKey; - - beforeEach(async () => { - a = (await Ed25519Keypair.create()).publicKey; - index = undefined!; - }); + describe("boundary", () => { + it("exact", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.5, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + }), + ); + + // because of rounding errors, a cover width of 0.5 might yield unecessary results + expect([ + ...(await getCoverSet({ + peers, + roleAge: 0, + start: a, + widthToCoverScaled: numbers.divRound(numbers.maxValue, 2), + })), + ]).to.have.members([a.hashcode()]); + }); + + it("after", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.21 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 0, + start: b, + widthToCoverScaled: denormalizeFn(0.6), + })), + ]).to.have.members([b.hashcode()]); + }); + + it("skip unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + }), + ); + // starting from b, we need both a and c since b is not mature to cover the width + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: denormalizeFn(0.5), + })), + ]).to.have.members([a.hashcode(), c.hashcode()]); + }); + + it("include start node identity", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + }), + ); + // starting from b, we need both a and c since b is not mature to cover the width + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: b, + widthToCoverScaled: denormalizeFn(0.5), + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + + describe("strict", () => { + it("no boundary", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + 
+ + describe("strict", () => { + it("no boundary", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + // with strict segments the cover set does not expand to non-intersecting ranges; starting from b only b's own range is returned + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: b, + widthToCoverScaled: denormalizeFn(0.51), + })), + ]).to.have.members([b.hashcode()]); + }); + + it("empty set boundary", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: denormalizeFn((0.5 + rotation) % 1), + widthToCoverScaled: denormalizeFn(0.3), + })), + ]).to.have.members([]); + }); + + it("overlapping", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: denormalizeFn((0 + rotation) % 1), + widthToCoverScaled: denormalizeFn(0.6), + })), + ]).to.have.members([a.hashcode()]); + }); + + it("inside one", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + + await create( + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: denormalizeFn((0.21 + rotation) % 1), + widthToCoverScaled: denormalizeFn(0.01), + })), + ]).to.have.members([b.hashcode()]); + }); + }); + }); + }); + }); + }); - describe("toRebalance", () => { - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + describe("getSamples", () => { + const rotations = [0, 0.333, 0.5, 0.8]; + rotations.forEach((rotation) => { + describe("samples correctly: " + rotation, () => { + it("1 and less than 1", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2625, + offset: (0.367 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.847 + rotation) % 1, + timestamp: 0n, + }), + ); + expect( + // 0.78 is chosen to not intersect with a + // also (0.78 + 0.5) % 1 = 0.28 which also does not intersect with a + // this means a needs to be included through the non-intersecting sampling method + await getSamples(denormalizeFn(0.78), peers, 2, 0), + ).to.have.length(2); + }); - const consumeAllFromAsyncIterator = async ( - iter: AsyncIterable<{ gid: string; entries: EntryReplicated[] }>, - ) => { - const result = []; - for await (const entry of iter) { - result.push(entry); - } - return result; - };
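The modular arithmetic in the comment above is the sampling grid: to draw n samples, sampling starts at the query point and then jumps around the ring in n equal steps, wrapping modulo the coordinate space (numbers.getGrid, used further down, does this over the scaled u32/u64 space). A sketch over normalized coordinates, with illustrative names:

// Sketch: n evenly spaced sample points on the unit ring, starting at `start`.
const gridSketch = (start: number, n: number): number[] =>
	Array.from({ length: n }, (_, i) => (start + i / n) % 1);

gridSketch(0.78, 2); // [0.78, 0.28], the two probe points of the test above
gridSketch(0, 2); // [0, 0.5], a 0.5 "offset jump" as in the maturity tests below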
+ it("1 sample but overlapping yields two matches", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0.367 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.847 + rotation) % 1, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.78), peers, 1, 0), + ).to.have.length(2); + }); + + it("3 adjacent ranges", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.3333, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.3333, + offset: (0.3333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.3333, + offset: (0.6666 + rotation) % 1, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.1), peers, 2, 0), + ).to.have.length(2); + }); - rotations.forEach((rotation) => { - const rotate = (from: number) => (from + rotation) % 1; - describe("rotation: " + String(rotation), () => { - it("empty change set", async () => { + it("closest to", async () => { + await create( + createReplicationRange({ + publicKey: a, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.847 + rotation) % 1), + timestamp: 0n, + }), + ); + expect( + await getSamples( + denormalizeFn((0.78 + rotation) % 1), + peers, + 1, + 0, + ), + ).to.deep.eq([b.hashcode()]); + }); + + it("closest to oldest", async () => { + // two exactly the same, but one is older + await create( + createReplicationRange({ + publicKey: a, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 1n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + ); + + expect( + await getSamples( + denormalizeFn((0.78 + rotation) % 1), + peers, + 1, + 0, + ), + ).to.deep.eq([b.hashcode()]); + }); + + it("closest to hash", async () => { + // two exactly the same; the tie is broken by the smaller hash + await create( + createReplicationRange({ + publicKey: a, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + ); + + expect(a.hashcode() < b.hashcode()).to.be.true; + expect( + await getSamples( + denormalizeFn((0.78 + rotation) % 1), + peers, + 1, + 0, + ), + ).to.deep.eq([a.hashcode()]); + });
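Taken together, "closest to", "closest to oldest" and "closest to hash" pin down a deterministic sampling order: candidates are ranked by ring distance first, then by age (the older range wins), and finally by hash code as a stable tie-breaker. A sketch of that ordering on hypothetical, simplified candidate objects; the real ranking happens inside the index queries:

// Sketch: the ordering the three "closest to" tests assert.
type Candidate = { distance: number; timestamp: bigint; hash: string };
const byClosest = (x: Candidate, y: Candidate): number => {
	if (x.distance !== y.distance) return x.distance - y.distance; // nearer first
	if (x.timestamp !== y.timestamp) return x.timestamp < y.timestamp ? -1 : 1; // older first
	return x.hash < y.hash ? -1 : x.hash > y.hash ? 1 : 0; // smaller hash as final tie-breaker
};
// candidates.sort(byClosest)[0] picks b when b is older, and a when a.hashcode() < b.hashcode()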
+ + it("intersecting one", async () => { + await create( + createReplicationRange({ + publicKey: a, + length: numbers.divRound(numbers.maxValue, 2), + offset: denormalizeFn((0 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.5 + rotation) % 1), + timestamp: 0n, + }), + ); + + const samples1 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.25 + rotation) % 1), 1), + peers, + 0, + numbers, + ); + expect( + [...samples1.values()].filter((x) => x.intersecting).length, + ).to.eq(1); + expect(samples1.size).to.eq(1); + + const samples2 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.75 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples2.values()].filter((x) => x.intersecting).length, + ).to.eq(1); + expect(samples2.size).to.eq(2); + }); + + it("intersecting overlapping", async () => { + await create( + createReplicationRange({ + publicKey: a, + length: numbers.divRound(numbers.maxValue, 2), + offset: denormalizeFn((0 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: numbers.maxValue, + offset: denormalizeFn((0.5 + rotation) % 1), + timestamp: 0n, + }), + ); + + const samples1 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.25 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples1.values()].filter((x) => x.intersecting).length, + ).to.eq(2); + expect(samples1.size).to.eq(2); + + const samples2 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.25 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples2.values()].filter((x) => x.intersecting).length, + ).to.eq(2); + expect(samples2.size).to.eq(2); + }); + + it("intersecting overlapping reversed", async () => { + await create( + // reversed insertion order + createReplicationRange({ + publicKey: b, + length: numbers.maxValue, + offset: denormalizeFn((0.5 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: a, + length: numbers.divRound(numbers.maxValue, 2), + offset: denormalizeFn((0 + rotation) % 1), + timestamp: 0n, + }), + ); + + const samples1 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.25 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples1.values()].filter((x) => x.intersecting).length, + ).to.eq(2); + expect(samples1.size).to.eq(2); + + const samples2 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.25 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples2.values()].filter((x) => x.intersecting).length, + ).to.eq(2); + expect(samples2.size).to.eq(2); + }); + + it("intersecting half range", async () => { + await create( + createReplicationRange({ + publicKey: a, + length: numbers.divRound(numbers.maxValue, 2), + offset: denormalizeFn((0.5 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: numbers.divRound(numbers.maxValue, 2), + offset: denormalizeFn((0.5 + rotation) % 1), + timestamp: 0n, + }), + ); + const samples = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.25 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples.values()].filter((x) => x.intersecting).length, + ).to.eq(2); + }); + + it("intersecting half range overlapping", async () => { + await create( + createReplicationRange({ + publicKey: a, + length: numbers.divRound(numbers.maxValue, 2), + offset: denormalizeFn((0.4 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.5 + rotation) % 1), + timestamp: 0n, + }), + ); + const samples = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.3 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples.values()].filter((x) => x.intersecting).length, + ).to.eq(1); + expect(samples.size).to.eq(2); + }); + + // TODO add breakeven test to make sure it is sorted by hash + }); + }); + + it("factor 0", async () => { await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0, + offset: 0.367 % 1, + timestamp: 0n, }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: 0.567 % 1, + timestamp: 0n, + }), +
createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: 0.847 % 1, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.3701), peers, 2, 0), + ).to.have.members([b, c].map((x) => x.hashcode())); + }); + + it("factor 0 with 3 peers factor 1", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: 0.145, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0, + offset: 0.367, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: 0.8473, + timestamp: 0n, }), ); + expect( + await getSamples(denormalizeFn(0.937), peers, 2, 0), + ).to.have.members([a, c].map((x) => x.hashcode())); + }); - const result = await consumeAllFromAsyncIterator( - toRebalance([], index), + it("factor 0 with 3 peers short", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2, + offset: 0.145, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0, + offset: 0.367, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.2, + offset: 0.8473, + timestamp: 0n, + }), ); - expect(result).to.have.length(0); + expect( + await getSamples(denormalizeFn(0.937), peers, 2, 0), + ).to.have.members([a, c].map((x) => x.hashcode())); }); - describe("update", () => { - it("matches prev", async () => { + rotations.forEach((rotation) => { + it("evenly distributed: " + rotation, async () => { await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2, + offset: (0.2333 + rotation) % 1, + timestamp: 0n, }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.2, + offset: (0.56666 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.2, + offset: (0.9 + rotation) % 1, + timestamp: 0n, }), ); - const prev = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, - }); - const updated = new ReplicationRangeIndexable({ - id: prev.id, - normalized: true, - publicKey: a, - offset: rotate(0.5), - length: 0.2, - }); + let ac = 0, + bc = 0, + cc = 0; + let count = 1000; + for (let i = 0; i < count; i++) { + const leaders = await getSamplesMap( + [denormalizeFn(i / count)], + peers, + 0, + numbers, + ); + if (leaders.has(a.hashcode())) { + ac++; + } + if (leaders.has(b.hashcode())) { + bc++; + } + if (leaders.has(c.hashcode())) { + cc++; + } + } + + // check ac, bc and cc are all close to 1/3 + expect(ac / count).to.be.closeTo(1 / 3, 0.1); + expect(bc / count).to.be.closeTo(1 / 3, 0.1); + expect(cc / count).to.be.closeTo(1 / 3, 0.1); + }); + }); - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - prev, - range: updated, - type: "updated", - }, - ], - index, - ), + describe("maturity", () => { + it("starting at unmatured", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + 
length: 0.333, + offset: 0.333 % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.333, + offset: 0.666 % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.3333, + offset: 0.999 % 1, + timestamp: 0n, + }), ); - expect(result.map((x) => x.gid)).to.deep.equal(["b"]); + expect( + await getSamples(denormalizeFn(0.7), peers, 2, 1e5), + ).to.have.members([a, b, c].map((x) => x.hashcode())); }); - it("matches next", async () => { + it("starting at matured", async () => { await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.333, + offset: 0.333 % 1, + timestamp: 0n, }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.333, + offset: 0.666 % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.3333, + offset: 0.999 % 1, + timestamp: 0n, }), ); + // the offset jump will be 0.5 (a) and 0.5 + 0.5 = 1 which will intersect (c) + expect( + await getSamples(denormalizeFn(0.5), peers, 2, 1e5), + ).to.have.members([a, c].map((x) => x.hashcode())); + }); - const prev = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.5), - length: 0.2, - }); - const updated = new ReplicationRangeIndexable({ - id: prev.id, - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, - }); + it("starting at matured-2", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.333, + offset: 0.333 % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.333, + offset: 0.666 % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.3333, + offset: 0.999 % 1, + timestamp: 0n, + }), + ); + // the offset jump will be 0.2 (a) and 0.2 + 0.5 = 0.7 which will intersect (b) (unmatured) + expect( + await getSamples(numbers.zero, peers, 2, 1e5), + ).to.have.members([a, c].map((x) => x.hashcode())); + }); + }); - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - prev, - range: updated, - type: "updated", - }, - ], - index, - ), + describe("strict", async () => { + rotations.forEach((rotation) => { + it( + "only includes strict segments when intersecting: " + rotation, + async () => { + const offsetNonStrict = (0 + rotation) % 1; + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2, + offset: offsetNonStrict, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.2, + offset: (0.3 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + + const leaders = await getSamples( + denormalizeFn(offsetNonStrict + 0.001), + peers, + 2, + 0, + ); + expect(leaders).to.have.members([a].map((x) => x.hashcode())); + }, ); - expect(result.map((x) => x.gid)).to.deep.equal(["b"]); }); }); + }); - it("not enoughly replicated after change", async () => { - await create( - 
new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); + describe("getDistance", () => { + describe("above", () => { + it("immediate", () => { + expect(getDistance(0.5, 0.4, "above", 1)).to.be.closeTo( + 0.1, + 0.0001, + ); + }); - const prev = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, + it("wrap", () => { + expect(getDistance(0.1, 0.9, "above", 1)).to.be.closeTo( + 0.2, + 0.0001, + ); }); - const updated = new ReplicationRangeIndexable({ - id: prev.id, - normalized: true, - publicKey: a, - offset: rotate(0.4), - length: 0.2, + }); + + describe("below", () => { + it("immediate", () => { + expect(getDistance(0.5, 0.6, "below", 1)).to.be.closeTo( + 0.1, + 0.0001, + ); }); - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - prev, - range: updated, - type: "updated", - }, - ], - index, - ), - ); - expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + it("wrap", () => { + expect(getDistance(0.9, 0.1, "below", 1)).to.be.closeTo( + 0.2, + 0.0001, + ); + }); }); - it("not enoughly replicated after removed", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); + describe("closest", () => { + it("immediate", () => { + expect(getDistance(0.5, 0.6, "closest", 1)).to.be.closeTo( + 0.1, + 0.0001, + ); + }); - const updated = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, + it("wrap", () => { + expect(getDistance(0.9, 0.1, "closest", 1)).to.be.closeTo( + 0.2, + 0.0001, + ); }); - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - range: updated, - type: "removed", - }, - ], - index, - ), - ); - expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + it("wrap 2", () => { + expect(getDistance(0.1, 0.9, "closest", 1)).to.be.closeTo( + 0.2, + 0.0001, + ); + }); }); + }); - it("boundary assigned are always included", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: true, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); - const result = await consumeAllFromAsyncIterator( - toRebalance([], index), - ); - expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + describe("hasOneOverlapping", () => { + const rotations = [0, 0.1, 0.2, 0.3, 
0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + rotations.forEach((rotation) => { + describe("rotation: " + String(rotation), () => { + it("includes all", async () => { + const cmp = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.5, + offset: (0 + rotation) % 1, + timestamp: 0n, + }); + await create(cmp); + + const inside = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.4, + offset: (0.05 + rotation) % 1, + timestamp: 0n, + }); + expect(await iHaveCoveringRange(peers, inside)).to.be.true; + + const outside1 = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.4, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }); + expect(await iHaveCoveringRange(peers, outside1)).to.be.false; + + const outside2 = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.51, + offset: (0.1 + rotation) % 1, + timestamp: 0n, + }); + expect(await iHaveCoveringRange(peers, outside2)).to.be.false; + }); + }); + }); + }); + + describe("merge", () => { + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + rotations.forEach((rotation) => { + describe("rotation: " + String(rotation), () => { + describe("2 ranges", () => { + it("gap", async () => { + const offset1 = denormalizeFn(0.2 + rotation); + const offset2 = denormalizeFn(0.3 + rotation); + + //@ts-ignore + const diff = numbers.abs(offset1 - offset2); + + //@ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset1 % numbers.maxValue, + timestamp: 0n, + }); + + //@ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset2 % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2], numbers); + + expect(merged.width).to.eq( + diff + ((typeof diff === "number" ? 1 : 1n) as any), + ); // + 1 for the length of the last range + expect(merged.start1).to.equal(range1.start1); + }); + + it("adjacent", async () => { + const offset = denormalizeFn(0.2 + rotation); + + //@ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset % numbers.maxValue, + timestamp: 0n, + }); + + //@ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + offset: + // @ts-ignore + (offset + (typeof offset === "bigint" ?
1n : 1)) % + numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2], numbers); + expect(Number(merged.width)).to.eq(2); + expect(merged.start1).to.equal(range1.start1); + }); + + it("duplicates", async () => { + const offset = denormalizeFn(0.2 + rotation); + + //@ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset % numbers.maxValue, + timestamp: 0n, + }); + //@ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2], numbers); + expect(Number(merged.width)).to.eq(1); + // expect(merged.start1).to.equal(range1.start1) + }); + }); + + describe("3 ranges", () => { + it("gap", async () => { + const offset1 = denormalizeFn(0.2 + rotation); + const offset2 = denormalizeFn(0.3 + rotation); + const offset3 = denormalizeFn(0.4 + rotation); + + // @ts-ignore + const diff = numbers.abs(offset1 - offset3); + + // @ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset1 % numbers.maxValue, + timestamp: 0n, + }); + // @ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset2 % numbers.maxValue, + timestamp: 0n, + }); + const range3 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset3 % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2, range3], numbers); + // @ts-ignore + expect(merged.width).to.eq( + // @ts-ignore + diff + (typeof diff === "number" ? 1 : 1n), + ); // + 1 for the length of the last range + }); + + it("adjacent", async () => { + const offset1 = denormalizeFn(0.2 + rotation); + const offset2 = + // @ts-ignore + offset1 + (typeof offset1 === "number" ? 1 : 1n); + // @ts-ignore + const offset3 = + offset2 + (typeof offset2 === "number" ? 1 : 1n); + + // @ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset1 % numbers.maxValue, + timestamp: 0n, + }); + // @ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset2 % numbers.maxValue, + timestamp: 0n, + }); + + // @ts-ignore + const range3 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset3 % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2, range3], numbers); + expect(Number(merged.width)).to.eq(3); + }); + }); + }); + }); + });
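The width assertions in these merge tests are plain interval arithmetic on scaled coordinates: merging width-1 ranges produces one range spanning from the smallest offset to the largest, inclusive, hence diff + 1. A worked u32 example, assuming the denormalizer simply scales and rounds (a sketch, not the exact src/integers.ts implementation):

// Worked example of the 2-range "gap" case in the u32 space.
const MAX_U32 = 0xffffffff;
const denorm = (x: number) => Math.round(x * MAX_U32); // sketch of denormalizer("u32")
const offset1 = denorm(0.2);
const offset2 = denorm(0.3);
// merging [offset1, offset1] and [offset2, offset2] spans both endpoints inclusively:
const mergedWidth = offset2 - offset1 + 1; // "+ 1 for the length of the last range"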
+ + describe("approximateCoverage", () => { + [0, 0.3, 0.6, 0.9].forEach((rotation) => { + describe("rotation: " + rotation, () => { + const samples = 20; + describe("100%", () => { + it("one range", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + ); + + // a single range of length 1 covers the whole space + expect( + await appromixateCoverage({ + peers, + samples, + numbers, + normalized: true, + }), + ).to.eq(1); + }); + + it("two ranges", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.5, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + }), + ); + + // two ranges of length 0.5 together cover the whole space + expect( + await appromixateCoverage({ + peers, + samples, + numbers, + normalized: true, + }), + ).to.be.closeTo(1, 1 / (samples - 1)); + }); + }); + + describe("50%", () => { + it("one range", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.5, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + ); + + // a single range of length 0.5 covers half the space + expect( + await appromixateCoverage({ + peers, + samples, + numbers, + normalized: true, + }), + ).to.be.closeTo(0.5, 1 / (samples - 1)); + }); + + it("two ranges", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.25, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.25, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + }), + ); + + // two ranges of length 0.25 together cover half the space + expect( + await appromixateCoverage({ + peers, + samples, + numbers, + normalized: true, + }), + ).to.be.closeTo(0.5, 1 / (samples - 1)); + }); + }); + }); + }); + });
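appromixateCoverage can only be as precise as its sample count, which is why these assertions allow a 1 / (samples - 1) tolerance: the estimate probes evenly spaced points and reports the hit ratio. A self-contained sketch of that idea over normalized, wrapping ranges (illustrative shapes, not the real API):

// Sketch: estimate coverage by probing `samples` evenly spaced points.
type NormalizedRange = { offset: number; length: number }; // wraps at 1
const contains = (p: number, r: NormalizedRange): boolean => {
	const end = r.offset + r.length;
	return end <= 1 ? p >= r.offset && p < end : p >= r.offset || p < end % 1;
};
const approximateCoverageSketch = (ranges: NormalizedRange[], samples: number): number => {
	let hits = 0;
	for (let i = 0; i < samples; i++) {
		if (ranges.some((r) => contains(i / samples, r))) hits++;
	}
	return hits / samples; // resolution limited by the grid, hence the test tolerance
};
// approximateCoverageSketch([{ offset: 0, length: 0.5 }], 20) === 0.5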
+ + /* describe("removeRange", () => { + + + it('remove outside', () => { + const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) + const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 0, length: 1, timestamp: 0n }) + const result = from.removeRange(toRemove) + expect(result).to.equal(from) + + }) + + it('remove all', () => { + const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) + const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) + const result = from.removeRange(toRemove) + expect(result).to.have.length(0) + }) + + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] + rotations.forEach((rotation) => { + describe('rotation: ' + String(rotation), () => { + + it('removes end', () => { + const from = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation, length: 0.3, timestamp: 0n }) + const toRemove = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation + 0.2, length: 0.2, timestamp: 0n }) + const result = from.removeRange(toRemove) + expect(result).to.have.length(2) + const arr = result as ReplicationRangeIndexable[] + expect(arr[0].start1).to.equal(from.start1) + expect(arr[0].end1).to.equal(toRemove.start1) + expect(arr[1].start2).to.equal(toRemove.start2) + expect(arr[1].end2).to.equal(toRemove.end2) + }) + }) + }) + + }) */ + }); + + describe("entry replicated", () => { + let index: Index<EntryReplicated<R>>; + const entryClass = + resolution === "u32" ? EntryReplicatedU32 : EntryReplicatedU64; + + let create = async (...rects: EntryReplicated<R>[]) => { + const indices = await createIndices(); + await indices.start(); + index = await indices.init({ schema: entryClass as any }); + for (const rect of rects) { + await index.put(rect); + } + }; + let a: Ed25519PublicKey; + + beforeEach(async () => { + a = (await Ed25519Keypair.create()).publicKey; + index = undefined!; + }); + + describe("toRebalance", () => { + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + + const consumeAllFromAsyncIterator = async ( + iter: AsyncIterable<EntryReplicated<R>>, + ): Promise<EntryReplicated<R>[]> => { + const result = []; + for await (const entry of iter) { + result.push(entry); + } + return result; + }; + + const createEntryReplicated = (properties: { + coordinate: NumberFromType<R>; + hash: string; + meta: Meta; + assignedToRangeBoundary: boolean; + }) => { + return new entryClass({ + coordinates: [properties.coordinate], + assignedToRangeBoundary: properties.assignedToRangeBoundary, + hash: properties.hash, + meta: properties.meta, + } as any); + }; + + rotations.forEach((rotation) => { + const rotate = (from: number) => (from + rotation) % 1; + describe("rotation: " + String(rotation), () => { + it("empty change set", async () => { + await create( + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + + const result = await consumeAllFromAsyncIterator( + toRebalance([], index), + ); + expect(result).to.have.length(0); + }); + + describe("update", () => { + it("matches prev", async () => { + await create( + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", +
meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + + const prev = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + const updated = createReplicationRangeFromNormalized({ + id: prev.id, + publicKey: a, + offset: rotate(0.5), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + prev, + range: updated, + type: "updated", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.equal(["b"]); + }); + + it("matches next", async () => { + await create( + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + + const prev = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.5), + length: 0.2, + }); + const updated = createReplicationRangeFromNormalized({ + id: prev.id, + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + prev, + range: updated, + type: "updated", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.equal(["b"]); + }); + }); + + it("not sufficiently replicated after change", async () => { + await create( + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + + const prev = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + const updated = createReplicationRangeFromNormalized({ + id: prev.id, + publicKey: a, + offset: rotate(0.4), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + prev, + range: updated, + type: "updated", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + }); + + it("not sufficiently replicated after removed", async () => { + await create( + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + + const updated = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + range: updated, + type: "removed", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + });
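These cases outline the contract of toRebalance: an entry must be revisited if its coordinate was covered by the old footprint of an updated segment, by the new footprint, or by a removed segment, and entries flagged assignedToRangeBoundary are always revisited, as the next case shows. A sketch of that selection rule on hypothetical simplified shapes; the real implementation runs as queries against the entry index:

// Sketch: which entries the toRebalance tests expect back.
type Segment = { contains(coordinate: number): boolean };
type RangeChange =
	| { type: "updated"; prev: Segment; range: Segment }
	| { type: "removed"; range: Segment };
type EntrySketch = { coordinate: number; assignedToRangeBoundary: boolean };
const needsRebalance = (e: EntrySketch, changes: RangeChange[]): boolean =>
	e.assignedToRangeBoundary || // boundary-assigned entries are always included
	changes.some(
		(c) =>
			c.range.contains(e.coordinate) ||
			(c.type === "updated" && c.prev.contains(e.coordinate)),
	);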
x.gid)).to.deep.eq(["b"]); + }); + + it("boundary assigned are always included", async () => { + await create( + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + createEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: true, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + const result = await consumeAllFromAsyncIterator( + toRebalance([], index), + ); + expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + }); + + it("many items", async () => { + let count = 1500; + const entries: EntryReplicated[] = []; + for (let i = 0; i < count; i++) { + entries.push( + createEntryReplicated({ + coordinate: denormalizeFn(rotate(i / count)), + assignedToRangeBoundary: true, // needs to be true, so this item always is returned + hash: i.toString(), + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: i.toString(), + next: [], + type: 0, + data: undefined, + }), + }), + ); + } + + await create(...entries); + expect( + await consumeAllFromAsyncIterator(toRebalance([], index)).then( + (x) => x.length, + ), + ).to.eq(count); + }); + }); }); }); }); diff --git a/packages/programs/data/shared-log/test/rateless-iblt.spec.ts b/packages/programs/data/shared-log/test/rateless-iblt.spec.ts new file mode 100644 index 000000000..4bdc9a095 --- /dev/null +++ b/packages/programs/data/shared-log/test/rateless-iblt.spec.ts @@ -0,0 +1,560 @@ +import { Cache } from "@peerbit/cache"; +import { TestSession } from "@peerbit/test-utils"; +import { waitForResolved } from "@peerbit/time"; +import { expect } from "chai"; +import sinon from "sinon"; +import { + type ReplicationDomainHash, + createReplicationDomainHash, +} from "../src"; +import type { TransportMessage } from "../src/message"; +import { + MoreSymbols, + RatelessIBLTSynchronizer, + RequestAll, + StartSync, +} from "../src/sync/rateless-iblt.js"; +import { EventStore } from "./utils/stores"; + +const setup = { + domain: createReplicationDomainHash("u64"), + type: "u64" as const, + syncronizer: RatelessIBLTSynchronizer, + name: "u64-iblt", + coordinateToHash: new Cache({ max: 1000, ttl: 1000 }), +}; + +describe("rateless-iblt-syncronizer", () => { + let session: TestSession; + let db1: EventStore>, + db2: EventStore>; + + beforeEach(async () => {}); + + afterEach(async () => { + await session.stop(); + }); + + const collectMessages = async ( + log: EventStore>, + ) => { + const onMessageDb = sinon.spy(log.log, "_onMessage"); + log.log._onMessage = onMessageDb; + return { + get calls(): TransportMessage[] { + return onMessageDb.getCalls().map((x) => x.args[0]); + }, + }; + }; + + const countMessages = (messages: TransportMessage[], type: any) => { + return messages.filter((x) => x instanceof type).length; + }; + + const setupLogs = async ( + syncedCount: number, + unsyncedCount: number, + oneSided = false, + ) => { + session = await TestSession.disconnected(2); + db1 = await session.peers[0].open( + new EventStore>(), + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ); + + db2 = await session.peers[1].open(db1.clone(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); + + for (let i = 0; i < syncedCount; i++) { + const out = await db1.add("test", { meta: { next: [] } }); + 
+ const setupLogs = async ( + syncedCount: number, + unsyncedCount: number, + oneSided = false, + ) => { + session = await TestSession.disconnected(2); + db1 = await session.peers[0].open( + new EventStore<string, ReplicationDomainHash<"u64">>(), + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ); + + db2 = await session.peers[1].open(db1.clone(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); + + for (let i = 0; i < syncedCount; i++) { + const out = await db1.add("test", { meta: { next: [] } }); + await db2.log.join([out.entry]); + } + + for (let i = 0; i < unsyncedCount; i++) { + await db1.add("test", { meta: { next: [] } }); + if (!oneSided) { + await db2.add("test", { meta: { next: [] } }); + } + } + expect(db1.log.log.length).to.equal(syncedCount + unsyncedCount); + expect(db2.log.log.length).to.equal( + syncedCount + (oneSided ? 0 : unsyncedCount), + ); + }; + + it("already synced", async () => { + const syncedCount = 1000; + await setupLogs(syncedCount, 0); + + const db1Messages = await collectMessages(db1); + const db2Messages = await collectMessages(db2); + + await db1.node.dial(db2.node.getMultiaddrs()); + + await waitForResolved(() => + expect(db1.log.log.length).to.equal(syncedCount), + ); + + expect(countMessages(db1Messages.calls, MoreSymbols)).to.equal(0); + expect(countMessages(db2Messages.calls, MoreSymbols)).to.equal(0); + }); + + it("all missing will skip iblt syncing", async () => { + const syncedCount = 0; + const unsyncedCount = 1000; + let oneSided = true; + + await setupLogs(syncedCount, unsyncedCount, oneSided); // only db1 will have entries + const db1Messages = await collectMessages(db1); + const db2Messages = await collectMessages(db2); + await db1.node.dial(db2.node.getMultiaddrs()); + await waitForResolved(() => + expect(db1.log.log.length).to.equal(unsyncedCount), + ); + await waitForResolved(() => + expect(db2.log.log.length).to.equal(unsyncedCount), + ); + + expect(countMessages(db1Messages.calls, MoreSymbols)).to.be.equal(0); + expect(countMessages(db1Messages.calls, RequestAll)).to.be.equal(1); + expect(countMessages(db1Messages.calls, StartSync)).to.be.equal(0); + + expect(countMessages(db2Messages.calls, MoreSymbols)).to.be.equal(0); + expect(countMessages(db2Messages.calls, RequestAll)).to.be.equal(0); + expect(countMessages(db2Messages.calls, StartSync)).to.be.equal(1); + }); + + it("one missing", async () => { + const syncedCount = 1000; + const unsyncedCount = 1; + await setupLogs(syncedCount, unsyncedCount); + const db1Messages = await collectMessages(db1); + const db2Messages = await collectMessages(db2); + await db1.node.dial(db2.node.getMultiaddrs()); + + await waitForResolved(() => + expect(db1.log.log.length).to.equal(syncedCount + unsyncedCount * 2), + ); + await waitForResolved(() => + expect(db2.log.log.length).to.equal(syncedCount + unsyncedCount * 2), + ); + + expect(countMessages(db1Messages.calls, MoreSymbols)).to.equal(0); // because StartSync will emit a few symbols that will be enough + expect(countMessages(db2Messages.calls, MoreSymbols)).to.equal(0); // because StartSync will emit a few symbols that will be enough + }); + + it("many missing", async () => { + const syncedCount = 3e3; + const unsyncedCount = 3e3; + + await setupLogs(syncedCount, unsyncedCount); + const db1Messages = await collectMessages(db1); + const db2Messages = await collectMessages(db2); + + await db1.node.dial(db2.node.getMultiaddrs()); + + await waitForResolved( + () => + expect(db1.log.log.length).to.equal(syncedCount + unsyncedCount * 2), + { + timeout: 2e4, + }, + ); + await waitForResolved( + () => + expect(db2.log.log.length).to.equal(syncedCount + unsyncedCount * 2), + { + timeout: 2e4, + }, + ); + + expect(countMessages(db1Messages.calls, MoreSymbols)).to.be.greaterThan(0); + expect(countMessages(db2Messages.calls, MoreSymbols)).to.be.greaterThan(0); + }); + + /* it("builds", async () => { + const { indices } = await createFromValues( + "u64", + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const rangeEncoders = new RangeToEncoders( + a,
+ indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + expect(rangeEncoders.encoders.size).to.equal(1); + }); + + it("generates determenistically", async () => { + const { indices, ranges } = await createFromValues( + "u64", + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const rangeEncoders = new RangeToEncoders( + a, + indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + expect(rangeEncoders.encoders.size).to.equal(1); + + const generator = rangeEncoders.createSymbolGenerator(ranges[0]); + + let symbol1 = generator.next(); + expect(typeof symbol1.count).to.equal("bigint"); + expect(typeof symbol1.hash).to.equal("bigint"); + expect(typeof symbol1.symbol).to.equal("bigint"); + expect(symbol1.hash).to.not.equal(0n); + + const generator2 = rangeEncoders.createSymbolGenerator(ranges[0]); + + let symbol2 = generator2.next(); + expect(symbol1).to.deep.equal(symbol2); + }); + + describe("diff", () => { + it("no difference", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [0.5], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + remoteRange: remote.ranges[0], + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.have.length(0); + }); + + it("remote is missing entry", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + remoteRange: remote.ranges[0], + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.deep.eq([BigInt(local.entry[0].coordinate)]); + }); + + it("local is missing entry", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [0.5], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + remoteRange: remote.ranges[0], + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.deep.eq([]); + }); + }); */ +}); + +/* +describe("sync", () => { + let indicesArr: SQLiteIndices[]; + + let createRangeEncoder = async ( + resolution: R, + publicKey: PublicSignKey, + rects: { publicKey: PublicSignKey; length: number; offset: number }[], + entries: number[], + ) => { + const { indices, entry, ranges } = await createFromValues( + resolution, + rects, + entries, + ); + const rangeEncoders = new RangeToEncoders( + publicKey, + indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + return { indices, rangeEncoders, entry, ranges }; + }; + + let createFromValues = async ( + 
resolution: R, + rects: { publicKey: PublicSignKey; length: number; offset: number }[], + entries: number[], + ) => { + const { rangeClass, entryClass } = resolveClasses(resolution); + const denormalizerFN = denormalizer(resolution); + let ranges: ReplicationRangeIndexable[] = rects.map( + (x) => + new rangeClass({ + publicKey: x.publicKey, + // @ts-ignore + length: denormalizerFN(x.length), + // @ts-ignore + offset: denormalizerFN(x.offset), + timestamp: 0n, + }) as unknown as ReplicationRangeIndexable, + ) as ReplicationRangeIndexable[]; + let entry: EntryReplicated[] = entries.map( + (x) => + // @ts-ignore + new entryClass({ + // @ts-ignore + coordinate: denormalizerFN(x) as NumberFromType, + assignedToRangeBoundary: false, + hash: String("a"), + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: String(x), + next: [], + type: 0, + data: undefined, + }), + }) as EntryReplicated, + ); + + return { + indices: await create(ranges, entry, resolution), + ranges, + entry, + }; + }; + + let create = async ( + rects: ReplicationRangeIndexable[], + entries: EntryReplicated[], + resolution: R, + ) => { + let indices = await createIndices(); + await indices.start(); + + const rangeClass = + resolution === "u32" + ? ReplicationRangeIndexableU32 + : ReplicationRangeIndexableU64; + const indexRects = await indices.init({ schema: rangeClass as any }); + for (const rect of rects) { + await indexRects.put(rect); + } + + const entryClass = + resolution === "u32" ? EntryReplicatedU32 : EntryReplicatedU64; + const indexEntries = await indices.init({ schema: entryClass as any }); + for (const entry of entries) { + await indexEntries.put(entry); + } + + indicesArr.push(indices); + return { + rangeIndex: indexRects, + entryIndex: indexEntries, + } as { + rangeIndex: Index>; + entryIndex: Index>; + }; + }; + let a: Ed25519PublicKey; + let b: Ed25519PublicKey; + + beforeEach(async () => { + indicesArr = []; + a = (await Ed25519Keypair.create()).publicKey; + b = (await Ed25519Keypair.create()).publicKey; + }); + + afterEach(async () => { + await Promise.all(indicesArr.map((x) => x.stop())); + }); + + it("builds", async () => { + const { indices } = await createFromValues( + "u64", + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const rangeEncoders = new RangeToEncoders( + a, + indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + expect(rangeEncoders.encoders.size).to.equal(1); + }); + + it("generates determenistically", async () => { + const { indices, ranges } = await createFromValues( + "u64", + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const rangeEncoders = new RangeToEncoders( + a, + indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + expect(rangeEncoders.encoders.size).to.equal(1); + + const generator = rangeEncoders.createSymbolGenerator(ranges[0]); + + let symbol1 = generator.next(); + expect(typeof symbol1.count).to.equal("bigint"); + expect(typeof symbol1.hash).to.equal("bigint"); + expect(typeof symbol1.symbol).to.equal("bigint"); + expect(symbol1.hash).to.not.equal(0n); + + const generator2 = rangeEncoders.createSymbolGenerator(ranges[0]); + + let symbol2 = generator2.next(); + expect(symbol1).to.deep.equal(symbol2); + }); + + describe("diff", () => { + it("no difference", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 
}], + [0.5], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + remoteRange: remote.ranges[0], + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.have.length(0); + }); + + it("remote is missing entry", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + remoteRange: remote.ranges[0], + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.deep.eq([BigInt(local.entry[0].coordinate)]); + }); + + it("local is missing entry", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [0.5], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + remoteRange: remote.ranges[0], + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.deep.eq([]); + }); + }); +}); + */ diff --git a/packages/programs/data/shared-log/test/replicate.spec.ts b/packages/programs/data/shared-log/test/replicate.spec.ts index daab8a1cb..5e1553543 100644 --- a/packages/programs/data/shared-log/test/replicate.spec.ts +++ b/packages/programs/data/shared-log/test/replicate.spec.ts @@ -6,17 +6,17 @@ import { delay, waitFor, waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import path from "path"; import { v4 as uuid } from "uuid"; -import type { SharedLog } from "../src/index.js"; -import { - ReplicationIntent, - type ReplicationRangeIndexable, - isMatured, -} from "../src/ranges.js"; +import type { + ReplicationDomainHash, + ReplicationRangeIndexable, + SharedLog, +} from "../src/index.js"; +import { denormalizer } from "../src/integers.js"; +import { ReplicationIntent, isMatured } from "../src/ranges.js"; import { createReplicationDomainHash } from "../src/replication-domain-hash.js"; -import { scaleToU32 } from "../src/role.js"; import { EventStore } from "./utils/stores/event-store.js"; -const checkRoleIsDynamic = async (log: SharedLog<any>) => { +const checkRoleIsDynamic = async (log: SharedLog<any, any>) => { const roles: any[] = []; log.events.addEventListener("replication:change", (change) => { if (change.detail.publicKey.equals(log.node.identity.publicKey)) { @@ -28,9 +28,12 @@ const checkRoleIsDynamic = async (log: SharedLog) => { await waitForResolved(() => expect(roles.length).greaterThan(3)); }; +const scaleToU32 = denormalizer("u32"); + describe(`replicate`, () => { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore<string, ReplicationDomainHash<"u32">>, + db2: EventStore<string, ReplicationDomainHash<"u32">>; before(async () => { session = await TestSession.disconnected(3, [ @@ -98,11 +101,11 @@ }); it("none", async () => { - db1 = await session.peers[0].open(new
EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1 } }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -124,7 +127,7 @@ describe(`replicate`, () => { describe("observer", () => { it("can update", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); expect( (db1.log.node.services.pubsub as any)["subscriptions"].get( @@ -145,9 +148,9 @@ describe(`replicate`, () => { }); it("observer", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -168,9 +171,9 @@ describe(`replicate`, () => { }); }); - describe("replictor", () => { + describe("replicator", () => { it("fixed-object", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.7, @@ -192,7 +195,7 @@ describe(`replicate`, () => { }); it("fixed-simple", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: 1, }, @@ -204,7 +207,7 @@ describe(`replicate`, () => { }); it("can unreplicate", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: 1, }, @@ -233,7 +236,7 @@ describe(`replicate`, () => { }); it("adding segments", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -250,13 +253,13 @@ describe(`replicate`, () => { await waitForResolved(async () => expect(await db1.log.calculateTotalParticipation()).to.be.closeTo( 1.1, - 0.01, + 0.1, ), ); await waitForResolved(async () => expect(await db2.log.calculateTotalParticipation()).to.be.closeTo( 1.1, - 0.01, + 0.1, ), ); @@ -265,31 +268,31 @@ describe(`replicate`, () => { await waitForResolved(async () => expect(await db1.log.calculateTotalParticipation()).to.be.closeTo( 1.3, - 0.01, + 0.1, ), ); await waitForResolved(async () => expect(await db2.log.calculateTotalParticipation()).to.be.closeTo( 1.3, - 0.01, + 0.1, ), ); }); it("dynamic by default", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); await checkRoleIsDynamic(db1.log); }); it("update to dynamic role", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); await db1.log.replicate(false); await db1.log.replicate({ limits: {} }); await checkRoleIsDynamic(db1.log); }); it("waitForReplicator waits until maturity", async () => { - const store = new EventStore(); + const store = new EventStore(); const db1 = await session.peers[0].open(store.clone(), { args: { @@ -313,7 +316,7 @@ describe(`replicate`, () => { }); describe("getDefaultMinRoleAge", () => { it("if not replicating, min role age is 0", async () => { - const store = new EventStore(); + const store = new EventStore(); await session.peers[0].open(store.clone(), { args: { @@ -334,16 +337,19 @@ describe(`replicate`, () => { }); it("oldest is always mature", async () => { - const store = new EventStore(); + const store = new EventStore(); + + const timeUntilRoleMaturity = 500; + const tsm = 1000; const db1 = 
await session.peers[0].open(store.clone(), { args: { replicate: { factor: 1, }, + timeUntilRoleMaturity, }, }); - const tsm = 1000; await delay(tsm); @@ -352,6 +358,7 @@ describe(`replicate`, () => { replicate: { factor: 1, }, + timeUntilRoleMaturity, }, }); await waitForResolved(async () => @@ -407,7 +414,7 @@ describe(`replicate`, () => { describe("mode", () => { it("strict", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { normalized: false, @@ -443,9 +450,9 @@ describe(`replicate`, () => { describe("entry", () => { it("entry", async () => { - const store = new EventStore(); + const store = new EventStore(); - let domain = createReplicationDomainHash(); + let domain = createReplicationDomainHash("u32"); const db1 = await session.peers[0].open(store.clone(), { args: { @@ -462,7 +469,7 @@ describe(`replicate`, () => { }); const checkReplication = async ( - db: EventStore, + db: EventStore, entry: Entry, ) => { const offset = await domain.fromEntry(added.entry); @@ -475,7 +482,7 @@ describe(`replicate`, () => { expect(range.factor).to.equal(1); // mininum unit of length }; - const checkUnreplication = async (db: EventStore) => { + const checkUnreplication = async (db: EventStore) => { const ranges = await db.log.replicationIndex.iterate().all(); expect(ranges).to.have.length(0); }; @@ -492,9 +499,9 @@ describe(`replicate`, () => { }); it("entry with range", async () => { - const store = new EventStore(); + const store = new EventStore(); - let domain = createReplicationDomainHash(); + let domain = createReplicationDomainHash("u32"); let startFactor = 500000; let startOffset = 0; @@ -517,7 +524,7 @@ describe(`replicate`, () => { }); const checkReplication = async ( - db: EventStore, + db: EventStore, entry: Entry, ) => { const offset = await domain.fromEntry(added.entry); @@ -536,7 +543,7 @@ describe(`replicate`, () => { expect(rangeEntry.factor).to.equal(1); // mininum unit of length }; - const checkUnreplication = async (db: EventStore) => { + const checkUnreplication = async (db: EventStore) => { const ranges = await db.log.replicationIndex .iterate({ sort: new Sort({ key: ["start1"] }) }) .all(); @@ -623,7 +630,7 @@ describe(`replicate`, () => { }); it("restart after adding", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.3, @@ -634,7 +641,7 @@ describe(`replicate`, () => { await db1.log.replicate({ factor: 0.2, offset: 0.6 }); - const checkSegments = async (db: EventStore) => { + const checkSegments = async (db: EventStore) => { const segments = await db.log.replicationIndex .iterate({ sort: [new Sort({ key: "start1" })] }) .all(); @@ -683,7 +690,7 @@ describe(`replicate`, () => { }); it("restart another settings", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.3, @@ -708,7 +715,7 @@ describe(`replicate`, () => { it("will re-check replication segments on restart", async () => { // make sure non-reachable peers are not included in the replication segments - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.3, @@ -773,7 +780,7 @@ describe(`replicate`, () => { it("segments updated while offline", async () => { // make sure non-reachable peers are not included in the replication segments 
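+		// (i.e. segments that changed while this peer was unreachable must be
+		// re-checked on restart rather than served from the stale local index)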
- db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.1, @@ -837,7 +844,7 @@ describe(`replicate`, () => { await waitForResolved(async () => { const checkSegments = ( - segments: IndexedResults, + segments: IndexedResults>, ) => { expect(segments).to.have.length(2); @@ -870,8 +877,8 @@ describe(`replicate`, () => { id: "encryption key", group: topic, }); - db2 = await client2.open>( - await EventStore.load>( + db2 = await client2.open>( + await EventStore.load>( client2.libp2p.services.blocks, db1.address! ), diff --git a/packages/programs/data/shared-log/test/replication.spec.ts b/packages/programs/data/shared-log/test/replication.spec.ts index eba86418a..fb0de2e93 100644 --- a/packages/programs/data/shared-log/test/replication.spec.ts +++ b/packages/programs/data/shared-log/test/replication.spec.ts @@ -5,2889 +5,3803 @@ import { BlockResponse, type BlockMessage as InnerBlocksMessage, } from "@peerbit/blocks"; -import { - type PublicSignKey, - getPublicKeyFromPeerId, - randomBytes, - toBase64, -} from "@peerbit/crypto"; +import { type PublicSignKey, randomBytes, toBase64 } from "@peerbit/crypto"; import { Entry, EntryType } from "@peerbit/log"; import { TestSession } from "@peerbit/test-utils"; -import { - /* AbortError, */ - AbortError, - delay, - waitForResolved, -} from "@peerbit/time"; +import { AbortError, delay, waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import mapSeries from "p-each-series"; import sinon from "sinon"; import { BlocksMessage } from "../src/blocks.js"; import { ExchangeHeadsMessage, RequestIPrune } from "../src/exchange-heads.js"; -import type { ReplicationOptions } from "../src/index.js"; +import { + type ReplicationOptions, + createReplicationDomainHash, +} from "../src/index.js"; +import { createNumbers } from "../src/integers.js"; import type { ReplicationRangeIndexable } from "../src/ranges.js"; import { AbsoluteReplicas, AddedReplicationSegmentMessage, decodeReplicas, - /* decodeReplicas, */ maxReplicas, } from "../src/replication.js"; +import { RatelessIBLTSynchronizer } from "../src/sync/rateless-iblt.js"; +import { SimpleSyncronizer } from "../src/sync/simple.js"; import { + type TestSetupConfig, checkBounded, collectMessages, collectMessagesFn, + dbgLogs, getReceivedHeads, + slowDownSend, waitForConverged, } from "./utils.js"; import { EventStore, type Operation } from "./utils/stores/event-store.js"; -describe(`replication`, function () { - let session: TestSession; - let db1: EventStore, db2: EventStore; - let fetchEvents: number; - let fetchHashes: Set; - let fromMultihash: any; - before(() => { - fromMultihash = Entry.fromMultihash; - // TODO monkeypatching might lead to sideeffects in other tests! 
- Entry.fromMultihash = (s, h, o) => { - fetchHashes.add(h); - fetchEvents += 1; - return fromMultihash(s, h, o); - }; - }); - after(() => { - Entry.fromMultihash = fromMultihash; - }); - - beforeEach(async () => { - fetchEvents = 0; - fetchHashes = new Set(); - session = await TestSession.connected(2, [ - { - libp2p: { - privateKey: privateKeyFromRaw( - new Uint8Array([ - 204, 234, 187, 172, 226, 232, 70, 175, 62, 211, 147, 91, 229, 157, - 168, 15, 45, 242, 144, 98, 75, 58, 208, 9, 223, 143, 251, 52, 252, - 159, 64, 83, 52, 197, 24, 246, 24, 234, 141, 183, 151, 82, 53, - 142, 57, 25, 148, 150, 26, 209, 223, 22, 212, 40, 201, 6, 191, 72, - 148, 82, 66, 138, 199, 185, - ]), - ), - }, - }, - { - libp2p: { - privateKey: privateKeyFromRaw( - new Uint8Array([ - 237, 55, 205, 86, 40, 44, 73, 169, 196, 118, 36, 69, 214, 122, 28, - 157, 208, 163, 15, 215, 104, 193, 151, 177, 62, 231, 253, 120, - 122, 222, 174, 242, 120, 50, 165, 97, 8, 235, 97, 186, 148, 251, - 100, 168, 49, 10, 119, 71, 246, 246, 174, 163, 198, 54, 224, 6, - 174, 212, 159, 187, 2, 137, 47, 192, - ]), - ), - }, - }, - ]); - - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); - }); - - afterEach(async () => { - if (db1 && db1.closed === false) { - await db1.drop(); - } - if (db2 && db2.closed === false) { - await db2.drop(); - } - - await session.stop(); - }); - - it("verifies remote signatures by default", async () => { - const entry = await db1.add("a", { meta: { next: [] } }); - await (session.peers[0] as any)["libp2p"].hangUp(session.peers[1].peerId); - db2 = await session.peers[1].open(new EventStore()); - - const clonedEntry = deserialize(serialize(entry.entry), Entry); +export const testSetups: TestSetupConfig[] = [ + { + domain: createReplicationDomainHash("u32"), + type: "u32", + syncronizer: SimpleSyncronizer, + name: "u32-simple", + }, + /* { + domain: createReplicationDomainHash("u64"), + type: "u64", + syncronizer: SimpleSyncronizer, + name: "u64-simple", + }, */ + { + domain: createReplicationDomainHash("u64"), + type: "u64", + syncronizer: RatelessIBLTSynchronizer, + name: "u64-iblt", + }, +]; + +testSetups.forEach((setup) => { + describe(setup.name, () => { + const numbers = createNumbers(setup.type); + describe(`replication`, function () { + let session: TestSession; + let db1: EventStore, db2: EventStore; + let fetchEvents: number; + let fetchHashes: Set; + let fromMultihash: any; + before(() => { + fromMultihash = Entry.fromMultihash; + // TODO monkeypatching might lead to sideeffects in other tests! 
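+				// the patched fromMultihash below records every hash that has to be
+				// fetched (fetchHashes / fetchEvents), so the tests can assert how
+				// many entries were pulled as blocks rather than delivered with heads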
+ Entry.fromMultihash = (s, h, o) => { + fetchHashes.add(h); + fetchEvents += 1; + return fromMultihash(s, h, o); + }; + }); + after(() => { + Entry.fromMultihash = fromMultihash; + }); - let verified = false; - const verifyFn = clonedEntry.verifySignatures.bind(clonedEntry); + beforeEach(async () => { + fetchEvents = 0; + fetchHashes = new Set(); + session = await TestSession.connected(2, [ + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 204, 234, 187, 172, 226, 232, 70, 175, 62, 211, 147, 91, 229, + 157, 168, 15, 45, 242, 144, 98, 75, 58, 208, 9, 223, 143, 251, + 52, 252, 159, 64, 83, 52, 197, 24, 246, 24, 234, 141, 183, + 151, 82, 53, 142, 57, 25, 148, 150, 26, 209, 223, 22, 212, 40, + 201, 6, 191, 72, 148, 82, 66, 138, 199, 185, + ]), + ), + services: { + relay: null, // https://github.com/libp2p/js-libp2p/issues/2794 + } as any, + }, + }, + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 237, 55, 205, 86, 40, 44, 73, 169, 196, 118, 36, 69, 214, 122, + 28, 157, 208, 163, 15, 215, 104, 193, 151, 177, 62, 231, 253, + 120, 122, 222, 174, 242, 120, 50, 165, 97, 8, 235, 97, 186, + 148, 251, 100, 168, 49, 10, 119, 71, 246, 246, 174, 163, 198, + 54, 224, 6, 174, 212, 159, 187, 2, 137, 47, 192, + ]), + ), + services: { + relay: null, // https://github.com/libp2p/js-libp2p/issues/2794 + } as any, + }, + }, + ]); - clonedEntry.verifySignatures = () => { - verified = true; - return verifyFn(); - }; - await db2.log.log.join([clonedEntry]); - expect(verified).to.be.true; - }); + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); + }); - it("does not verify owned signatures by default", async () => { - const entry = await db1.add("a", { meta: { next: [] } }); - await (session.peers[0] as any)["libp2p"].hangUp(session.peers[1].peerId); - db2 = await session.peers[1].open(new EventStore()); - - const clonedEntry = deserialize(serialize(entry.entry), Entry); - - let verified = false; - const verifyFn = clonedEntry.verifySignatures.bind(clonedEntry); - clonedEntry.createdLocally = true; - clonedEntry.verifySignatures = () => { - verified = true; - return verifyFn(); - }; - await db2.log.log.join([clonedEntry]); - expect(verified).to.be.false; - }); + afterEach(async () => { + if (db1 && db1.closed === false) { + await db1.drop(); + } + if (db2 && db2.closed === false) { + await db2.drop(); + } - it("logs are unique", async () => { - const entryCount = 33; - const entryArr: number[] = []; - - const db1 = await session.peers[0].open(new EventStore()); - const db3 = await session.peers[0].open(new EventStore()); - - // Create the entries in the first database - for (let i = 0; i < entryCount; i++) { - entryArr.push(i); - } - - await mapSeries(entryArr, (i) => db1.add("hello" + i)); - - // Open the second database - const db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; - - const db4 = (await EventStore.open>( - db3.address!, - session.peers[1], - ))!; - - await waitForResolved(async () => - expect((await db2.iterator({ limit: -1 })).collect()).to.have.length( - entryCount, - ), - ); - - const result1 = (await db1.iterator({ limit: -1 })).collect(); - const result2 = (await db2.iterator({ limit: -1 })).collect(); - expect(result1.length).equal(result2.length); - for (let i = 0; i < result1.length; i++) { - expect(result1[i].equals(result2[i])).to.be.true; - } - - expect(db3.log.log.length).equal(0); - expect(db4.log.log.length).equal(0); - }); + await session.stop(); + }); - 
describe("references", () => { - it("joins by references", async () => { - db1.log.replicas = { min: new AbsoluteReplicas(1) }; - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { + it("verifies remote signatures by default", async () => { + const entry = await db1.add("a", { meta: { next: [] } }); + await (session.peers[0] as any)["libp2p"].hangUp( + session.peers[1].peerId, + ); + db2 = await session.peers[1].open(new EventStore(), { args: { - replicas: { - min: 1, - }, - waitForReplicatorTimeout: 2000, + setup, }, - }, - ))!; - await db1.log.replicate({ factor: 0.5 }); - await db2.log.replicate({ factor: 0.5 }); + }); - const getParticipationPerPer = (ranges: ReplicationRangeIndexable[]) => { - let map = new Map(); - for (const range of ranges) { - map.set( - range.hash, - (map.get(range.hash) || 0) + range.widthNormalized, - ); - } - return map; - }; + const clonedEntry = deserialize(serialize(entry.entry), Entry); - await waitForResolved(async () => - expect( - [ - ...getParticipationPerPer( - (await db1.log.replicationIndex.iterate().all()).map( - (x) => x.value, - ), - ).values(), - ].map((x) => Math.round(x * 200)), - ).to.deep.equal([100, 100]), - ); - - await waitForResolved(async () => - expect( - [ - ...getParticipationPerPer( - (await db2.log.replicationIndex.iterate().all()).map( - (x) => x.value, - ), - ).values(), - ].map((x) => Math.round(x * 200)), - ).to.deep.equal([100, 100]), - ); + let verified = false; + const verifyFn = clonedEntry.verifySignatures.bind(clonedEntry); - const { entry: entryA } = await db1.add("a", { - meta: { next: [], gidSeed: new Uint8Array([1]) }, - }); - const { entry: entryB } = await db1.add("b", { - meta: { next: [], gidSeed: new Uint8Array([0]) }, - }); - await db1.add("ab", { - meta: { next: [entryA, entryB] }, + clonedEntry.verifySignatures = () => { + verified = true; + return verifyFn(); + }; + await db2.log.log.join([clonedEntry]); + expect(verified).to.be.true; }); - await waitForResolved(() => { - expect(Math.max(db1.log.log.length, db2.log.log.length)).equal(3); // one is now responsible for everything - expect(Math.min(db1.log.log.length, db2.log.log.length)).equal(0); // one has to do nothing - }); - }); + it("does not verify owned signatures by default", async () => { + const entry = await db1.add("a", { meta: { next: [] } }); + await (session.peers[0] as any)["libp2p"].hangUp( + session.peers[1].peerId, + ); + db2 = await session.peers[1].open(new EventStore(), { + args: { + setup, + }, + }); - /* TODO feature not implemented yet, when is this expected? - it("references all gids on exchange", async () => { - const { entry: entryA } = await db1.add("a", { meta: { next: [] } }); - const { entry: entryB } = await db1.add("b", { meta: { next: [] } }); - const { entry: entryAB } = await db1.add("ab", { - meta: { next: [entryA, entryB] }, + const clonedEntry = deserialize(serialize(entry.entry), Entry); + + let verified = false; + const verifyFn = clonedEntry.verifySignatures.bind(clonedEntry); + clonedEntry.createdLocally = true; + clonedEntry.verifySignatures = () => { + verified = true; + return verifyFn(); + }; + await db2.log.log.join([clonedEntry]); + expect(verified).to.be.false; }); - expect(entryA.meta.gid).not.equal(entryB.meta.gid); - expect( - entryAB.meta.gid === entryA.meta.gid || - entryAB.meta.gid === entryB.meta.gid, - ).to.be.true; + it("logs are unique", async () => { + const entryCount = 33; + const entryArr: number[] = []; - let entryWithNotSameGid = - entryAB.meta.gid === entryA.meta.gid - ? 
entryB.meta.gid - : entryA.meta.gid; - - const sendFn = db1.log.rpc.send.bind(db1.log.rpc); + const db1 = await session.peers[0].open(new EventStore(), { + args: { + setup, + }, + }); + const db3 = await session.peers[0].open(new EventStore(), { + args: { + setup, + }, + }); - db1.log.rpc.send = async (msg, options) => { - if (msg instanceof ExchangeHeadsMessage) { - expect(msg.heads.map((x) => x.entry.hash)).to.deep.equal([ - entryAB.hash, - ]); - expect( - msg.heads.map((x) => x.gidRefrences.map((y) => y)).flat(), - ).to.deep.equal([entryWithNotSameGid]); + // Create the entries in the first database + for (let i = 0; i < entryCount; i++) { + entryArr.push(i); } - return sendFn(msg, options); - }; - let cacheLookups: Entry[][] = []; - let db1GetShallowFn = db1.log["_gidParentCache"].get.bind( - db1.log["_gidParentCache"], - ); - db1.log["_gidParentCache"].get = (k) => { - const result = db1GetShallowFn(k); - cacheLookups.push(result!); - return result; - }; - - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, + await mapSeries(entryArr, (i) => db1.add("hello" + i)); + + // Open the second database + const db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + setup, + }, + }, + ))!; + + const db4 = (await EventStore.open>( + db3.address!, + session.peers[1], + { + args: { + setup, }, }, - }, - ))!; + ))!; - await waitForResolved(() => expect(db2.log.log.length).equal(3)); + await waitForResolved(async () => + expect((await db2.iterator({ limit: -1 })).collect()).to.have.length( + entryCount, + ), + ); - expect(cacheLookups).to.have.length(1); - expect( - cacheLookups.map((x) => x.map((y) => y.meta.gid)).flat(), - ).to.have.members([entryWithNotSameGid, entryAB.meta.gid]); + const result1 = (await db1.iterator({ limit: -1 })).collect(); + const result2 = (await db2.iterator({ limit: -1 })).collect(); - await db1.close(); - expect(db1.log["_gidParentCache"].size).equal(0); - }); - */ + expect(result1.length).equal(result2.length); + for (let i = 0; i < result1.length; i++) { + expect(result1[i].equals(result2[i])).to.be.true; + } - it("fetches next blocks once", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min: 0, - }, - replicate: false, - timeUntilRoleMaturity: 1000, - }, + expect(db3.log.log.length).equal(0); + expect(db4.log.log.length).equal(0); }); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicas: { - min: 0, + describe("references", () => { + it("joins by references", async () => { + db1.log.replicas = { min: new AbsoluteReplicas(1) }; + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicas: { + min: 1, + }, + waitForReplicatorTimeout: 2000, + setup, + }, }, + ))!; + await db1.log.replicate({ factor: 0.5 }); + await db2.log.replicate({ factor: 0.5 }); + + const getParticipationPerPer = ( + ranges: ReplicationRangeIndexable[], + ) => { + let map = new Map(); + for (const range of ranges) { + map.set( + range.hash, + (map.get(range.hash) || 0) + range.widthNormalized, + ); + } + return map; + }; + + await waitForResolved(async () => + expect( + [ + ...getParticipationPerPer( + (await db1.log.replicationIndex.iterate().all()).map( + (x) => x.value, + ), + ).values(), + ].map((x) => Math.round(x * 200)), + ).to.deep.equal([100, 100]), + ); - replicate: { - factor: 1, - }, - timeUntilRoleMaturity: 1000, - }, - }, - ))!; + await waitForResolved(async () => + expect( + [ 
+ ...getParticipationPerPer( + (await db2.log.replicationIndex.iterate().all()).map( + (x) => x.value, + ), + ).values(), + ].map((x) => Math.round(x * 200)), + ).to.deep.equal([100, 100]), + ); - // followwing entries set minReplicas to 1 which means only db2 or db3 needs to hold it - const e1 = await db1.add("0", { - replicas: new AbsoluteReplicas(3), - meta: { next: [] }, - }); - await db1.add("1", { - replicas: new AbsoluteReplicas(1), // will be overriden by 'maxReplicas' above - meta: { next: [e1.entry] }, - }); + const { entry: entryA } = await db1.add("a", { + meta: { next: [], gidSeed: new Uint8Array([1]) }, + }); + const { entry: entryB } = await db1.add("b", { + meta: { next: [], gidSeed: new Uint8Array([0]) }, + }); + await db1.add("ab", { + meta: { next: [entryA, entryB] }, + }); - const onMessageFn1 = db1.log._onMessage.bind(db1.log); + await waitForResolved(() => { + expect(Math.max(db1.log.log.length, db2.log.log.length)).equal(3); // one is now responsible for everything + expect(Math.min(db1.log.log.length, db2.log.log.length)).equal(0); // one has to do nothing + }); + }); - let receivedMessageDb1: InnerBlocksMessage[] = []; - db1.log.rpc["_responseHandler"] = async (msg: any, cxt: any) => { - if (msg instanceof BlocksMessage) { - receivedMessageDb1.push(msg.message); - } - return onMessageFn1(msg, cxt); - }; + /* TODO feature not implemented yet, when is this expected? + it("references all gids on exchange", async () => { + const { entry: entryA } = await db1.add("a", { meta: { next: [] } }); + const { entry: entryB } = await db1.add("b", { meta: { next: [] } }); + const { entry: entryAB } = await db1.add("ab", { + meta: { next: [entryA, entryB] }, + }); + + expect(entryA.meta.gid).not.equal(entryB.meta.gid); + expect( + entryAB.meta.gid === entryA.meta.gid || + entryAB.meta.gid === entryB.meta.gid, + ).to.be.true; + + let entryWithNotSameGid = + entryAB.meta.gid === entryA.meta.gid + ? 
entryB.meta.gid + : entryA.meta.gid; + + const sendFn = db1.log.rpc.send.bind(db1.log.rpc); + + db1.log.rpc.send = async (msg, options) => { + if (msg instanceof ExchangeHeadsMessage) { + expect(msg.heads.map((x) => x.entry.hash)).to.deep.equal([ + entryAB.hash, + ]); + expect( + msg.heads.map((x) => x.gidRefrences.map((y) => y)).flat(), + ).to.deep.equal([entryWithNotSameGid]); + } + return sendFn(msg, options); + }; + + let cacheLookups: Entry[][] = []; + let db1GetShallowFn = db1.log["_gidParentCache"].get.bind( + db1.log["_gidParentCache"], + ); + db1.log["_gidParentCache"].get = (k) => { + const result = db1GetShallowFn(k); + cacheLookups.push(result!); + return result; + }; + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + }, + }, + ))!; + + await waitForResolved(() => expect(db2.log.log.length).equal(3)); + + expect(cacheLookups).to.have.length(1); + expect( + cacheLookups.map((x) => x.map((y) => y.meta.gid)).flat(), + ).to.have.members([entryWithNotSameGid, entryAB.meta.gid]); + + await db1.close(); + expect(db1.log["_gidParentCache"].size).equal(0); + }); + */ - let receivedMessageDb2: InnerBlocksMessage[] = []; - const onMessageFn2 = db2.log._onMessage.bind(db2.log); - db2.log.rpc["_responseHandler"] = async (msg: any, cxt: any) => { - if (msg instanceof BlocksMessage) { - receivedMessageDb2.push(msg.message); - } - return onMessageFn2(msg, cxt); - }; + it("fetches next blocks once", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicas: { + min: 0, + }, + replicate: false, + timeUntilRoleMaturity: 1000, + setup, + }, + }); + + // followwing entries set minReplicas to 1 which means only db2 or db3 needs to hold it + const e1 = await db1.add("0", { + replicas: new AbsoluteReplicas(3), + meta: { next: [] }, + }); + await db1.add("1", { + replicas: new AbsoluteReplicas(1), // will be overriden by 'maxReplicas' above + meta: { next: [e1.entry] }, + }); + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicas: { + min: 0, + }, + + replicate: { + factor: 1, + }, + timeUntilRoleMaturity: 1000, + setup, + }, + }, + ))!; + + const onMessageFn1 = db1.log._onMessage.bind(db1.log); + + let receivedMessageDb1: InnerBlocksMessage[] = []; + db1.log.rpc["_responseHandler"] = async (msg: any, cxt: any) => { + if (msg instanceof BlocksMessage) { + receivedMessageDb1.push(msg.message); + } + return onMessageFn1(msg, cxt); + }; + + let receivedMessageDb2: InnerBlocksMessage[] = []; + const onMessageFn2 = db2.log._onMessage.bind(db2.log); + db2.log.rpc["_responseHandler"] = async (msg: any, cxt: any) => { + if (msg instanceof BlocksMessage) { + receivedMessageDb2.push(msg.message); + } + return onMessageFn2(msg, cxt); + }; + + await waitForResolved(() => { + expect(db1.log.log.length).equal(0); + expect(db2.log.log.length).greaterThanOrEqual(1); + }); - await waitForResolved(() => { - expect(db1.log.log.length).equal(0); - expect(db2.log.log.length).greaterThanOrEqual(1); + expect(receivedMessageDb1).to.have.length(1); + expect(receivedMessageDb1[0]).to.be.instanceOf(BlockRequest); + expect(receivedMessageDb2).to.have.length(1); + expect(receivedMessageDb2[0]).to.be.instanceOf(BlockResponse); + }); }); - expect(receivedMessageDb1).to.have.length(1); - expect(receivedMessageDb1[0]).to.be.instanceOf(BlockRequest); - expect(receivedMessageDb2).to.have.length(1); - expect(receivedMessageDb2[0]).to.be.instanceOf(BlockResponse); - }); - }); + 
describe("replication", () => { + describe("one way", () => { + it("replicates database of 1 entry", async () => { + const value = "hello"; + await db1.add(value); - describe("replication", () => { - it("replicates database of 1 entry", async () => { - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + setup, + }, + }, + ))!; + + await db1.waitFor(session.peers[1].peerId); + await db2.waitFor(session.peers[0].peerId); + + await waitForResolved(() => expect(db2.log.log.length).equal(1)); + + expect((await db2.iterator({ limit: -1 })).collect().length).equal( + 1, + ); + + const db1Entries: Entry>[] = ( + await db1.iterator({ limit: -1 }) + ).collect(); + expect(db1Entries.length).equal(1); + + await waitForResolved(async () => + expect([ + ...( + await db1.log.findLeadersFromEntry( + db1Entries[0], + maxReplicas(db1.log, db1Entries), + ) + ).keys(), + ]).to.have.members([ + session.peers[0].identity.publicKey.hashcode(), + session.peers[1].identity.publicKey.hashcode(), + ]), + ); + + expect(db1Entries[0].payload.getValue().value).equal(value); + const db2Entries: Entry>[] = ( + await db2.iterator({ limit: -1 }) + ).collect(); + expect(db2Entries.length).equal(1); + + expect([ + ...( + await db2.log.findLeadersFromEntry( + db2Entries[0], + maxReplicas(db2.log, db2Entries), + ) + ).keys(), + ]).include.members([ + session.peers[0].identity.publicKey.hashcode(), + session.peers[1].identity.publicKey.hashcode(), + ]); + expect(db2Entries[0].payload.getValue().value).equal(value); + }); - await db1.waitFor(session.peers[1].peerId); - await db2.waitFor(session.peers[0].peerId); + it("replicates database of 1000 entries", async () => { + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ))!; + + await db1.waitFor(session.peers[1].peerId); + await db2.waitFor(session.peers[0].peerId); + + const entryCount = 1e3; + for (let i = 0; i < entryCount; i++) { + // entryArr.push(i); + await db1.add("hello" + i, { meta: { next: [] } }); + } + + await waitForResolved(() => + expect(db2.log.log.length).equal(entryCount), + ); + + const entries = (await db2.iterator({ limit: -1 })).collect(); + expect(entries.length).equal(entryCount); + for (let i = 0; i < entryCount; i++) { + try { + expect(entries[i].payload.getValue().value).equal("hello" + i); + } catch (error) { + console.error( + "Entries out of order: " + + entries.map((x) => x.payload.getValue().value).join(", "), + ); + throw error; + } + } + }); - const value = "hello"; + it("distributes after merge", async () => { + await session.stop(); + session = await TestSession.connected(3); - await db1.add(value); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); + await slowDownSend(session.peers[2], session.peers[0]); // we do this to potentially change the sync order + const db1 = await session.peers[0].open( + new EventStore(), + { + args: { + replicate: { + factor: 1, + }, + /* replicas: { + min: 1, + }, */ + timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing + setup, + }, + }, + ); - expect((await db2.iterator({ limit: -1 })).collect().length).equal(1); + await db1.add("hello"); - const db1Entries: Entry>[] = ( - await db1.iterator({ limit: -1 }) - ).collect(); - expect(db1Entries.length).equal(1); + let db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + 
factor: 1, + }, + /* replicas: { + min: 1, + }, */ + timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing + setup, + }, + }, + ))!; - await waitForResolved(async () => - expect([ - ...( - await db1.log.findLeaders( + let db3 = (await EventStore.open>( + db1.address!, + session.peers[2], { - entry: db1Entries[0], - replicas: maxReplicas(db1.log, db1Entries), - }, - // 0 - ) - ).keys(), - ]).to.have.members( - [session.peers[0].peerId, session.peers[1].peerId].map((p) => - getPublicKeyFromPeerId(p).hashcode(), - ), - ), - ); - - expect(db1Entries[0].payload.getValue().value).equal(value); - const db2Entries: Entry>[] = ( - await db2.iterator({ limit: -1 }) - ).collect(); - expect(db2Entries.length).equal(1); - expect([ - ...( - await db2.log.findLeaders( - { - entry: db2Entries[0], - replicas: maxReplicas(db2.log, db2Entries), - }, - // 0 - ) - ).keys(), - ]).include.members( - [session.peers[0].peerId, session.peers[1].peerId].map((p) => - getPublicKeyFromPeerId(p).hashcode(), - ), - ); - expect(db2Entries[0].payload.getValue().value).equal(value); - }); + args: { + replicate: { + factor: 1, + }, + /* replicas: { + min: 1, + }, */ + timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing + setup, + }, + }, + ))!; - it("replicates database of 1000 entries", async () => { - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ))!; + await waitForResolved(() => expect(db1.log.log.length).to.be.eq(1)); + await waitForResolved(() => expect(db2.log.log.length).to.be.eq(1)); + await waitForResolved(() => expect(db3.log.log.length).to.be.eq(1)); + }); - await db1.waitFor(session.peers[1].peerId); - await db2.waitFor(session.peers[0].peerId); + it("replicates database of large entries", async () => { + let count = 10; + for (let i = 0; i < count; i++) { + const value = toBase64(randomBytes(4e6)); + await db1.add(value, { meta: { next: [] } }); // force unique heads + } + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ))!; - const entryCount = 1e3; - for (let i = 0; i < entryCount; i++) { - // entryArr.push(i); - await db1.add("hello" + i); - } + await waitForResolved(() => + expect(db2.log.log.length).equal(count), + ); + }); - await waitForResolved(() => expect(db2.log.log.length).equal(entryCount)); + it("replicates 1 entry with cut next", async () => { + const first = await db1.add("old"); + const second = await db1.add("new", { + meta: { type: EntryType.CUT, next: [first.entry] }, + }); + expect( + (await db1.iterator({ limit: -1 })).collect().map((x) => x.hash), + ).to.deep.equal([second.entry.hash]); + expect(db1.log.log.length).equal(1); + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + setup, + }, + }, + ))!; + + await waitForResolved(async () => { + expect( + (await db2.iterator({ limit: -1 })) + .collect() + .map((x) => x.hash), + ).to.deep.equal([second.entry.hash]); + }); + }); - const entries = (await db2.iterator({ limit: -1 })).collect(); - expect(entries.length).equal(entryCount); - for (let i = 0; i < entryCount; i++) { - try { - expect(entries[i].payload.getValue().value).equal("hello" + i); - } catch (error) { - console.error( - "Entries out of order: " + - entries.map((x) => x.payload.getValue().value).join(", "), - ); - throw error; - } - } - }); + it("it does not fetch missing entries from remotes when 
exchanging heads to remote", async () => { + const first = await db1.add("a", { meta: { next: [] } }); + const second = await db1.add("b", { meta: { next: [] } }); + await db1.log.log.entryIndex.delete(second.entry.hash); - it("replicates database of large entries", async () => { - let count = 10; - for (let i = 0; i < count; i++) { - const value = toBase64(randomBytes(4e6)); - await db1.add(value, { meta: { next: [] } }); // force unique heads - } - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ))!; + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + setup, + }, + }, + ))!; + + let remoteFetchOptions: any[] = []; + const db1LogGet = db1.log.log.get.bind(db1.log.log); + + db1.log.log.get = async (hash, options) => { + if (hash === second.entry.hash) { + remoteFetchOptions.push(options?.remote); + return undefined; + } + return db1LogGet(hash, options); + }; + + await waitForResolved(async () => { + expect( + (await db2.iterator({ limit: -1 })) + .collect() + .map((x) => x.hash), + ).to.deep.equal([first.entry.hash]); + }); + await waitForResolved(() => + expect(remoteFetchOptions).to.have.length(1), + ); + expect(remoteFetchOptions[0]).to.be.undefined; + }); + }); + describe("two way", () => { + beforeEach(async () => { + await session.stop(); + session = await TestSession.disconnected(2); + + const store = new EventStore(); + db1 = await session.peers[0].open(store.clone(), { + args: { + replicate: { + factor: 1, + }, + timeUntilRoleMaturity: 0, + setup, + }, + }); - await waitForResolved(() => expect(db2.log.log.length).equal(count)); - }); + db2 = await session.peers[1].open(store.clone(), { + args: { + replicate: { + factor: 1, + }, + timeUntilRoleMaturity: 0, + setup, + }, + }); - it("replicates 1 entry with cut next", async () => { - const first = await db1.add("old"); - const second = await db1.add("new", { - meta: { type: EntryType.CUT, next: [first.entry] }, - }); - expect( - (await db1.iterator({ limit: -1 })).collect().map((x) => x.hash), - ).to.deep.equal([second.entry.hash]); - expect(db1.log.log.length).equal(1); - - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; - - await waitForResolved(async () => { - expect( - (await db2.iterator({ limit: -1 })).collect().map((x) => x.hash), - ).to.deep.equal([second.entry.hash]); - }); - }); + expect(db1.log.replicas.min.getValue(db1.log)).to.eq(2); + expect(db2.log.replicas.min.getValue(db1.log)).to.eq(2); + }); - it("it does not fetch missing entries from remotes when exchanging heads to remote", async () => { - const first = await db1.add("a", { meta: { next: [] } }); - const second = await db1.add("b", { meta: { next: [] } }); - await db1.log.log.entryIndex.delete(second.entry.hash); + it("no change", async () => { + await db1.log.replicate({ factor: 1 }); + await db2.log.replicate({ factor: 1 }); + + let count = 1000; + for (let i = 0; i < count; i++) { + const { entry: entry1 } = await db1.add("hello " + i, { + meta: { next: [] }, + }); + await db2.log.join([entry1]); + } + + // dial for sync + await db1.node.dial(db2.node.getMultiaddrs()); + + await waitForResolved(() => + expect(db1.log.log.length).equal(count), + ); + await waitForResolved(() => + expect(db2.log.log.length).equal(count), + ); + }); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; + it("partially synced one", async () => { + const { entry: entry1 } = await db1.add("a", { + meta: { next: [] }, + }); 
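+					// the two heads created here are cross-joined below so both logs
+					// share a common state; only the later "c"/"d" writes stay unsynced,
+					// which is what the configured synchronizer (simple head exchange or
+					// rateless-IBLT set reconciliation, depending on the setup) must
+					// reconcile once the peers dial each other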
+ const { entry: entry2 } = await db2.add("b", { + meta: { next: [] }, + }); - let remoteFetchOptions: any[] = []; - const db1LogGet = db1.log.log.get.bind(db1.log.log); + await db1.log.join([entry2]); + await db2.log.join([entry1]); - db1.log.log.get = async (hash, options) => { - if (hash === second.entry.hash) { - remoteFetchOptions.push(options?.remote); - return undefined; - } - return db1LogGet(hash, options); - }; + // now ad som unsynced entries - await waitForResolved(async () => { - expect( - (await db2.iterator({ limit: -1 })).collect().map((x) => x.hash), - ).to.deep.equal([first.entry.hash]); - }); - await waitForResolved(() => expect(remoteFetchOptions).to.have.length(1)); - expect(remoteFetchOptions[0]).to.be.undefined; - }); - }); -}); + await db1.add("c", { meta: { next: [] } }); + await db2.add("d", { meta: { next: [] } }); -describe("redundancy", () => { - let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; - - let fetchEvents: number; - let fetchHashes: Set; - let fromMultihash: any; - - before(async () => { - session = await TestSession.connected(3); - fromMultihash = Entry.fromMultihash; - // TODO monkeypatching might lead to sideeffects in other tests! - Entry.fromMultihash = (s, h, o) => { - fetchHashes.add(h); - fetchEvents += 1; - return fromMultihash(s, h, o); - }; - }); - after(async () => { - await session.stop(); - }); + // dial for sync + await db1.node.dial(db2.node.getMultiaddrs()); - beforeEach(() => { - fetchEvents = 0; - fetchHashes = new Set(); - }); - afterEach(async () => { - if (db1 && db1.closed === false) await db1.drop(); - if (db2 && db2.closed === false) await db2.drop(); - if (db3 && db3.closed === false) await db3.drop(); - }); + await waitForResolved(() => expect(db1.log.log.length).equal(4)); + await waitForResolved(() => expect(db2.log.log.length).equal(4)); + }); - it("only sends entries once, 2 peers dynamic", async () => { - db1 = await session.peers[0].open(new EventStore()); - await db1.log.replicate(); - let count = 100; - for (let i = 0; i < count; i++) { - await db1.add("hello " + i, { meta: { next: [] } }); - } - const message1 = collectMessages(db1.log); - - let db2 = db1.clone(); - - // start to collect messages before opening the second db so we don't miss any - const { messages: message2, fn } = collectMessagesFn(db2.log); - db2 = await session.peers[1].open(db2, { - args: { - replicate: true, - onMessage: fn, - }, + it("partially synced large", async () => { + let alreadySyncCount = 1000; + let unsyncedCount = 1; + let totalCount = alreadySyncCount + unsyncedCount * 2; + for (let i = 0; i < alreadySyncCount; i++) { + const { entry } = await db1.add("hello-ab- " + i, { + meta: { next: [] }, + }); + await db2.log.join([entry]); + } + + expect(db1.log.log.length).equal(alreadySyncCount); + expect(db2.log.log.length).equal(alreadySyncCount); + + expect(await db1.log.entryCoordinatesIndex.getSize()).to.equal( + alreadySyncCount, + ); + expect(await db2.log.entryCoordinatesIndex.getSize()).to.equal( + alreadySyncCount, + ); + + for (let i = 0; i < unsyncedCount; i++) { + await db1.add("hello-a- " + i, { meta: { next: [] } }); + await db2.add("hello-b- " + i, { meta: { next: [] } }); + } + + await db1.node.dial(db2.node.getMultiaddrs()); + await waitForResolved(() => + expect(db1.log.log.length).equal(totalCount), + ); + await waitForResolved(() => + expect(db2.log.log.length).equal(totalCount), + ); + }); + }); + }); }); - const check = () => { - const dataMessages2 = getReceivedHeads(message2); - 
expect(dataMessages2).to.have.length(count); + describe("redundancy", () => { + let session: TestSession; + let db1: EventStore, + db2: EventStore, + db3: EventStore; + + let fetchEvents: number; + let fetchHashes: Set; + let fromMultihash: any; + + before(async () => { + session = await TestSession.connected(3); + fromMultihash = Entry.fromMultihash; + // TODO monkeypatching might lead to sideeffects in other tests! + Entry.fromMultihash = (s, h, o) => { + fetchHashes.add(h); + fetchEvents += 1; + return fromMultihash(s, h, o); + }; + }); + after(async () => { + await session.stop(); + }); - const dataMessages1 = getReceivedHeads(message1); - expect(dataMessages1).to.be.empty; // no data is sent back - }; - try { - await waitForResolved(() => { - check(); + beforeEach(() => { + fetchEvents = 0; + fetchHashes = new Set(); + }); + afterEach(async () => { + if (db1 && db1.closed === false) await db1.drop(); + if (db2 && db2.closed === false) await db2.drop(); + if (db3 && db3.closed === false) await db3.drop(); }); - await delay(3000); - check(); - } catch (error) { - console.error(error); - throw new Error( - "Did not resolve all heads. Log length: " + db2.log.log.length, - ); - } - }); - it("only sends entries once, 2 peers fixed", async () => { - db1 = await session.peers[0].open(new EventStore()); - db1.log.replicate({ factor: 1 }); - let count = 1000; - for (let i = 0; i < count; i++) { - await db1.add("hello " + i, { meta: { next: [] } }); - } - const message1 = collectMessages(db1.log); - - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ))!; - - const message2 = collectMessages(db2.log); - await delay(3000); - - const dataMessages2 = getReceivedHeads(message2); - await waitForResolved(() => expect(dataMessages2).to.have.length(count)); - - const dataMessages1 = getReceivedHeads(message1); - expect(dataMessages1).to.be.empty; // no data is sent back - }); + it("only sends entries once, 2 peers dynamic", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + setup, + }, + }); + await db1.log.replicate(); + let count = 100; + for (let i = 0; i < count; i++) { + await db1.add("hello " + i, { meta: { next: [] } }); + } + const message1 = collectMessages(db1.log); - it("only sends entries once, 2 peers fixed, write after open", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { factor: 1 }, - }, - }); - let count = 1; - const message1 = collectMessages(db1.log); + let db2 = db1.clone(); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, + // start to collect messages before opening the second db so we don't miss any + const { messages: message2, fn } = collectMessagesFn(db2.log); + db2 = await session.peers[1].open(db2, { + args: { + replicate: true, + onMessage: fn, + setup, }, - }, - }, - ))!; + }); - const message2 = collectMessages(db2.log); + const check = () => { + const dataMessages2 = getReceivedHeads(message2); + expect(dataMessages2).to.have.length(count); - await waitForResolved(async () => - expect((await db1.log.getReplicators())?.size).equal(2), - ); + const dataMessages1 = getReceivedHeads(message1); + expect(dataMessages1).to.be.empty; // no data is sent back + }; + try { + await waitForResolved(() => { + check(); + }); + await delay(3000); + check(); + } catch (error) { + console.log("------------------------"); + console.error(error); + throw new Error( + "Did not 
resolve all heads. Log length: " + db2.log.log.length, + ); + } + }); - await waitForResolved(async () => - expect((await db2.log.getReplicators())?.size).equal(2), - ); + it("only sends entries once, 2 peers fixed", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + setup, + }, + }); + db1.log.replicate({ factor: 1 }); + let count = 1000; + for (let i = 0; i < count; i++) { + await db1.add("hello " + i, { meta: { next: [] } }); + } + const message1 = collectMessages(db1.log); + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ))!; - await db1.add("hello", { meta: { next: [] } }); + const message2 = collectMessages(db2.log); + await delay(3000); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); + const dataMessages2 = getReceivedHeads(message2); + await waitForResolved(() => + expect(dataMessages2).to.have.length(count), + ); - const dataMessages2 = getReceivedHeads(message2); - await waitForResolved(() => expect(dataMessages2).to.have.length(count)); + const dataMessages1 = getReceivedHeads(message1); + expect(dataMessages1).to.be.empty; // no data is sent back + }); - const dataMessages1 = getReceivedHeads(message1); - expect(dataMessages1).to.be.empty; // no data is sent back - }); + it("only sends entries once, 2 peers fixed, write after open", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { factor: 1 }, + setup, + }, + }); + let count = 1; + const message1 = collectMessages(db1.log); + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ))!; - it("only sends entries once, 3 peers", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); - const message1 = collectMessages(db1.log); - - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ); - const message2 = collectMessages(db2.log); - - let count = 10; // TODO make higher count work in Github CI - - for (let i = 0; i < count; i++) { - await db1.add("hello " + i, { meta: { next: [] } }); - } - await waitForResolved(() => expect(db2.log.log.length).equal(count)); - - db3 = await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ); - const message3 = collectMessages(db3.log); - - await waitForResolved(() => expect(db3.log.log.length).equal(count)); - - const heads = getReceivedHeads(message3); - expect(heads).to.have.length(count); - - expect(getReceivedHeads(message1)).to.be.empty; - expect(getReceivedHeads(message2)).to.have.length(count); - - await waitForResolved(() => expect(db3.log.log.length).equal(count)); - - // gc check,. 
- await waitForResolved(() => { - expect(db3.log["syncInFlightQueue"].size).equal(0); - expect(db3.log["syncInFlightQueueInverted"].size).equal(0); - }); - }); + const message2 = collectMessages(db2.log); - it("no fetches needed when replicating live ", async () => { - db1 = await session.peers[0].open(new EventStore()); + await waitForResolved(async () => + expect((await db1.log.getReplicators())?.size).equal(2), + ); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; + await waitForResolved(async () => + expect((await db2.log.getReplicators())?.size).equal(2), + ); - await db1.waitFor(session.peers[1].peerId); - await db2.waitFor(session.peers[0].peerId); + await db1.add("hello", { meta: { next: [] } }); - const entryCount = 10; // todo when larger (N) this test usually times out at N - 1 or 2, unless a delay is put beforehand + await waitForResolved(() => expect(db2.log.log.length).equal(1)); - // Trigger replication - let adds: number[] = []; - for (let i = 0; i < entryCount; i++) { - adds.push(i); - await db1.add("hello " + i, { meta: { next: [] } }); - // TODO when nexts is omitted, entrise will dependon each other, - // When entries arrive in db2 unecessary fetches occur because there is already a sync in progress? - } + const dataMessages2 = getReceivedHeads(message2); + await waitForResolved(() => + expect(dataMessages2).to.have.length(count), + ); - //await mapSeries(adds, (i) => db1.add("hello " + i)); + const dataMessages1 = getReceivedHeads(message1); + expect(dataMessages1).to.be.empty; // no data is sent back + }); - // All entries should be in the database - await waitForResolved(() => expect(db2.log.log.length).equal(entryCount)); + it("only sends entries once, 3 peers", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); + const message1 = collectMessages(db1.log); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ); + const message2 = collectMessages(db2.log); - // All entries should be in the database - expect(db2.log.log.length).equal(entryCount); + let count = 10; // TODO make higher count work in Github CI - // progress events should increase monotonically - expect(fetchEvents).equal(fetchHashes.size); - expect(fetchEvents).equal(0); // becausel all entries were sent - }); - it("fetches only once after open", async () => { - db1 = await session.peers[0].open(new EventStore()); + for (let i = 0; i < count; i++) { + await db1.add("hello " + i, { meta: { next: [] } }); + } + await waitForResolved(() => expect(db2.log.log.length).equal(count)); + + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ); + const message3 = collectMessages(db3.log); - const entryCount = 15; + await waitForResolved(() => expect(db3.log.log.length).equal(count)); - // Trigger replication - const adds: number[] = []; - for (let i = 0; i < entryCount; i++) { - adds.push(i); - } + const heads = getReceivedHeads(message3); + expect(heads).to.have.length(count); - const add = async (i: number) => { - await db1.add("hello " + i); - }; + expect(getReceivedHeads(message1)).to.be.empty; + expect(getReceivedHeads(message2)).to.have.length(count); - await mapSeries(adds, add); + await delay(3000); // wait for potential additional messages - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; + 
expect(getReceivedHeads(message1)).to.be.empty; + expect(getReceivedHeads(message2)).to.have.length(count); - // All entries should be in the database - await waitForResolved(() => expect(db2.log.log.length).equal(entryCount)); + await waitForResolved(() => expect(db3.log.log.length).equal(count)); - // progress events should (increase monotonically) - expect((await db2.iterator({ limit: -1 })).collect().length).equal( - entryCount, - ); - expect(fetchEvents).equal(fetchHashes.size); - expect(fetchEvents).equal(entryCount - 1); // - 1 because we also send some references for faster syncing (see exchange-heads.ts) - }); -}); + // gc check,. + // TODO dont do this, this way + /* await waitForResolved(() => { + expect((db3.log.syncronizer as any)["syncInFlightQueue"].size).equal( + 0, + ); + expect( + (db3.log.syncronizer as any)["syncInFlightQueueInverted"].size, + ).equal(0); + }); +*/ + expect(db3.log.syncronizer.pending).to.eq(0); + }); -describe(`start/stop`, function () { - let session: TestSession; - beforeEach(async () => { - session = await TestSession.connected(2); - }); + it("no fetches needed when replicating live ", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + setup, + }, + }); - afterEach(async () => { - await session.stop(); - }); + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + setup, + }, + }, + ))!; - it("replicate on connect", async () => { - const entryCount = 1000; - const entryArr: number[] = []; - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); + await db1.waitFor(session.peers[1].peerId); + await db2.waitFor(session.peers[0].peerId); - // Create the entries in the first database - for (let i = 0; i < entryCount; i++) { - entryArr.push(i); - } - - await mapSeries(entryArr, (i) => db1.add("hello" + i)); - - // Open the second database - const db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ))!; - - await waitForResolved(async () => - expect(db2.log.log.length).equal(entryCount), - ); - const result1 = (await db1.iterator({ limit: -1 })).collect(); - const result2 = (await db2.iterator({ limit: -1 })).collect(); - expect(result1.length).equal(result2.length); - for (let i = 0; i < result1.length; i++) { - expect(result1[i].equals(result2[i])).to.be.true; - } - }); + const entryCount = 10; // todo when larger (N) this test usually times out at N - 1 or 2, unless a delay is put beforehand - it("can restart replicate", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); + // Trigger replication + let adds: number[] = []; + for (let i = 0; i < entryCount; i++) { + adds.push(i); + await db1.add("hello " + i, { meta: { next: [] } }); + // TODO when nexts is omitted, entrise will dependon each other, + // When entries arrive in db2 unecessary fetches occur because there is already a sync in progress? 
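+						// note: with next: [] every entry stays an independent head, so the
+						// whole log travels in the initial head exchange and the test can
+						// assert zero block fetches below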
+ } - await db1.add("hello"); - - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ))!; - - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - - await db2.close(); - await db1.add("world"); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ))!; - await waitForResolved(() => expect(db2.log.log.length).equal(2)); - }); -}); + //await mapSeries(adds, (i) => db1.add("hello " + i)); -describe("canReplicate", () => { - let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; - - const init = async ( - canReplicate: (publicKey: PublicSignKey) => Promise | boolean, - replicate: ReplicationOptions = { factor: 1 }, - ) => { - let min = 100; - let max = undefined; - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min, - max, - }, - replicate, - canReplicate, - }, - }); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicas: { - min, - max, - }, - replicate, - canReplicate, - }, - }, - ))!; - - db3 = (await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { - replicas: { - min, - max, - }, - replicate, - canReplicate, - }, - }, - ))!; - - await db1.waitFor(session.peers[1].peerId); - await db2.waitFor(session.peers[0].peerId); - await db3.waitFor(session.peers[0].peerId); - }; - beforeEach(async () => { - session = await TestSession.connected(3); - db1 = undefined as any; - db2 = undefined as any; - db3 = undefined as any; - }); + // All entries should be in the database + await waitForResolved(() => + expect(db2.log.log.length).equal(entryCount), + ); - afterEach(async () => { - if (db1) await db1.drop(); + // All entries should be in the database + expect(db2.log.log.length).equal(entryCount); - if (db2) await db2.drop(); + // progress events should increase monotonically + expect(fetchEvents).equal(fetchHashes.size); + expect(fetchEvents).equal(0); // becausel all entries were sent + }); + it("fetches only once after open", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + setup, + }, + }); - if (db3) await db3.drop(); + const entryCount = 15; - await session.stop(); - }); + // Trigger replication + const adds: number[] = []; + for (let i = 0; i < entryCount; i++) { + adds.push(i); + } - it("can filter unwanted replicators", async () => { - // allow all replicaotors except node 0 - await init((key) => !key.equals(session.peers[0].identity.publicKey)); + const add = async (i: number) => { + await db1.add("hello " + i); + }; - const expectedReplicators = [ - session.peers[1].identity.publicKey.hashcode(), - session.peers[2].identity.publicKey.hashcode(), - ]; + await mapSeries(adds, add); - await Promise.all( - [db1, db2, db3].map((db) => - waitForResolved(async () => - expect([...(await db.log.getReplicators())]).to.have.members( - expectedReplicators, - ), - ), - ), - ); - - const unionFromPeer0 = await db1.log.getCover(undefined, { roleAge: 0 }); - let selfIndex = unionFromPeer0.findIndex( - (x) => x === db1.node.identity.publicKey.hashcode(), - ); - - // should always include self in the cover set, also include one of the remotes since their replication factor is 1 - expect([ - db2.node.identity.publicKey.hashcode(), - db3.node.identity.publicKey.hashcode(), - ]).include(unionFromPeer0[selfIndex === 0 ? 
1 : 0]); - expect(unionFromPeer0).to.have.length(2); - - // the other ones should only have to cover themselves - await Promise.all( - [db2, db3].map((log) => - waitForResolved(async () => - expect( - await log.log.getCover(undefined, { roleAge: 0 }), - ).to.have.members([log.node.identity.publicKey.hashcode()]), - ), - ), - ); + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + setup, + }, + }, + ))!; - await db2.add("hello"); + // All entries should be in the database + await waitForResolved(() => + expect(db2.log.log.length).equal(entryCount), + ); - await waitForResolved(() => { - expect(db2.log.log.length).equal(1); - expect(db3.log.log.length).equal(1); + // progress events should (increase monotonically) + expect((await db2.iterator({ limit: -1 })).collect().length).equal( + entryCount, + ); + expect(fetchEvents).equal(fetchHashes.size); + expect(fetchEvents).equal(entryCount - 1); // - 1 because we also send some references for faster syncing (see exchange-heads.ts) + }); }); - await delay(1000); // Add some delay so that all replication events most likely have occured - expect(db1.log.log.length).equal(0); // because not trusted for replication job - }); - /* TODO feat(?) - - it("replicate even if not allowed if factor is 1 ", async () => { - await init(() => false, { factor: 1 }); + describe(`start/stop`, function () { + let session: TestSession; + beforeEach(async () => { + session = await TestSession.connected(3); + }); - const mySegments = await db1.log.getMyReplicationSegments(); - expect(mySegments).to.have.length(1); - expect(mySegments[0].widthNormalized).to.equal(1); - }); */ + afterEach(async () => { + await session.stop(); + }); - it("does not replicate if not allowed and dynamic ", async () => { - await init(() => false, true); + it("replicate on connect", async () => { + const entryCount = 10; + const entryArr: number[] = []; + const db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); - const mySegments = await db1.log.getMyReplicationSegments(); - expect(mySegments).to.have.length(0); - }); -}); + // Create the entries in the first database + for (let i = 0; i < entryCount; i++) { + entryArr.push(i); + } -describe("replication degree", () => { - let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; - - const init = async (props: { - min: number; - max?: number; - beforeOther?: () => Promise | void; - }) => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: props, - replicate: false, - timeUntilRoleMaturity: 1000, - }, - }); + await mapSeries(entryArr, (i) => db1.add("hello" + i)); - await props.beforeOther?.(); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicas: props, + // Open the second database + const db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ))!; + try { + await waitForResolved(async () => + expect(db2.log.log.length).equal(entryCount), + ); + } catch (error) { + await dbgLogs([db1.log, db2.log]); + throw error; + } + const result1 = (await db1.iterator({ limit: -1 })).collect(); + const result2 = (await db2.iterator({ limit: -1 })).collect(); + expect(result1.length).equal(result2.length); + for (let i = 0; i < result1.length; i++) { + expect(result1[i].equals(result2[i])).to.be.true; + } + }); - replicate: { - factor: 0.5, - offset: 0, + it("can restart replicate", async 
() => { + const db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + factor: 1, + }, + setup, }, - timeUntilRoleMaturity: 1000, - }, - }, - ))!; - - db3 = (await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { - replicas: props, - - replicate: { - factor: 0.5, - offset: 0.5, - }, - timeUntilRoleMaturity: 1000, - }, - }, - ))!; - - await db1.waitFor(session.peers[1].peerId); - await db2.waitFor(session.peers[0].peerId); - await db2.waitFor(session.peers[2].peerId); - await db3.waitFor(session.peers[0].peerId); - }; - beforeEach(async () => { - session = await TestSession.connected(3, [ - { - libp2p: { - privateKey: await privateKeyFromRaw( - new Uint8Array([ - 237, 55, 205, 86, 40, 44, 73, 169, 196, 118, 36, 69, 214, 122, 28, - 157, 208, 163, 15, 215, 104, 193, 151, 177, 62, 231, 253, 120, - 122, 222, 174, 242, 120, 50, 165, 97, 8, 235, 97, 186, 148, 251, - 100, 168, 49, 10, 119, 71, 246, 246, 174, 163, 198, 54, 224, 6, - 174, 212, 159, 187, 2, 137, 47, 192, - ]), - ), - }, - }, - { - libp2p: { - privateKey: privateKeyFromRaw( - new Uint8Array([ - 27, 246, 37, 180, 13, 75, 242, 124, 185, 205, 207, 9, 16, 54, 162, - 197, 247, 25, 211, 196, 127, 198, 82, 19, 68, 143, 197, 8, 203, - 18, 179, 181, 105, 158, 64, 215, 56, 13, 71, 156, 41, 178, 86, - 159, 80, 222, 167, 73, 3, 37, 251, 67, 86, 6, 90, 212, 16, 251, - 206, 54, 49, 141, 91, 171, - ]), - ), - }, - }, - - { - libp2p: { - privateKey: privateKeyFromRaw( - new Uint8Array([ - 204, 234, 187, 172, 226, 232, 70, 175, 62, 211, 147, 91, 229, 157, - 168, 15, 45, 242, 144, 98, 75, 58, 208, 9, 223, 143, 251, 52, 252, - 159, 64, 83, 52, 197, 24, 246, 24, 234, 141, 183, 151, 82, 53, - 142, 57, 25, 148, 150, 26, 209, 223, 22, 212, 40, 201, 6, 191, 72, - 148, 82, 66, 138, 199, 185, - ]), - ), - }, - }, - ]); - db1 = undefined as any; - db2 = undefined as any; - db3 = undefined as any; - }); + }); - afterEach(async () => { - if (db1 && db1.closed === false) await db1.drop(); + await db1.add("hello"); - if (db2 && db1.closed === false) await db2.drop(); + let db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ))!; - if (db3 && db1.closed === false) await db3.drop(); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); - await session.stop(); - }); + await db2.close(); + await db1.add("world"); + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ))!; + await waitForResolved(() => expect(db2.log.log.length).equal(2)); + }); - it("will not prune below replication degree", async () => { - let replicas = 2; - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: false, - replicas: { - min: replicas, - }, - }, + /* it("start stop many times", async () => { + const iterations = 1000; + for (let i = 0; i < iterations; i++) { + await session.stop() + session = await TestSession.connected(3, [ + { + libp2p: { + privateKey: await privateKeyFromRaw( + new Uint8Array([ + 237, 55, 205, 86, 40, 44, 73, 169, 196, 118, 36, 69, 214, 122, + 28, 157, 208, 163, 15, 215, 104, 193, 151, 177, 62, 231, 253, + 120, 122, 222, 174, 242, 120, 50, 165, 97, 8, 235, 97, 186, + 148, 251, 100, 168, 49, 10, 119, 71, 246, 246, 174, 163, 198, + 54, 224, 6, 174, 212, 159, 187, 2, 137, 47, 192, + ]), + ), + }, + }, + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 27, 246, 37, 180, 13, 75, 242, 124, 185, 205, 207, 9, 
16, 54, + 162, 197, 247, 25, 211, 196, 127, 198, 82, 19, 68, 143, 197, + 8, 203, 18, 179, 181, 105, 158, 64, 215, 56, 13, 71, 156, 41, + 178, 86, 159, 80, 222, 167, 73, 3, 37, 251, 67, 86, 6, 90, + 212, 16, 251, 206, 54, 49, 141, 91, 171, + ]), + ), + }, + }, + + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 204, 234, 187, 172, 226, 232, 70, 175, 62, 211, 147, 91, 229, + 157, 168, 15, 45, 242, 144, 98, 75, 58, 208, 9, 223, 143, 251, + 52, 252, 159, 64, 83, 52, 197, 24, 246, 24, 234, 141, 183, + 151, 82, 53, 142, 57, 25, 148, 150, 26, 209, 223, 22, 212, 40, + 201, 6, 191, 72, 148, 82, 66, 138, 199, 185, + ]), + ), + }, + }, + ]); + + + + + let db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + factor: 0.333, + offset: 0.333, + }, + setup, + }, + }); + + let db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 0.333, + offset: 0, + }, + setup, + }, + }, + ); + let db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + factor: 0.333, + offset: 0.666, + }, + setup, + }, + }, + ); + + + + try { + await waitForResolved(async () => + expect(await db1.log.replicationIndex?.getSize()).equal(3), + ); + await waitForResolved(async () => + expect(await db2.log.replicationIndex?.getSize()).equal(3), + ); + await waitForResolved(async () => + expect(await db3.log.replicationIndex?.getSize()).equal(3), + ); + + } catch (error) { + console.log("???", await Promise.all([db1.log.replicationIndex?.getSize(), db2.log.replicationIndex?.getSize(), db3.log.replicationIndex?.getSize()])); + throw error; + } + + if (db1 && db1.closed === false) await db1.drop(); + + if (db2 && db2.closed === false) await db2.drop(); + + if (db3 && db3.closed === false) await db3.drop(); + + + } + + }) */ }); - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, + describe("canReplicate", () => { + let session: TestSession; + let db1: EventStore, + db2: EventStore, + db3: EventStore; + + const init = async ( + canReplicate: (publicKey: PublicSignKey) => Promise | boolean, + replicate: ReplicationOptions = { factor: 1 }, + ) => { + let min = 100; + let max = undefined; + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicas: { + min, + max, + }, + replicate, + canReplicate, + setup, }, - replicas: { - min: replicas, + }); + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicas: { + min, + max, + }, + replicate, + canReplicate, + setup, + }, }, - }, - }, - ))!; + ))!; - await db1.add("hello"); - - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - await delay(3e3); - expect(db1.log.log.length).equal(1); - }); + db3 = (await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicas: { + min, + max, + }, + replicate, + canReplicate, + setup, + }, + }, + ))!; - it("prune on insert many", async () => { - await init({ min: 1 }); - let count = 100; - for (let i = 0; i < count; i++) { - await db1.add("hello", { - meta: { next: [] }, + await db1.waitFor(session.peers[1].peerId); + await db2.waitFor(session.peers[0].peerId); + await db3.waitFor(session.peers[0].peerId); + }; + beforeEach(async () => { + session = await TestSession.connected(3); + db1 = undefined as any; + db2 = undefined as any; + db3 = undefined as any; }); - } - /* await delay(2e4) */ - await waitForResolved(() => expect(db1.log.log.length).equal(0)); - await 
waitForResolved(() => - expect(db2.log.log.length + db3.log.log.length).equal(count), - ); + afterEach(async () => { + if (db1) await db1.drop(); - it("will prune on before join", async () => { - await init({ min: 1, beforeOther: () => db1.add("hello") }); - await waitForResolved(() => expect(db1.log.log.length).equal(0)); - await waitForResolved(() => - expect(db2.log.log.length + db3.log.log.length).equal(1), - ); - }); - it("will prune on put 300 before join", async () => { - let count = 100; - await init({ - min: 1, - beforeOther: async () => { - for (let i = 0; i < count; i++) { - await db1.add("hello", { - meta: { next: [] }, - }); - } - }, - }); - await waitForResolved(() => expect(db1.log.log.length).equal(0)); - await waitForResolved(() => - expect(db2.log.log.length + db3.log.log.length).equal(count), - ); - }); + if (db2) await db2.drop(); - it("will prune on put 300 after join", async () => { - await init({ min: 1 }); + if (db3) await db3.drop(); - let count = 300; - for (let i = 0; i < count; i++) { - await db1.add("hello", { - meta: { next: [] }, + await session.stop(); }); - } - await waitForResolved(() => expect(db1.log.log.length).equal(0)); - await waitForResolved(() => - expect(db2.log.log.length + db3.log.log.length).equal(count), - ); - }); + it("can filter unwanted replicators", async () => { + // allow all replicators except node 0 + await init((key) => !key.equals(session.peers[0].identity.publicKey)); - it("will prune when join with partial coverage", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: false, - replicas: { - min: 1, - }, - }, - }); + const expectedReplicators = [ + session.peers[1].identity.publicKey.hashcode(), + session.peers[2].identity.publicKey.hashcode(), + ]; - await db1.add("hello"); - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - offset: 0, - factor: 1, - normalized: false, - }, + await Promise.all( + [db1, db2, db3].map((db) => + waitForResolved(async () => + expect([...(await db.log.getReplicators())]).to.have.members( + expectedReplicators, + ), + ), + ), + ); - replicas: { - min: 1, - }, - }, - }, - ))!; + const unionFromPeer0 = await db1.log.getCover(undefined, { + roleAge: 0, + }); + let selfIndex = unionFromPeer0.findIndex( + (x) => x === db1.node.identity.publicKey.hashcode(), + ); - await waitForResolved(() => expect(db1.log.log.length).equal(0)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - }); + // should always include self in the cover set, also include one of the remotes since their replication factor is 1 + expect([ + db2.node.identity.publicKey.hashcode(), + db3.node.identity.publicKey.hashcode(), + ]).include(unionFromPeer0[selfIndex === 0 ?
1 : 0]); + expect(unionFromPeer0).to.have.length(2); + + // the other ones should only have to cover themselves + await Promise.all( + [db2, db3].map((log) => + waitForResolved(async () => + expect( + await log.log.getCover(undefined, { roleAge: 0 }), + ).to.have.members([log.node.identity.publicKey.hashcode()]), + ), + ), + ); - it("will prune when join with complete coverage", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: false, - replicas: { - min: 1, - }, - }, + await db2.add("hello"); + + await waitForResolved(() => { + expect(db2.log.log.length).equal(1); + expect(db3.log.log.length).equal(1); + }); + await delay(1000); // Add some delay so that all replication events most likely have occured + expect(db1.log.log.length).equal(0); // because not trusted for replication job + }); + + /* TODO feat(?) + + it("replicate even if not allowed if factor is 1 ", async () => { + await init(() => false, { factor: 1 }); + + const mySegments = await db1.log.getMyReplicationSegments(); + expect(mySegments).to.have.length(1); + expect(mySegments[0].widthNormalized).to.equal(1); + }); */ + + it("does not replicate if not allowed and dynamic ", async () => { + await init(() => false, true); + + const mySegments = await db1.log.getMyReplicationSegments(); + expect(mySegments).to.have.length(0); + }); }); - await db1.add("hello"); - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - offset: 0, - factor: 1, - normalized: true, + describe("replication degree", () => { + let session: TestSession; + let db1: EventStore, + db2: EventStore, + db3: EventStore; + + const init = async (props: { + min: number; + max?: number; + beforeOther?: () => Promise | void; + }) => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicas: props, + replicate: false, + timeUntilRoleMaturity: 1000, + setup, }, + }); - replicas: { - min: 1, + await props.beforeOther?.(); + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicas: props, + + replicate: { + factor: 0.5, + offset: 0, + }, + timeUntilRoleMaturity: 1000, + setup, + }, }, - }, - }, - ))!; + ))!; - await waitForResolved(() => expect(db1.log.log.length).equal(0)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - }); + db3 = (await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicas: props, - it("will prune on insert after join 2 peers", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: 0, - normalized: false, - }, - replicas: { - min: 1, - }, - }, - }); + replicate: { + factor: 0.5, + offset: 0.5, + }, + timeUntilRoleMaturity: 1000, + setup, + }, + }, + ))!; - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - offset: 0, - factor: 1, - normalized: false, + await db1.waitFor(session.peers[1].peerId); + await db2.waitFor(session.peers[0].peerId); + await db2.waitFor(session.peers[2].peerId); + await db3.waitFor(session.peers[0].peerId); + }; + + beforeEach(async () => { + session = await TestSession.connected(3, [ + { + libp2p: { + privateKey: await privateKeyFromRaw( + new Uint8Array([ + 237, 55, 205, 86, 40, 44, 73, 169, 196, 118, 36, 69, 214, 122, + 28, 157, 208, 163, 15, 215, 104, 193, 151, 177, 62, 231, 253, + 120, 122, 222, 174, 242, 120, 50, 165, 97, 8, 235, 97, 186, + 148, 251, 100, 168, 49, 10, 119, 71, 246, 246, 174, 163, 198, + 54, 
224, 6, 174, 212, 159, 187, 2, 137, 47, 192, + ]), + ), + }, + }, + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 27, 246, 37, 180, 13, 75, 242, 124, 185, 205, 207, 9, 16, 54, + 162, 197, 247, 25, 211, 196, 127, 198, 82, 19, 68, 143, 197, + 8, 203, 18, 179, 181, 105, 158, 64, 215, 56, 13, 71, 156, 41, + 178, 86, 159, 80, 222, 167, 73, 3, 37, 251, 67, 86, 6, 90, + 212, 16, 251, 206, 54, 49, 141, 91, 171, + ]), + ), + }, }, - replicas: { - min: 1, + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 204, 234, 187, 172, 226, 232, 70, 175, 62, 211, 147, 91, 229, + 157, 168, 15, 45, 242, 144, 98, 75, 58, 208, 9, 223, 143, 251, + 52, 252, 159, 64, 83, 52, 197, 24, 246, 24, 234, 141, 183, + 151, 82, 53, 142, 57, 25, 148, 150, 26, 209, 223, 22, 212, 40, + 201, 6, 191, 72, 148, 82, 66, 138, 199, 185, + ]), + ), + }, }, - }, - }, - ))!; + ]); + db1 = undefined as any; + db2 = undefined as any; + db3 = undefined as any; + }); - await db1.add("hello"); + afterEach(async () => { + if (db1 && db1.closed === false) await db1.drop(); - await waitForResolved(() => expect(db1.log.log.length).equal(0)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - }); + if (db2 && db2.closed === false) await db2.drop(); - it("will prune once reaching max replicas", async () => { - await session.stop(); - - session = await TestSession.disconnected(3, [ - { - libp2p: { - privateKey: privateKeyFromRaw( - new Uint8Array([ - 48, 245, 17, 66, 32, 106, 72, 98, 203, 253, 86, 138, 133, 155, - 243, 214, 8, 11, 14, 230, 18, 126, 173, 3, 62, 252, 92, 46, 214, - 0, 226, 184, 104, 58, 22, 118, 214, 182, 125, 233, 106, 94, 13, - 16, 6, 164, 236, 215, 159, 135, 117, 8, 240, 168, 169, 96, 38, 86, - 213, 250, 103, 183, 38, 205, - ]), - ), - }, - }, - { - libp2p: { - privateKey: privateKeyFromRaw( - new Uint8Array([ - 113, 203, 231, 235, 7, 120, 3, 194, 138, 113, 131, 40, 251, 158, - 121, 38, 190, 114, 116, 252, 100, 202, 107, 97, 119, 184, 24, 56, - 27, 76, 150, 62, 132, 22, 246, 177, 200, 6, 179, 117, 218, 216, - 120, 235, 147, 249, 48, 157, 232, 161, 145, 3, 63, 158, 217, 111, - 65, 105, 99, 83, 4, 113, 62, 15, - ]), - ), - }, - }, - { - libp2p: { - privateKey: privateKeyFromRaw( - new Uint8Array([ - 27, 246, 37, 180, 13, 75, 242, 124, 185, 205, 207, 9, 16, 54, 162, - 197, 247, 25, 211, 196, 127, 198, 82, 19, 68, 143, 197, 8, 203, - 18, 179, 181, 105, 158, 64, 215, 56, 13, 71, 156, 41, 178, 86, - 159, 80, 222, 167, 73, 3, 37, 251, 67, 86, 6, 90, 212, 16, 251, - 206, 54, 49, 141, 91, 171, - ]), - ), - }, - }, - ]); - - let minReplicas = 2; - let maxReplicas = 2; - - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min: minReplicas, - max: maxReplicas, - }, - replicate: { - offset: 0, - factor: 0.333, - }, - }, - }); - db2 = (await session.peers[1].open(db1.clone(), { - args: { - replicas: { - min: minReplicas, - max: maxReplicas, - }, - replicate: { - offset: 0.333, - factor: 0.666, - }, - }, - }))!; - - db3 = (await session.peers[2].open(db1.clone(), { - args: { - replicas: { - min: minReplicas, - max: maxReplicas, - }, - replicate: { - offset: 0.666, - factor: 0.333, - }, - }, - }))!; - - const entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add("hello", { - replicas: new AbsoluteReplicas(3), // will be overriden by 'maxReplicas' above - meta: { next: [] }, + if (db3 && db3.closed === false) await db3.drop(); + + await session.stop(); }); - } - - // TODO why is this needed? 
- await waitForResolved(() => - session.peers[1].dial(session.peers[0].getMultiaddrs()), - ); - - await waitForResolved(() => expect(db2.log.log.length).equal(entryCount)); - - await db2.close(); - - session.peers[2].dial(session.peers[0].getMultiaddrs()); - - await waitForResolved(() => - expect(db3.log.log.length).to.be.greaterThan(0), - ); - await waitForConverged(() => db3.log.log.length); - - // reopen db2 again and make sure either db3 or db2 drops the entry (not both need to replicate) - await delay(2000); - db2 = await session.peers[1].open(db2, { - args: { - replicas: { - min: minReplicas, - max: maxReplicas, - }, - replicate: { - offset: 0.333, - factor: 0.666, - }, - }, - }); - // await db1.log["pruneDebouncedFn"](); - //await db1.log.waitForPruned() + it("will not prune below replication degree", async () => { + let replicas = 2; + const db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: false, + replicas: { + min: replicas, + }, + setup, + }, + }); - try { - await waitForResolved(() => { - expect(db1.log.log.length).to.be.lessThan(entryCount); - }); - } catch (error) { - const prunable = await db1.log.getPrunable(); - console.log(prunable.length); - const ranges1 = await db1.log.replicationIndex.iterate().all(); - const ranges2 = await db2.log.replicationIndex.iterate().all(); - const ranges3 = await db3.log.replicationIndex.iterate().all(); - console.log(ranges1, ranges2, ranges3); - throw error; - } - }); + let db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + replicas: { + min: replicas, + }, + setup, + }, + }, + ))!; - describe("commit options", () => { - it("control per commmit put before join", async () => { - const entryCount = 100; + await db1.add("hello"); - await init({ - min: 1, - beforeOther: async () => { - const value = "hello"; - for (let i = 0; i < entryCount; i++) { - await db1.add(value, { - replicas: new AbsoluteReplicas(3), - meta: { next: [] }, - }); - } - }, + await waitForResolved(() => expect(db2.log.log.length).equal(1)); + await delay(3e3); + expect(db1.log.log.length).equal(1); }); - const check = async (log: EventStore) => { - let replicated3Times = 0; - for (const entry of await log.log.log.toArray()) { - if (decodeReplicas(entry).getValue(db2.log) === 3) { - replicated3Times += 1; - } + it("prune on insert many", async () => { + await init({ min: 1 }); + let count = 100; + for (let i = 0; i < count; i++) { + await db1.add("hello", { + meta: { next: [] }, + }); } - expect(replicated3Times).equal(entryCount); - }; - - await waitForResolved(() => check(db2)); - await waitForResolved(() => check(db3)); - }); - - it("control per commmit", async () => { - const entryCount = 100; - await init({ - min: 1, + try { + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => + expect(db2.log.log.length + db3.log.log.length).equal(count), + ); + } catch (error) { + await dbgLogs([db1.log, db2.log, db3.log]); + throw error; + } }); - const value = "hello"; - for (let i = 0; i < entryCount; i++) { - await db1.add(value, { - replicas: new AbsoluteReplicas(3), - meta: { next: [] }, + it("will prune on before join", async () => { + await init({ min: 1, beforeOther: () => db1.add("hello") }); + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => + expect(db2.log.log.length + db3.log.log.length).equal(1), + ); + }); + it("will prune on put 300 before join", async () => { + let count = 100; + 
await init({ + min: 1, + beforeOther: async () => { + for (let i = 0; i < count; i++) { + await db1.add("hello", { + meta: { next: [] }, + }); + } + }, }); - } - const check = async (log: EventStore) => { - let replicated3Times = 0; - for (const entry of await log.log.log.toArray()) { - if (decodeReplicas(entry).getValue(db2.log) === 3) { - replicated3Times += 1; - } - } - expect(replicated3Times).equal(entryCount); - }; + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => + expect(db2.log.log.length + db3.log.log.length).equal(count), + ); + }); - await waitForResolved(() => check(db2)); - await waitForResolved(() => check(db3)); - }); + it("will prune on put 300 after join", async () => { + await init({ min: 1 }); - it("mixed control per commmit", async () => { - await init({ min: 1 }); - - const value = "hello"; - - const entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add(value, { - replicas: new AbsoluteReplicas(1), - meta: { next: [] }, - }); - await db1.add(value, { - replicas: new AbsoluteReplicas(3), - meta: { next: [] }, - }); - } - - // expect e1 to be replicated at db1 and/or 1 other peer (when you write you always store locally) - // expect e2 to be replicated everywhere - const check = async (log: EventStore) => { - let replicated3Times = 0; - let other = 0; - for (const entry of await log.log.log.toArray()) { - if (decodeReplicas(entry).getValue(db2.log) === 3) { - replicated3Times += 1; - } else { - other += 1; - } + let count = 300; + for (let i = 0; i < count; i++) { + await db1.add("hello", { + meta: { next: [] }, + }); } - expect(replicated3Times).equal(entryCount); - expect(other).greaterThan(0); - }; - await waitForResolved(() => check(db2)); - await waitForResolved(() => check(db3)); - }); - it("will index replication underflow degree", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min: 4, - }, - replicate: false, - timeUntilRoleMaturity: 1000, - }, + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => + expect(db2.log.log.length + db3.log.log.length).equal(count), + ); }); - db2 = await session.peers[1].open>(db1.address, { - args: { - replicas: { - min: 4, + it("will prune when join with partial coverage", async () => { + const db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: false, + replicas: { + min: 1, + }, + setup, }, - replicate: { - factor: 1, + }); + + await db1.add("hello"); + let db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + offset: 0, + factor: 1, + normalized: false, + }, + + replicas: { + min: 1, + }, + setup, + }, }, - timeUntilRoleMaturity: 1000, - }, - }); + ))!; - await db1.add("hello", { - replicas: new AbsoluteReplicas(4), + try { + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); + } catch (error) { + await dbgLogs([db1.log, db2.log]); + throw error; + } }); - await waitForResolved(() => expect(db1.log.log.length).equal(1)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); + it("will prune when join with complete coverage", async () => { + const db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: false, + replicas: { + min: 1, + }, + setup, + }, + }); - const indexedDb1 = await db1.log.entryCoordinatesIndex.iterate().all(); - const indexedDb2 = await 
db2.log.entryCoordinatesIndex.iterate().all(); + await db1.add("hello"); + let db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + offset: 0, + factor: 1, + normalized: true, + }, - expect( - indexedDb1.filter((x) => x.value.assignedToRangeBoundary), - ).to.have.length(4); - expect( - indexedDb2.filter((x) => x.value.assignedToRangeBoundary), - ).to.have.length(4); - }); - }); - - it("min replicas with be maximum value for gid", async () => { - await init({ min: 1 }); - - // followwing entries set minReplicas to 1 which means only db2 or db3 needs to hold it - const entryCount = 100; - for (let i = 0; i < entryCount / 2; i++) { - const e1 = await db1.add(String(i), { - replicas: new AbsoluteReplicas(3), - meta: { next: [] }, - }); - await db1.add(String(i), { - replicas: new AbsoluteReplicas(1), // will be overriden by 'maxReplicas' above - meta: { next: [e1.entry] }, - }); - } - - await waitForResolved(() => { - expect(db1.log.log.length).equal(0); - let total = db2.log.log.length + db3.log.log.length; - expect(total).greaterThanOrEqual(entryCount); - expect(db2.log.log.length).greaterThan(entryCount * 0.2); - expect(db3.log.log.length).greaterThan(entryCount * 0.2); - }); - }); - - it("observer will not delete unless replicated", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min: 10, - }, - replicate: false, - }, - }); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicas: { - min: 10, - }, - replicate: { - factor: 1, - }, - }, - }, - ))!; - - const e1 = await db1.add("hello"); - - await waitForResolved(() => expect(db1.log.log.length).equal(1)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - await expect( - Promise.all(db1.log.prune([e1.entry], { timeout: 3000 })), - ).rejectedWith("Timeout for checked pruning"); - expect(db1.log.log.length).equal(1); // No deletions - }); - - it("replicator will not delete unless replicated", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min: 10, - }, - replicate: { - factor: 1, - }, - }, - }); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicas: { - min: 10, - }, - replicate: { - factor: 1, - }, - }, - }, - ))!; - - const e1 = await db1.add("hello"); - await waitForResolved(() => expect(db1.log.log.length).equal(1)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - await expect( - Promise.all(db1.log.prune([e1.entry], { timeout: 3000 })), - ).rejectedWith("Failed to delete, is leader"); - expect(db1.log.log.length).equal(1); // No deletions - }); - - it("keep degree while updating role", async () => { - let min = 1; - let max = 1; - - // peer 1 observer - // peer 2 observer - - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min, - max, - }, - replicate: false, - }, - }); - - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 0, - }, - replicas: { - min, - max, - }, - }, - }, - ))!; - - let db2ReorgCounter = 0; - let db2ReplicationReorganizationFn = db2.log.onReplicationChange.bind( - db2.log, - ); - db2.log.onReplicationChange = (args) => { - db2ReorgCounter += 1; - return db2ReplicationReorganizationFn(args); - }; - await db1.add("hello"); - - // peer 1 observer - // peer 2 replicator (will get entry) - - await waitForResolved(() => expect(db1.log.log.length).equal(1)); - 
expect(db2ReorgCounter).equal(0); - await db2.log.replicate({ - factor: 1, - }); - await waitForResolved(() => expect(db2ReorgCounter).equal(1)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - - // peer 1 removed - // peer 2 replicator (has entry) - await db1.drop(); - - // peer 1 observer - // peer 2 replicator (has entry) - await session.peers[0].open(db1, { - args: { - replicas: { - min, - max, - }, - replicate: false, - }, - }); - - // peer 1 observer - // peer 2 observer - expect(db2.log.log.length).equal(1); - await delay(2000); - // expect(db2ReorgCounter).equal(1); TODO limit distributions and test this - - await db2.log.replicate(false); - - // expect(db2ReorgCounter).equal(2); TODO limit distributions and test this - expect(await db2.log.isReplicating()).to.be.false; - - // peer 1 replicator (will get entry) - // peer 2 observer (will safely delete the entry) - await db1.log.replicate({ - factor: 1, - }); - - await waitForResolved(() => expect(db1.log.log.length).equal(1)); - await waitForResolved(() => expect(db2.log.log.length).equal(0)); - // expect(db2ReorgCounter).equal(3); TODO - }); - it("can override min on program level", async () => { - let minReplicas = 1; - let maxReplicas = 1; - - await init({ min: minReplicas, max: maxReplicas }); - - const entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add("hello", { - replicas: new AbsoluteReplicas(5), // will be overriden by 'maxReplicas' above - meta: { next: [] }, - }); - } - await waitForResolved( - () => { - expect(db1.log.log.length).equal(0); // because db1 is not replicating at all, but just pruning once it knows entries are replicated elsewhere - let total = db2.log.log.length + db3.log.log.length; - expect(total).greaterThanOrEqual(entryCount); - expect(total).lessThan(entryCount * 2); - expect(db2.log.log.length).greaterThan(entryCount * 0.2); - expect(db3.log.log.length).greaterThan(entryCount * 0.2); - }, - { timeout: 3e4 }, - ); - }); - it("time out when pending IHave are never resolved", async () => { - let min = 1; - let max = 1; - - // peer 1 observer - // peer 2 observer - - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min, - max, - }, - replicate: false, - }, - }); - - let respondToIHaveTimeout = 3000; - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicas: { - min, - max, - }, - replicate: { - factor: 1, - }, - respondToIHaveTimeout, - }, - }, - ); - - // TODO this test is flaky because background prune calls are intefering with assertions - // Todo make sure no background prunes are done (?) 
- - const onMessageFn = db2.log._onMessage.bind(db2.log); - db2.log.rpc["_responseHandler"] = async (msg: any, cxt: any) => { - if (msg instanceof ExchangeHeadsMessage) { - return; // prevent replication - } - return onMessageFn(msg, cxt); - }; - const { entry } = await db1.add("hello"); - const expectPromise = expect( - Promise.all( - db1.log.prune([entry], { timeout: db1.log.timeUntilRoleMaturity }), - ), - ).rejectedWith("Timeout"); - await waitForResolved(() => expect(db2.log["_pendingIHave"].size).equal(1)); - await delay(respondToIHaveTimeout + 1000); - await waitForResolved(() => expect(db2.log["_pendingIHave"].size).equal(0)); // shoulld clear up - await expectPromise; - }); - - it("does not get blocked by slow sends", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); - - db2 = await session.peers[1].open>(db1.address, { - args: { - replicate: { - factor: 1, - }, - }, - }); - - await waitForResolved(async () => - expect((await db1.log.getReplicators()).size).equal(2), - ); + replicas: { + min: 1, + }, + setup, + }, + }, + ))!; - let db1Delay = 0; - const db1Send = db1.log.rpc.send.bind(db1.log.rpc); - db1.log.rpc.send = async (message, options) => { - const controller = new AbortController(); - db1.log.rpc.events.addEventListener("close", () => { - controller.abort(new AbortError()); + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); }); - db1.log.rpc.events.addEventListener("drop", () => { - controller.abort(new AbortError()); - }); - try { - await delay(db1Delay, { signal: controller.signal }); - } catch (error) { - return; - } - return db1Send(message, options); - }; - - db1Delay = 1e4; - - db1.add("hello"); - - await delay(1000); // make sure we have gotten "stuck" into the rpc.send unction - - let t0 = +new Date(); - db1Delay = 0; - db3 = await session.peers[2].open>(db1.address, { - args: { - replicate: { - factor: 1, - }, - }, - }); - - await waitForResolved(() => expect(db3.log.log.length).equal(1)); - let t1 = +new Date(); - expect(t1 - t0).lessThan(2000); - }); - - it("restarting node will receive entries", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); - - db2 = await session.peers[1].open>(db1.address, { - args: { - replicate: { - factor: 1, - }, - }, - }); - await db1.add("hello"); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - await db2.drop(); - await session.peers[1].stop(); - await session.peers[1].start(); - db2 = await session.peers[1].open>(db1.address, { - args: { - replicate: { - factor: 1, - }, - }, - }); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - }); - - it("can handle many large messages", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); - - // append more than 30 mb - const count = 5; - for (let i = 0; i < count; i++) { - await db1.add(toBase64(randomBytes(6e6)), { meta: { next: [] } }); - } - db2 = await session.peers[1].open>(db1.address, { - args: { - replicate: { - factor: 1, - }, - }, - }); - await waitForResolved(() => expect(db2.log.log.length).equal(count)); - }); - describe("update", () => { - it("shift", async () => { - const u32Div2 = 2147483647; - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: u32Div2, - normalized: false, - }, - 
replicas: { - min: 1, - }, - }, - }); + it("will prune when join even if rapidly updating", async () => { + let timeUntilRoleMaturity = 2e3; - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { + const db1 = await session.peers[0].open(new EventStore(), { args: { - replicate: { - offset: 0, - factor: u32Div2, - normalized: false, - }, + replicate: false, replicas: { min: 1, }, + setup, + timeUntilRoleMaturity, }, - }, - ))!; - - let entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add("hello" + i, { meta: { next: [] } }); - } - await waitForResolved(() => - expect(db2.log.log.length).to.be.above(entryCount / 3), - ); - - await db2.log.replicate( - { factor: u32Div2, offset: u32Div2, normalized: false }, - { reset: true }, - ); - - await waitForResolved(() => - expect(db1.log.log.length).to.closeTo(entryCount / 2, 20), - ); - await waitForResolved(() => - expect(db2.log.log.length).to.closeTo(entryCount / 2, 20), - ); - await waitForResolved(() => - expect(db1.log.log.length + db2.log.log.length).to.equal(entryCount), - ); - }); + }); - it("to same range", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: 1, - }, - replicas: { - min: 1, - }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }); + await db1.add("hello"); + let db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: false, - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - offset: 0, - factor: 1, - }, - replicas: { - min: 1, + replicas: { + min: 1, + }, + setup, + timeUntilRoleMaturity, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing }, - }, - ))!; - await db1.add("hello", { meta: { next: [] } }); - await waitForResolved(() => expect(db1.log.log.length).equal(1)); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - - const findLeaders1 = sinon.spy(db1.log, "findLeaders"); - const findLeaders2 = sinon.spy(db2.log, "findLeaders"); - const onMessage1 = sinon.spy(db1.log, "_onMessage"); - - const range = ( - await db2.log.getMyReplicationSegments() - )[0].toReplicationRange(); - await db2.log.replicate(range); + ))!; - expect(findLeaders1.callCount).equal(0); // no changes - await waitForResolved(() => expect(onMessage1.callCount).equal(1)); // one message - expect(findLeaders2.callCount).equal(0); // no changes emitted - }); + let rangeId = randomBytes(32); - it("to smaller but already replicated", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { + let i = 0; + let factorStart = numbers.maxValue; + let interval = setInterval(async () => { + await db2.log.replicate({ + id: rangeId, + factor: + (factorStart as any) - + ((typeof factorStart === "bigint" ? 
BigInt(i) : i) as any), offset: 0, - factor: 1, - }, - replicas: { - min: 1, - }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, + normalized: false, + }); + i++; + }, 500); + + try { + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); + } finally { + clearInterval(interval); + } }); - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { + it("will prune on insert after join 2 peers", async () => { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, - factor: 1, + factor: 0, + normalized: false, }, replicas: { min: 1, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing + setup, }, - }, - ))!; - - let entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add("hello" + i, { meta: { next: [] } }); - } - await waitForResolved(() => expect(db1.log.log.length).equal(entryCount)); - await waitForResolved(() => expect(db2.log.log.length).equal(entryCount)); - - const findLeaders1 = sinon.spy(db1.log, "findLeaders"); - const findLeaders2 = sinon.spy(db2.log, "findLeaders"); - const onMessage1 = sinon.spy(db1.log, "_onMessage"); - - const range = ( - await db2.log.getMyReplicationSegments() - )[0].toReplicationRange(); - - let newFactor = 0.5; - await db2.log.replicate({ factor: newFactor, offset: 0, id: range.id }); - const expectedAmountOfEntriesToPrune = entryCount * newFactor; + }); - await waitForResolved(async () => { - expect(db2.log.log.length).to.be.closeTo( - entryCount - expectedAmountOfEntriesToPrune, - 30, - ); + let db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + offset: 0, + factor: 1, + normalized: false, + }, - expect(onMessage1.callCount).equal(2); // two messages (the updated range) and request for pruning - expect(findLeaders1.callCount).to.be.lessThan(entryCount * 3); // some upper bound, TODO make more strict - expect(findLeaders2.callCount).to.be.lessThan(entryCount * 3); // some upper bound, TODO make more strict - /* - TODO stricter boundes like below - expect(findLeaders1.callCount).to.closeTo(prunedEntries * 2, 30); // redistribute + prune about 50% of the entries - expect(findLeaders2.callCount).to.closeTo(prunedEntries * 2, 30); // redistribute + handle prune requests - */ - }); + replicas: { + min: 1, + }, + setup, + }, + }, + ))!; - // we do below separetly because this will interefere with the callCounts above - await waitForResolved(async () => - expect(await db2.log.getPrunable()).to.length(0), - ); + await db1.add("hello"); - // eslint-disable-next-line no-useless-catch - try { - expect(onMessage1.getCall(0).args[0]).instanceOf( - AddedReplicationSegmentMessage, - ); - expect(onMessage1.getCall(1).args[0]).instanceOf(RequestIPrune); - } catch (error) { - // eslint-disable-next-line no-useless-catch try { - expect(onMessage1.getCall(1).args[0]).instanceOf( - AddedReplicationSegmentMessage, - ); - expect(onMessage1.getCall(0).args[0]).instanceOf(RequestIPrune); + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); } catch (error) { + await dbgLogs([db1.log, db2.log]); throw error; } - } - /* const entryRefs1 = await db1.log.entryCoordinatesIndex.iterate().all(); - const entryRefs2 = await db2.log.entryCoordinatesIndex.iterate().all(); - - expect( - entryRefs1.filter((x) => 
x.value.replicators === 2), - ).to.have.length(db2.log.log.length); - expect( - entryRefs1.filter((x) => x.value.replicators === 1), - ).to.have.length(entryCount - db2.log.log.length); - expect( - entryRefs2.filter((x) => x.value.replicators === 2), - ).to.have.length(db2.log.log.length); */ - }); + }); - it("to smaller will need transfer", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: 0.5, + it("will prune once reaching max replicas", async () => { + await session.stop(); + + session = await TestSession.disconnected(3, [ + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 48, 245, 17, 66, 32, 106, 72, 98, 203, 253, 86, 138, 133, 155, + 243, 214, 8, 11, 14, 230, 18, 126, 173, 3, 62, 252, 92, 46, + 214, 0, 226, 184, 104, 58, 22, 118, 214, 182, 125, 233, 106, + 94, 13, 16, 6, 164, 236, 215, 159, 135, 117, 8, 240, 168, 169, + 96, 38, 86, 213, 250, 103, 183, 38, 205, + ]), + ), + }, }, - replicas: { - min: 1, + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 113, 203, 231, 235, 7, 120, 3, 194, 138, 113, 131, 40, 251, + 158, 121, 38, 190, 114, 116, 252, 100, 202, 107, 97, 119, 184, + 24, 56, 27, 76, 150, 62, 132, 22, 246, 177, 200, 6, 179, 117, + 218, 216, 120, 235, 147, 249, 48, 157, 232, 161, 145, 3, 63, + 158, 217, 111, 65, 105, 99, 83, 4, 113, 62, 15, + ]), + ), + }, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }); + { + libp2p: { + privateKey: privateKeyFromRaw( + new Uint8Array([ + 27, 246, 37, 180, 13, 75, 242, 124, 185, 205, 207, 9, 16, 54, + 162, 197, 247, 25, 211, 196, 127, 198, 82, 19, 68, 143, 197, + 8, 203, 18, 179, 181, 105, 158, 64, 215, 56, 13, 71, 156, 41, + 178, 86, 159, 80, 222, 167, 73, 3, 37, 251, 67, 86, 6, 90, + 212, 16, 251, 206, 54, 49, 141, 91, 171, + ]), + ), + }, + }, + ]); + + let minReplicas = 2; + let maxReplicas = 2; - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { + db1 = await session.peers[0].open(new EventStore(), { args: { + replicas: { + min: minReplicas, + max: maxReplicas, + }, replicate: { - offset: 0.5, - factor: 0.5, + offset: 0, + factor: 0.333, }, + setup, + timeUntilRoleMaturity: 0, + }, + }); + db2 = (await session.peers[1].open(db1.clone(), { + args: { replicas: { - min: 1, + min: minReplicas, + max: maxReplicas, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }, - ))!; - - let entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add("hello" + i, { meta: { next: [] } }); - } - - await waitForResolved(() => - expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 30), - ); - await waitForResolved(() => - expect(db2.log.log.length).to.be.closeTo(entryCount / 2, 30), - ); - - /* - // TODO assert findLeaders call count strict - const findLeaders1 = sinon.spy(db1.log, "findLeaders"); - const findLeaders2 = sinon.spy(db2.log, "findLeaders"); - */ - - const prune2 = sinon.spy(db2.log, "prune"); - - const range = ( - await db2.log.getMyReplicationSegments() - )[0].toReplicationRange(); - - await db2.log.replicate({ factor: 0.001, offset: 0.99, id: range.id }); - - /* const entriesThatWillBeChecked = entryCount / 2; - const entriesThatWillBePruned = entryCount / 4; // the change is that the range [0.5, 0.75] will be owned by db1 and [0.75, 1] will be owned by db2 - await waitForResolved(() => - expect(findLeaders2.callCount).to.closeTo( - entriesThatWillBeChecked + 
entriesThatWillBePruned, - 30, - ), - ); TODO assert findLeaders call count strictly */ - - await waitForResolved(() => { - expect(prune2.callCount).to.eq(1); - expect([...prune2.getCall(0).args[0].values()].length).to.be.closeTo( - entryCount / 4, - 15, - ); // a quarter of the entries should be pruned becuse the range [0, 0.75] will be owned by db1 and [0.75, 1] will be owned by db2 - }); - - // TODO assert some kind of findLeaders callCount ? - }); - - it("to smaller then to larger", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: 0.5, - }, - replicas: { - min: 1, + replicate: { + offset: 0.333, + factor: 0.666, + }, + setup, + timeUntilRoleMaturity: 0, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }); + }))!; - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { + db3 = (await session.peers[2].open(db1.clone(), { args: { - replicate: { - offset: 0.5, - factor: 0.5, - }, replicas: { - min: 1, + min: minReplicas, + max: maxReplicas, + }, + replicate: { + offset: 0.666, + factor: 0.333, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing + setup, + timeUntilRoleMaturity: 0, }, - }, - ))!; - - let entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add("hello" + i, { meta: { next: [] } }); - } - await waitForResolved(() => - expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 30), - ); - await waitForResolved(() => - expect(db2.log.log.length).to.be.closeTo(entryCount / 2, 30), - ); + }))!; - const range = ( - await db2.log.getMyReplicationSegments() - )[0].toReplicationRange(); + const entryCount = 100; + for (let i = 0; i < entryCount; i++) { + await db1.add("hello", { + replicas: new AbsoluteReplicas(3), // will be overriden by 'maxReplicas' above + meta: { next: [] }, + }); + } - await waitForConverged(() => db2.log.log.length); + // TODO why is this needed? 
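+ // (Possibly because this session is created with TestSession.disconnected(3)
+ // above, so the peers hold no connections until dialed explicitly; wrapping
+ // the dial in waitForResolved retries it until the transport is ready. The
+ // resulting topology is a star: peer1 -> peer0 and, further down, peer2 -> peer0,
+ // with peer1 and peer2 never directly connected.)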
+ await waitForResolved(() => + session.peers[1].dial(session.peers[0].getMultiaddrs()), + ); - let startSize = db2.log.log.length; - await db2.log.replicate({ factor: 0.25, offset: 0.5, id: range.id }); + await waitForResolved(() => + expect(db2.log.log.length).equal(entryCount), + ); - await waitForResolved(() => - expect(db2.log.log.length).to.be.lessThan(startSize), - ); - await delay(1000); + await db2.close(); - await db2.log.replicate({ factor: 0.5, offset: 0.5, id: range.id }); - await waitForResolved(() => expect(db2.log.log.length).to.eq(startSize)); - }); + session.peers[2].dial(session.peers[0].getMultiaddrs()); - it("replace range with another node write before join", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: 0.5, - }, - replicas: { - min: 1, - }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }); + await waitForResolved(() => + expect(db3.log.log.length).to.eq(entryCount), + ); - let entryCount = 100; - for (let i = 0; i < entryCount; i++) { - await db1.add("hello" + i, { meta: { next: [] } }); - } + // reopen db2 again and make sure either db3 or db2 drops the entry (not both need to replicate) + await delay(2000); - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { + db2 = await session.peers[1].open(db2.clone(), { args: { - replicate: { - offset: 0.5, - factor: 0.5, - }, replicas: { - min: 1, + min: minReplicas, + max: maxReplicas, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }, - ))!; - - let db3 = (await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { replicate: { - offset: 0.5, - factor: 0.5, - }, - replicas: { - min: 1, + offset: 0.333, + factor: 0.666, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing + setup, }, - }, - ))!; - - await waitForConverged(() => db1.log.log.length); - await waitForConverged(() => db2.log.log.length); - await waitForConverged(() => db3.log.log.length); + }); - const db2Length = db2.log.log.length; - const db3Length = db3.log.log.length; + // await db1.log["pruneDebouncedFn"](); + //await db1.log.waitForPruned() - await waitForResolved(() => - expect(db2.log.log.length).to.be.greaterThan(0), - ); + await waitForResolved(() => { + expect(db1.log.log.length).to.be.lessThan(entryCount); + }); + }); - await waitForResolved(() => - expect(db3.log.log.length).to.be.greaterThan(0), - ); + describe("commit options", () => { + it("control per commmit put before join", async () => { + const entryCount = 100; - const range2 = ( - await db2.log.getMyReplicationSegments() - )[0].toReplicationRange(); + await init({ + min: 1, + beforeOther: async () => { + const value = "hello"; + for (let i = 0; i < entryCount; i++) { + await db1.add(value, { + replicas: new AbsoluteReplicas(3), + meta: { next: [] }, + }); + } + }, + }); - await db2.log.replicate({ id: range2.id, offset: 0.1, factor: 0.1 }); + const check = async (log: EventStore) => { + let replicated3Times = 0; + for (const entry of await log.log.log.toArray()) { + if (decodeReplicas(entry).getValue(db2.log) === 3) { + replicated3Times += 1; + } + } + expect(replicated3Times).equal(entryCount); + }; + + await waitForResolved(() => check(db2)); + await waitForResolved(() => check(db3)); + }); - const range3 = ( - await db3.log.getMyReplicationSegments() - )[0].toReplicationRange(); + it("control per commmit", async 
() => { + const entryCount = 100; - await db3.log.replicate({ id: range3.id, offset: 0.1, factor: 0.1 }); + await init({ + min: 1, + }); - await waitForConverged(() => db2.log.log.length); - await waitForConverged(() => db3.log.log.length); - expect(db2.log.log.length).to.be.lessThan(db2Length); - expect(db3.log.log.length).to.be.lessThan(db3Length); + const value = "hello"; + for (let i = 0; i < entryCount; i++) { + await db1.add(value, { + replicas: new AbsoluteReplicas(3), + meta: { next: [] }, + }); + } - // reset to original + const check = async (log: EventStore) => { + let replicated3Times = 0; + for (const entry of await log.log.log.toArray()) { + if (decodeReplicas(entry).getValue(db2.log) === 3) { + replicated3Times += 1; + } + } + expect(replicated3Times).equal(entryCount); + }; + + await waitForResolved(() => check(db2)); + await waitForResolved(() => check(db3)); + }); - await db2.log.replicate({ id: range2.id, offset: 0.5, factor: 0.5 }); + it("mixed control per commmit", async () => { + await init({ min: 1 }); - await db3.log.replicate({ id: range3.id, offset: 0.5, factor: 0.5 }); + const value = "hello"; - await waitForResolved(() => expect(db2.log.log.length).to.eq(db2Length)); - await waitForResolved(() => expect(db3.log.log.length).to.eq(db3Length)); - }); + const entryCount = 100; + for (let i = 0; i < entryCount; i++) { + await db1.add(value, { + replicas: new AbsoluteReplicas(1), + meta: { next: [] }, + }); + await db1.add(value, { + replicas: new AbsoluteReplicas(3), + meta: { next: [] }, + }); + } - it("replace range with another node write after join", async () => { - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: 0.5, - }, - replicas: { - min: 1, - }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }); + // expect e1 to be replicated at db1 and/or 1 other peer (when you write you always store locally) + // expect e2 to be replicated everywhere + const check = async (log: EventStore) => { + let replicated3Times = 0; + let other = 0; + for (const entry of await log.log.log.toArray()) { + if (decodeReplicas(entry).getValue(db2.log) === 3) { + replicated3Times += 1; + } else { + other += 1; + } + } + expect(replicated3Times).equal(entryCount); + expect(other).greaterThan(0); + }; + await waitForResolved(() => check(db2)); + await waitForResolved(() => check(db3)); + }); - let db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - offset: 0.5, - factor: 0.5, - }, - replicas: { - min: 1, + it("will index replication underflow degree", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicas: { + min: 4, + }, + replicate: false, + timeUntilRoleMaturity: 1000, + setup, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }, - ))!; + }); - let db3 = (await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { - replicate: { - offset: 0.5, - factor: 0.5, - }, - replicas: { - min: 1, + db2 = await session.peers[1].open>( + db1.address, + { + args: { + replicas: { + min: 4, + }, + replicate: { + factor: 1, + }, + timeUntilRoleMaturity: 1000, + setup, + }, }, - timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing - }, - }, - ))!; - - await waitForResolved(async () => - expect((await db1.log.getReplicators()).size).to.eq(3), - ); + ); - let entryCount = 100; - for (let i = 0; i < 
entryCount; i++) { - await db1.add("hello" + i, { meta: { next: [] } }); - } + await db1.add("hello", { + replicas: new AbsoluteReplicas(4), + }); - await waitForConverged(() => db1.log.log.length); - await waitForConverged(() => db2.log.log.length); - await waitForConverged(() => db3.log.log.length); + try { + await waitForResolved(() => expect(db1.log.log.length).equal(1)); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); + } catch (error) { + await dbgLogs([db1.log, db2.log]); + throw error; + } - const db2Length = db2.log.log.length; - const db3Length = db3.log.log.length; + const indexedDb1 = await db1.log.entryCoordinatesIndex + .iterate() + .all(); + const indexedDb2 = await db2.log.entryCoordinatesIndex + .iterate() + .all(); - await waitForResolved(() => - expect(db2.log.log.length).to.be.greaterThan(0), - ); + const assignedToRangeBoundaryDb1 = indexedDb1.filter( + (x) => x.value.assignedToRangeBoundary, + ); + expect(assignedToRangeBoundaryDb1).to.have.length(1); + expect( + assignedToRangeBoundaryDb1[0].value.coordinates, + ).to.have.length(4); + const assignedToRangeBoundaryDb2 = indexedDb2.filter( + (x) => x.value.assignedToRangeBoundary, + ); + expect(assignedToRangeBoundaryDb2).to.have.length(1); + expect( + assignedToRangeBoundaryDb2[0].value.coordinates, + ).to.have.length(4); + }); + }); - await waitForResolved(() => - expect(db3.log.log.length).to.be.greaterThan(0), - ); + it("min replicas will be maximum value for gid", async () => { + await init({ min: 1 }); - const range2 = ( - await db2.log.getMyReplicationSegments() - )[0].toReplicationRange(); - - await db2.log.replicate({ id: range2.id, offset: 0.1, factor: 0.1 }); - - const range3 = ( - await db3.log.getMyReplicationSegments() - )[0].toReplicationRange(); - - await db3.log.replicate({ id: range3.id, offset: 0.1, factor: 0.1 }); - - await waitForConverged(() => db2.log.log.length); - await waitForConverged(() => db3.log.log.length); - expect(db2.log.log.length).to.be.lessThan(db2Length); - expect(db3.log.log.length).to.be.lessThan(db3Length); - - // reset to original - - await db2.log.replicate({ id: range2.id, offset: 0.5, factor: 0.5 }); + // following entries set minReplicas to 1, which means only db2 or db3 needs to hold them + const entryCount = 100; + for (let i = 0; i < entryCount / 2; i++) { + const e1 = await db1.add(String(i), { + replicas: new AbsoluteReplicas(3), + meta: { next: [] }, + }); + await db1.add(String(i), { + replicas: new AbsoluteReplicas(1), // will be overridden by 'maxReplicas' above + meta: { next: [e1.entry] }, + }); + } - await db3.log.replicate({ id: range3.id, offset: 0.5, factor: 0.5 }); + await waitForResolved(() => { + expect(db1.log.log.length).equal(0); + let total = db2.log.log.length + db3.log.log.length; + expect(total).greaterThanOrEqual(entryCount); + expect(db2.log.log.length).greaterThan(entryCount * 0.2); + expect(db3.log.log.length).greaterThan(entryCount * 0.2); + }); + }); - await waitForResolved(() => expect(db2.log.log.length).to.eq(db2Length)); - await waitForResolved(() => expect(db3.log.log.length).to.eq(db3Length)); - }); - it("distribute", async () => { - const u32Div3 = Math.round(0xffffffff / 3); - const db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - offset: 0, - factor: 0xffffffff, - normalized: false, + it("observer will not delete unless replicated", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicas: { + min: 10, + }, + replicate: false, + setup, }, - replicas: {
-					min: 1,
+				});
+				db2 = (await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicas: {
+								min: 10,
+							},
+							replicate: {
+								factor: 1,
+							},
+							setup,
+						},
 					},
-			},
+				))!;
+
+				const e1 = await db1.add("hello");
+
+				await waitForResolved(() => expect(db1.log.log.length).equal(1));
+				await waitForResolved(() => expect(db2.log.log.length).equal(1));
+				await expect(
+					Promise.all(
+						db1.log.prune(
+							new Map([
+								[
+									e1.entry.hash,
+									{
+										entry: e1.entry,
+										leaders: new Set([db2.node.identity.publicKey.hashcode()]),
+									},
+								],
+							]),
+							{ timeout: 3000 },
+						),
+					),
+				).rejectedWith("Timeout for checked pruning");
+				expect(db1.log.log.length).equal(1); // No deletions
+			});
-		let db2 = (await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-			{
+			it("replicator will not delete unless replicated", async () => {
+				db1 = await session.peers[0].open(new EventStore(), {
 					args: {
+						replicas: {
+							min: 10,
+						},
 						replicate: {
-					offset: 0,
-					factor: 0xffffffff,
-					normalized: false,
+							factor: 1,
 						},
+						setup,
+					},
+				});
+				db2 = (await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicas: {
+								min: 10,
+							},
+							replicate: {
+								factor: 1,
+							},
+							setup,
+						},
+					},
+				))!;
+
+				const e1 = await db1.add("hello");
+				await waitForResolved(() => expect(db1.log.log.length).equal(1));
+				await waitForResolved(() => expect(db2.log.log.length).equal(1));
+				await expect(
+					Promise.all(
+						db1.log.prune(
+							new Map([
+								[
+									e1.entry.hash,
+									{
+										entry: e1.entry,
+										leaders: new Set(
+											[db1, db2].map((x) =>
+												x.node.identity.publicKey.hashcode(),
+											),
+										),
+									},
+								],
+							]),
+							{ timeout: 3000 },
+						),
+					),
+				).rejectedWith("Failed to delete, is leader");
+				expect(db1.log.log.length).equal(1); // No deletions
+			});
+
+			it("keep degree while updating role", async () => {
+				let min = 1;
+				let max = 1;
+
+				// peer 1 observer
+				// peer 2 observer
+
+				db1 = await session.peers[0].open(new EventStore(), {
+					args: {
						replicas: {
-					min: 1,
+							min,
+							max,
 						},
+						replicate: false,
+						setup,
 					},
-			},
-		))!;
+				});
-		let db3 = (await EventStore.open>(
-			db1.address!,
-			session.peers[2],
-			{
+				db2 = (await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicate: {
+								factor: 0,
+							},
+							replicas: {
+								min,
+								max,
+							},
+							setup,
+						},
+					},
+				))!;
+
+				let db2ReorgCounter = 0;
+				let db2ReplicationReorganizationFn = db2.log.onReplicationChange.bind(
+					db2.log,
+				);
+				db2.log.onReplicationChange = (args) => {
+					db2ReorgCounter += 1;
+					return db2ReplicationReorganizationFn(args);
+				};
+				await db1.add("hello");
+
+				// peer 1 observer
+				// peer 2 replicator (will get entry)
+
+				await waitForResolved(() => expect(db1.log.log.length).equal(1));
+				expect(db2ReorgCounter).equal(0);
+				await db2.log.replicate({
+					factor: 1,
+				});
+				await waitForResolved(() => expect(db2ReorgCounter).equal(1));
+				await waitForResolved(() => expect(db2.log.log.length).equal(1));
+
+				// peer 1 removed
+				// peer 2 replicator (has entry)
+				await db1.drop();
+
+				// peer 1 observer
+				// peer 2 replicator (has entry)
+				await session.peers[0].open(db1, {
 					args: {
-				replicate: {
-					offset: 0,
-					factor: 0xffffffff,
-					normalized: false,
+						replicas: {
+							min,
+							max,
 						},
+						replicate: false,
+						setup,
+					},
+				});
+
+				// peer 1 observer
+				// peer 2 observer
+				expect(db2.log.log.length).equal(1);
+				await delay(2000);
+				// expect(db2ReorgCounter).equal(1); TODO limit distributions and test this
+
+				await db2.log.replicate(false);
+
+				// expect(db2ReorgCounter).equal(2); TODO limit distributions and test this
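+				// db2 has now unregistered its only replication segment; it still holds
+				// the entry and may only drop it once another replicator covers it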
+				expect(await db2.log.isReplicating()).to.be.false;
+
+				// peer 1 replicator (will get entry)
+				// peer 2 observer (will safely delete the entry)
+				await db1.log.replicate({
+					factor: 1,
+				});
+
+				await waitForResolved(() => expect(db1.log.log.length).equal(1));
+				await waitForResolved(() => expect(db2.log.log.length).equal(0));
+				// expect(db2ReorgCounter).equal(3); TODO
+			});
+			it("can override min on program level", async () => {
+				let minReplicas = 1;
+				let maxReplicas = 1;
+
+				await init({ min: minReplicas, max: maxReplicas });
+
+				const entryCount = 100;
+				for (let i = 0; i < entryCount; i++) {
+					await db1.add("hello", {
+						replicas: new AbsoluteReplicas(5), // will be overridden by 'maxReplicas' above
+						meta: { next: [] },
+					});
+				}
+				await waitForResolved(
+					() => {
+						expect(db1.log.log.length).equal(0); // because db1 is not replicating at all, but just pruning once it knows entries are replicated elsewhere
+						let total = db2.log.log.length + db3.log.log.length;
+						expect(total).greaterThanOrEqual(entryCount);
+						expect(total).lessThan(entryCount * 2);
+						expect(db2.log.log.length).greaterThan(entryCount * 0.2);
+						expect(db3.log.log.length).greaterThan(entryCount * 0.2);
+					},
+					{ timeout: 3e4 },
+				);
+			});
+			it("time out when pending IHave are never resolved", async () => {
+				let min = 1;
+				let max = 1;
+
+				// peer 1 observer
+				// peer 2 observer
+
+				db1 = await session.peers[0].open(new EventStore(), {
+					args: {
						replicas: {
-					min: 1,
+							min,
+							max,
 						},
+						replicate: false,
+						setup,
 					},
-			},
-		))!;
-
-		let entryCount = 300;
-
-		for (let i = 0; i < entryCount; i++) {
-			await db1.add("hello" + i, { meta: { next: [] } });
-		}
-
-		await waitForResolved(() => expect(db1.log.log.length).equal(entryCount));
-		await waitForResolved(() => expect(db2.log.log.length).equal(entryCount));
-		await waitForResolved(() => expect(db3.log.log.length).equal(entryCount));
-
-		db1.log.replicate(
-			{ factor: u32Div3, offset: 0, normalized: false },
-			{ reset: true },
-		);
-		db2.log.replicate(
-			{ factor: u32Div3, offset: u32Div3, normalized: false },
-			{ reset: true },
-		);
-		db3.log.replicate(
-			{ factor: u32Div3, offset: u32Div3 * 2, normalized: false },
-			{ reset: true },
-		);
-
-		await waitForResolved(() =>
-			expect(db1.log.log.length).to.closeTo(entryCount / 3, 30),
-		);
-		await waitForResolved(() =>
-			expect(db2.log.log.length).to.closeTo(entryCount / 3, 30),
-		);
-		await waitForResolved(() =>
-			expect(db3.log.log.length).to.closeTo(entryCount / 3, 30),
-		);
-		await waitForResolved(() =>
-			expect(
-				db1.log.log.length + db2.log.log.length + db3.log.log.length,
-			).to.equal(entryCount),
-		);
-		for (const db of [db1, db2, db3]) {
-			expect(await db.log.getPrunable()).to.have.length(0);
-		}
-	});
+				});
-	it("close", async () => {
-		db1 = await session.peers[0].open(new EventStore(), {
-			args: {
-				replicate: {
-					factor: 0.333,
-					offset: 0.333,
+				let respondToIHaveTimeout = 3000;
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicas: {
+								min,
+								max,
+							},
+							replicate: {
+								factor: 1,
+							},
+							respondToIHaveTimeout,
+							setup,
+						},
 					},
-			},
+				);
+
+				// TODO this test is flaky because background prune calls are interfering with assertions
+				// TODO make sure no background prunes are done (?)
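+				// the handler stub below swallows ExchangeHeadsMessage on db2, so the
+				// entry is never transferred; db1's prune request then waits on an
+				// IHave answer that can never arrive and must time out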
+
+				const onMessageFn = db2.log._onMessage.bind(db2.log);
+				db2.log.rpc["_responseHandler"] = async (msg: any, cxt: any) => {
+					if (msg instanceof ExchangeHeadsMessage) {
+						return; // prevent replication
+					}
+					return onMessageFn(msg, cxt);
+				};
+				const { entry } = await db1.add("hello");
+				const expectPromise = expect(
+					Promise.all(
+						db1.log.prune(
+							new Map([
+								[
+									entry.hash,
+									{
+										entry: entry,
+										leaders: new Set(
+											[db2].map((x) => x.node.identity.publicKey.hashcode()),
+										),
+									},
+								],
+							]),
+							{ timeout: db1.log.timeUntilRoleMaturity },
+						),
+					),
+				).rejectedWith("Timeout");
+				await waitForResolved(() =>
+					expect(db2.log["_pendingIHave"].size).equal(1),
+				);
+				await delay(respondToIHaveTimeout + 1000);
+				await waitForResolved(() =>
+					expect(db2.log["_pendingIHave"].size).equal(0),
+				); // should clear up
+				await expectPromise;
+			});
-	db2 = await EventStore.open>(
-		db1.address!,
-		session.peers[1],
-		{
+			it("does not get blocked by slow sends", async () => {
+				db1 = await session.peers[0].open(new EventStore(), {
 					args: {
 						replicate: {
-				factor: 0.333,
-				offset: 0,
+							factor: 1,
 						},
+						setup,
 					},
-		},
-	);
-	db3 = await EventStore.open>(
-		db1.address!,
-		session.peers[2],
-		{
+				});
+
+				db2 = await session.peers[1].open>(db1.address, {
 					args: {
 						replicate: {
-				factor: 0.333,
-				offset: 0.666,
+							factor: 1,
 						},
+						setup,
 					},
-		},
-	);
-
-	const sampleSize = 1e3;
-	const entryCount = sampleSize;
-
-	await waitForResolved(async () =>
-		expect(await db1.log.replicationIndex?.getSize()).equal(3),
-	);
-	await waitForResolved(async () =>
-		expect(await db2.log.replicationIndex?.getSize()).equal(3),
-	);
-	await waitForResolved(async () =>
-		expect(await db3.log.replicationIndex?.getSize()).equal(3),
-	);
+				});
-	const promises: Promise[] = [];
-	for (let i = 0; i < entryCount; i++) {
-		promises.push(
-			db1.add(toBase64(new Uint8Array([i])), {
-				meta: { next: [] },
-			}),
+				await waitForResolved(async () =>
+					expect((await db1.log.getReplicators()).size).equal(2),
 				);
-	}
-	await Promise.all(promises);
+				let db1Delay = 0;
+				const db1Send = db1.log.rpc.send.bind(db1.log.rpc);
+				db1.log.rpc.send = async (message, options) => {
+					const controller = new AbortController();
+					db1.log.rpc.events.addEventListener("close", () => {
+						controller.abort(new AbortError());
+					});
+					db1.log.rpc.events.addEventListener("drop", () => {
+						controller.abort(new AbortError());
+					});
+					try {
+						await delay(db1Delay, { signal: controller.signal });
+					} catch (error) {
+						return;
+					}
+					return db1Send(message, options);
+				};
-	await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
+				db1Delay = 1e4;
-	const distribute = sinon.spy(db1.log.onReplicationChange);
-	db1.log.onReplicationChange = distribute;
-	await db3.close();
-	await checkBounded(entryCount, 1, 1, db1, db2);
-	});
+				db1.add("hello");
-	it("a smaller replicator join leave joins", async () => {
-		const db1 = await session.peers[0].open(new EventStore(), {
-			args: {
-				replicate: {
-					factor: 1, // this replicator will get all entries
-				},
-				replicas: {
-					min: 2, // we set min replicas to 2 to ensure second node should have all entries no matter what
+				await delay(1000); // make sure we have gotten "stuck" into the rpc.send function
+
+				let t0 = +new Date();
+				db1Delay = 0;
+				db3 = await session.peers[2].open>(db1.address, {
+					args: {
+						replicate: {
+							factor: 1,
+						},
+						setup,
 					},
-				timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing
-			},
+				});
+
+				await waitForResolved(() => expect(db3.log.log.length).equal(1));
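+				// t1 - t0 measures only how long db3 needs to receive the entry; it must
+				// not be blocked by the artificial 10s delay that stalled db1's earlier send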
+				let t1 = +new Date();
+				expect(t1 - t0).lessThan(2000);
+			});
-	let entryCount = 100;
-	for (let i = 0; i < entryCount; i++) {
-		await db1.add("hello" + i, { meta: { next: [] } });
-	}
-
-	let db2 = (await EventStore.open>(
-		db1.address!,
-		session.peers[1],
-		{
+
+			it("restarting node will receive entries", async () => {
+				db1 = await session.peers[0].open(new EventStore(), {
 					args: {
 						replicate: {
-					offset: 0.1,
-					factor: 0.1, // some small range
-				},
-				replicas: {
-					min: 2,
+							factor: 1,
 						},
-				timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing
+						setup,
 					},
-		},
-	))!;
+				});
-	await waitForResolved(() =>
-		expect(db1.log.log.length).to.be.equal(entryCount),
-	);
-	await waitForResolved(() =>
-		expect(db2.log.log.length).to.be.equal(entryCount),
-	);
+				db2 = await session.peers[1].open>(db1.address, {
+					args: {
+						replicate: {
+							factor: 1,
+						},
+						setup,
+					},
+				});
+				await db1.add("hello");
+				await waitForResolved(() => expect(db2.log.log.length).equal(1));
+				await db2.drop();
+				await session.peers[1].stop();
+				await session.peers[1].start();
+				db2 = await session.peers[1].open>(db1.address, {
+					args: {
+						replicate: {
+							factor: 1,
+						},
+						setup,
+					},
+				});
+				await waitForResolved(() => expect(db2.log.log.length).equal(1));
+			});
-	await db2.close();
-	db2 = (await EventStore.open>(
-		db1.address!,
-		session.peers[1],
-		{
+			it("can handle many large messages", async () => {
+				db1 = await session.peers[0].open(new EventStore(), {
 					args: {
 						replicate: {
-					factor: 0.2, // some small range
-					offset: 0.2, // but on another place
+							factor: 1,
 						},
-				replicas: {
-					min: 2,
+						setup,
+					},
+				});
+
+				// append more than 30 mb
+				const count = 5;
+				for (let i = 0; i < count; i++) {
+					await db1.add(toBase64(randomBytes(6e6)), { meta: { next: [] } });
+				}
+				db2 = await session.peers[1].open>(db1.address, {
+					args: {
+						replicate: {
+							factor: 1,
 						},
-				timeUntilRoleMaturity: 0, // prevent additiona replicationChangeEvents to occur when maturing
+						setup,
 					},
-		},
-	))!;
+				});
+				await waitForResolved(() => expect(db2.log.log.length).equal(count));
+			});
-	await waitForResolved(() =>
-		expect(db1.log.log.length).to.be.equal(entryCount),
-	);
+			describe("update", () => {
+				it("shift to 0 factor", async () => {
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								setup,
+							},
+						},
+					);
-	await waitForResolved(() =>
-		expect(db2.log.log.length).to.be.equal(entryCount),
-	);
-	});
-	});
+					let entryCount = 100;
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
-	/* TODO feat
-	it("will reject early if leaders does not have entry", async () => {
-		await init(1);
+					// half of the entries will end up in a region where there are no replicators
+					expect(
+						(await db1.log.entryCoordinatesIndex.iterate().all()).filter(
+							(x) => x.value.assignedToRangeBoundary,
+						).length,
+					).to.be.lessThan(100);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								setup,
+							},
+						},
+					))!;
-		const value = "hello";
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.above(entryCount / 3),
+					);
-		const e1 = await db1.add(value, { replicas: new AbsoluteReplicas(2) });
+					await db2.log.replicate(
+						{ factor: 0, offset: 0, normalized: false },
+						{ reset: true },
+					);
-		// Assume all peers gets it
-		await waitForResolved(() => expect(db1.log.log.length).equal(1));
-		await waitForResolved(() => expect(db2.log.log.length).equal(1));
-		await waitForResolved(() => expect(db3.log.log.length).equal(1));
-		await db3.log.log.deleteRecursively(await db3.log.log.getHeads());
-		await waitForResolved(() => expect(db3.log.log.length).equal(0));
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.eq(entryCount),
+					);
+					await waitForResolved(() => expect(db2.log.log.length).to.eq(0));
+					await waitForResolved(() =>
+						expect(db1.log.log.length + db2.log.log.length).to.equal(
+							entryCount,
+						),
+					);
+				});
-		expect(db2.log.log.length).equal(1);
-		const fn = () => db2.log.safelyDelete([e1.entry], { timeout: 3000 })[0];
-		await expect(fn).rejectedWith(
-			"Insufficient replicators to safely delete: " + e1.entry.hash
-		);
-		expect(db2.log.log.length).equal(1);
-	}); */
-});
+				it("shift half prune", async () => {
+					const halfRegion = Number(numbers.maxValue) / 2;
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: halfRegion,
+									normalized: false,
+								},
+								replicas: {
+									min: 1,
+								},
+								setup,
+							},
+						},
+					);
-describe("sync", () => {
-	let session: TestSession;
-	let db1: EventStore, db2: EventStore;
+					let entryCount = 100;
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
-	before(async () => {
-		session = await TestSession.connected(2);
-	});
-	after(async () => {
-		await session.stop();
-	});
+					// half of the entries will end up in a region where there are no replicators
+					expect(
+						(await db1.log.entryCoordinatesIndex.iterate().all()).filter(
+							(x) => x.value.assignedToRangeBoundary,
+						).length,
+					).to.be.lessThan(100);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: halfRegion,
+									normalized: false,
+								},
+								replicas: {
+									min: 1,
+								},
+								setup,
+							},
+						},
+					))!;
-	afterEach(async () => {
-		if (db1) await db1.drop();
-		if (db2) await db2.drop();
-	});
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.above(entryCount / 3),
+					);
-	it("manually synced entries will not get pruned", async () => {
-		db1 = await session.peers[0].open>(new EventStore(), {
-			args: {
-				/* sync: () => true, */
-				replicas: {
-					min: 1,
-				},
-				replicate: {
-					factor: 1,
-				},
-			},
-		})!;
-
-		db2 = (await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-			{
-				args: {
-					/* sync: () => true, */
-					replicas: {
-						min: 1,
+					await db2.log.replicate(
+						{ factor: halfRegion, offset: halfRegion, normalized: false },
+						{ reset: true },
+					);
+
+					try {
+						await waitForResolved(() =>
+							expect(db1.log.log.length).to.closeTo(entryCount / 2, 20),
+						);
+						await waitForResolved(() =>
+							expect(db2.log.log.length).to.closeTo(entryCount / 2, 20),
+						);
+						await waitForResolved(() =>
+							expect(db1.log.log.length + db2.log.log.length).to.equal(
+								entryCount,
+							),
+						);
+					} catch (error) {
+						await dbgLogs([db1.log, db2.log]);
+						throw error;
+					}
+				});
+
+				it("to same range", async () => {
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+					await db1.add("hello", { meta: { next: [] } });
+					await waitForResolved(() => expect(db1.log.log.length).equal(1));
+					await waitForResolved(() => expect(db2.log.log.length).equal(1));
+
+					const findLeaders1 = sinon.spy(db1.log, "findLeaders");
+					const findLeaders2 = sinon.spy(db2.log, "findLeaders");
+					const onMessage1 = sinon.spy(db1.log, "_onMessage");
+
+					const range = (
+						await db2.log.getMyReplicationSegments()
+					)[0].toReplicationRange();
+					await db2.log.replicate(range);
+
+					expect(findLeaders1.callCount).equal(0); // no changes
+					try {
+						await waitForResolved(() => expect(onMessage1.callCount).equal(1)); // one message
+					} catch (error) {
+						throw new Error("Never received message");
+					}
+					expect(findLeaders2.callCount).equal(0); // no changes emitted
+				});
+
+				it("to smaller but already replicated", async () => {
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					let entryCount = 100;
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
+					await waitForResolved(() =>
+						expect(db1.log.log.length).equal(entryCount),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).equal(entryCount),
+					);
+
+					const findLeaders1 = sinon.spy(db1.log, "findLeaders");
+					const findLeaders2 = sinon.spy(db2.log, "findLeaders");
+					const onMessage1 = sinon.spy(db1.log, "_onMessage");
+
+					const range = (
+						await db2.log.getMyReplicationSegments()
+					)[0].toReplicationRange();
+
+					let newFactor = 0.5;
+					await db2.log.replicate({
+						factor: newFactor,
+						offset: 0,
+						id: range.id,
+					});
+					const expectedAmountOfEntriesToPrune = entryCount * newFactor;
+
+					await waitForResolved(async () => {
+						expect(db2.log.log.length).to.be.closeTo(
+							entryCount - expectedAmountOfEntriesToPrune,
+							30,
+						);
+
+						// TODO reenable expect(onMessage1.callCount).equal(2); // two messages (the updated range) and request for pruning
+						expect(findLeaders1.callCount).to.be.lessThan(entryCount * 3); // some upper bound, TODO make more strict
+						expect(findLeaders2.callCount).to.be.lessThan(entryCount * 3); // some upper bound, TODO make more strict
+						/*
+						TODO stricter bounds like below
+						expect(findLeaders1.callCount).to.closeTo(prunedEntries * 2, 30); // redistribute + prune about 50% of the entries
+						expect(findLeaders2.callCount).to.closeTo(prunedEntries * 2, 30); // redistribute + handle prune requests
+						*/
+					});
+
+					// we do below separately because this will interfere with the callCounts above
+					await waitForResolved(async () =>
+						expect(await db2.log.getPrunable()).to.length(0),
+					);
+
+					// eslint-disable-next-line no-useless-catch
+					try {
+						expect(onMessage1.getCall(0).args[0]).instanceOf(
+							AddedReplicationSegmentMessage,
+						);
+						expect(onMessage1.getCall(1).args[0]).instanceOf(RequestIPrune);
+					} catch (error) {
+						// eslint-disable-next-line no-useless-catch
+						try {
+							expect(onMessage1.getCall(1).args[0]).instanceOf(
+								AddedReplicationSegmentMessage,
+							);
+							expect(onMessage1.getCall(0).args[0]).instanceOf(RequestIPrune);
+						} catch (error) {
+							throw error;
+						}
+					}
+					/* const entryRefs1 = await db1.log.entryCoordinatesIndex.iterate().all();
+					const entryRefs2 = await db2.log.entryCoordinatesIndex.iterate().all();
+
+					expect(
+						entryRefs1.filter((x) => x.value.replicators === 2),
+					).to.have.length(db2.log.log.length);
+					expect(
+						entryRefs1.filter((x) => x.value.replicators === 1),
+					).to.have.length(entryCount - db2.log.log.length);
+					expect(
+						entryRefs2.filter((x) => x.value.replicators === 2),
+					).to.have.length(db2.log.log.length); */
+				});
+
+				it("to smaller will need transfer", async () => {
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0.5,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					let entryCount = 100;
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
+
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 30),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.closeTo(entryCount / 2, 30),
+					);
+
+					/*
+					// TODO assert findLeaders call count strict
+					const findLeaders1 = sinon.spy(db1.log, "findLeaders");
+					const findLeaders2 = sinon.spy(db2.log, "findLeaders");
+					*/
+
+					const prune2 = sinon.spy(db2.log, "prune");
+
+					const range = (
+						await db2.log.getMyReplicationSegments()
+					)[0].toReplicationRange();
+
+					await db2.log.replicate({
+						factor: 0.001,
+						offset: 0.99,
+						id: range.id,
+					});
+
+					/* const entriesThatWillBeChecked = entryCount / 2;
+					const entriesThatWillBePruned = entryCount / 4; // the change is that the range [0.5, 0.75] will be owned by db1 and [0.75, 1] will be owned by db2
+					await waitForResolved(() =>
+						expect(findLeaders2.callCount).to.closeTo(
+							entriesThatWillBeChecked + entriesThatWillBePruned,
+							30,
+						),
+					); TODO assert findLeaders call count strictly */
+
+					await waitForResolved(() => {
+						expect(prune2.callCount).to.eq(1);
+						expect(
+							[...prune2.getCall(0).args[0].values()].length,
+						).to.be.closeTo(entryCount / 4, 15); // a quarter of the entries should be pruned because the range [0, 0.75] will be owned by db1 and [0.75, 1] will be owned by db2
+					});
+
+					// TODO assert some kind of findLeaders callCount ?
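+					// one replicate() update, one prune batch: roughly a quarter of the
+					// entries leave db2, matching the range reasoning in the comment above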
+				});
+
+				it("to smaller then to larger", async () => {
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0.5,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					let entryCount = 100;
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 30),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.closeTo(entryCount / 2, 30),
+					);
+
+					const range = (
+						await db2.log.getMyReplicationSegments()
+					)[0].toReplicationRange();
+
+					await waitForConverged(() => db2.log.log.length);
+
+					let startSize = db2.log.log.length;
+					await db2.log.replicate({ factor: 0.25, offset: 0.5, id: range.id });
+
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.lessThan(startSize),
+					);
+					await delay(1000);
+
+					await db2.log.replicate({ factor: 0.5, offset: 0.5, id: range.id });
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.eq(startSize),
+					);
+				});
+
+				it("to smaller will initiate prune", async () => {
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+
+					let entryCount = 100;
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 1,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.eq(entryCount),
+					);
+
+					const range1 = (
+						await db1.log.getMyReplicationSegments()
+					)[0].toReplicationRange();
+
+					await db1.log.replicate({ id: range1.id, offset: 0, factor: 0 });
+					await waitForResolved(() => expect(db1.log.log.length).to.eq(0));
+				});
+
+				it("replace range with another node write before join with slowed down send", async () => {
+					let sendDelay = 2000;
+					let waitForPruneDelay = sendDelay + 2000;
+					await slowDownSend(session.peers[2], session.peers[0], sendDelay); // we do this to force a replication pattern where peer[1] needs to send entries to peer[2]
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								waitForPruneDelay,
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+
+					let entryCount = 100;
+
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0.5,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								waitForPruneDelay,
+								setup,
+							},
+						},
+					))!;
+
+					let db3 = (await EventStore.open>(
+						db1.address!,
+						session.peers[2],
+						{
+							args: {
+								replicate: {
+									offset: 0.5,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								waitForPruneDelay,
+								setup,
+							},
+						},
+					))!;
+
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 20),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.closeTo(entryCount / 2, 20),
+					);
+					await waitForResolved(() =>
+						expect(db3.log.log.length).to.be.closeTo(entryCount / 2, 20),
+					);
+
+					const db2Length = db2.log.log.length;
+					const db3Length = db3.log.log.length;
+
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.greaterThan(0),
+					);
+
+					await waitForResolved(() =>
+						expect(db3.log.log.length).to.be.greaterThan(0),
+					);
+
+					const range2 = (
+						await db2.log.getMyReplicationSegments()
+					)[0].toReplicationRange();
+
+					await db2.log.replicate({ id: range2.id, offset: 0.1, factor: 0.1 });
+
+					// await delay(5000)
+
+					const range3 = (
+						await db3.log.getMyReplicationSegments()
+					)[0].toReplicationRange();
+
+					await db3.log.replicate({ id: range3.id, offset: 0.1, factor: 0.1 });
+
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 20),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.closeTo(entryCount / 10, 10),
+					);
+					await waitForResolved(() =>
+						expect(db3.log.log.length).to.be.closeTo(entryCount / 10, 10),
+					);
+
+					expect(db2.log.log.length).to.be.lessThan(db2Length);
+					expect(db3.log.log.length).to.be.lessThan(db3Length);
+
+					// reset to original
+
+					await db2.log.replicate({ id: range2.id, offset: 0.5, factor: 0.5 });
+					await db3.log.replicate({ id: range3.id, offset: 0.5, factor: 0.5 });
+
+					try {
+						await waitForResolved(() =>
+							expect(db2.log.log.length).to.eq(db2Length),
+						);
+						await waitForResolved(() =>
+							expect(db3.log.log.length).to.eq(db3Length),
+						);
+					} catch (error) {
+						await dbgLogs([db2.log, db3.log]);
+						throw error;
+					}
+				});
+
+				it("replace range with another node write after join", async () => {
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0.5,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					let db3 = (await EventStore.open>(
+						db1.address!,
+						session.peers[2],
+						{
+							args: {
+								replicate: {
+									offset: 0.5,
+									factor: 0.5,
+								},
+								replicas: {
+									min: 1,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					try {
+						await waitForResolved(async () =>
+							expect((await db1.log.getReplicators()).size).to.eq(3),
+						);
+
+						let entryCount = 100;
+						for (let i = 0; i < entryCount; i++) {
+							await db1.add("hello" + i, { meta: { next: [] } });
+						}
+
+						await waitForResolved(() =>
+							expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 20),
+						);
+						await waitForResolved(() =>
+							expect(db2.log.log.length).to.be.closeTo(entryCount / 2, 20),
+						);
+						await waitForResolved(() =>
+							expect(db3.log.log.length).to.be.closeTo(entryCount / 2, 20),
+						);
+
+						const db2Length = db2.log.log.length;
+						const db3Length = db3.log.log.length;
+
+						await waitForResolved(() =>
+							expect(db2.log.log.length).to.be.greaterThan(0),
+						);
+
+						await waitForResolved(() =>
+							expect(db3.log.log.length).to.be.greaterThan(0),
+						);
+
+						const range2 = (
+							await db2.log.getMyReplicationSegments()
+						)[0].toReplicationRange();
+
+						await db2.log.replicate({
+							id: range2.id,
+							offset: 0.1,
+							factor: 0.1,
+						});
+
+						const range3 = (
+							await db3.log.getMyReplicationSegments()
+						)[0].toReplicationRange();
+
+						await db3.log.replicate({
+							id: range3.id,
+							offset: 0.1,
+							factor: 0.1,
+						});
+
+						await waitForResolved(() =>
+							expect(db1.log.log.length).to.be.closeTo(entryCount / 2, 20),
+						);
+						await waitForResolved(() =>
+							expect(db2.log.log.length).to.be.closeTo(entryCount / 10, 10),
+						);
+						await waitForResolved(() =>
+							expect(db3.log.log.length).to.be.closeTo(entryCount / 10, 10),
+						);
+
+						// reset to original
+
+						await db2.log.replicate({
+							id: range2.id,
+							offset: 0.5,
+							factor: 0.5,
+						});
+
+						await db3.log.replicate({
+							id: range3.id,
+							offset: 0.5,
+							factor: 0.5,
+						});
+
+						await waitForResolved(() =>
+							expect(db2.log.log.length).to.eq(db2Length),
+						);
+						await waitForResolved(() =>
+							expect(db3.log.log.length).to.eq(db3Length),
+						);
+					} catch (error) {
+						await dbgLogs([db1.log, db2.log, db3.log]);
+						throw error;
+					}
+				});
+
+				it("distribute", async () => {
+					const maxDiv3 = Math.round(Number(numbers.maxValue) / 3);
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: numbers.maxValue,
+									normalized: false,
+								},
+								replicas: {
+									min: 1,
+								},
+								setup,
+							},
+						},
+					);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: numbers.maxValue,
+									normalized: false,
+								},
+								replicas: {
+									min: 1,
+								},
+								setup,
+							},
+						},
+					))!;
+
+					let db3 = (await EventStore.open>(
+						db1.address!,
+						session.peers[2],
+						{
+							args: {
+								replicate: {
+									offset: 0,
+									factor: numbers.maxValue,
+									normalized: false,
+								},
+								replicas: {
+									min: 1,
+								},
+								setup,
+							},
+						},
+					))!;
+
+					let entryCount = 300;
+
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
+
+					await waitForResolved(() =>
+						expect(db1.log.log.length).equal(entryCount),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).equal(entryCount),
+					);
+					await waitForResolved(() =>
+						expect(db3.log.log.length).equal(entryCount),
+					);
+
+					db1.log.replicate(
+						{ factor: maxDiv3, offset: 0, normalized: false },
+						{ reset: true },
+					);
+					db2.log.replicate(
+						{ factor: maxDiv3, offset: maxDiv3, normalized: false },
+						{ reset: true },
+					);
+					db3.log.replicate(
+						{ factor: maxDiv3, offset: maxDiv3 * 2, normalized: false },
+						{ reset: true },
+					);
+
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.closeTo(entryCount / 3, 30),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.closeTo(entryCount / 3, 30),
+					);
+					await waitForResolved(() =>
+						expect(db3.log.log.length).to.closeTo(entryCount / 3, 30),
+					);
+					await waitForResolved(() =>
+						expect(
+							db1.log.log.length + db2.log.log.length + db3.log.log.length,
+						).to.equal(entryCount),
+					);
+					for (const db of [db1, db2, db3]) {
+						expect(await db.log.getPrunable()).to.have.length(0);
+					}
+				});
+
+				it("close", async () => {
+					db1 = await session.peers[0].open(new EventStore(), {
+						args: {
+							replicate: {
+								factor: 0.333,
+								offset: 0.333,
+							},
+							setup,
+						},
+					});
+
+					db2 = await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									factor: 0.333,
+									offset: 0,
+								},
+								setup,
+							},
+						},
+					);
+					db3 = await EventStore.open>(
+						db1.address!,
+						session.peers[2],
+						{
+							args: {
+								replicate: {
+									factor: 0.333,
+									offset: 0.666,
+								},
+								setup,
+							},
+						},
+					);
+
+					const sampleSize = 1e3;
+					const entryCount = sampleSize;
+
+					try {
+						await waitForResolved(async () =>
+							expect(await db1.log.replicationIndex?.getSize()).equal(3),
+						);
+						await waitForResolved(async () =>
+							expect(await db2.log.replicationIndex?.getSize()).equal(3),
+						);
+						await waitForResolved(async () =>
+							expect(await db3.log.replicationIndex?.getSize()).equal(3),
+						);
+					} catch (error) {
+						await dbgLogs([db1.log, db2.log, db3.log]);
+						throw error;
+					}
+
+					const promises: Promise[] = [];
+					for (let i = 0; i < entryCount; i++) {
+						promises.push(
+							db1.add(toBase64(new Uint8Array([i])), {
+								meta: { next: [] },
+							}),
+						);
+					}
+
+					await Promise.all(promises);
+
+					await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
+
+					const distribute = sinon.spy(db1.log.onReplicationChange);
+					db1.log.onReplicationChange = distribute;
+					await db3.close();
+					await checkBounded(entryCount, 1, 1, db1, db2);
+				});
+
+				it("a smaller replicator join leave joins", async () => {
+					let minReplicas = 2;
+					const db1 = await session.peers[0].open(
+						new EventStore(),
+						{
+							args: {
+								replicate: {
+									factor: 1, // this replicator will get all entries
+								},
+								replicas: {
+									min: minReplicas, // we set min replicas to 2 to ensure second node should have all entries no matter what
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					);
+					let entryCount = 100;
+					for (let i = 0; i < entryCount; i++) {
+						await db1.add("hello" + i, { meta: { next: [] } });
+					}
+
+					expect(
+						(await db1.log.entryCoordinatesIndex.iterate().all()).filter(
+							(x) => x.value.assignedToRangeBoundary,
+						).length,
+					).to.eq(entryCount);
+
+					let db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									offset: 0.1,
+									factor: 0.1, // some small range
+								},
+								replicas: {
+									min: minReplicas,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.be.equal(entryCount),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.equal(entryCount),
+					);
+
+					await db2.close();
+					db2 = (await EventStore.open>(
+						db1.address!,
+						session.peers[1],
+						{
+							args: {
+								replicate: {
+									factor: 0.2, // some small range
+									offset: 0.2, // but on another place
+								},
+								replicas: {
+									min: minReplicas,
+								},
+								timeUntilRoleMaturity: 0, // prevent additional replicationChangeEvents to occur when maturing
+								setup,
+							},
+						},
+					))!;
+
+					await waitForResolved(() =>
+						expect(db1.log.log.length).to.be.equal(entryCount),
+					);
+					await waitForResolved(() =>
+						expect(db2.log.log.length).to.be.equal(entryCount),
+					);
+				});
+			});
+
+			/* TODO feat
+			it("will reject early if leaders does not have entry", async () => {
+				await init(1);
+
+				const value = "hello";
+
+				const e1 = await db1.add(value, { replicas: new AbsoluteReplicas(2) });
+
+				// Assume all peers gets it
+				await waitForResolved(() => expect(db1.log.log.length).equal(1));
+				await waitForResolved(() => expect(db2.log.log.length).equal(1));
+				await waitForResolved(() => expect(db3.log.log.length).equal(1));
+				await db3.log.log.deleteRecursively(await db3.log.log.getHeads());
+				await waitForResolved(() => expect(db3.log.log.length).equal(0));
+
+				expect(db2.log.log.length).equal(1);
+				const fn = () => db2.log.safelyDelete([e1.entry], { timeout: 3000 })[0];
+				await expect(fn).rejectedWith(
+					"Insufficient replicators to safely delete: " + e1.entry.hash
+				);
+				expect(db2.log.log.length).equal(1);
+			}); */
+		});
+
+		describe("sync", () => {
+			let session: TestSession;
+			let db1: EventStore, db2: EventStore;
+
+			before(async () => {
+				session = await TestSession.connected(2);
+			});
+			after(async () => {
+				await session.stop();
+			});
+
+			afterEach(async () => {
+				if (db1) await db1.drop();
+				if (db2) await db2.drop();
+			});
+
+			it("manually synced entries will not get pruned", async () => {
+				db1 = await session.peers[0].open>(
+					new EventStore(),
+					{
+						args: {
+							/* sync: () => true, */
+							replicas: {
+								min: 1,
+							},
+							replicate: {
+								factor: 1,
+							},
+							setup,
+						},
 					},
-			replicate: {
-				factor: 1,
-			},
-		},
-		},
-	))!;
-	await db1.add("data");
-	await waitForResolved(() => expect(db2.log.log.length).equal(1));
-	await db2.log.replicate(false);
-	await delay(3000);
-	await waitForResolved(() => expect(db2.log.log.length).equal(0));
+				)!;
+
+				db2 = (await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							/* sync: () => true, */
+							replicas: {
+								min: 1,
+							},
+							replicate: {
+								factor: 1,
+							},
+							setup,
+						},
+					},
+				))!;
+				await db1.add("data");
+				await waitForResolved(() => expect(db2.log.log.length).equal(1));
+				await db2.log.replicate(false);
+				await delay(3000);
+				await waitForResolved(() => expect(db2.log.log.length).equal(0));
+			});
+		});
 	});
 });
diff --git a/packages/programs/data/shared-log/test/sharding.spec.ts b/packages/programs/data/shared-log/test/sharding.spec.ts
index a660f7857..6859570c3 100644
--- a/packages/programs/data/shared-log/test/sharding.spec.ts
+++ b/packages/programs/data/shared-log/test/sharding.spec.ts
@@ -5,1226 +5,1573 @@ import { TestSession } from "@peerbit/test-utils";
 import { delay, waitFor, waitForResolved } from "@peerbit/time";
 import { expect } from "chai";
 import sinon from "sinon";
+import {
+	type ReplicationDomainHash,
+	createReplicationDomainHash,
+} from "../src/replication-domain-hash.js";
 import { AbsoluteReplicas } from "../src/replication.js";
-import { checkBounded, waitForConverged } from "./utils.js";
+import { RatelessIBLTSynchronizer } from "../src/sync/rateless-iblt.js";
+import { SimpleSyncronizer } from "../src/sync/simple.js";
+import {
+	type TestSetupConfig,
+	checkBounded,
+	checkIfSetupIsUsed,
+	dbgLogs,
+	waitForConverged,
+} from "./utils.js";
 import { EventStore } from "./utils/stores/event-store.js";
-describe(`sharding`, () => {
-	let session: TestSession;
-	let db1: EventStore,
-		db2: EventStore,
-		db3: EventStore,
-		db4: EventStore;
-
-	before(async () => {
-		session = await TestSession.connected(4, [
-			{
-				libp2p: {
-					privateKey: privateKeyFromRaw(
-						new Uint8Array([
-							27, 246, 37, 180, 13, 75, 242, 124, 185, 205, 207, 9, 16, 54, 162,
-							197, 247, 25, 211, 196, 127, 198, 82, 19, 68, 143, 197, 8, 203,
-							18, 179, 181, 105, 158, 64, 215, 56, 13, 71, 156, 41, 178, 86,
-							159, 80, 222, 167, 73, 3, 37, 251, 67, 86, 6, 90, 212, 16, 251,
-							206, 54, 49, 141, 91, 171,
-						]),
-					),
-				},
-			},
-			{
-				libp2p: {
-					privateKey: privateKeyFromRaw(
-						new Uint8Array([
-							113, 203, 231, 235, 7, 120, 3, 194, 138, 113, 131, 40, 251, 158,
-							121, 38, 190, 114, 116, 252, 100, 202, 107, 97, 119, 184, 24, 56,
-							27, 76, 150, 62, 132, 22, 246, 177, 200, 6, 179, 117, 218, 216,
-							120, 235, 147, 249, 48, 157, 232, 161, 145, 3, 63, 158, 217, 111,
-							65, 105, 99, 83, 4, 113, 62, 15,
-						]),
-					),
-				},
-			},
-
-			{
-				libp2p: {
-					privateKey: privateKeyFromRaw(
-						new Uint8Array([
-							215, 31, 167, 188, 121, 226, 67, 218, 96, 8, 55, 233, 34, 68, 9,
-							147, 11, 157, 187, 43, 39, 43, 25, 95, 184, 227, 137, 56, 4, 69,
-							120, 214, 182, 163, 41, 82, 248, 210, 213, 22, 179, 112, 251, 219,
-							52, 114, 102, 110, 6, 60, 216, 135, 218, 60, 196, 128, 251, 85,
-							167, 121, 179, 136, 114, 83,
-						]),
-					),
-				},
-			},
-			{
-				libp2p: {
-					privateKey: privateKeyFromRaw(
-						new Uint8Array([
-							176, 30, 32, 212, 227, 61, 222, 213, 141, 55, 56, 33, 95, 29, 21,
-							143, 15, 130, 94, 221, 124, 176, 12, 225, 198, 214, 83, 46, 114,
-							69, 187, 104, 51, 28, 15, 14, 240, 27, 110, 250, 130, 74, 127,
-							194, 243, 32, 169, 162, 109, 127, 172, 232, 208, 152, 149, 108,
-							74, 52, 229, 109, 23, 50, 249, 249,
-						]),
-					),
-				},
-			},
-		]);
-	});
+export const testSetups: TestSetupConfig[] = [
+	{
+		domain: createReplicationDomainHash("u32"),
+		type: "u32",
+		syncronizer: SimpleSyncronizer,
+		name: "u32-simple",
+	},
+	/* {
+		domain: createReplicationDomainHash("u64"),
+		type: "u64",
+		syncronizer: SimpleSyncronizer,
+		name: "u64-simple",
+	}, */
+	{
+		domain: createReplicationDomainHash("u64"),
+		type: "u64",
+		syncronizer: RatelessIBLTSynchronizer,
+		name: "u64-iblt",
+	},
+];
+
+testSetups.forEach((setup) => {
+	describe(setup.name, () => {
+		describe(`sharding`, () => {
+			let session: TestSession;
+			let db1: EventStore>,
+				db2: EventStore>,
+				db3: EventStore>,
+				db4: EventStore>;
+
+			before(async () => {
+				session = await TestSession.connected(4, [
+					{
+						libp2p: {
+							privateKey: privateKeyFromRaw(
+								new Uint8Array([
+									27, 246, 37, 180, 13, 75, 242, 124, 185, 205, 207, 9, 16, 54,
+									162, 197, 247, 25, 211, 196, 127, 198, 82, 19, 68, 143, 197,
+									8, 203, 18, 179, 181, 105, 158, 64, 215, 56, 13, 71, 156, 41,
+									178, 86, 159, 80, 222, 167, 73, 3, 37, 251, 67, 86, 6, 90,
+									212, 16, 251, 206, 54, 49, 141, 91, 171,
+								]),
+							),
+						},
+					},
+					{
+						libp2p: {
+							privateKey: privateKeyFromRaw(
+								new Uint8Array([
+									113, 203, 231, 235, 7, 120, 3, 194, 138, 113, 131, 40, 251,
+									158, 121, 38, 190, 114, 116, 252, 100, 202, 107, 97, 119, 184,
+									24, 56, 27, 76, 150, 62, 132, 22, 246, 177, 200, 6, 179, 117,
+									218, 216, 120, 235, 147, 249, 48, 157, 232, 161, 145, 3, 63,
+									158, 217, 111, 65, 105, 99, 83, 4, 113, 62, 15,
+								]),
+							),
+						},
+					},
+
+					{
+						libp2p: {
+							privateKey: privateKeyFromRaw(
+								new Uint8Array([
+									215, 31, 167, 188, 121, 226, 67, 218, 96, 8, 55, 233, 34, 68,
+									9, 147, 11, 157, 187, 43, 39, 43, 25, 95, 184, 227, 137, 56,
+									4, 69, 120, 214, 182, 163, 41, 82, 248, 210, 213, 22, 179,
+									112, 251, 219, 52, 114, 102, 110, 6, 60, 216, 135, 218, 60,
+									196, 128, 251, 85, 167, 121, 179, 136, 114, 83,
+								]),
+							),
+						},
+					},
+					{
+						libp2p: {
+							privateKey: privateKeyFromRaw(
+								new Uint8Array([
+									176, 30, 32, 212, 227, 61, 222, 213, 141, 55, 56, 33, 95, 29,
+									21, 143, 15, 130, 94, 221, 124, 176, 12, 225, 198, 214, 83,
+									46, 114, 69, 187, 104, 51, 28, 15, 14, 240, 27, 110, 250, 130,
+									74, 127, 194, 243, 32, 169, 162, 109, 127, 172, 232, 208, 152,
+									149, 108, 74, 52, 229, 109, 23, 50, 249, 249,
+								]),
+							),
+						},
+					},
+				]);
+			});
-	afterEach(async () => {
-		try {
-			await Promise.allSettled([
-				db1?.drop(),
-				db2?.drop(),
-				db3?.drop(),
-				db4?.drop(),
-			]);
-		} catch (error) {}
-		db1 = undefined as any;
-		db2 = undefined as any;
-		db3 = undefined as any;
-		db4 = undefined as any;
-	});
+			afterEach(async () => {
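+				// drop all stores between tests so every test starts from a clean slate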
+				// check that each domain actually is what is expected
+				for (const db of [db1, db2, db3, db4]) {
+					db && checkIfSetupIsUsed(setup, db.log);
+				}
+
+				try {
+					await Promise.allSettled([
+						db1?.drop(),
+						db2?.drop(),
+						db3?.drop(),
+						db4?.drop(),
+					]);
+				} catch (error) {}
+				db1 = undefined as any;
+				db2 = undefined as any;
+				db3 = undefined as any;
+				db4 = undefined as any;
+			});
-	after(async () => {
-		await session.stop();
-	});
+			after(async () => {
+				await session.stop();
+			});
-	const sampleSize = 200; // must be < 255
+			const sampleSize = 200; // must be < 255
-	it("will not have any prunable after balance", async () => {
-		const store = new EventStore();
+			it("will not have any prunable after balance", async () => {
+				const store = new EventStore();
-		db1 = await session.peers[0].open(store, {
-			args: {
-				replicas: {
-					min: 1,
-				},
-				/* timeUntilRoleMaturity: 0 */
-			},
-		});
-		const entryCount = 200;
-
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-			{
-				args: {
-					replicas: {
-						min: 1,
+				db1 = await session.peers[0].open(store, {
+					args: {
+						replicas: {
+							min: 1,
+						},
+						setup,
 					},
-				/* timeUntilRoleMaturity: 0 */
-				},
-			},
-		);
-
-		// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			// db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } });
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
-			);
-		}
-
-		await Promise.all(promises);
-
-		await waitForConverged(() => db1.log.log.length);
-		await waitForConverged(() => db2.log.log.length);
-
-		await waitForResolved(async () => {
-			const prunable1 = await db1.log.getPrunable();
-			const prunable2 = await db2.log.getPrunable();
-			expect(prunable1).length(0);
-			expect(prunable2).length(0);
-		});
+				});
+				const entryCount = 200;
+
+				// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
+				const promises: Promise[] = [];
+				for (let i = 0; i < entryCount; i++) {
+					// db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } });
+					promises.push(
+						db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
+					);
+				}
+				await Promise.all(promises);
-		expect(db1.log.log.length).to.be.greaterThan(30);
-		expect(db2.log.log.length).to.be.greaterThan(30);
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicas: {
+								min: 1,
+							},
+							setup,
+						},
+					},
+				);
-		expect(db1.log.log.length + db2.log.log.length).to.be.greaterThanOrEqual(
-			entryCount,
-		);
-	});
+				await waitForConverged(() => db1.log.log.length);
+				await waitForConverged(() => db2.log.log.length);
-	it("2 peers", async () => {
-		const store = new EventStore();
+				await waitForResolved(async () => {
+					const prunable1 = await db1.log.getPrunable();
+					const prunable2 = await db2.log.getPrunable();
+					expect(prunable1).length(0);
+					expect(prunable2).length(0);
+				});
-		db1 = await session.peers[0].open(store, {
-			args: {
-				replicas: {
-					min: 1,
-				},
-				/* timeUntilRoleMaturity: 0 */
-			},
-		});
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-			{
-				args: {
-					replicas: {
-						min: 1,
-					},
-					/* timeUntilRoleMaturity: 0 */
-				},
-			},
-		);
-
-		const entryCount = 200;
-
-		// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			// db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } });
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
-			);
-		}
-
-		await Promise.all(promises);
-		await checkBounded(entryCount, 0.35, 0.65, db1, db2);
-	});
+				expect(db1.log.log.length).to.be.greaterThan(30);
+				expect(db2.log.log.length).to.be.greaterThan(30);
+
+				expect(
+					db1.log.log.length + db2.log.log.length,
+				).to.be.greaterThanOrEqual(entryCount);
+			});
-	it("2 peers write while joining", async () => {
-		const store = new EventStore();
+			it("2 peers", async () => {
+				const store = new EventStore();
+
+				db1 = await session.peers[0].open(store, {
+					args: {
+						replicas: {
+							min: 1,
+						},
+						replicate: {
+							offset: 0,
+						},
+						setup,
+					},
+				});
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicas: {
+								min: 1,
+							},
+							replicate: {
+								offset: 0.5,
+							},
+							setup,
+						},
+					},
+				);
-		db1 = await session.peers[0].open(store, {
-			args: {
-				replicas: {
-					min: 1,
-				},
-			},
-		});
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-			{
-				args: {
-					replicas: {
-						min: 1,
+				const entryCount = 200;
+
+				// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
+				const promises: Promise[] = [];
+				for (let i = 0; i < entryCount; i++) {
+					// db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } });
+					promises.push(
+						db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
+					);
+				}
+
+				await Promise.all(promises);
+				await checkBounded(entryCount, 0.35, 0.65, db1, db2);
+			});
-					},
-				},
-			},
-		);
-
-		const entryCount = 200;
-
-		// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			// db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { next: [] } });
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
-			);
-		}
-
-		await waitForResolved(async () =>
-			expect((await db1.log.calculateTotalParticipation()) - 1).lessThan(0.05),
-		);
-		await waitForResolved(async () =>
-			expect((await db2.log.calculateTotalParticipation()) - 1).lessThan(0.05),
-		);
-		await checkBounded(entryCount, 0.3, 0.7, db1, db2);
-	});
+			it("2 peers write while joining", async () => {
+				const store = new EventStore();
+
+				db1 = await session.peers[0].open(store, {
+					args: {
+						replicas: {
+							min: 1,
+						},
+						replicate: {
+							offset: 0,
+						},
+						setup,
+					},
+				});
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicas: {
+								min: 1,
+							},
+							replicate: {
+								offset: 0.5,
+							},
+							setup,
+						},
+					},
+				);
-	it("3 peers", async () => {
-		const store = new EventStore();
-
-		db1 = await session.peers[0].open(store);
-
-		const entryCount = sampleSize;
-
-		// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
-			);
-		}
-
-		await Promise.all(promises);
-
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-		);
-		db3 = await EventStore.open>(
-			db1.address!,
-			session.peers[2],
-		);
-
-		await waitForResolved(async () =>
-			expect((await db1.log.calculateTotalParticipation()) - 1).lessThan(0.05),
-		);
-		await waitForResolved(async () =>
-			expect((await db2.log.calculateTotalParticipation()) - 1).lessThan(0.05),
-		);
-		await waitForResolved(async () =>
-			expect((await db3.log.calculateTotalParticipation()) - 1).lessThan(0.05),
-		);
-
-		await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
-	});
+				const entryCount = 200;
+
+				// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
+				const promises: Promise[] = [];
+				for (let i = 0; i < entryCount; i++) {
+					// db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { next: [] } });
+					promises.push(
+						db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
+					);
+				}
+
+				await waitForResolved(async () =>
+					expect((await db1.log.calculateTotalParticipation()) - 1).lessThan(
+						0.05,
+					),
+				);
+				await waitForResolved(async () =>
+					expect((await db2.log.calculateTotalParticipation()) - 1).lessThan(
+						0.05,
+					),
+				);
+				await checkBounded(entryCount, 0.3, 0.7, db1, db2);
+			});
-	it("3 peers prune all", async () => {
-		const store = new EventStore();
+			it("3 peers", async () => {
+				const store = new EventStore();
+
+				db1 = await session.peers[0].open(store, {
+					args: {
+						replicate: {
+							offset: 0,
+						},
+						setup,
+					},
+				});
-		db1 = await session.peers[0].open(store, {
-			args: {
-				replicate: false,
-				replicas: {
-					min: 1,
-				},
-			},
-		});
+				const entryCount = sampleSize;
-		const promises: Promise[] = [];
-		for (let i = 0; i < 500; i++) {
-			// db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { next: [] } });
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
-			);
-		}
-
-		await Promise.all(promises);
-
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-			{
-				args: {
-					replicas: {
-						min: 1,
+				// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
+				const promises: Promise[] = [];
+				for (let i = 0; i < entryCount; i++) {
+					promises.push(
+						db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
+					);
+				}
+
+				await Promise.all(promises);
+
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicate: {
+								offset: 0.3333,
+							},
+							setup,
+						},
+					},
+				);
+				db3 = await EventStore.open>(
+					db1.address!,
+					session.peers[2],
+					{
+						args: {
+							replicate: {
+								offset: 0.6666,
+							},
+							setup,
+						},
+					},
+				);
+
+				await waitForResolved(async () =>
+					expect((await db1.log.calculateTotalParticipation()) - 1).lessThan(
+						0.05,
+					),
+				);
+				await waitForResolved(async () =>
+					expect((await db2.log.calculateTotalParticipation()) - 1).lessThan(
+						0.05,
+					),
+				);
+				await waitForResolved(async () =>
+					expect((await db3.log.calculateTotalParticipation()) - 1).lessThan(
+						0.05,
+					),
+				);
+
+				await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
+			});
+
+			it("3 peers prune all", async () => {
+				const store = new EventStore();
+
+				db1 = await session.peers[0].open(store, {
+					args: {
+						replicate: false,
+						replicas: {
+							min: 1,
+						},
+						setup,
 					},
-				},
-			},
-		);
-		await delay(3e3);
-
-		db3 = await EventStore.open>(
-			db1.address!,
-			session.peers[2],
-			{
-				args: {
-					replicas: {
-						min: 1,
+				});
+
+				const promises: Promise[] = [];
+				for (let i = 0; i < 500; i++) {
+					// db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { next: [] } });
+					promises.push(
+						db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
+					);
+				}
+
+				await Promise.all(promises);
+
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicas: {
+								min: 1,
+							},
+							replicate: {
+								offset: 0,
+							},
+							setup,
+						},
+					},
+				);
+
+				db3 = await EventStore.open>(
+					db1.address!,
+					session.peers[2],
+					{
+						args: {
+							replicas: {
+								min: 1,
+							},
+							replicate: {
+								offset: 0.5,
+							},
+							setup,
+						},
 					},
-				},
-			},
-		);
+				);
-		// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
+				// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
-		await waitForResolved(() => expect(db1.log.log.length).equal(0));
-	});
+				try {
+					await waitForResolved(() => expect(db1.log.log.length).equal(0));
+				} catch (error) {
+					await dbgLogs([db1.log, db2.log, db3.log]);
+					throw error;
+				}
+			});
-	it("write while joining peers", async () => {
-		const store = new EventStore();
+			it("write while joining peers", async () => {
+				const store = new EventStore();
+
+				db1 = await session.peers[0].open(store, {
+					args: {
+						replicate: {
+							offset: 0,
+						},
+						setup,
+					},
+				});
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicate: {
+								offset: 0.3333,
+							},
+							setup,
+						},
+					},
+				);
-		db1 = await session.peers[0].open(store);
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-		);
+				const entryCount = 200;
-		const entryCount = 200;
-
-		// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			// db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { next: [] } });
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
-			);
-		}
-
-		db3 = await EventStore.open>(
-			db1.address!,
-			session.peers[2],
-		);
-
-		await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
-	});
+				// expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator
+				const promises: Promise[] = [];
+				for (let i = 0; i < entryCount; i++) {
+					// db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { next: [] } });
+					promises.push(
+						db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }),
+					);
+				}
+
+				db3 = await EventStore.open>(
+					db1.address!,
+					session.peers[2],
+					{
+						args: {
+							replicate: {
+								offset: 0.6666,
+							},
+							setup,
+						},
+					},
+				);
+
+				await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
+			});
-	// TODO add tests for late joining and leaving peers
-	it("distributes to joining peers", async () => {
-		db1 = await session.peers[0].open(new EventStore());
-
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-		);
-
-		await waitForResolved(async () =>
-			expect(await db2.log.replicationIndex?.getSize()).equal(2),
-		);
-
-		const entryCount = sampleSize;
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), {
-					meta: { next: [] },
-				}),
-			);
-		}
-		await waitFor(() => db1.log.log.length === entryCount);
-		await waitFor(() => db2.log.log.length === entryCount);
-
-		db3 = await EventStore.open>(
-			db1.address!,
-			session.peers[2],
-		);
-
-		await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
-	});
+			// TODO add tests for late joining and leaving peers
+			it("distributes to joining peers", async () => {
+				db1 = await session.peers[0].open(new EventStore(), {
+					args: {
+						replicate: {
+							offset: 0,
+						},
+						setup,
+					},
+				});
+
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicate: {
+								offset: 0.3333,
+							},
+							setup,
+						},
+					},
+				);
+
+				await waitForResolved(async () =>
+					expect(await db2.log.replicationIndex?.getSize()).equal(2),
+				);
+
+				const entryCount = sampleSize;
+				const promises: Promise[] = [];
+				for (let i = 0; i < entryCount; i++) {
+					promises.push(
+						db1.add(toBase64(new Uint8Array([i])), {
+							meta: { next: [] },
+						}),
+					);
+				}
+				await waitFor(() => db1.log.log.length === entryCount);
+				await waitFor(() => db2.log.log.length === entryCount);
+
+				db3 = await EventStore.open>(
+					db1.address!,
+					session.peers[2],
+					{
+						args: {
+							replicate: {
+								offset: 0.6666,
+							},
+							setup,
+						},
+					},
+				);
+
+				await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
+			});
-	it("distributes to leaving peers", async () => {
-		db1 = await session.peers[0].open(new EventStore());
-
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-		);
-		db3 = await EventStore.open>(
-			db1.address!,
-			session.peers[2],
-		);
-
-		const entryCount = sampleSize * 6;
-
-		await waitForResolved(async () =>
-			expect(await db1.log.replicationIndex?.getSize()).equal(3),
-		);
-		await waitForResolved(async () =>
-			expect(await db2.log.replicationIndex?.getSize()).equal(3),
-		);
-		await waitForResolved(async () =>
-			expect(await db3.log.replicationIndex?.getSize()).equal(3),
-		);
-
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			promises.push(
-				db1.add(toBase64(new Uint8Array([i])), {
-					meta: { next: [] },
-				}),
-			);
-		}
-
-		await Promise.all(promises);
-
-		await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
-
-		const distribute = sinon.spy(db1.log.onReplicationChange);
-		db1.log.onReplicationChange = distribute;
-
-		await db3.close();
-		await checkBounded(entryCount, 1, 1, db1, db2);
-	});
+			it("distributes to leaving peers", async () => {
+				db1 = await session.peers[0].open(new EventStore(), {
+					args: {
+						replicate: {
+							offset: 0,
+						},
+						setup,
+					},
+				});
+
+				db2 = await EventStore.open>(
+					db1.address!,
+					session.peers[1],
+					{
+						args: {
+							replicate: {
+								offset: 0.3333,
+							},
+							setup,
+						},
+					},
+				);
+				db3 = await EventStore.open>(
+					db1.address!,
+					session.peers[2],
+					{
+						args: {
+							replicate: {
+								offset: 0.6666,
+							},
+							setup,
+						},
+					},
+				);
-	it("handles peer joining and leaving multiple times", async () => {
-		db1 = await session.peers[0].open(new EventStore());
-
-		db2 = await EventStore.open>(
-			db1.address!,
-			session.peers[1],
-		);
-		db3 = await EventStore.open>(
-			db1.address!,
-			session.peers[2],
-		);
-
-		const entryCount = sampleSize * 5;
-
-		const promises: Promise[] = [];
-		for (let i = 0; i < entryCount; i++) {
-			promises.push(
-				db1.add(toBase64(new Uint8Array(i)), {
-					meta: { next: [] },
-				}),
-			);
-		}
-
-		await Promise.all(promises);
-		await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
-
-		await db3.close();
-		await session.peers[2].open(db3);
-		await db3.close();
-		// adding some delay seems to make CI tests also fail here
-		// Specifically is .pendingDeletes is used to resuse safelyDelete requests,
-		// which would make this test break since reopen, would/should invalidate pending deletes
-		// TODO make this more well defined
-
-		await delay(100);
-
-		await session.peers[2].open(db3);
-		await db3.close();
-		await session.peers[2].open(db3);
-
-		await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3);
-
-		await waitForResolved(async () =>
-			expect((await db1.log.calculateTotalParticipation()) - 1).lessThan(0.1),
-		);
-		await waitForResolved(async () =>
-			expect((await db2.log.calculateTotalParticipation()) - 1).lessThan(0.1),
db2.log.calculateTotalParticipation()) - 1).lessThan(0.1), - ); - await waitForResolved(async () => - expect((await db3.log.calculateTotalParticipation()) - 1).lessThan(0.1), - ); - - await db3.close(); - - await checkBounded(entryCount, 1, 1, db1, db2); - - await waitForResolved(async () => - expect((await db1.log.calculateTotalParticipation()) - 1).lessThan(0.1), - ); - await waitForResolved(async () => - expect((await db2.log.calculateTotalParticipation()) - 1).lessThan(0.1), - ); - }); + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + }); - it("drops when no longer replicating as observer", async () => { - let COUNT = 10; - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - factor: 1, - }, - }, - }); + it("3 peers prune all", async () => { + const store = new EventStore(); - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, + db1 = await session.peers[0].open(store, { + args: { + replicate: false, + replicas: { + min: 1, + }, + setup, }, - }, - }, - ); - - for (let i = 0; i < COUNT; i++) { - await db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }); - } - - await waitForResolved(() => expect(db2.log.log.length).equal(COUNT)); - - db3 = await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { - replicate: { - factor: 1, + }); + + const promises: Promise[] = []; + for (let i = 0; i < 500; i++) { + // db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { next: [] } }); + promises.push( + db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }), + ); + } + + await Promise.all(promises); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicas: { + min: 1, + }, + replicate: { + offset: 0, + }, + setup, + }, + }, + ); + + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicas: { + min: 1, + }, + replicate: { + offset: 0.5, + }, + setup, + }, }, - }, - }, - ); + ); - await db2.log.replicate(false); + // expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator - await waitForResolved(() => expect(db3.log.log.length).equal(COUNT)); - await waitForResolved(() => expect(db2.log.log.length).equal(0)); - }); + try { + await waitForResolved(() => expect(db1.log.log.length).equal(0)); + } catch (error) { + await dbgLogs([db1.log, db2.log, db3.log]); + throw error; + } + }); - it("drops when no longer replicating with factor 0", async () => { - let COUNT = 100; + it("write while joining peers", async () => { + const store = new EventStore(); - const evtStore = new EventStore(); - const db1p = await session.peers[0].open(evtStore, { - args: { - replicate: { - factor: 1, - }, - }, - }); + db1 = await session.peers[0].open(store, { + args: { + replicate: { + offset: 0, + }, + setup, + }, + }); + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + offset: 0.3333, + }, + setup, + }, + }, + ); - const db2p = session.peers[1].open(evtStore.clone(), { - args: { - replicate: { - factor: 1, - }, - }, - }); + const entryCount = 200; - db1 = await db1p; - db2 = await db2p; + // expect min replicas 2 with 3 peers, this means that 66% of entries (ca) will be at peer 2 and 3, and peer1 will have all of them since 1 is the creator + const promises: Promise[] = []; + for (let i = 0; i < entryCount; i++) { + // db1.add(toBase64(toBase64(new Uint8Array([i]))), { meta: { 
next: [] } }); + promises.push( + db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }), + ); + } - for (let i = 0; i < COUNT; i++) { - await db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }); - } + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + offset: 0.6666, + }, + setup, + }, + }, + ); - await waitForResolved(() => expect(db2.log.log.length).equal(COUNT)); + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + }); - db3 = await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { - replicate: { - factor: 1, + // TODO add tests for late joining and leaving peers + it("distributes to joining peers", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + offset: 0, + }, + setup, }, - }, - }, - ); - await db2.log.replicate({ factor: 0 }); - await waitForResolved(() => expect(db3.log.log.length).equal(COUNT)); - await waitForResolved(() => expect(db2.log.log.length).equal(0)); // min replicas is set to 2 so, if there are 2 dbs still replicating, this nod should not store any data - }); + }); - describe("distribution", () => { - describe("objectives", () => { - describe("cpu", () => { - it("no cpu usage allowed", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { args: { - replicate: true, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + replicate: { + offset: 0.3333, }, + setup, }, - }); + }, + ); - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - cpu: { - max: 0, - monitor: { - value: () => 0.5, // fixed 50% usage - }, - }, // 100kb - }, - }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, + await waitForResolved(async () => + expect(await db2.log.replicationIndex?.getSize()).equal(2), + ); + + const entryCount = sampleSize; + const promises: Promise[] = []; + for (let i = 0; i < entryCount; i++) { + promises.push( + db1.add(toBase64(new Uint8Array([i])), { + meta: { next: [] }, + }), + ); + } + await waitFor(() => db1.log.log.length === entryCount); + await waitFor(() => db2.log.log.length === entryCount); + + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + offset: 0.6666, }, + setup, }, - ); + }, + ); - await delay(3e3); + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + }); - await waitForResolved(async () => - expect(await db2.log.getMyTotalParticipation()).equal(0), - ); // because the CPU error from fixed usage (0.5) is always greater than max (0) + it("distributes to leaving peers", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + offset: 0, + }, + setup, + }, }); - it("below limit", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { args: { - replicate: true, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + replicate: { + offset: 0.3333, }, + setup, }, - }); + }, + ); + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + offset: 0.6666, + }, + setup, + }, + }, + ); - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - cpu: { - max: 0.4, - monitor: { - value: () => 0.3, // fixed 50% usage - }, - }, // 100kb - }, 
- }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, + const entryCount = sampleSize * 6; + + await waitForResolved(async () => + expect(await db1.log.replicationIndex?.getSize()).equal(3), + ); + await waitForResolved(async () => + expect(await db2.log.replicationIndex?.getSize()).equal(3), + ); + await waitForResolved(async () => + expect(await db3.log.replicationIndex?.getSize()).equal(3), + ); + + const promises: Promise[] = []; + for (let i = 0; i < entryCount; i++) { + promises.push( + db1.add(toBase64(new Uint8Array([i])), { + meta: { next: [] }, + }), + ); + } + + await Promise.all(promises); + + try { + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + } catch (error) { + console.log( + ( + await Promise.all( + [db1, db2, db3].map((db) => db.log.getMyReplicationSegments()), + ) + ).map((x) => x[0].widthNormalized), + ); + console.log( + ( + await Promise.all( + [db1, db2, db3].map((db) => db.log.getPrunable()), + ) + ).map((x) => x.length), + ); + console.log([db1, db2, db3].map((x) => x.log.log.length)); + console.log( + await Promise.all( + [db1, db2, db3].map( + async (x) => + (await x.log.log.entryIndex.iterate([]).all()).length, + ), + ), + ); + + throw error; + } + + const distribute = sinon.spy(db1.log.onReplicationChange); + db1.log.onReplicationChange = distribute; + + await db3.close(); + try { + await checkBounded(entryCount, 1, 1, db1, db2); + } catch (error) { + console.log( + ( + await Promise.all([db1, db2].map((db) => db.log.getPrunable())) + ).map((x) => x.length), + ); + console.log( + ( + await Promise.all( + [db1, db2].map((db) => db.log.getMyReplicationSegments()), + ) + ).map((x) => x[0].widthNormalized), + ); + throw error; + } + }); + + it("handles peer joining and leaving multiple times", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + offset: 0, + factor: 0.33333, + }, + setup, + }, + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + offset: 0.3333, + }, + setup, + }, + }, + ); + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + offset: 0.6666, }, + setup, }, + }, + ); + + const entryCount = sampleSize * 5; + + const promises: Promise[] = []; + for (let i = 0; i < entryCount; i++) { + promises.push( + db1.add(toBase64(new Uint8Array(i)), { + meta: { next: [] }, + }), ); + } - await waitForConverged(async () => { - const diff = await db1.log.getMyTotalParticipation(); - return Math.round(diff * 100); - }); - await waitForConverged(async () => { - const diff = await db2.log.getMyTotalParticipation(); - return Math.round(diff * 100); - }); + await Promise.all(promises); + + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); - expect(await db1.log.getMyTotalParticipation()).to.be.within( - 0.45, - 0.55, - ); // because the CPU error from fixed usage (0.5) is always greater than max (0) - expect(await db2.log.getMyTotalParticipation()).to.be.within( - 0.45, - 0.55, - ); // because the CPU error from fixed usage (0.5) is always greater than max (0) + await db3.close(); + await session.peers[2].open(db3, { + args: { + replicate: { + offset: 0.66666, + }, + }, }); - }); - describe("memory", () => { - it("inserting half limited", async () => { - db1 = await session.peers[0].open(new EventStore(), { + await db3.close(); + // adding some delay seems to make CI tests also fail here + // Specifically is .pendingDeletes is used to resuse safelyDelete requests, + // which 
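The leave path asserts exact bounds, checkBounded(entryCount, 1, 1, db1, db2): once db3 closes there are only two replicators left, and with min replicas 2 every entry needs a copy on both of them, so both logs must contain everything. The expectation in miniature:

const minReplicas = 2;
const remainingReplicators = 2;
// each entry needs min(minReplicas, replicators) distinct holders
const copiesPerEntry = Math.min(minReplicas, remainingReplicators);
// every remaining peer must hold every entry, hence lower === upper === 1
console.assert(copiesPerEntry === remainingReplicators);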
would make this test break since reopen, would/should invalidate pending deletes + // TODO make this more well defined + + await delay(300); + + await session.peers[2].open(db3, { + args: { + replicate: { + offset: 0.66666, + }, + setup, + }, + }); + db3.close(); + /* await session.peers[2].open(db3, { args: { - replicate: true, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + replicate: { + offset: 0.66666, }, + setup, }, }); + + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + + await waitForResolved(async () => + expect((await db1.log.calculateTotalParticipation()) - 1).lessThan( + 0.1, + ), + ); + await waitForResolved(async () => + expect((await db2.log.calculateTotalParticipation()) - 1).lessThan( + 0.1, + ), + ); + await waitForResolved(async () => + expect((await db3.log.calculateTotalParticipation()) - 1).lessThan( + 0.1, + ), + ); + + await db3.close(); */ + /* db1.log.xreset(); + db2.log.xreset(); */ - const memoryLimit = 100 * 1e3; - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb - }, - }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, + await checkBounded(entryCount, 1, 1, db1, db2); + + await waitForResolved(async () => + expect((await db1.log.calculateTotalParticipation()) - 1).lessThan( + 0.1, + ), + ); + await waitForResolved(async () => + expect((await db2.log.calculateTotalParticipation()) - 1).lessThan( + 0.1, + ), + ); + }); + + it("drops when no longer replicating as observer", async () => { + let COUNT = 10; + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, }, + setup, }, - ); + }, + ); - const data = toBase64(randomBytes(5.5e2)); // about 1kb + for (let i = 0; i < COUNT; i++) { + await db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }); + } - for (let i = 0; i < 1000; i++) { - // insert 1mb - await db1.add(data, { meta: { next: [] } }); - } + await waitForResolved(() => expect(db2.log.log.length).equal(COUNT)); - await delay(db1.log.timeUntilRoleMaturity + 1000); + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ); - await waitForConverged(async () => { - const diff = Math.abs( - (await db2.log.getMyTotalParticipation()) - - (await db1.log.getMyTotalParticipation()), - ); - return Math.round(diff * 50); - }); + await db2.log.replicate(false); - await waitForResolved( - async () => { - const memoryUsage = await db2.log.getMemoryUsage(); - expect(Math.abs(memoryLimit - memoryUsage)).lessThan( - (memoryLimit / 100) * 5, - ); + await waitForResolved(() => expect(db3.log.log.length).equal(COUNT)); + await waitForResolved(() => expect(db2.log.log.length).equal(0)); + }); + + it("drops when no longer replicating with factor 0", async () => { + let COUNT = 100; + + const evtStore = new EventStore(); + const db1p = await session.peers[0].open(evtStore, { + args: { + replicate: { + factor: 1, }, - { timeout: 30 * 1000 }, - ); + setup, + }, + }); + + const db2p = session.peers[1].open(evtStore.clone(), { + args: { + replicate: { + factor: 1, + }, + setup, + }, }); - it("joining half limited", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicas: { - min: new AbsoluteReplicas(1), - max: new 
AbsoluteReplicas(1), - }, - }, - }); + db1 = await db1p; + db2 = await db2p; + + for (let i = 0; i < COUNT; i++) { + await db1.add(toBase64(new Uint8Array([i])), { meta: { next: [] } }); + } + + await waitForResolved(() => expect(db2.log.log.length).equal(COUNT)); + + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + factor: 1, + }, + setup, + }, + }, + ); + await db2.log.replicate({ factor: 0 }); + await waitForResolved(() => expect(db3.log.log.length).equal(COUNT)); + await waitForResolved(() => expect(db2.log.log.length).equal(0)); // min replicas is set to 2 so, if there are 2 dbs still replicating, this nod should not store any data + }); + + describe("distribution", () => { + describe("objectives", () => { + describe("cpu", () => { + it("no cpu usage allowed", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: true, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + cpu: { + max: 0, + monitor: { + value: () => 0.5, // fixed 50% usage + }, + }, // 100kb + }, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }, + ); + + await delay(3e3); + + await waitForResolved(async () => + expect(await db2.log.calculateMyTotalParticipation()).equal(0), + ); // because the CPU error from fixed usage (0.5) is always greater than max (0) + }); + + it("below limit", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + offset: 0, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + cpu: { + max: 0.4, + monitor: { + value: () => 0.3, // fixed 50% usage + }, + }, // 100kb + }, + offset: 0.5, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }, + ); + + await waitForConverged(async () => { + const diff = await db1.log.calculateMyTotalParticipation(); + return Math.round(diff * 100); + }); + await waitForConverged(async () => { + const diff = await db2.log.calculateMyTotalParticipation(); + return Math.round(diff * 100); + }); + + expect( + await db1.log.calculateMyTotalParticipation(), + ).to.be.within(0.45, 0.55); // because the CPU error from fixed usage (0.5) is always greater than max (0) + expect( + await db2.log.calculateMyTotalParticipation(), + ).to.be.within(0.45, 0.55); // because the CPU error from fixed usage (0.5) is always greater than max (0) + }); + }); + describe("memory", () => { + it("inserting half limited", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + offset: 0, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }); + + const memoryLimit = 100 * 1e3; + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0.5, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }, + ); + + const data = toBase64(randomBytes(5.5e2)); // about 1kb + + for (let i = 0; i < 1000; i++) { + // insert 1mb + await db1.add(data, { meta: { next: [] } }); + } + + await 
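The cpu.monitor.value hook in these tests injects a fixed CPU reading instead of sampling the host, and any reading above max acts as pressure to shed participation; with max: 0 against a constant 0.5 reading the node is driven to zero. A rough sketch of that feedback loop (a simplification for illustration, not the actual controller in pid.ts):

const cpu = { max: 0, monitor: { value: () => 0.5 } };
let participation = 0.5; // starting share of the keyspace
for (let step = 0; step < 100; step++) {
	const error = cpu.monitor.value() - cpu.max; // 0.5: permanently over budget
	if (error > 0) participation = Math.max(0, participation - error * 0.1);
}
// matches: calculateMyTotalParticipation() === 0 in "no cpu usage allowed"
console.assert(participation === 0);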
delay(db1.log.timeUntilRoleMaturity + 1000); + + await waitForConverged(async () => { + const diff = Math.abs( + (await db2.log.calculateMyTotalParticipation()) - + (await db1.log.calculateMyTotalParticipation()), + ); + return Math.round(diff * 50); + }); + + await waitForResolved( + async () => { + const memoryUsage = await db2.log.getMemoryUsage(); + expect(Math.abs(memoryLimit - memoryUsage)).lessThan( + (memoryLimit / 100) * 5, + ); + }, + { timeout: 30 * 1000 }, + ); + }); - const memoryLimit = 100 * 1e3; - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb + it("joining half limited", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + offset: 0, }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + }); + + const memoryLimit = 100 * 1e3; + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0.5, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, }, - }, - }, - ); - - const data = toBase64(randomBytes(5.5e2)); // about 1kb - - for (let i = 0; i < 1000; i++) { - // insert 1mb - await db2.add(data, { meta: { next: [] } }); - } - try { - await waitForConverged(async () => { - const diff = Math.abs( - (await db2.log.getMyTotalParticipation()) - - (await db1.log.getMyTotalParticipation()), ); - return Math.round(diff * 100); + const data = toBase64(randomBytes(5.5e2)); // about 1kb + + for (let i = 0; i < 1000; i++) { + // insert 1mb + await db2.add(data, { meta: { next: [] } }); + } + try { + await waitForConverged(async () => { + const diff = Math.abs( + (await db2.log.calculateMyTotalParticipation()) - + (await db1.log.calculateMyTotalParticipation()), + ); + + return Math.round(diff * 100); + }); + + await waitForResolved( + async () => + expect( + Math.abs(memoryLimit - (await db2.log.getMemoryUsage())), + ).lessThan((memoryLimit / 100) * 10), // 10% error at most + { timeout: 20 * 1000, delayInterval: 1000 }, + ); // 10% error at most + } catch (error) { + const weight1 = await db2.log.getMemoryUsage(); + + const weight2 = await db2.log.getMemoryUsage(); + console.log("weight", weight1, weight2); + throw error; + } }); - await waitForResolved( - async () => - expect( - Math.abs(memoryLimit - (await db2.log.getMemoryUsage())), - ).lessThan((memoryLimit / 100) * 10), // 10% error at most - { timeout: 20 * 1000, delayInterval: 1000 }, - ); // 10% error at most - } catch (error) { - const weight1 = await db2.log.getMemoryUsage(); - - const weight2 = await db2.log.getMemoryUsage(); - console.log("weight", weight1, weight2); - throw error; - } - }); + it("underflow limited", async () => { + const memoryLimit = 100 * 1e3; - it("underflow limited", async () => { - const memoryLimit = 100 * 1e3; + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0.5, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + 
}, + setup, + }, + }, + ); - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb + const data = toBase64(randomBytes(5.5e2)); // about 1kb + let entryCount = 150; + for (let i = 0; i < entryCount; i++) { + await db2.add(data, { meta: { next: [] } }); + } + + await waitForResolved( + async () => { + expect( + await db1.log.calculateMyTotalParticipation(), + ).to.be.within(0.43, 0.57); + expect( + await db2.log.calculateMyTotalParticipation(), + ).to.be.within(0.43, 0.57); }, - }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, - }); + { timeout: 20 * 1000 }, + ); - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb + // allow 10% error + await waitForResolved(async () => { + expect(await db1.log.getMemoryUsage()).lessThan( + memoryLimit * 1.1, + ); + expect(await db2.log.getMemoryUsage()).lessThan( + memoryLimit * 1.1, + ); + }); + }); + + it("overflow limited", async () => { + const memoryLimit = 100 * 1e3; + + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0, }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0.5, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, }, - }, - }, - ); - - const data = toBase64(randomBytes(5.5e2)); // about 1kb - let entryCount = 150; - for (let i = 0; i < entryCount; i++) { - await db2.add(data, { meta: { next: [] } }); - } - - await waitForResolved( - async () => { - expect(await db1.log.getMyTotalParticipation()).to.be.within( - 0.43, - 0.57, ); - expect(await db2.log.getMyTotalParticipation()).to.be.within( - 0.43, - 0.57, - ); - }, - { timeout: 20 * 1000 }, - ); - // allow 10% error - await waitForResolved(async () => { - expect(await db1.log.getMemoryUsage()).lessThan(memoryLimit * 1.1); - expect(await db2.log.getMemoryUsage()).lessThan(memoryLimit * 1.1); - }); - }); + const data = toBase64(randomBytes(5.5e2)); // about 1kb + + for (let i = 0; i < 1000; i++) { + // insert 1mb + await db2.add(data, { meta: { next: [] } }); + } + + try { + await waitForConverged( + async () => + Math.round( + (await db1.log.calculateMyTotalParticipation()) * 500, + ), + { + tests: 3, + delta: 1, + timeout: 30 * 1000, + interval: 1000, + }, + ); + await waitForConverged( + async () => + Math.round( + (await db2.log.calculateMyTotalParticipation()) * 500, + ), + { + tests: 3, + delta: 1, + timeout: 30 * 1000, + interval: 1000, + }, + ); + } catch (error) { + throw new Error("Total participation failed to converge"); + } - it("overflow limited", async () => { - const memoryLimit = 100 * 1e3; + expect( + await db1.log.calculateMyTotalParticipation(), + ).to.be.within(0.03, 0.1); + expect( + await db1.log.calculateMyTotalParticipation(), + ).to.be.within(0.03, 0.1); + }); - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb - }, - }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, - }); + it("evenly if limited when not constrained", async () 
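The storage-limit numbers are easiest to sanity-check with the entry size used throughout: randomBytes(5.5e2) is about 1 kB once base64-encoded. Against a 100 kB budget and 1000 inserted entries, a limited node can hold roughly 100 entries, i.e. at most about 10% participation, which is the 0.03–0.1 window asserted in "overflow limited":

const memoryLimit = 100 * 1e3; // bytes
const entrySize = 1e3; // ~1 kB per entry after base64
const inserted = 1000;
const maxEntriesHeld = memoryLimit / entrySize; // 100
const maxParticipation = maxEntriesHeld / inserted; // 0.1
console.assert(maxParticipation > 0.03 && maxParticipation <= 0.1);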
=> { + const memoryLimit = 100 * 1e3; - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), }, + setup, }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + storage: memoryLimit * 3, // 300kb + }, + offset: 0.5, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, }, - }, - }, - ); - - const data = toBase64(randomBytes(5.5e2)); // about 1kb + ); - for (let i = 0; i < 1000; i++) { - // insert 1mb - await db2.add(data, { meta: { next: [] } }); - } + const data = toBase64(randomBytes(5.5e2)); // about 1kb - await waitForConverged(async () => - Math.round((await db1.log.getMyTotalParticipation()) * 500), - ); - await waitForConverged(async () => - Math.round((await db2.log.getMyTotalParticipation()) * 500), - ); - expect(await db1.log.getMyTotalParticipation()).to.be.within( - 0.03, - 0.1, - ); - expect(await db1.log.getMyTotalParticipation()).to.be.within( - 0.03, - 0.1, - ); - }); + for (let i = 0; i < 100; i++) { + // insert 1mb + await db2.add(data, { meta: { next: [] } }); + } - it("evenly if limited when not constrained", async () => { - const memoryLimit = 100 * 1e3; + await waitForResolved(async () => { + expect( + await db1.log.calculateMyTotalParticipation(), + ).to.be.within(0.45, 0.55); + expect( + await db2.log.calculateMyTotalParticipation(), + ).to.be.within(0.45, 0.55); + }); + }); - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb - }, - }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, - }); + it("unequally limited", async () => { + const memoryLimit = 100 * 1e3; - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - storage: memoryLimit * 3, // 300kb + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + offset: 0, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), }, + setup, }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + storage: memoryLimit * 2, // 200kb + }, + offset: 0.3, // we choose 0.3 so this node can cover 0.333 - 1 (66.666%) + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, }, - }, - }, - ); + ); - const data = toBase64(randomBytes(5.5e2)); // about 1kb + const data = toBase64(randomBytes(5.5e2)); // about 1kb - for (let i = 0; i < 100; i++) { - // insert 1mb - await db2.add(data, { meta: { next: [] } }); - } + for (let i = 0; i < 300; i++) { + // insert 1mb + await db2.add(data, { meta: { next: [] } }); + } - await waitForResolved(async () => { - expect(await db1.log.getMyTotalParticipation()).to.be.within( - 0.45, - 0.55, - ); - expect(await db2.log.getMyTotalParticipation()).to.be.within( - 0.45, - 0.55, - ); - }); - }); + await waitForResolved( + async () => + expect( + 
Math.abs(memoryLimit - (await db1.log.getMemoryUsage())), + ).lessThan((memoryLimit / 100) * 10), + { + timeout: 20 * 1000, + }, + ); // 10% error at most - it("unequally limited", async () => { - const memoryLimit = 100 * 1e3; + await waitForResolved(async () => + expect( + Math.abs(memoryLimit * 2 - (await db2.log.getMemoryUsage())), + ).lessThan(((memoryLimit * 2) / 100) * 10), + ); // 10% error at most - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb - }, - }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, - }); + await waitForResolved(async () => + expect( + Math.abs(memoryLimit - (await db1.log.getMemoryUsage())), + ).lessThan((memoryLimit / 100) * 10), + ); // 10% error at most - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - storage: memoryLimit * 2, // 200kb + await waitForResolved(async () => + expect( + Math.abs(memoryLimit * 2 - (await db2.log.getMemoryUsage())), + ).lessThan(((memoryLimit * 2) / 100) * 10), + ); // 10% error at most + }); + + it("greatly limited", async () => { + const memoryLimit = 100 * 1e3; + + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + limits: { + storage: 0, // 0kb + }, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), }, + setup, }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + limits: { + storage: memoryLimit, // 100kb + }, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, }, - }, - }, - ); - - const data = toBase64(randomBytes(5.5e2)); // about 1kb + ); - for (let i = 0; i < 300; i++) { - // insert 1mb - await db2.add(data, { meta: { next: [] } }); - } + const data = toBase64(randomBytes(5.5e2)); // about 1kb + + for (let i = 0; i < 100; i++) { + // insert 1mb + await db2.add(data, { meta: { next: [] } }); + } + await delay(db1.log.timeUntilRoleMaturity); + try { + await waitForResolved( + async () => + expect(await db1.log.getMemoryUsage()).lessThan(10 * 1e3), + { + timeout: 2e4, + }, + ); // 10% error at most + + await waitForResolved(async () => + expect( + Math.abs(memoryLimit - (await db2.log.getMemoryUsage())), + ).lessThan((memoryLimit / 100) * 10), + ); // 10% error at most + } catch (error) { + await dbgLogs([db1.log, db2.log]); + throw error; + } + }); - await waitForResolved( - async () => - expect( - Math.abs(memoryLimit - (await db1.log.getMemoryUsage())), - ).lessThan((memoryLimit / 100) * 10), - { - timeout: 20 * 1000, - }, - ); // 10% error at most + it("even if unlimited", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + offset: 0, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + offset: 0.5, + }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }, + ); - await waitForResolved(async () => - expect( - Math.abs(memoryLimit * 2 - (await db2.log.getMemoryUsage())), - ).lessThan(((memoryLimit * 2) / 100) * 10), - ); // 10% error at most + const data = toBase64(randomBytes(5.5e2)); // about 1kb - await waitForResolved(async () => - expect( - 
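In "unequally limited" the budgets are 100 kB and 200 kB against ~300 kB of data, so a split proportional to the limits lands each node exactly at its budget; that is why both assertions above allow only a 10% deviation from the respective limit. Worked out:

const limits = [100e3, 200e3];
const totalData = 300 * 1e3; // 300 entries at ~1 kB each
const totalLimit = limits[0] + limits[1];
const expectedUsage = limits.map((l) => Math.round((l / totalLimit) * totalData));
console.assert(expectedUsage[0] === 100e3); // db1 at its 100 kB budget
console.assert(expectedUsage[1] === 200e3); // db2 at its 200 kB budget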
Math.abs(memoryLimit - (await db1.log.getMemoryUsage())), - ).lessThan((memoryLimit / 100) * 10), - ); // 10% error at most + for (let i = 0; i < 1000; i++) { + // insert 1mb + await db2.add(data, { meta: { next: [] } }); + } - await waitForResolved(async () => - expect( - Math.abs(memoryLimit * 2 - (await db2.log.getMemoryUsage())), - ).lessThan(((memoryLimit * 2) / 100) * 10), - ); // 10% error at most + await waitForResolved(async () => { + expect( + await db1.log.calculateMyTotalParticipation(), + ).to.be.within(0.45, 0.55); + expect( + await db2.log.calculateMyTotalParticipation(), + ).to.be.within(0.45, 0.55); + }); + }); + }); }); - it("greatly limited", async () => { - const memoryLimit = 100 * 1e3; + describe("mixed", () => { + it("1 limited, 2 factor", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: true, + setup, + }, + }); - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { - limits: { - storage: 0, // 0kb + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, + setup, }, }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, - }); + ); - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - limits: { - storage: memoryLimit, // 100kb + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: { + replicate: { + factor: 1, }, - }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), + setup, }, }, - }, - ); - - const data = toBase64(randomBytes(5.5e2)); // about 1kb - - for (let i = 0; i < 100; i++) { - // insert 1mb - await db2.add(data, { meta: { next: [] } }); - } - await delay(db1.log.timeUntilRoleMaturity); - await waitForResolved( - async () => - expect(await db1.log.getMemoryUsage()).lessThan(10 * 1e3), - { - timeout: 2e4, - }, - ); // 10% error at most - - await waitForResolved(async () => - expect( - Math.abs(memoryLimit - (await db2.log.getMemoryUsage())), - ).lessThan((memoryLimit / 100) * 10), - ); // 10% error at most - }); + ); - it("even if unlimited", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: true, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, + await waitForResolved(async () => + expect(await db1.log.calculateMyTotalParticipation()).equal(0), + ); }); + }); - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { + describe("fixed", () => { + it("can weight by factor", async () => { + db1 = await session.peers[0].open(new EventStore(), { args: { - replicate: true, + replicate: { offset: 0, factor: 0.05 }, replicas: { min: new AbsoluteReplicas(1), max: new AbsoluteReplicas(1), }, + setup, }, - }, - ); - - const data = toBase64(randomBytes(5.5e2)); // about 1kb - - for (let i = 0; i < 1000; i++) { - // insert 1mb - await db2.add(data, { meta: { next: [] } }); - } + }); - await waitForResolved(async () => { - expect(await db1.log.getMyTotalParticipation()).to.be.within( - 0.45, - 0.55, + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { offset: 0.5, factor: 0.5 }, + replicas: { + min: new AbsoluteReplicas(1), + max: new AbsoluteReplicas(1), + }, + setup, + }, + }, ); - expect(await db2.log.getMyTotalParticipation()).to.be.within( - 0.45, - 0.55, + const data = toBase64(randomBytes(5.5e2)); // about 1kb + + for (let i = 0; i < 100; i++) { + // insert 100kb + 
await db1.add(data, { meta: { next: [] } }); + } + await waitForResolved( + () => + expect(db2.log.log.length).greaterThan(db1.log.log.length + 20), + { + timeout: 3e4, + }, ); }); }); }); - }); - - describe("mixed", () => { - it("1 limited, 2 factor", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: true, - }, - }); - - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ); - - db3 = await EventStore.open>( - db1.address!, - session.peers[2], - { - args: { - replicate: { - factor: 1, - }, - }, - }, - ); - - await waitForResolved(async () => - expect(await db1.log.getMyTotalParticipation()).equal(0), - ); - }); - }); - - describe("fixed", () => { - it("can weight by factor", async () => { - db1 = await session.peers[0].open(new EventStore(), { - args: { - replicate: { offset: 0, factor: 0.05 }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, - }); - db2 = await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { offset: 0.5, factor: 0.5 }, - replicas: { - min: new AbsoluteReplicas(1), - max: new AbsoluteReplicas(1), - }, - }, - }, - ); - const data = toBase64(randomBytes(5.5e2)); // about 1kb - - for (let i = 0; i < 100; i++) { - // insert 100kb - await db1.add(data, { meta: { next: [] } }); - } - await waitForResolved( - () => expect(db2.log.log.length).greaterThan(db1.log.log.length + 30), - { - timeout: 3e4, - }, - ); - }); + // TODO test untrusted filtering }); }); - - // TODO test untrusted filtering }); diff --git a/packages/programs/data/shared-log/test/utils.ts b/packages/programs/data/shared-log/test/utils.ts index 87bb1f042..871b9be28 100644 --- a/packages/programs/data/shared-log/test/utils.ts +++ b/packages/programs/data/shared-log/test/utils.ts @@ -1,15 +1,26 @@ import { type Constructor } from "@dao-xyz/borsh"; import type { PublicSignKey } from "@peerbit/crypto"; -import { delay, waitFor, waitForResolved } from "@peerbit/time"; +import type { Entry } from "@peerbit/log"; +import type { ProgramClient } from "@peerbit/program"; +import type { DirectSub } from "@peerbit/pubsub"; +import { delay, waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import { type EntryWithRefs, ExchangeHeadsMessage, } from "../src/exchange-heads.js"; -import { type SharedLog, maxReplicas } from "../src/index.js"; +import { + type ReplicationDomainHash, + type SharedLog, + createReplicationDomainHash, + maxReplicas, +} from "../src/index.js"; import type { TransportMessage } from "../src/message"; +import type { SynchronizerConstructor } from "../src/sync/index.js"; +import { RatelessIBLTSynchronizer } from "../src/sync/rateless-iblt.js"; +import { SimpleSyncronizer } from "../src/sync/simple.js"; -export const collectMessages = (log: SharedLog) => { +export const collectMessages = (log: SharedLog) => { const messages: [TransportMessage, PublicSignKey][] = []; // TODO types @@ -21,7 +32,7 @@ export const collectMessages = (log: SharedLog) => { return messages; }; -export const collectMessagesFn = (log: SharedLog) => { +export const collectMessagesFn = (log: SharedLog) => { const messages: [TransportMessage, PublicSignKey][] = []; const onMessageOrg = log._onMessage.bind(log); const fn = async (msg: any, ctx: any) => { @@ -32,7 +43,28 @@ export const collectMessagesFn = (log: SharedLog) => { }; export const slowDownSend = ( - log: SharedLog, + from: ProgramClient, + to: ProgramClient, + ms = 
3000,
+) => {
+	const directsub = from.services.pubsub as DirectSub;
+	for (const [_key, peer] of directsub.peers) {
+		if (peer.publicKey.equals(to.identity.publicKey)) {
+			const writeFn = peer.write.bind(peer);
+			peer.write = async (msg, priority) => {
+				await delay(ms);
+				if (peer.outboundStream) {
+					return writeFn(msg, priority);
+				}
+			};
+			return;
+		}
+	}
+	throw new Error("Could not find peer");
+};
+
+export const slowDownMessage = (
+	log: SharedLog,
 	type: Constructor,
 	tms: number,
 	abortSignal?: AbortSignal,
@@ -69,9 +101,18 @@ export const getReceivedHeads = (
 export const waitForConverged = async (
 	fn: () => any,
-	options: { timeout: number; tests: number } = {
+	options: {
+		timeout: number;
+		tests: number;
+		interval: number;
+		delta: number;
+		debug?: boolean;
+	} = {
 		tests: 3,
+		delta: 1,
 		timeout: 30 * 1000,
+		interval: 1000,
+		debug: false,
 	},
 ) => {
 	let lastResult = undefined;
@@ -79,7 +120,11 @@ export const waitForConverged = async (
 	let ok = 0;
 	for (;;) {
 		const current = await fn();
-		if (lastResult === current) {
+		if (options.debug) {
+			console.log("Waiting for convergence: " + current);
+		}
+
+		if (lastResult != null && Math.abs(lastResult - current) <= options.delta) {
 			ok += 1;
 			if (options.tests <= ok) {
 				break;
@@ -88,15 +133,15 @@ export const waitForConverged = async (
 			ok = 0;
 		}
 		lastResult = current;
-		await delay(1000);
+		await delay(options.interval);
 		c++;
-		if (c * 1000 > options.timeout) {
+		if (c * options.interval > options.timeout) {
 			throw new Error("Timeout");
 		}
 	}
 };
 
 export const getUnionSize = async (
-	dbs: { log: SharedLog }[],
+	dbs: { log: SharedLog }[],
 	expectedUnionSize: number,
 ) => {
 	const union = new Set();
@@ -111,17 +156,42 @@ export const checkBounded = async (
 	entryCount: number,
 	lower: number,
 	higher: number,
-	...dbs: { log: SharedLog }[]
+	...dbs: { log: SharedLog }[]
 ) => {
+	const checkConverged = async (db: { log: SharedLog }) => {
+		const a = db.log.log.length;
+		await delay(100); // arbitrary delay
+		// convergence is when the difference is less than 1% of the max
+		return (
+			Math.abs(a - db.log.log.length) <
+			Math.max(Math.round(Math.max(a, db.log.log.length) * 0.01), 1)
+		); // TODO make this a parameter
+	};
+
+	for (const [_i, db] of dbs.entries()) {
+		try {
+			await waitForResolved(() => checkConverged(db), {
+				timeout: 25000,
+				delayInterval: 2500,
+			});
+		} catch (error) {
+			throw new Error("Log length did not converge");
+		}
+	}
+
+	await checkReplicas(
+		dbs,
+		maxReplicas(dbs[0].log, [...(await dbs[0].log.log.toArray())]),
+		entryCount,
+	);
+
+	for (const [_i, db] of 
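The reworked waitForConverged treats two readings within delta of each other as equal and requires tests consecutive stable readings, polling every interval ms. The "overflow limited" test above shows the intended call shape; the same pattern in isolation, with a stand-in metric (illustrative only):

let calls = 0;
const metric = async () => (calls++ < 5 ? calls * 10 : 50); // settles at 50

void (async () => {
	await waitForConverged(async () => Math.round(await metric()), {
		tests: 3, // three consecutive readings...
		delta: 1, // ...that differ by at most 1
		interval: 1000,
		timeout: 30 * 1000,
	});
})();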
dbs.entries()) { - await waitFor(() => checkConverged(db), { - timeout: 25000, - delayInterval: 2500, +export const checkReplicas = async ( + dbs: { log: SharedLog }[], + minReplicas: number, + entryCount: number, +) => { + try { + await waitForResolved(async () => { + const map = new Map(); + const hashToEntry = new Map>(); + for (const db of dbs) { + for (const value of await db.log.log.toArray()) { + // eslint-disable-next-line @typescript-eslint/no-unused-expressions + expect(await db.log.log.blocks.has(value.hash)).to.be.true; + map.set(value.hash, (map.get(value.hash) || 0) + 1); + hashToEntry.set(value.hash, value); + } + } + for (const [_k, v] of map) { + try { + expect(v).greaterThanOrEqual(minReplicas); + } catch (error) { + const entry = hashToEntry.get(_k)!; + const gid = entry.meta.gid; + throw new Error( + "Did not fulfill min replicas level for " + + entry.hash + + " of: " + + minReplicas + + " got " + + v + + ". Gid to peer history? " + + JSON.stringify( + dbs.map((x) => x.log._gidPeersHistory.get(gid)?.size || 0) + + ". Has? " + + JSON.stringify( + await Promise.all( + dbs.map((x) => x.log.log.has(entry.hash)), + ), + ) + + ", sync in flight ? " + + JSON.stringify( + dbs.map((x) => + x.log.syncronizer.syncInFlight.has(entry.hash), + ), + ), + ), + ); + } + expect(v).lessThanOrEqual(dbs.length); + } }); + } catch (error) { + await dbgLogs(dbs.map((x) => x.log)); + throw error; } +}; - for (const [_i, db] of dbs.entries()) { - await waitForResolved(() => - expect(db.log.log.length).greaterThanOrEqual(entryCount * lower), - ); - await waitForResolved(() => - expect(db.log.log.length).lessThanOrEqual(entryCount * higher), - ); +export const generateTestsFromResolutions = ( + fn: (domain: ReplicationDomainHash<"u32" | "u64">) => void, +) => { + const resolutions = ["u32", "u64"] as const; + for (const resolution of resolutions) { + describe(resolution, () => { + fn(createReplicationDomainHash(resolution)); + }); } +}; - await checkReplicas( - dbs, - maxReplicas(dbs[0].log, [...(await dbs[0].log.log.toArray())]), - entryCount, - ); +export type TestSetupConfig = { + type: R; + domain: ReplicationDomainHash; + syncronizer: SynchronizerConstructor; + name: string; }; -export const checkReplicas = ( - dbs: { log: SharedLog }[], - minReplicas: number, - entryCount: number, +export const testSetups: TestSetupConfig[] = [ + { + domain: createReplicationDomainHash("u32"), + type: "u32", + syncronizer: SimpleSyncronizer, + name: "u32-simple", + }, + { + domain: createReplicationDomainHash("u64"), + type: "u64", + syncronizer: SimpleSyncronizer, + name: "u64-simple", + }, + { + domain: createReplicationDomainHash("u64"), + type: "u64", + syncronizer: RatelessIBLTSynchronizer, + name: "u64-iblt", + }, +]; +export const checkIfSetupIsUsed = ( + setup: TestSetupConfig, + log: SharedLog, ) => { - return waitForResolved(async () => { - const map = new Map(); - for (const db of dbs) { - for (const value of await db.log.log.toArray()) { - // eslint-disable-next-line @typescript-eslint/no-unused-expressions - expect(await db.log.log.blocks.has(value.hash)).to.be.true; - map.set(value.hash, (map.get(value.hash) || 0) + 1); - } - } - for (const [_k, v] of map) { - try { - expect(v).greaterThanOrEqual(minReplicas); - } catch (error) { - throw new Error( - "Did not fulfill min replicas level of: " + minReplicas + " got " + v, - ); - } - expect(v).lessThanOrEqual(dbs.length); - } - }); + expect(log.domain).to.equal(setup.domain); + expect(log.syncronizer.constructor).to.equal(setup.syncronizer); +}; 
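testSetups is intended to be iterated by the spec files so that every suite runs once per domain-resolution/synchronizer pairing. A minimal driver, assuming the usual suite fixtures (a session, the EventStore test store) are in scope; the exact wiring shown is illustrative:

for (const setup of testSetups) {
	describe(setup.name, () => {
		it("opens a store against this setup", async () => {
			const db = await session.peers[0].open(new EventStore<string>(), {
				args: { replicate: { factor: 1 }, setup },
			});
			// asserts that domain and syncronizer were actually picked up
			checkIfSetupIsUsed(setup, db.log);
		});
	});
}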
+ +export const dbgLogs = async (log: SharedLog[]) => { + for (const l of log) { + console.error( + "Id:", + l.node.identity.publicKey.hashcode(), + "Log length:", + l.log.length, + "Replication segments:", + // eslint-disable-next-line @typescript-eslint/no-base-to-string + (await l.getAllReplicationSegments()).map((x) => x.toString()), + "Prunable: " + (await l.getPrunable()).length, + "log length: ", + l.log.length, + "Replication ranges", + l.addedReplciationRangesFrom.size, + ); + } }; diff --git a/packages/programs/data/shared-log/test/utils/access.ts b/packages/programs/data/shared-log/test/utils/access.ts index 887c26e39..90de14878 100644 --- a/packages/programs/data/shared-log/test/utils/access.ts +++ b/packages/programs/data/shared-log/test/utils/access.ts @@ -5,9 +5,9 @@ import { EventStore } from "./stores"; @variant("test_simple") export class SimpleStoreContract extends Program { @field({ type: EventStore }) - store!: EventStore; + store!: EventStore; - constructor(properties?: { store: EventStore }) { + constructor(properties?: { store: EventStore }) { super(); if (properties) { this.store = properties.store; diff --git a/packages/programs/data/shared-log/test/utils/stores/event-store.ts b/packages/programs/data/shared-log/test/utils/stores/event-store.ts index 351220ff3..40b67c319 100644 --- a/packages/programs/data/shared-log/test/utils/stores/event-store.ts +++ b/packages/programs/data/shared-log/test/utils/stores/event-store.ts @@ -20,8 +20,9 @@ import { } from "../../../src/index.js"; import type { TransportMessage } from "../../../src/message.js"; import type { EntryReplicated } from "../../../src/ranges.js"; -import type { ReplicationDomainHash } from "../../../src/replication-domain-hash.js"; import type { ReplicationDomain } from "../../../src/replication-domain.js"; +import type { SynchronizerConstructor } from "../../../src/sync/index.js"; +import type { TestSetupConfig } from "../../utils.js"; import { JSON_ENCODING } from "./encoding.js"; // TODO: generalize the Iterator functions and spin to its own module @@ -32,8 +33,8 @@ export interface Operation { } export class EventIndex { - _log: SharedLog>; - constructor(log: SharedLog>) { + _log: SharedLog, any>; + constructor(log: SharedLog, any>) { this._log = log; } @@ -42,31 +43,42 @@ export class EventIndex { } } -export type Args>> = { +export type Args< + T, + D extends ReplicationDomain, R>, + R extends "u32" | "u64" = D extends ReplicationDomain + ? I + : "u32", +> = { onChange?: (change: Change>) => void; - replicate?: ReplicationOptions; + replicate?: ReplicationOptions; trim?: TrimOptions; replicas?: ReplicationLimitsOptions; encoding?: Encoding>; respondToIHaveTimeout?: number; timeUntilRoleMaturity?: number; + waitForPruneDelay?: number; waitForReplicatorTimeout?: number; sync?: ( - entry: Entry> | ShallowEntry | EntryReplicated, + entry: Entry> | ShallowEntry | EntryReplicated, ) => boolean; canAppend?: CanAppend>; canReplicate?: (publicKey: PublicSignKey) => Promise | boolean; onMessage?: (msg: TransportMessage, context: RequestContext) => Promise; compatibility?: number; + setup?: TestSetupConfig; domain?: D; }; @variant("event_store") export class EventStore< T, - D extends ReplicationDomain> = ReplicationDomainHash, -> extends Program> { + D extends ReplicationDomain, R>, + R extends "u32" | "u64" = D extends ReplicationDomain + ? 
I + : "u32", +> extends Program> { @field({ type: SharedLog }) - log: SharedLog, D>; + log: SharedLog, D, R>; @field({ type: Uint8Array }) id: Uint8Array; @@ -74,6 +86,8 @@ export class EventStore< _index!: EventIndex; _canAppend?: CanAppend>; + static staticArgs: Args | undefined; + constructor(properties?: { id: Uint8Array }) { super(); this.id = properties?.id || randomBytes(32); @@ -84,13 +98,17 @@ export class EventStore< this._canAppend = canAppend; } - async open(properties?: Args) { + async open(properties?: Args) { this._index = new EventIndex(this.log); if (properties?.onMessage) { this.log._onMessage = properties.onMessage; } + if (properties?.domain && properties?.setup?.domain) { + throw new Error("Cannot have both domain and setup.domain"); + } + await this.log.open({ compatibility: properties?.compatibility, onChange: properties?.onChange, @@ -107,11 +125,15 @@ export class EventStore< replicas: properties?.replicas, waitForReplicatorTimeout: properties?.waitForReplicatorTimeout, encoding: JSON_ENCODING, - timeUntilRoleMaturity: properties?.timeUntilRoleMaturity ?? 1000, + timeUntilRoleMaturity: properties?.timeUntilRoleMaturity ?? 3000, + waitForPruneDelay: properties?.waitForPruneDelay ?? 300, sync: properties?.sync, respondToIHaveTimeout: properties?.respondToIHaveTimeout, distributionDebounceTime: 50, // to make tests fast - domain: properties?.domain, + domain: properties?.domain ?? properties?.setup?.domain, + syncronizer: properties?.setup?.syncronizer as SynchronizerConstructor, + + ...(((this.constructor as typeof EventStore).staticArgs ?? {}) as any), }); } diff --git a/packages/programs/data/string/src/string-store.ts b/packages/programs/data/string/src/string-store.ts index 293a036a9..2f51fa4c6 100644 --- a/packages/programs/data/string/src/string-store.ts +++ b/packages/programs/data/string/src/string-store.ts @@ -53,7 +53,7 @@ export type CanPerform = ( export type Args = { canRead?: CanRead; canPerform?: CanPerform; - log?: SharedLogOptions; + log?: SharedLogOptions>; }; export type TransactionContext = { @@ -62,7 +62,7 @@ export type TransactionContext = { @variant("dstring") export class DString extends Program { @field({ type: SharedLog }) - _log: SharedLog; + _log: SharedLog; @field({ type: RPC }) query: RPC; diff --git a/packages/programs/program/src/client.ts b/packages/programs/program/src/client.ts index befd782d4..767c9e0b0 100644 --- a/packages/programs/program/src/client.ts +++ b/packages/programs/program/src/client.ts @@ -1,8 +1,12 @@ -import type { PeerId as Libp2pPeerId } from "@libp2p/interface"; +import type { PeerId as Libp2pPeerId, PeerId } from "@libp2p/interface"; import type { Multiaddr } from "@multiformats/multiaddr"; import type { AnyStore } from "@peerbit/any-store-interface"; import { type Blocks } from "@peerbit/blocks-interface"; -import { type Ed25519PublicKey, type Identity } from "@peerbit/crypto"; +import type { + Ed25519PublicKey, + Identity, + PublicSignKey, +} from "@peerbit/crypto"; import type { Indices } from "@peerbit/indexer-interface"; import { type Keychain } from "@peerbit/keychain"; import { type PubSub } from "@peerbit/pubsub-interface"; @@ -19,6 +23,7 @@ export interface Client>> { identity: Identity; getMultiaddrs: () => Multiaddr[]; dial(address: string | Multiaddr | Multiaddr[]): Promise; + hangUp(address: PeerId | PublicSignKey | string | Multiaddr): Promise; services: { pubsub: PubSub; blocks: Blocks; diff --git a/packages/programs/program/src/handler.ts b/packages/programs/program/src/handler.ts index 
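EventStore.open forwards either an explicit domain or the one carried by a setup (passing both throws), picks the setup's synchronizer, and finally spreads staticArgs so a whole spec file can pin arguments without threading them through every open call. Both paths, sketched with hypothetical values:

// per-open: a TestSetupConfig maps to domain + syncronizer
const db = await session.peers[0].open(new EventStore<string>(), {
	args: { replicate: { offset: 0 }, setup: testSetups[2] }, // u64 + RatelessIBLTSynchronizer
});

// per-suite: pin args for every subsequent open of this class
EventStore.staticArgs = { replicas: { min: 1 } } as any;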
ce06d2120..40e03888b 100644
--- a/packages/programs/program/src/handler.ts
+++ b/packages/programs/program/src/handler.ts
@@ -60,6 +60,10 @@ export type ProgramInitializationOptions> = {
 	EventOptions;
 
 export const addParent = (child: Manageable, parent?: Manageable) => {
+	if (child.parents && child.parents.includes(parent) && parent == null) {
+		return; // prevent a root parent from being added multiple times. This allows us to close a program once even if it is reused multiple times
+	}
+
 	(child.parents || (child.parents = [])).push(parent);
 	if (parent) {
 		(parent.children || (parent.children = [])).push(child);
diff --git a/packages/programs/program/src/program.ts b/packages/programs/program/src/program.ts
index cc341baa0..c7ca889ce 100644
--- a/packages/programs/program/src/program.ts
+++ b/packages/programs/program/src/program.ts
@@ -192,6 +192,11 @@ export abstract class Program<
 			await next.beforeOpen(node, { ...options, parent: this });
 		}
 
+		await this._eventOptions?.onBeforeOpen?.(this);
+		this.closed = false;
+	}
+
+	async afterOpen() {
 		await this.node.services.pubsub.addEventListener(
 			"subscribe",
 			this._subscriptionEventListener ||
@@ -205,13 +210,8 @@ export abstract class Program<
 					!this.closed && this._emitLeaveNetworkEvents(s.detail)),
 		);
-		await this._eventOptions?.onBeforeOpen?.(this);
-	}
-
-	async afterOpen() {
 		this.emitEvent(new CustomEvent("open", { detail: this }), true);
 		await this._eventOptions?.onOpen?.(this);
-		this.closed = false;
 		const nexts = this.programs;
 		for (const next of nexts) {
 			await next.afterOpen();
diff --git a/packages/programs/program/test/handler.spec.ts b/packages/programs/program/test/handler.spec.ts
index 6a29eb82b..dbbade96e 100644
--- a/packages/programs/program/test/handler.spec.ts
+++ b/packages/programs/program/test/handler.spec.ts
@@ -19,7 +19,7 @@ describe(`shared`, () => {
 
 	it("open same store twice will share instance", async () => {
 		const db1 = await client.open(new TestProgram());
-		await expect(await client.open(db1)).equal(db1);
+		expect(await client.open(db1)).equal(db1);
 	});
 
 	it("can open different dbs concurrently", async () => {
@@ -45,6 +45,20 @@ describe(`shared`, () => {
 		);
 	});
 
+	it("is open on open", async () => {
+		const instance = new TestProgram();
+		const openFn = instance.open.bind(instance);
+		let openInvoked = false;
+		instance.open = async () => {
+			expect(instance.closed).to.be.false;
+			await openFn();
+			openInvoked = true;
+		};
+
+		await client.open(instance);
+		expect(openInvoked).to.be.true;
+	});
+
 	it("rejects duplicate concurrently", async () => {
 		const p1 = new TestProgram();
 		const p2 = p1.clone();
@@ -126,6 +140,30 @@ describe(`shared`, () => {
 		expect(p2.nested.openInvoked).to.not.be.true;
 	});
 
+	it("reuse clone multiple times and close", async () => {
+		const p1 = new TestProgram();
+		const db1Promise = client.open(p1);
+		await db1Promise;
+		const p2 = await client.open(p1.clone(), { existing: "reuse" });
+		const p3 = await client.open(p1.clone(), { existing: "reuse" });
+		expect(p2 === p1).to.be.true;
+		expect(p3 === p1).to.be.true;
+		await p2.close();
+		expect(p1.closed).to.be.true;
+	});
+
+	it("reuse multiple times and close", async () => {
+		const p1 = new TestProgram();
+		const db1Promise = client.open(p1);
+		await db1Promise;
+		const p2 = await client.open(p1, { existing: "reuse" });
+		const p3 = await client.open(p1, { existing: "reuse" });
+		expect(p2 === p1).to.be.true;
+		expect(p3 === p1).to.be.true;
+		await p2.close();
+		expect(p1.closed).to.be.true;
+	});
+
 	it("rejects", async () => {
 		const someParent = 
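The guard added to addParent makes repeated root-level opens idempotent: a root open records parent === undefined, and recording it more than once would require closing the program more than once, which is exactly what the new "reuse ... and close" tests exercise. The shape of the guard in isolation (illustrative types):

const child: { parents?: (object | undefined)[] } = {};
const addParent = (parent?: object) => {
	if (child.parents && child.parents.includes(parent) && parent == null) {
		return; // a root parent is only recorded once
	}
	(child.parents || (child.parents = [])).push(parent);
};
addParent(); // root open
addParent(); // reused root open: deduplicated
console.assert(child.parents!.length === 1); // one close() now fully closes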
new TestProgram(); await expect(client.open(someParent, { parent: someParent })).rejectedWith( diff --git a/packages/programs/program/test/utils.ts b/packages/programs/program/test/utils.ts index 03c2da938..98155de5a 100644 --- a/packages/programs/program/test/utils.ts +++ b/packages/programs/program/test/utils.ts @@ -53,6 +53,7 @@ export const createPeer = async ( identity: keypair, getMultiaddrs: () => [], dial: () => Promise.resolve(false), + hangUp: () => Promise.resolve(), services: { blocks: { get: (c) => blocks.get(c), diff --git a/packages/transport/blocks/package.json b/packages/transport/blocks/package.json index c0d168fcd..e3cd91a6b 100644 --- a/packages/transport/blocks/package.json +++ b/packages/transport/blocks/package.json @@ -83,7 +83,7 @@ "@peerbit/blocks-interface": "1.3.7", "@peerbit/crypto": "2.3.2", "@ipld/dag-cbor": "^9.2.1", - "libp2p": "^2.2.1", + "libp2p": "^2.3.1", "multiformats": "^13.0.1" } } diff --git a/packages/transport/libp2p-test-utils/package.json b/packages/transport/libp2p-test-utils/package.json index a5727e616..c0df7ba84 100644 --- a/packages/transport/libp2p-test-utils/package.json +++ b/packages/transport/libp2p-test-utils/package.json @@ -58,12 +58,12 @@ "author": "dao.xyz", "license": "MIT", "dependencies": { - "@libp2p/tcp": "^10.0.11", - "@libp2p/webrtc": "^5.0.16", - "@libp2p/websockets": "^9.0.11", - "@libp2p/identify": "^3.0.10", - "@libp2p/circuit-relay-v2": "^3.1.0", - "libp2p": "^2.2.1" + "@libp2p/tcp": "^10.0.13", + "@libp2p/webrtc": "^5.0.19", + "@libp2p/websockets": "^9.0.13", + "@libp2p/identify": "^3.0.12", + "@libp2p/circuit-relay-v2": "^3.1.3", + "libp2p": "^2.3.1" }, "scripts": { "clean": "aegir clean", diff --git a/packages/transport/pubsub/package.json b/packages/transport/pubsub/package.json index 3756949c2..e241f7a3f 100644 --- a/packages/transport/pubsub/package.json +++ b/packages/transport/pubsub/package.json @@ -80,6 +80,6 @@ "@peerbit/stream": "4.1.2", "@peerbit/logger": "1.0.3", "@peerbit/pubsub-interface": "^3.1.1", - "libp2p": "^2.2.1" + "libp2p": "^2.3.1" } } diff --git a/packages/transport/stream/e2e/browser/browser-node/package.json b/packages/transport/stream/e2e/browser/browser-node/package.json index 492a74b37..0d8ecc525 100644 --- a/packages/transport/stream/e2e/browser/browser-node/package.json +++ b/packages/transport/stream/e2e/browser/browser-node/package.json @@ -13,7 +13,7 @@ "react": "^18.2.0", "react-dom": "^18.2.0", "@peerbit/stream": "*", - "libp2p": "^2.2.1" + "libp2p": "^2.3.1" }, "devDependencies": { "@types/react": "^18.2.12", diff --git a/packages/transport/stream/package.json b/packages/transport/stream/package.json index e16ed23c6..e42b04edd 100644 --- a/packages/transport/stream/package.json +++ b/packages/transport/stream/package.json @@ -89,7 +89,7 @@ "@peerbit/stream-interface": "^5.1.1", "@peerbit/time": "^2.0.7", "@peerbit/logger": "^1.0.3", - "libp2p": "^2.2.1", + "libp2p": "^2.3.1", "yallist": "^4.0.0", "abortable-iterator": "^5.0.1" } diff --git a/packages/transport/stream/src/index.ts b/packages/transport/stream/src/index.ts index cea09ae18..ba27914fe 100644 --- a/packages/transport/stream/src/index.ts +++ b/packages/transport/stream/src/index.ts @@ -1620,6 +1620,7 @@ export abstract class DirectStream< if (message.header.mode instanceof AnyWhere) { return { promise: Promise.resolve() }; } + const idString = toBase64(message.id); const existing = this._ackCallbacks.get(idString); @@ -1659,6 +1660,7 @@ export abstract class DirectStream< } const deliveryDeferredPromise = pDefer(); + if 
(!haveReceivers) { deliveryDeferredPromise.resolve(); // we don't know how many answers to expect, just resolve immediately } @@ -1747,6 +1749,7 @@ export abstract class DirectStream< // only remove the callback function if we actually expected a specific amount of responses clear(); } + deliveryDeferredPromise.resolve(); return true; } @@ -1800,6 +1803,7 @@ export abstract class DirectStream< }, clear: () => { clear(); + deliveryDeferredPromise.resolve(); }, }); @@ -1869,7 +1873,7 @@ export abstract class DirectStream< (message.header.mode instanceof AcknowledgeDelivery || message.header.mode instanceof SilentDelivery) && !to && - message.header.mode.to + message.header.mode.to.length > 0 ) { const fanout = this.routes.getFanout( from.hashcode(), @@ -1891,13 +1895,14 @@ export abstract class DirectStream< return delivereyPromise; // we are done sending the message in all directions with updated 'to' lists } - return; // we defintely that we should not forward the message anywhere + return; // we definitely know that we should not forward the message anywhere } - return; - - // else send to all (fallthrough to code below) - } + // we end up here because we don't have enough information yet on how to send data to the peer (TODO test this codepath) + if (relayed) { + return; + } + } // else send to all (fallthrough to code below) } // We failed to send the message directly, instead fall back to floodsub @@ -2084,6 +2089,7 @@ export abstract class DirectStream< if (this.peers.size <= this.connectionManagerOptions.minConnections) { return; } + const sorted = [...this.peers.values()] .sort((x, y) => x.usedBandwidth - y.usedBandwidth) .map((x) => x.publicKey.hashcode()); diff --git a/packages/transport/stream/src/routes.ts b/packages/transport/stream/src/routes.ts index 2e8bca94f..dd46d51b0 100644 --- a/packages/transport/stream/src/routes.ts +++ b/packages/transport/stream/src/routes.ts @@ -14,6 +14,24 @@ type RouteInfo = { list: RelayInfo[]; }; +const sortRoutes = (routes: RelayInfo[]) => { + // sort by distance; on equal distance, put the routes without an expire time first + + const sorted = routes.sort((a, b) => { + if (a.distance === b.distance) { + if (a.expireAt && !b.expireAt) { + return 1; + } + if (!a.expireAt && b.expireAt) { + return -1; + } + return 0; + } + return a.distance - b.distance; + }); + return sorted; +}; + export class Routes { // FROM -> TO -> { ROUTE INFO, A list of neighbours that we can send data through to reach to} routes: Map> = new Map(); @@ -154,7 +172,7 @@ export class Routes { route.distance = distance; route.session = session; route.expireAt = undefined; // remove expiry since we updated - prev.list.sort((a, b) => a.distance - b.distance); + sortRoutes(prev.list); return isNewRemoteSession ? "restart" : "updated"; } else if (route.distance === distance) { route.session = session; @@ -180,7 +198,7 @@ export class Routes { ? +new Date() + this.routeMaxRetentionPeriod : undefined, }); - prev.list.sort((a, b) => a.distance - b.distance); + sortRoutes(prev.list); } return exist ? (isNewRemoteSession ? "restart" : "updated") : "new";
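A minimal standalone sketch (with a simplified, hypothetical RelayInfo shape) of the ordering sortRoutes produces: distance decides first, and on a tie an expiring route sorts after a fresh one, which is also what the [c, a] expectation in the updated routes test below relies on.

type RelayInfo = { hash: string; distance: number; expireAt?: number };

const sortByDistanceThenFreshness = (routes: RelayInfo[]) =>
	routes.sort((a, b) =>
		a.distance === b.distance
			? Number(Boolean(a.expireAt)) - Number(Boolean(b.expireAt)) // non-expiring routes first on ties
			: a.distance - b.distance,
	);

const routes: RelayInfo[] = [
	{ hash: "a", distance: 0, expireAt: Date.now() + 1000 }, // expiring direct route
	{ hash: "c", distance: 0 }, // fresh direct route
	{ hash: "b", distance: 1 }, // relayed route
];
sortByDistanceThenFreshness(routes).map((x) => x.hash); // => ["c", "a", "b"]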
@@ -400,13 +418,21 @@ export class Routes { if (neighbour) { let foundClosest = false; let added = 0; + + let foundPathForDistance = -2; for (let i = 0; i < neighbour.list.length; i++) { const { distance, session, expireAt } = neighbour.list[i]; - if (expireAt && !relaying) { + if (expireAt) { // don't send on old paths if not relaying - // TODO there could be a benifit of doing this (?) - continue; + // and if we have already found a path for the same distance + if (!relaying && foundPathForDistance === distance) { + // TODO there could be a benefit of doing this (?) + + continue; // we already have a path for this distance, so we can skip this one + } + } else { + foundPathForDistance = distance; } if (distance >= redundancy) { @@ -416,6 +442,7 @@ export class Routes { let fanout: Map = ( fanoutMap || (fanoutMap = new Map()) ).get(neighbour.list[i].hash); + if (!fanout) { fanout = new Map(); fanoutMap.set(neighbour.list[i].hash, fanout); diff --git a/packages/transport/stream/test/routes.spec.ts b/packages/transport/stream/test/routes.spec.ts index c47e84931..144423d1d 100644 --- a/packages/transport/stream/test/routes.spec.ts +++ b/packages/transport/stream/test/routes.spec.ts @@ -33,7 +33,7 @@ describe("routes", () => { .get(me)! .get(b)! .list.map((x) => x.hash), - ).to.deep.equal([a, c]); + ).to.deep.equal([c, a]); // [c, a] order because a is expiring, and we want to prioritize c await delay(routeMaxRetentionPeriod + 1000); @@ -160,5 +160,17 @@ describe("routes", () => { expect(fanout!.get(b)!.size).equal(1); expect(fanout!.get(d)!.size).equal(1); }); + + it("will send through expired routes directly if updated info is not yet available", async () => { + const routes = new Routes(me, { signal: controller.signal }); + let session = 0; + + routes.add(a, c, c, -1, session, 0); // lower distance but older session + routes.add(a, d, c, 1, session + 1, 0); // higher distance but newer session + + const fanout = routes.getFanout(a, [c], 2); + expect(fanout!.size).equal(1); // only c will be used because it is a direct route (regardless of whether new expire information has been assigned to this route) + expect(fanout!.get(c)!.size).equal(1); + }); }); }); diff --git a/packages/transport/stream/test/stream.spec.ts b/packages/transport/stream/test/stream.spec.ts index ae40b525a..3384330fe 100644 --- a/packages/transport/stream/test/stream.spec.ts +++ b/packages/transport/stream/test/stream.spec.ts @@ -1892,7 +1892,6 @@ describe("streams", function () { describe("concurrency", () => { let session: TestSessionStream; let streams: ReturnType[]; - let timer: ReturnType; before(async () => {}); @@ -1911,7 +1910,6 @@ describe("streams", function () { }); afterEach(async () => { - timer && clearTimeout(timer); await session.stop(); }); @@ -2826,11 +2824,9 @@ describe("join/leave", () => { }); describe("invalidation", () => { - let extraSession: TestSessionStream; beforeEach(async () => {}); afterEach(async () => { await session?.stop(); - await extraSession?.stop(); }); it("will not get blocked for slow writes", async () => { diff --git a/packages/utils/indexer/interface/src/errors.ts b/packages/utils/indexer/interface/src/errors.ts new file mode 100644 index 000000000..0e02f1733 --- /dev/null +++ b/packages/utils/indexer/interface/src/errors.ts @@ -0,0 +1,5 @@ +export class NotStartedError extends Error { + constructor() { + super("Not started"); + } +} diff --git a/packages/utils/indexer/interface/src/id.ts b/packages/utils/indexer/interface/src/id.ts index 4ef87595d..d985f20f5 100644 --- a/packages/utils/indexer/interface/src/id.ts +++ b/packages/utils/indexer/interface/src/id.ts @@ -35,7 +35,7 @@ export class UnsignedIntegerValue extends IntegerValue { constructor(number: number) { super(); if (!Number.isInteger(number) || number > 4294967295 || number < 0) { - throw new Error("Number is not u32"); + throw new Error("Number is not u32: " + number); } this.number = number; }
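The new NotStartedError gives callers a typed way to tell "the index is closed or not yet started" apart from genuine query failures; the sqlite3 engine further down throws it from its table and cursor accessors. A hedged usage sketch (the retrying caller here is illustrative, not part of this change):

import { NotStartedError } from "@peerbit/indexer-interface";

// hypothetical caller that treats a stopped index as empty instead of failing
const safeCount = async (index: { count: () => Promise<number> }) => {
	try {
		return await index.count();
	} catch (error) {
		if (error instanceof NotStartedError) {
			return 0; // the index was stopped mid-flight
		}
		throw error;
	}
};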
diff --git a/packages/utils/indexer/interface/src/index-engine.ts b/packages/utils/indexer/interface/src/index-engine.ts index 105f1c8e6..fdce8129e 100644 --- a/packages/utils/indexer/interface/src/index-engine.ts +++ b/packages/utils/indexer/interface/src/index-engine.ts @@ -108,7 +108,6 @@ export interface Index, NestedType = any> { request?: IterateOptions, options?: { shape?: S; reference?: boolean }, ): IndexIterator; - getSize(): MaybePromise; start(): MaybePromise; stop(): MaybePromise; diff --git a/packages/utils/indexer/interface/src/index.ts b/packages/utils/indexer/interface/src/index.ts index 26574d531..00a47ab7f 100644 --- a/packages/utils/indexer/interface/src/index.ts +++ b/packages/utils/indexer/interface/src/index.ts @@ -2,3 +2,4 @@ export * from "./id.js"; export * from "./query.js"; export * from "./index-engine.js"; export * from "./utils.js"; +export * from "./errors.js"; diff --git a/packages/utils/indexer/interface/src/query.ts b/packages/utils/indexer/interface/src/query.ts index 58ab87418..277ee5273 100644 --- a/packages/utils/indexer/interface/src/query.ts +++ b/packages/utils/indexer/interface/src/query.ts @@ -256,15 +256,15 @@ export class Nested extends Query { @field({ type: "string" }) id: string; - @field({ type: "string" }) - path: string; + @field({ type: vec("string") }) + path: string[]; @field({ type: vec(Query) }) query: Query[]; constructor(props: { id?: string; - path: string; + path: string | string[]; query: | Query[] | Query | Record< string, | Query[] | Query >; }) { super(); - this.path = props.path; + this.path = Array.isArray(props.path) ? props.path : [props.path]; this.id = props.id ?? uuid(); this.query = toQuery(props.query); }
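Since Nested.path is now serialized as a vector of strings, a nested query can address a field more than one level below the document root, while the constructor keeps accepting a plain string for the single-segment case. A usage sketch (the field names here are illustrative only):

import { Nested, StringMatch } from "@peerbit/indexer-interface";

// single-segment path, equivalent to the old string form
new Nested({
	path: "comments",
	query: [new StringMatch({ key: "author", value: "alice" })],
});

// multi-segment path, e.g. matching document.thread.comments[i].author
new Nested({
	path: ["thread", "comments"],
	query: [new StringMatch({ key: "author", value: "alice" })],
});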
diff --git a/packages/utils/indexer/simple/src/index.ts b/packages/utils/indexer/simple/src/index.ts index 911143496..732017c79 100644 --- a/packages/utils/indexer/simple/src/index.ts +++ b/packages/utils/indexer/simple/src/index.ts @@ -134,9 +134,9 @@ export class HashmapIndex, NestedType = any> } if (typeof value === "number") { - sum = ((sum as number) || 0) + value; + sum = ((sum as unknown as number) || 0) + value; } else if (typeof value === "bigint") { - sum = ((sum as bigint) || 0n) + value; + sum = ((sum as unknown as bigint) || 0n) + value; } } return sum != null ? sum : 0; @@ -176,8 +176,9 @@ export class HashmapIndex, NestedType = any> // Handle query normally const indexedDocuments = await this._queryDocuments(async (doc) => { + let innerHits = new Map(); for (const f of queryCoerced) { - if (!(await this.handleQueryObject(f, doc.value))) { + if (!(await this.handleQueryObject(f, doc.value, innerHits))) { return false; } } @@ -277,37 +278,67 @@ export class HashmapIndex, NestedType = any> private async handleFieldQuery( f: types.StateFieldQuery, obj: any, - startIndex: number, + skipKeys: number, + innerHits: Map, + buildInnerHits = true, ): Promise { // this clause is needed if we have a field that is of type [][] (we will recursively go through each subarray) + const handleArrayResults = async ( + path: string[], + obj: any[] | Uint8Array, + skipKeys: number, + ): Promise => { + const pathKey = buildInnerHits ? path.join(".") : undefined; + const innerHitsValue = pathKey ? innerHits.get(pathKey) : undefined; + if (pathKey && innerHitsValue === false) { + return false; + } + + const fromInnerHits = pathKey; + const objArr = + fromInnerHits && innerHitsValue && (innerHitsValue as []).length > 0 + ? (innerHitsValue as any[]) + : obj; // we have already iterated over this array before; just go over the hits from the last iteration + let newInnerHits: any[] | undefined = fromInnerHits ? [] : undefined; + for (const element of objArr!) { + if (await this.handleFieldQuery(f, element, skipKeys, innerHits)) { + if (!buildInnerHits) { + return true; + } + newInnerHits!.push(element); + } + } + if (!fromInnerHits) { + return false; + } + + if (newInnerHits!.length === 0) { + innerHits.set(pathKey!, false); + return false; + } + + innerHits.set(pathKey!, newInnerHits!); + return true; + }; + if ( Array.isArray(obj) || (obj instanceof Uint8Array && f instanceof types.ByteMatchQuery === false) ) { - for (const element of obj) { - if (await this.handleFieldQuery(f, element, startIndex)) { - return true; - } - } - return false; + return handleArrayResults(f.key, obj, skipKeys); } // Resolve the field from the key path. If we reach an array or nested Document store, // then do a recursive call or a search to look into them - for (let i = startIndex; i < f.key.length; i++) { + for (let i = skipKeys; i < f.key.length; i++) { obj = obj[f.key[i]]; if ( Array.isArray(obj) || (obj instanceof Uint8Array && f instanceof types.ByteMatchQuery === false) ) { - for (const element of obj) { - if (await this.handleFieldQuery(f, element, i + 1)) { - return true; - } - } - return false; + return handleArrayResults(f.key.slice(0, i + 1), obj, i + 1); } if (this.properties.nested?.match(obj)) { const queryCloned = f.clone(); @@ -366,28 +397,51 @@ export class HashmapIndex, NestedType = any> private async handleQueryObject( f: types.Query, value: Record | T, - ): Promise { + innerHits: Map, + skipKeys = 0, + ): Promise<{ result: true; innerHits: any[] } | boolean | undefined> { if (f instanceof types.StateFieldQuery) { - return this.handleFieldQuery(f, value as T, 0); + return this.handleFieldQuery(f, value as T, skipKeys, innerHits); } else if (f instanceof types.Nested) { + // TODO experimental // assume the field value is of array type and iterate over each object and match its parts - let arr = value[f.path]; + let arr = value; + + // we skip the first keys as they address the root object + for (let i = skipKeys; i < f.path.length; i++) { + arr = arr[f.path[i]]; + } + if (!Array.isArray(arr)) { throw new Error("Nested field is not an array"); } - for (const element of arr) { + const newSkipKeys = skipKeys + f.path.length; + outer: for (const element of arr) { for (const query of f.query) { - if (await this.handleQueryObject(query, element)) { - return true; + if ( + !(await this.handleQueryObject( + query, + element, + innerHits, + newSkipKeys, + )) + ) { + continue outer; } } + return true; } return false; // TODO test this codepath } else if (f instanceof types.LogicalQuery) { if (f instanceof types.And) { for (const and of f.and) { - const ret = await this.handleQueryObject(and, value); + const ret = await this.handleQueryObject( + and, + value, + innerHits, + skipKeys, + ); if (!ret) { return ret; } @@ -397,7 +451,13 @@ if (f instanceof types.Or) { for (const or of f.or) { - const ret = await this.handleQueryObject(or, value); + const innerHits = new Map(); // in an Or context we isolate the nested hits so we can match against multiple features independently + const ret = await this.handleQueryObject( + or, + value, + innerHits, + skipKeys, + ); if (ret === true) { return true; } else if (ret === undefined) { @@ -407,7 +467,12 @@ export class HashmapIndex, NestedType
= any> return false; } if (f instanceof types.Not) { - const ret = await this.handleQueryObject(f.not, value); + const ret = await this.handleQueryObject( + f.not, + value, + innerHits, + skipKeys, + ); if (ret === undefined) { return undefined; } diff --git a/packages/utils/indexer/sqlite3/package.json b/packages/utils/indexer/sqlite3/package.json index 83fbacf40..a9a1d10d6 100644 --- a/packages/utils/indexer/sqlite3/package.json +++ b/packages/utils/indexer/sqlite3/package.json @@ -69,12 +69,12 @@ "author": "dao.xyz", "license": "MIT", "dependencies": { - "better-sqlite3": "^11.5.0", + "better-sqlite3": "^11.6.0", "@peerbit/indexer-interface": "^1.1.1", - "@sqlite.org/sqlite-wasm": "^3.47.0-build1" + "@sqlite.org/sqlite-wasm": "^3.47.1-build1" }, "devDependencies": { - "@types/better-sqlite3": "^7.6.11", + "@types/better-sqlite3": "^7.6.12", "@peerbit/indexer-tests": "^1.1.1" } } diff --git a/packages/utils/indexer/sqlite3/src/engine.ts b/packages/utils/indexer/sqlite3/src/engine.ts index 75514786c..677267482 100644 --- a/packages/utils/indexer/sqlite3/src/engine.ts +++ b/packages/utils/indexer/sqlite3/src/engine.ts @@ -7,6 +7,7 @@ import type { } from "@peerbit/indexer-interface"; import * as types from "@peerbit/indexer-interface"; import { v4 as uuid } from "uuid"; +import { PlannableQuery, QueryPlanner } from "./query-planner.js"; import { MissingFieldError, type Table, @@ -14,6 +15,9 @@ import { convertCountRequestToQuery, convertDeleteRequestToQuery, convertFromSQLType, + /* convertFromSQLType, */ + + /* convertFromSQLType, */ convertSearchRequestToQuery, /* getTableName, */ convertSumRequestToQuery, @@ -45,6 +49,7 @@ export class SQLLiteIndex> { primaryKeyArr!: string[]; primaryKeyString!: string; + planner: QueryPlanner; private scopeString?: string; private _rootTables!: Table[]; private _tables!: Map; @@ -53,9 +58,10 @@ export class SQLLiteIndex> { fetch: (amount: number) => Promise; /* countStatement: Statement; */ - timeout: ReturnType; + expire: number; } >; // TODO choose limit better + private cursorPruner: ReturnType | undefined; iteratorTimeout: number; closed: boolean = true; @@ -78,25 +84,28 @@ export class SQLLiteIndex> ? 
"_" + escapePathToSQLName(properties.scope).join("_") : undefined; this.iteratorTimeout = options?.iteratorTimeout || 60e3; + this.planner = new QueryPlanner({ + exec: this.properties.db.exec.bind(this.properties.db), + }); } get tables() { if (this.closed) { - throw new Error("Not started"); + throw new types.NotStartedError(); } return this._tables; } get rootTables() { if (this.closed) { - throw new Error("Not started"); + throw new types.NotStartedError(); } return this._rootTables; } get cursor() { if (this.closed) { - throw new Error("Not started"); + throw new types.NotStartedError(); } return this._cursor; } @@ -169,10 +178,40 @@ export class SQLLiteIndex> } const sqlCreateTable = `create table if not exists ${table.name} (${[...table.fields, ...table.constraints].map((s) => s.definition).join(", ")}) strict`; - const sqlCreateIndex = `create index if not exists ${table.name}_index on ${table.name} (${table.fields.map((field) => escapeColumnName(field.name)).join(", ")})`; - this.properties.db.exec(sqlCreateTable); - this.properties.db.exec(sqlCreateIndex); + + /* const fieldsToIndex = table.fields.filter( + (field) => + field.key !== ARRAY_INDEX_COLUMN && field.key !== table.primary, + ); + if (fieldsToIndex.length > 0) { + let arr = fieldsToIndex.map((field) => escapeColumnName(field.name)); + + const createIndex = async (columns: string[]) => { + const key = createIndexKey(table.name, columns) + const command = `create index if not exists ${key} on ${table.name} (${columns.map((n) => escapeColumnName(n)).join(", ")})`; + await this.properties.db.exec(command); + table.indices.add(key); + + + + const rev = columns.reverse() + const key2 = createIndexKey(table.name, rev) + const command2 = `create index if not exists ${key2} on ${table.name} (${rev.join(", ")})`; + await this.properties.db.exec(command2); + table.indices.add(key2); + } + await createIndex(fieldsToIndex.map(x => x.name)); + await createIndex([table.primary as string, ...fieldsToIndex.map(x => x.name)]); + + if (arr.length > 1) { + for (const field of fieldsToIndex) { + await createIndex([field.name]); + await createIndex([table.primary as string, field.name]); + + } + } + } */ // put and return the id let sqlPut = `insert into ${table.name} (${table.fields.map((field) => escapeColumnName(field.name)).join(", ")}) VALUES (${table.fields.map((_x) => "?").join(", ")}) RETURNING ${table.primary};`; @@ -182,6 +221,7 @@ export class SQLLiteIndex> await this.properties.db.prepare(sqlPut, putStatementKey(table)); await this.properties.db.prepare(sqlReplace, replaceStatementKey(table)); + if (table.parent) { await this.properties.db.prepare( selectChildren(table), @@ -190,6 +230,15 @@ export class SQLLiteIndex> } } + this.cursorPruner = setInterval(() => { + const now = Date.now(); + for (const [k, v] of this._cursor) { + if (v.expire < now) { + this.clearupIterator(k); + } + } + }, this.iteratorTimeout); + this.closed = false; } @@ -205,6 +254,7 @@ export class SQLLiteIndex> return; } this.closed = true; + clearInterval(this.cursorPruner!); await this.clearStatements(); @@ -213,15 +263,23 @@ export class SQLLiteIndex> for (const [k, _v] of this._cursor) { await this.clearupIterator(k); } + + await this.planner.stop(); } async drop(): Promise { + if (this.closed) { + throw new Error(`Already closed index ${this.id}, can not drop`); + } + this.closed = true; + clearInterval(this.cursorPruner!); await this.clearStatements(); // drop root table and cascade // drop table faster by dropping constraints first + for (const table 
of this._rootTables) { await this.properties.db.exec(`drop table if exists ${table.name}`); } @@ -231,6 +289,7 @@ export class SQLLiteIndex> for (const [k, _v] of this._cursor) { await this.clearupIterator(k); } + await this.planner.stop(); } private async resolveDependencies( @@ -253,27 +312,36 @@ export class SQLLiteIndex> table, options?.shape, ); - const sql = `${generateSelectQuery(table, selects)} ${buildJoin(joinMap, true)} where ${this.primaryKeyString} = ? limit 1`; - const stmt = await this.properties.db.prepare(sql, sql); - const rows = await stmt.get([ - table.primaryField?.from?.type - ? convertToSQLType(id.key, table.primaryField.from.type) - : id.key, - ]); - if (!rows) { - continue; + const sql = `${generateSelectQuery(table, selects)} ${buildJoin(joinMap).join} where ${this.primaryKeyString} = ? limit 1`; + try { + const stmt = await this.properties.db.prepare(sql, sql); + const rows = await stmt.get([ + table.primaryField?.from?.type + ? convertToSQLType(id.key, table.primaryField.from.type) + : id.key, + ]); + if ( + rows?.[getTablePrefixedField(table, table.primary as string)] == null + ) { + continue; + } + return { + value: (await resolveInstanceFromValue( + rows, + this.tables, + table, + this.resolveDependencies.bind(this), + true, + options?.shape, + )) as unknown as T, + id, + }; + } catch (error) { + if (this.closed) { + throw new types.NotStartedError(); + } + throw error; } - return { - value: (await resolveInstanceFromValue( - rows, - this.tables, - table, - this.resolveDependencies.bind(this), - true, - options?.shape, - )) as unknown as T, - id, - }; } return undefined; } @@ -282,24 +350,20 @@ export class SQLLiteIndex> const classOfValue = value.constructor as Constructor; return insert( async (values, table) => { - const preId = values[table.primaryIndex]; + let preId = values[table.primaryIndex]; if (preId != null) { const statement = this.properties.db.statements.get( replaceStatementKey(table), )!; - await statement.run( - values.map((x) => (typeof x === "boolean" ? (x ? 1 : 0) : x)), - ); + await statement.run(values); await statement.reset?.(); return preId; } else { const statement = this.properties.db.statements.get( putStatementKey(table), )!; - const out = await statement.get( - values.map((x) => (typeof x === "boolean" ? (x ? 
1 : 0) : x)), - ); + const out = await statement.get(values); await statement.reset?.(); // TODO types @@ -345,47 +409,85 @@ export class SQLLiteIndex> let bindable: any[] = []; let sqlFetch: string | undefined = undefined; + const normalizedQuery = new PlannableQuery({ + query: types.toQuery(request?.query), + sort: request?.sort, + }); + let planningScope: ReturnType; + /* let totalCount: undefined | number = undefined; */ const fetch = async (amount: number | "all") => { kept = undefined; if (!once) { + planningScope = this.planner.scope(normalizedQuery); + let { sql, bindable: toBind } = convertSearchRequestToQuery( - request, + normalizedQuery, this.tables, this._rootTables, { + planner: planningScope, shape: options?.shape, - stable: typeof amount === "number", // if we are to fetch all, we dont need stable sorting + fetchAll: amount === "all", // if we are to fetch all, we dont need stable sorting }, ); + + /* if (indexesToCreate) { + for (const index of indexesToCreate) { + console.log(index); + await this.properties.db.exec(index); + } + } */ + sqlFetch = sql; + // sqlFetch = ` select NULL as 'v_0#id', NULL as 'v_0#value', v_1."id" as 'v_1#id', json_group_array(distinct json_object('__id', v_1__nested__class_DocumentWithProperties."__id", '__index', v_1__nested__class_DocumentWithProperties."__index", 'a', v_1__nested__class_DocumentWithProperties."a", 'b', v_1__nested__class_DocumentWithProperties."b", 'bool', v_1__nested__class_DocumentWithProperties."bool", 'c', v_1__nested__class_DocumentWithProperties."c", 'd', v_1__nested__class_DocumentWithProperties."d")) as v_1__nested__class_DocumentWithProperties, v_1."id" as 'v_1.id' FROM v_1 INDEXED BY v_1_index_id LEFT JOIN v_1__nested__class_DocumentWithProperties AS v_1__nested__class_DocumentWithProperties INDEXED BY v_1__nested__class_DocumentWithProperties_index___parent_id ON v_1.id = v_1__nested__class_DocumentWithProperties.__parent_id LEFT JOIN v_1__nested__class_DocumentWithProperties AS _query_v_1__nested__class_DocumentWithProperties INDEXED BY v_1__nested__class_DocumentWithProperties_index___parent_id_bool ON v_1.id = _query_v_1__nested__class_DocumentWithProperties.__parent_id where _query_v_1__nested__class_DocumentWithProperties."bool" = ? GROUP BY v_1."id" ORDER BY "v_0#id" ASC limit ? offset ?` + /* this.x++; + if (this.x % 1000 === 0) { + console.log("SQL FETCH", sqlFetch); + } */ + /* console.log("SQL FETCH", sqlFetch); */ + + /* if (sqlFetch.trim() === `select class_NumberQueryDocument."id" as 'class_NumberQueryDocument#id', class_NumberQueryDocument."number" as 'class_NumberQueryDocument#number' FROM class_NumberQueryDocument where class_NumberQueryDocument."number" < ? ORDER BY class_NumberQueryDocument.id ASC limit ? offset ?`) { + // sqlFetch = `select class_NumberQueryDocument."id" as 'class_NumberQueryDocument#id', class_NumberQueryDocument."number" as 'class_NumberQueryDocument#number' FROM class_NumberQueryDocument INDEXED BY class_NumberQueryDocument_index where class_NumberQueryDocument.number < ? ORDER BY class_NumberQueryDocument.id ASC limit ? offset ?` + sqlFetch = `select class_NumberQueryDocument.id as 'class_NumberQueryDocument#id', class_NumberQueryDocument.number as 'class_NumberQueryDocument#number' FROM class_NumberQueryDocument where class_NumberQueryDocument.number < ? ORDER BY class_NumberQueryDocument.id ASC limit ? 
offset ?` + + } */ + + /* sqlFetch = `explain query plan ${sqlFetch}`; */ bindable = toBind; + await planningScope.beforePrepare(); + stmt = await this.properties.db.prepare(sqlFetch, sqlFetch); - // stmt.reset?.(); // TODO dont invoke reset if not needed - /* countStmt.reset?.(); */ // Bump timeout timer - clearTimeout(iterator.timeout); - iterator.timeout = setTimeout( - () => this.clearupIterator(requestId), - this.iteratorTimeout, - ); + iterator.expire = Date.now() + this.iteratorTimeout; } once = true; - const allResults: Record[] = await stmt.all([ + /* console.log("----------------------") + console.log(sqlFetch); */ + + const allResults = await planningScope.perform(async () => { + const allResults: Record[] = await stmt.all([ + ...bindable, + ...(amount !== "all" ? [amount, offset] : []), + ]); + return allResults; + }); + + /* const allResults: Record[] = await stmt.all([ ...bindable, - amount === "all" ? Number.MAX_SAFE_INTEGER : amount, - offset, + ...(amount !== "all" ? [amount, + offset] : []) ]); - + */ let results: IndexedResult>[] = await Promise.all( allResults.map(async (row: any) => { let selectedTable = this._rootTables.find( - (table /* row["table_name"] === table.name, */) => + (table) => row[getTablePrefixedField(table, this.primaryKeyString)] != null, )!; @@ -415,21 +517,14 @@ export class SQLLiteIndex> offset += results.length; - /* if (results.length > 0) { - totalCount = - totalCount ?? - (await this.count( - request, - )); - iterator.kept = totalCount - results.length - offsetStart; - } else { - iterator.kept = 0; + /* const uniqueIds = new Set(results.map((x) => x.id.primitive)); + if (uniqueIds.size !== results.length) { + throw new Error("Duplicate ids in result set"); } */ if (amount === "all" || results.length < amount) { hasMore = false; await this.clearupIterator(requestId); - clearTimeout(iterator.timeout); } return results; }; @@ -437,10 +532,7 @@ export class SQLLiteIndex> const iterator = { fetch, /* countStatement: countStmt, */ - timeout: setTimeout( - () => this.clearupIterator(requestId), - this.iteratorTimeout, - ), + expire: Date.now() + this.iteratorTimeout, }; this.cursor.set(requestId, iterator); @@ -450,9 +542,9 @@ export class SQLLiteIndex> all: async () => { const results: IndexedResult>[] = []; while (true) { - const res = await fetch(100); + const res = await fetch("all"); results.push(...res); - if (res.length === 0) { + if (hasMore === false) { break; } } @@ -486,7 +578,6 @@ export class SQLLiteIndex> if (!cache) { return; // already cleared } - clearTimeout(cache.timeout); /* cache.countStatement.finalize?.(); */ // await cache.fetchStatement.finalize?.(); this._cursor.delete(id); @@ -717,10 +808,16 @@ export class SQLiteIndices implements types.Indices { await scope.drop(); } - for (const index of this.indices) { - await index.index.drop(); + if (!this.properties.parent) { + for (const index of this.indices) { + await index.index.stop(); + } + await this.properties.db.drop(); + } else { + for (const index of this.indices) { + await index.index.drop(); + } } - this.scopes.clear(); } } diff --git a/packages/utils/indexer/sqlite3/src/query-planner.ts b/packages/utils/indexer/sqlite3/src/query-planner.ts new file mode 100644 index 000000000..cf7997213 --- /dev/null +++ b/packages/utils/indexer/sqlite3/src/query-planner.ts @@ -0,0 +1,354 @@ +// track timing for optimal index selection +import { field, serialize, vec } from "@dao-xyz/borsh"; +import { sha256Base64Sync } from "@peerbit/crypto"; +import { + And, + BigUnsignedIntegerValue, 
+ BoolQuery, + ByteMatchQuery, + Compare, + IntegerCompare, + IntegerValue, + IsNull, + Nested, + Not, + Or, + Query, + Sort, + StringMatch, + UnsignedIntegerValue, +} from "@peerbit/indexer-interface"; +import { hrtime } from "@peerbit/time"; +import { escapeColumnName } from "./schema.js"; + +export interface QueryIndexPlanner { + // assumes withing a query, each index can be picked independently. For example if we are to join two tables, we can pick the best index for each table + // sorted column names key to execution time for each index that was tried + columnsToIndexes: Map< + string, + { + results: { + used: number; + avg: number; + times: number[]; + indexKey: string; + }[]; + } + >; // +} + +type StmtStats = Map; + +const getSortedNameKey = (tableName: string, names: string[]) => + [tableName, ...names.sort()].join(","); +const createIndexKey = (tableName: string, fields: string[]) => + `${tableName}_index_${fields.map((x) => x).join("_")}`; + +const HALF_MAX_U32 = 2147483647; // rounded down +const HALF_MAX_U64 = 9223372036854775807n; // rounded down + +export const flattenQuery = function* (props?: { + query: Query[]; + sort?: Sort[] | Sort; +}): Generator<{ query: Query[]; sort?: Sort[] | Sort } | undefined> { + if (!props) { + return yield props; + } + // if query contains OR statements, split query into multiple queries so we can run each query with union and then sort + + // TODO this only works atm for one OR statement in the query + let ors: Query[] = []; + let ands: Query[] = []; + let stack = [...props.query]; + let foundOr = false; + for (const q of stack) { + if (q instanceof Or) { + if (foundOr) { + // multiple ORs are not supported + yield props; + return; + } + + ors = q.or; + foundOr = true; + } else if (q instanceof And) { + for (const a of q.and) { + stack.push(a); + } + } else { + ands.push(q); + } + } + + let maxFlatten = 4; // max 4 ORs else the query will be too big + if (ors.length === 0 || ors.length >= maxFlatten) { + yield { + query: ands, + sort: props.sort, + }; + return; + } + for (const or of ors) { + yield { + query: [...ands, ...(Array.isArray(or) ? or : [or])], + sort: props.sort, + }; + } +}; + +const reduceResolution = (value: IntegerValue): IntegerValue => { + if (value instanceof UnsignedIntegerValue) { + return value.number > HALF_MAX_U32 + ? new UnsignedIntegerValue(HALF_MAX_U32) + : new UnsignedIntegerValue(0); + } + + if (value instanceof BigUnsignedIntegerValue) { + return value.value > HALF_MAX_U64 + ? 
new BigUnsignedIntegerValue(HALF_MAX_U64) + : new BigUnsignedIntegerValue(0n); + } + + throw new Error("Unknown integer value type: " + value?.constructor.name); +}; +const nullifyQuery = (query: Query): Query => { + if (query instanceof IntegerCompare) { + return new IntegerCompare({ + compare: Compare.Equal, + value: reduceResolution(query.value), + key: query.key, + }); + } else if (query instanceof StringMatch) { + return new StringMatch({ + key: query.key, + value: "", + method: query.method, + }); + } else if (query instanceof ByteMatchQuery) { + return new ByteMatchQuery({ + key: query.key, + value: new Uint8Array(), + }); + } else if (query instanceof BoolQuery) { + return new BoolQuery({ + key: query.key, + value: false, + }); + } else if (query instanceof And) { + let and: Query[] = []; + for (const condition of query.and) { + and.push(nullifyQuery(condition)); + } + return new And(and); + } else if (query instanceof Or) { + let or: Query[] = []; + for (const condition of query.or) { + or.push(nullifyQuery(condition)); + } + return new Or(or); + } else if (query instanceof Not) { + return new Not(nullifyQuery(query.not)); + } else if (query instanceof IsNull) { + return query; + } else if (query instanceof Nested) { + // TODO remove + throw new Error("Unsupported query type, deprecated"); + } + + throw new Error("Unknown query type: " + query?.constructor.name); +}; + +export class PlannableQuery { + @field({ type: vec(Query) }) + query: Query[]; + + @field({ type: vec(Sort) }) + sort: Sort[]; + + constructor(props: { query: Query[]; sort?: Sort[] | Sort }) { + this.query = props.query; + this.sort = Array.isArray(props.sort) + ? props.sort + : props.sort + ? [props.sort] + : []; + } + + get key(): string { + let query = this.query.map((x) => nullifyQuery(x)); + let nullifiedPlannableQuery = new PlannableQuery({ + query: query, + sort: this.sort, + }); + return sha256Base64Sync(serialize(nullifiedPlannableQuery)); + } +} +export type PlanningSession = ReturnType; + +export class QueryPlanner { + stats: StmtStats = new Map(); + + pendingIndexCreation: Map> = new Map(); + + constructor( + readonly props: { exec: (query: string) => Promise | any }, + ) {} + + async stop() { + for (const promise of this.pendingIndexCreation.values()) { + await promise.catch(() => {}); + } + this.stats.clear(); + } + + scope(query: PlannableQuery) { + let obj = this.stats.get(query.key); + if (obj === undefined) { + obj = { + columnsToIndexes: new Map(), + }; + this.stats.set(query.key, obj); + } + + // returns a function that takes column names and return the index to use + let indexCreateCommands: { key: string; cmd: string }[] | undefined = + undefined; + let pickedIndexKeys: Map = new Map(); // index key to column names key + return { + beforePrepare: async () => { + // create missing indices + if (indexCreateCommands != null) { + for (const { key, cmd } of indexCreateCommands) { + if (this.pendingIndexCreation.has(key)) { + await this.pendingIndexCreation.get(key); + } + const promise = this.props.exec(cmd); + this.pendingIndexCreation.set(key, promise); + await promise; + this.pendingIndexCreation.delete(key); + } + } + + if (this.pendingIndexCreation.size > 0) { + for (const picked of pickedIndexKeys.keys()) { + await this.pendingIndexCreation.get(picked); + } + } + }, + resolveIndex: (tableName: string, columns: string[]): string => { + // first we figure out whether we want to reuse the fastest index or try a new one + // only assume we either do forward or backward column order for now (not 
all n! permutations) + const sortedNameKey = getSortedNameKey(tableName, columns); + let indexStats = obj.columnsToIndexes.get(sortedNameKey); + if (indexStats === undefined) { + indexStats = { + results: [], + }; + obj.columnsToIndexes.set(sortedNameKey, indexStats); + } + + if (indexStats.results.length === 0) { + // create both forward and backward permutations + const permutations = generatePermutations(columns); + for (const columns of permutations) { + const indexKey = createIndexKey(tableName, columns); + const command = `create index if not exists ${indexKey} on ${tableName} (${columns.map((n) => escapeColumnName(n)).join(", ")})`; + + (indexCreateCommands || (indexCreateCommands = [])).push({ + cmd: command, + key: indexKey, + }); + + indexStats.results.push({ + used: 0, + times: [], + avg: -1, // setting -1 will force the first time to be the fastest (i.e. new indices are always tested once) + indexKey, + }); + } + } + + // find the fastest index + let fastestIndex = indexStats.results[0]; + fastestIndex.used++; + pickedIndexKeys.set(fastestIndex.indexKey, sortedNameKey); + + /* if (fastestIndex.used % 300 === 0) { + console.log("INDEX STATS", indexStats.results.map(x => { + return { + key: x.indexKey, + used: x.used, + avg: x.avg, + } + })); + } */ + /* console.log("INDEX STATS", indexStats.results.map(x => { + return { + key: x.indexKey, + used: x.used, + avg: x.avg, + } + }), columns); */ + + // console.log("FASTEST", fastestIndex.indexKey) + return fastestIndex.indexKey!; + }, + perform: async (fn: () => Promise): Promise => { + // perform the query and meaasure time and updates stats for used indices + let t0 = hrtime.bigint(); + const out = await fn(); + let t1 = hrtime.bigint(); + const time = Number(t1 - t0); + // console.log("MEASURE TIME", time, "FOR", [...pickedIndexKeys.keys()]); + + for (const [indexKey, columnsKey] of pickedIndexKeys) { + const indexStats = obj.columnsToIndexes.get(columnsKey); + if (indexStats === undefined) { + throw new Error("index stats not found"); + } + const index = indexStats.results.find((x) => x.indexKey === indexKey); + if (index === undefined) { + throw new Error("index not found"); + } + + // recalculate the avg by updating the time array and calculating the average + index.times.push(time); + if (index.times.length > 20) { + index.times.shift(); + } + index.avg = + index.times.reduce((a, b) => a + b, 0) / index.times.length; + + indexStats.results.sort((a, b) => a.avg - b.avg); // make sure fastest is first + // console.log("INDEX STATS", indexStats.results.map(x => x.lastTime)); + } + + return out; + }, + }; + } +} + +const generatePermutations = (list: string[]) => { + if (list.length === 1) return [list]; + return [list, [...list].reverse()]; +}; +/* const generatePermutations = (list: string[]) => { + const results: string[][] = []; + + function permute(arr: string[], start: number) { + if (start === arr.length - 1) { + results.push([...arr]); // Push a copy of the current permutation + return; + } + + for (let i = start; i < arr.length; i++) { + [arr[start], arr[i]] = [arr[i], arr[start]]; // Swap + permute(arr, start + 1); // Recurse + [arr[start], arr[i]] = [arr[i], arr[start]]; // Swap back (backtrack) + } + } + + permute(list, 0); + return results; +} */ diff --git a/packages/utils/indexer/sqlite3/src/schema.ts b/packages/utils/indexer/sqlite3/src/schema.ts index 6613f6edb..95d32d893 100644 --- a/packages/utils/indexer/sqlite3/src/schema.ts +++ b/packages/utils/indexer/sqlite3/src/schema.ts @@ -14,8 +14,9 @@ import { 
serialize, variant, } from "@dao-xyz/borsh"; -import { toHexString } from "@peerbit/crypto"; +import { fromHexString, toHexString } from "@peerbit/crypto"; import * as types from "@peerbit/indexer-interface"; +import { type PlanningSession, flattenQuery } from "./query-planner.js"; const SQLConversionMap: any = { u8: "INTEGER", @@ -54,6 +55,9 @@ export type BindableValue = | ArrayBuffer | null; +let JSON_GROUP_ARRAY = "json_group_array"; +let JSON_OBJECT = "distinct json_object"; + export const u64ToI64 = (u64: bigint | number) => { return (typeof u64 === "number" ? BigInt(u64) : u64) - 9223372036854775808n; }; @@ -80,7 +84,8 @@ export const convertToSQLType = ( }; const nullAsUndefined = (value: any) => (value === null ? undefined : value); -export const escapeColumnName = (name: string) => `"${name}"`; +export const escapeColumnName = (name: string, char = '"') => + `${char}${name}${char}`; export class MissingFieldError extends Error { constructor(message: string) { @@ -152,6 +157,7 @@ type SQLField = { type: string; isPrimary: boolean; from: Field | undefined; + unwrappedType: FieldType | undefined; path: string[]; describesExistenceOfAnother?: string; }; @@ -172,6 +178,7 @@ export interface Table { parent: Table | undefined; referencedInArray: boolean; isSimpleValue: boolean; + indices: Set; } export const getSQLTable = ( @@ -220,6 +227,7 @@ export const getSQLTable = ( referencedInArray: false, isSimpleValue: false, inline, + indices: new Set(), }; ret.push(table); for (const dep of dependencies) { @@ -259,6 +267,14 @@ export const getTableName = ( path: string[] = [], clazz: string | Constructor, ) => { + let pathKey = path.length > 0 ? path.join("__") + "__" : ""; + if (typeof clazz !== "string") { + const tableName = (clazz as any)["__table_" + pathKey]; + if (tableName) { + return tableName; + } + } + let name: string = typeof clazz === "string" ? clazz : getNameOfClass(clazz); // prefix the generated table name so that the name is a valid SQL identifier (table name) @@ -266,9 +282,11 @@ export const getTableName = ( // leading _ to allow path to have numbers - const ret = - (path.length > 0 ? path.join("__") + "__" : "") + - name.replace(/[^a-zA-Z0-9_]/g, "_"); + const ret = pathKey + name.replace(/[^a-zA-Z0-9_]/g, "_"); + + if (typeof clazz !== "string") { + (clazz as any)["__table_" + pathKey] = ret; + } return ret; }; @@ -318,13 +336,14 @@ export const getSQLFields = ( ? addJoinFieldFromParent : (fields: SQLField[], contstraints: SQLConstraint[]) => { // we resolve primary field here since it might be unknown until this point - const primaryField = + const parentPrimaryField = primary != null ? sqlFields.find((field) => field.name === primary) : undefined; - const parentPrimaryFieldName = primaryField?.key || CHILD_TABLE_ID; - const parentPrimaryFieldType = primaryField - ? primaryField.type + const parentPrimaryFieldName = + parentPrimaryField?.key || CHILD_TABLE_ID; + const parentPrimaryFieldType = parentPrimaryField + ? 
parentPrimaryField.type : "INTEGER"; fields.unshift( @@ -335,6 +354,7 @@ export const getSQLFields = ( type: "INTEGER", isPrimary: true, from: undefined, + unwrappedType: undefined, path: [CHILD_TABLE_ID], }, @@ -344,8 +364,9 @@ export const getSQLFields = ( key: PARENT_TABLE_ID, definition: `${PARENT_TABLE_ID} ${parentPrimaryFieldType}`, type: parentPrimaryFieldType, + from: parentPrimaryField?.from, + unwrappedType: parentPrimaryField?.unwrappedType, isPrimary: false, - from: undefined, path: [PARENT_TABLE_ID], }, ); @@ -412,6 +433,7 @@ export const getSQLFields = ( type: "INTEGER", isPrimary: false, from: undefined, + unwrappedType: undefined, path: [ARRAY_INDEX_COLUMN], }, ...table.fields.slice(2), @@ -442,6 +464,7 @@ export const getSQLFields = ( type: fieldType, isPrimary, from: field, + unwrappedType: unwrapNestedType(field.type), path: [...path.slice(1), key], }); }; @@ -529,6 +552,7 @@ export const getSQLFields = ( type: "bool", isPrimary: false, from: undefined, + unwrappedType: undefined, path: [...path.slice(1), key], describesExistenceOfAnother: path[path.length - 1], }); @@ -630,7 +654,7 @@ const getTableFromValue = ( field: Field, value?: any, ): Table => { - let clazzName: string | undefined = undefined; + let clazzName: string | Constructor | undefined = undefined; if (!isNestedType(field.type)) { clazzName = WRAPPED_SIMPLE_VALUE_VARIANT; } else { @@ -649,7 +673,7 @@ const getTableFromValue = ( continue; } if (ctor) { - clazzName = getNameOfClass(ctor); + clazzName = ctor; break; } } @@ -781,7 +805,7 @@ export const insert = async ( for (const _field of subTable.fields) { bindableValues.push(null); } - bindableValues[bindableValues.length - 1] = false; // assign the value "false" to the exist field column + bindableValues[bindableValues.length - 1] = 0; // assign the value "false" to the exist field column continue; } @@ -790,7 +814,7 @@ export const insert = async ( if (table.inline) { bindableValues.push(...values); // insert the bindable values into the parent bindable array if (field.type instanceof OptionKind) { - bindableValues.push(true); // assign the value "true" to the exist field column + bindableValues.push(1); // assign the value "true" to the exist field column } return undefined; } else { @@ -906,7 +930,7 @@ export const generateSelectQuery = ( table: Table, selects: { from: string; as: string }[], ) => { - return `SELECT ${selects.map((x) => `${x.from} as ${x.as}`).join(", ")} FROM ${table.name}`; + return `select ${selects.map((x) => `${x.from} as ${x.as}`).join(", ")} FROM ${table.name}`; }; export const selectAllFieldsFromTables = ( @@ -918,24 +942,26 @@ export const selectAllFieldsFromTables = ( from: string; as: string; }[]; - joins: Map; + joins: Map; + groupBy: string | undefined; }[] = []; for (const table of tables) { - const { selects, join: joinFromSelect } = selectAllFieldsFromTable( - table, - shape, - ); - selectsPerTable.push({ selects, joins: joinFromSelect }); + const { + selects, + join: joinFromSelect, + groupBy, + } = selectAllFieldsFromTable(table, shape); + + selectsPerTable.push({ selects, joins: joinFromSelect, groupBy }); } // pad with empty selects to make sure all selects have the same length - /* const maxSelects = Math.max(...selectsPerTable.map(x => x.selects.length)); */ - let newSelects: { from: string; as: string; }[][] = []; + for (const [i, selects] of selectsPerTable.entries()) { const newSelect = []; for (const [j, selectsOther] of selectsPerTable.entries()) { @@ -948,11 +974,6 @@ export const selectAllFieldsFromTables = 
( } } newSelects.push(newSelect); - - /* let pad = 0; - while (select.selects.length < maxSelects) { - select.selects.push({ from: "NULL", as: `'pad#${++pad}'` }); - } */ } // also return table name for (const [i, selects] of selectsPerTable.entries()) { @@ -969,8 +990,67 @@ export const selectAllFieldsFromTable = ( let stack: { table: Table; shape?: types.Shape }[] = [{ table, shape }]; let join: Map = new Map(); const fieldResolvers: { from: string; as: string }[] = []; + let groupByParentId = false; for (const tableAndShape of stack) { - if (!tableAndShape.table.inline) { + if (tableAndShape.table.referencedInArray) { + let selectBuilder = `${JSON_GROUP_ARRAY}(${JSON_OBJECT}(`; + + groupByParentId = true; // we need to group by the parent id, as otherwise we will not get back more than 1 result + + let first = false; + const as = createReconstructReferenceName(tableAndShape.table); + + for (const field of tableAndShape.table.fields) { + if ( + (field.isPrimary || + !tableAndShape.shape || + matchFieldInShape(tableAndShape.shape, [], field) || + // also always include the index field + field.name === ARRAY_INDEX_COLUMN) && + field.name !== PARENT_TABLE_ID + ) { + let resolveField = `${as}.${escapeColumnName(field.name)}`; + // if field is bigint we need to convert it to string, so that later in a JSON.parse scenario it is not converted to a number, but remains a string until we can convert it back to a bigint manually + if (field.unwrappedType === "u64") { + resolveField = `CAST(${resolveField} AS TEXT)`; + } + + // if field is blob we need to convert it to hex string + if (field.type === "BLOB") { + resolveField = `HEX(${resolveField})`; + } + + if (first) { + selectBuilder += `, `; + } + first = true; + selectBuilder += `${escapeColumnName(field.name, "'")}, ${resolveField}`; + } + } + selectBuilder += `)) `; // FILTER (WHERE ${tableAndShape.table.name}.${tableAndShape.table.primary} IS NOT NULL) + + fieldResolvers.push({ + from: selectBuilder, + as, + }); + + join.set(createReconstructReferenceName(tableAndShape.table), { + as, + table: tableAndShape.table, + type: "left" as const, + columns: [], + }); + } else if (!tableAndShape.table.inline) { + // we end up here for simple joins that are not arrays and not inlined + if (tableAndShape.table.parent != null) { + join.set(createReconstructReferenceName(tableAndShape.table), { + as: tableAndShape.table.name, + table: tableAndShape.table, + type: "left" as const, + columns: [], + }); + } + for (const field of tableAndShape.table.fields) { if ( field.isPrimary || @@ -986,10 +1066,6 @@ } for (const child of tableAndShape.table.children) { - if (child.referencedInArray) { - continue; - } - let childShape: types.Shape | undefined = undefined; if (tableAndShape.shape) { const parentPath = child.parentPath?.slice(1); @@ -1008,11 +1084,7 @@ ? maybeShape[0] : maybeShape; } - stack.push({ table: child, shape: childShape }); - if (!child.inline) { - join.set(child.name, { as: child.name, table: child }); - } } }
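Because json_group_array/json_object (the JSON_GROUP_ARRAY/JSON_OBJECT constants above) coerce column values, the select side CASTs u64 columns to TEXT (JSON.parse would otherwise round big integers to doubles) and HEX-encodes BLOBs; resolveInstanceFromValue below undoes both. A minimal sketch of that decode step, with a hypothetical row and a local stand-in for the fromHexString helper imported from @peerbit/crypto:

// hypothetical value of one json_group_array(json_object(...)) column for a child table
const raw = '[{"__parent_id":"k1","seq":"18446744073709551615","data":"DEADBEEF"}]';

const fromHexString = (hex: string) =>
	Uint8Array.from(hex.match(/.{2}/g) ?? [], (byte) => parseInt(byte, 16));

const decoded = (JSON.parse(raw) as any[]).map((item) => ({
	...item,
	seq: BigInt(item.seq), // the u64 was CAST to TEXT in SQL; convert back to bigint
	data: fromHexString(item.data), // the BLOB was HEX()-encoded in SQL
}));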
@@ -1021,6 +1093,10 @@ } return { + groupBy: groupByParentId ? `${table.name}.${escapeColumnName(table.primary as string)}` || + undefined : undefined, selects: fieldResolvers, // `SELECT ${fieldResolvers.join(", ")} FROM ${table.name}`, join, }; }; @@ -1079,24 +1155,55 @@ export const resolveInstanceFromValue = async < : maybeShape; if (isArray) { - let once = false; + /* let once = false; */ let resolvedArr = []; for (const subtable of subTables) { - // TODO types - let rootTable = getNonInlinedTable(table); - const arr = await resolveChildren( - fromTablePrefixedValues[ - getTablePrefixedField( - rootTable, - rootTable.primary as string, - !tablePrefixed, - ) - ], - subtable, - ); - if (arr) { - once = true; + // check if the array is already in the provided row + let arr: any[] | undefined = undefined; + const tableName = createReconstructReferenceName(subtable); + if (fromTablePrefixedValues[tableName]) { + arr = JSON.parse(fromTablePrefixedValues[tableName]) as Array; + arr = arr.filter((x) => x[subtable.primary as string] != null); + + // we need to go over all fields that are to be bigints and convert + // them back to bigints + // for blob fields we need to convert them back to Uint8Array + for (const field of subtable.fields) { + if (field.name === PARENT_TABLE_ID) { + continue; + } + if (field.unwrappedType === "u64") { + for (const item of arr!) { + item[field.name] = BigInt(item[field.name]); + } + } else if (field.type === "BLOB") { + for (const item of arr!) { + item[field.name] = fromHexString(item[field.name]); + } + } + } + } else { + if (subtable.children) { + // TODO we only end up here when we resolve nested arrays, + // which should instead be resolved in a nested select (with json_group_array and json_object) + let rootTable = getNonInlinedTable(table); + const parentId = + fromTablePrefixedValues[ + getTablePrefixedField( + rootTable, + rootTable.primary as string, + !tablePrefixed, + ) + ]; + + arr = await resolveChildren(parentId, subtable); + } else { + arr = []; + } + } + if (arr && arr.length > 0) { + /* once = true; */ for (const element of arr) { const resolved: SimpleNested | any = await resolveInstanceFromValue( element, } } - if (!once) { - obj[field.key] = undefined; - } else { - obj[field.key] = resolvedArr; - } + obj[field.key] = resolvedArr; // we cannot do option(vec('T')) since we don't store the option type for Arrays (TODO) } else { // resolve nested object from row directly /* let extracted: any = {} */ @@ -1251,7 +1354,7 @@ export const convertDeleteRequestToQuery = ( ): { sql: string; bindable: any[] } => { const { query, bindable } = convertRequestToQuery( "delete", - request, + { query: types.toQuery(request.query) }, tables, table, ); @@ -1268,7 +1371,7 @@ export const convertSumRequestToQuery = ( ): { sql: string; bindable: any[] } => { const { query, bindable } = convertRequestToQuery( "sum", - request, + { query: types.toQuery(request.query), key: request.key }, tables, table, ); @@ -1293,7 +1396,7 @@ export const convertCountRequestToQuery = ( ): { sql: string; bindable: any[] } => { const { query, bindable } = convertRequestToQuery( "count", - request, + { query: request?.query ?
types.toQuery(request.query) : undefined }, tables, table, ); @@ -1303,13 +1406,76 @@ export const convertCountRequestToQuery = ( }; }; +const buildOrderBy = ( + sort: types.Sort[] | types.Sort | undefined, + tables: Map, + table: Table, + joinBuilder: Map, + resolverBuilder: { from: string; as: string }[], + path: string[] = [], + options?: { + fetchAll?: boolean; + planner?: PlanningSession; + }, +) => { + let orderByBuilder: string | undefined = undefined; + + if ( + (!sort || (Array.isArray(sort) && sort.length === 0)) && + !options?.fetchAll + ) { + sort = + table.primary && path.length === 0 + ? [{ key: [table.primary], direction: types.SortDirection.ASC }] + : undefined; + } + + if (sort) { + let sortArr = Array.isArray(sort) ? sort : [sort]; + if (sortArr.length > 0) { + orderByBuilder = ""; + let once = false; + for (const sort of sortArr) { + const { foreignTables, queryKey } = resolveTableToQuery( + table, + tables, + joinBuilder, + [...path, ...sort.key], + undefined, + true, + ); + + for (const foreignTable of foreignTables) { + if (once) { + orderByBuilder += ", "; + } + once = true; + + foreignTable.columns.push(queryKey); // add the sort key to the list of columns that will be used for this query + orderByBuilder += `"${foreignTable.as}#${queryKey}" ${sort.direction === types.SortDirection.ASC ? "ASC" : "DESC"}`; + + resolverBuilder.push({ + from: `${table.name}.${escapeColumnName(queryKey)}`, + as: `'${foreignTable.as}#${queryKey}'`, + }); + } + } + } + } + + return { orderByBuilder }; +}; + export const convertSearchRequestToQuery = ( - request: types.IterateOptions | undefined, + request: + | { query: types.Query[]; sort?: types.Sort[] | types.Sort } + | undefined, tables: Map, rootTables: Table[], options?: { shape?: types.Shape | undefined; - stable?: boolean; + fetchAll?: boolean; + planner?: PlanningSession; }, ): { sql: string; bindable: any[] } => { let unionBuilder = ""; @@ -1320,30 +1486,32 @@ export const convertSearchRequestToQuery = ( const selectsPerTable = selectAllFieldsFromTables(rootTables, options?.shape); let bindableBuilder: any[] = []; + for (const [i, table] of rootTables.entries()) { - const { selects, joins: joinFromSelect } = selectsPerTable[i]; - const selectQuery = generateSelectQuery(table, selects); + const { selects, joins, groupBy } = selectsPerTable[i]; + try { - const { orderBy, query, bindable } = convertRequestToQuery( - "iterate", - request, + const { orderByBuilder } = buildOrderBy( + request?.sort, tables, table, - joinFromSelect, + joins, + selects, [], - { - stable: options?.stable, - }, + options, ); - unionBuilder += `${unionBuilder.length > 0 ? " UNION ALL " : ""} ${selectQuery} ${query}`; - orderByClause = - orderBy?.length > 0 - ? orderByClause.length > 0 - ? orderByClause + ", " + orderBy - : orderBy - : orderByClause; - matchedOnce = true; - bindableBuilder.push(...bindable); + + if (!orderByClause && orderByBuilder) { + // assume all order by clauses will be the same + orderByClause = + orderByBuilder.length > 0 + ? orderByClause.length > 0 + ? 
orderByClause + ", " + orderByBuilder + : orderByBuilder + : orderByClause; + } + + //orderByAddedOnce = true; } catch (error) { if (error instanceof MissingFieldError) { lastError = error; @@ -1351,6 +1519,33 @@ export const convertSearchRequestToQuery = ( } throw error; } + + const selectQuery = generateSelectQuery(table, selects); + + for (const flattenRequest of flattenQuery(request)) { + try { + const { query, bindable } = convertRequestToQuery( + "iterate", + flattenRequest, + tables, + table, + new Map(joins), // copy the map, else we might might do unececessary joins + [], + options, + ); + + unionBuilder += `${unionBuilder.length > 0 ? " UNION " : ""} ${selectQuery} ${query} ${groupBy ? "GROUP BY " + groupBy : ""}`; + matchedOnce = true; + bindableBuilder.push(...bindable); + } catch (error) { + if (error instanceof MissingFieldError) { + lastError = error; + orderByClause = ""; + continue; + } + throw error; + } + } } if (!matchedOnce) { @@ -1358,20 +1553,43 @@ export const convertSearchRequestToQuery = ( } return { - sql: `${unionBuilder} ${orderByClause ? "ORDER BY " + orderByClause : ""} limit ? offset ?`, + sql: `${unionBuilder} ${orderByClause ? "ORDER BY " + orderByClause : ""} ${options?.fetchAll ? "" : "limit ? offset ?"}`, bindable: bindableBuilder, }; }; -type SearchQueryParts = { query: string; orderBy: string; bindable: any[] }; -type CountQueryParts = { query: string; join: string; bindable: any[] }; +type SearchQueryParts = { + query: string; + /* orderBy: string; */ + bindable: any[]; + selects: string[]; +}; +type CountQueryParts = { + query: string; + join: string; + bindable: any[]; + selects: string[]; +}; -function isIterateRequest( - request: any, - type: string, -): request is types.IterateOptions | undefined { - return type === "iterate"; -} +const getOrSetRootTable = ( + joinBuilder: Map, + table: Table, +) => { + const refName = createQueryTableReferenceName(table); + let ref = joinBuilder.get(refName); + if (ref) { + return ref; + } + const join = { + // add the root as a join even though it is not, just so we can collect the columns it will be queried + table: table, + type: "root" as const, + as: table.name, + columns: [], + }; + joinBuilder.set(refName, join); + return join; +}; const convertRequestToQuery = < T extends "iterate" | "count" | "sum" | "delete", @@ -1380,26 +1598,40 @@ const convertRequestToQuery = < type: T, request: | (T extends "iterate" - ? types.IterateOptions + ? { + query?: types.Query[]; + sort?: types.Sort[] | types.Sort; + } : T extends "count" - ? types.CountOptions + ? { + query?: types.Query[]; + } : T extends "delete" - ? types.DeleteOptions - : types.SumOptions) + ? 
{ + query?: types.Query[]; + } + : { + query?: types.Query[]; + key: string | string[]; + }) | undefined, tables: Map, table: Table, - extraJoin?: Map, + extraJoin?: Map, path: string[] = [], options?: { - stable?: boolean; + fetchAll?: boolean; + planner?: PlanningSession; }, ): R => { let whereBuilder = ""; let bindableBuilder: any[] = []; - let orderByBuilder: string | undefined = undefined; + /* let orderByBuilder: string | undefined = undefined; */ /* let tablesToSelect: string[] = [table.name]; */ - let joinBuilder: Map = extraJoin || new Map(); + let joinBuilder: Map = extraJoin || new Map(); + + getOrSetRootTable(joinBuilder, table); + const coercedQuery = types.toQuery(request?.query); if (coercedQuery.length === 1) { const { where, bindable } = convertQueryToSQLQuery( @@ -1408,6 +1640,8 @@ const convertRequestToQuery = < table, joinBuilder, path, + undefined, + 0, ); whereBuilder += where; bindableBuilder.push(...bindable); @@ -1418,14 +1652,19 @@ const convertRequestToQuery = < table, joinBuilder, path, + undefined, + 0, ); whereBuilder += where; bindableBuilder.push(...bindable); } - if (isIterateRequest(request, type)) { + /* if (isIterateRequest(request, type)) { let sort = request?.sort; - if (!sort && options?.stable) { + if ( + (!sort || (Array.isArray(sort) && sort.length === 0)) && + !options?.fetchAll + ) { sort = table.primary && path.length === 0 ? [{ key: [table.primary], direction: types.SortDirection.ASC }] @@ -1446,61 +1685,97 @@ const convertRequestToQuery = < undefined, true, ); - for (const table of foreignTables) { + + for (const foreignTable of foreignTables) { if (once) { orderByBuilder += ", "; } once = true; - orderByBuilder += `${table.as}.${queryKey} ${sort.direction === types.SortDirection.ASC ? "ASC" : "DESC"}`; + + foreignTable.columns.push(queryKey); // add the sort key to the list of columns that will be used for this query + + orderByBuilder += `${foreignTable.as}.${queryKey} ${sort.direction === types.SortDirection.ASC ? "ASC" : "DESC"}`; } } - - /* orderByBuilder += request.sort - .map( - (sort) => - `${table.name}.${sort.key} ${sort.direction === types.SortDirection.ASC ? "ASC" : "DESC"}` - ) - .join(", "); */ } } - } + } */ const where = whereBuilder.length > 0 ? "where " + whereBuilder : undefined; if (extraJoin && extraJoin.size > 0) { insertMapIntoMap(joinBuilder, extraJoin); } - let join = buildJoin(joinBuilder, type === "iterate" ? true : false); + let { join } = buildJoin(joinBuilder, options); const query = `${join ? join : ""} ${where ? where : ""}`; return { query, - orderBy: orderByBuilder, + /* orderBy: orderByBuilder, */ bindable: bindableBuilder, } as R; }; export const buildJoin = ( - joinBuilder: Map, - resolveAllColumns: boolean, -) => { - let joinTypeDefault = resolveAllColumns - ? /* "FULL OUTER JOIN" */ "LEFT OUTER JOIN" - : "JOIN"; + joinBuilder: Map, + options?: { + planner?: PlanningSession; + }, +): { join: string } => { + /* let joinTypeDefault = resolveAllColumns + ? 
"CROSS JOIN" + : "JOIN"; */ let join = ""; + + for (const [_key, table] of joinBuilder) { + if (table.type !== "root") { + continue; + } + const out = _buildJoin(table, options); + join += out.join; + } for (const [_key, table] of joinBuilder) { + if (table.type === "root") { + continue; + } + const out = _buildJoin(table, options); + join += out.join; + } + return { join }; +}; + +const _buildJoin = ( + table: JoinOrRootTable, + options?: { + planner?: PlanningSession; + }, +) => { + let join = ""; + let indexedBy: string | undefined = undefined; + if (table.type !== "root") { + table!.columns.push(PARENT_TABLE_ID); // we unshift because we join on the parent id before where clause + } + + if (table!.columns.length > 0) { + const usedColumns = removeDuplicatesOrdered(table!.columns); + indexedBy = options?.planner + ? ` INDEXED BY ${options.planner.resolveIndex(table.table.name, usedColumns)} ` + : ""; + } + + if (table.type !== "root") { let nonInlinedParent = table.table.parent && getNonInlinedTable(table.table.parent); if (!nonInlinedParent) { throw new Error("Unexpected: missing parent"); } - - let joinType = table.table.referencedInArray - ? /* "FULL OUTER JOIN" */ "LEFT OUTER JOIN" - : joinTypeDefault; - join += `${joinType} ${table.table.name} AS ${table.as} ON ${nonInlinedParent.name}.${nonInlinedParent.primary} = ${table.as}.${PARENT_TABLE_ID} `; + let joinType = table.type === "cross" ? "LEFT JOIN" : "LEFT JOIN"; + join += ` ${joinType} ${table.table.name} AS ${table.as} ${indexedBy} ON ${nonInlinedParent.name}.${nonInlinedParent.primary} = ${table.as}.${PARENT_TABLE_ID} `; + } else if (indexedBy) { + join += indexedBy; } - return join; + + return { join }; }; const insertMapIntoMap = (map: Map, insert: Map) => { @@ -1513,9 +1788,10 @@ export const convertQueryToSQLQuery = ( query: types.Query, tables: Map, table: Table, - joinBuilder: Map, - path: string[] = [], - tableAlias: string | undefined = undefined, + joinBuilder: Map, + path: string[], + tableAlias: string | undefined, + skipKeys: number, ): { where: string; bindable: any[] } => { let whereBuilder = ""; let bindableBuilder: any[] = []; @@ -1524,7 +1800,8 @@ export const convertQueryToSQLQuery = ( const handleAnd = ( queries: types.Query[], path: string[], - tableAlias?: string, + tableAlias: string | undefined, + keysOffset: number, ) => { for (const query of queries) { const { where, bindable } = convertQueryToSQLQuery( @@ -1534,6 +1811,7 @@ export const convertQueryToSQLQuery = ( joinBuilder, path, tableAlias, + keysOffset, ); whereBuilder = whereBuilder.length > 0 ? 
`(${whereBuilder}) AND (${where})` : where; @@ -1549,16 +1827,18 @@ export const convertQueryToSQLQuery = ( joinBuilder, path, tableAlias, + skipKeys, ); whereBuilder += where; bindableBuilder.push(...bindable); } else if (query instanceof types.Nested) { let joinPrefix = "__" + String(tables.size); - path = [...path, query.path]; - handleAnd(query.query, path, joinPrefix); + path = [...path, ...query.path]; + let newSkipKeys = skipKeys + query.path.length; + handleAnd(query.query, path, joinPrefix, newSkipKeys); } else if (query instanceof types.LogicalQuery) { if (query instanceof types.And) { - handleAnd(query.and, path, tableAlias); + handleAnd(query.and, path, tableAlias, skipKeys); } else if (query instanceof types.Or) { for (const subquery of query.or) { const { where, bindable } = convertQueryToSQLQuery( @@ -1568,9 +1848,10 @@ export const convertQueryToSQLQuery = ( joinBuilder, path, tableAlias, + skipKeys, ); whereBuilder = - whereBuilder.length > 0 ? `(${whereBuilder}) OR (${where})` : where; + whereBuilder.length > 0 ? `(${whereBuilder}) OR(${where})` : where; bindableBuilder.push(...bindable); } } else if (query instanceof types.Not) { @@ -1581,8 +1862,9 @@ export const convertQueryToSQLQuery = ( joinBuilder, path, tableAlias, + skipKeys, ); - whereBuilder = `NOT (${where})`; + whereBuilder = `NOT(${where})`; bindableBuilder.push(...bindable); } else { throw new Error("Unsupported query type: " + query.constructor.name); @@ -1601,38 +1883,54 @@ const cloneQuery = (query: types.StateFieldQuery) => { return deserialize(serialize(query), types.StateFieldQuery); }; +type JoinOrRootTable = JoinTable | RootTable; + type JoinTable = { table: Table; as: string; + type: "left" | "cross"; + columns: string[]; }; -const createTableReferenceName = ( +type RootTable = { + type: "root"; + table: Table; + as: string; + columns: string[]; +}; + +/* const createQueryTableReferenceName = ( table: Table, alias: string | undefined, - fieldType: FieldType, - joinSize: number, ) => { + if ( - !alias && - (fieldType instanceof VecKind || - (fieldType instanceof OptionKind && - fieldType.elementType instanceof VecKind)) + !alias ) { - let aliasSuffix = "_" + String(joinSize); + let aliasSuffix = + "_query"; // "_" + String(joinSize); TODO this property will make every join unique, which is not wanted unless (ever?) since we can do OR in SQL which means we can do one join and perform AND/OR logic without joining multiple times to apply multiple conditions alias = aliasSuffix; } const tableNameAs = alias ? alias + "_" + table.name : table.name; return tableNameAs; +}; */ + +const createQueryTableReferenceName = (table: Table) => { + return table.parent == null ? table.name : "_query_" + table.name; +}; + +const createReconstructReferenceName = (table: Table) => { + return table.name; /* table.parent == null ? table.name : "_rec_" + table.name; */ }; const resolveTableToQuery = ( table: Table, tables: Map, - join: Map, + join: Map, path: string[], alias: string | undefined, searchSelf: boolean, -) => { +): { queryKey: string; foreignTables: JoinOrRootTable[] } => { // we are matching in two ways. // 1. 
joins
@@ -1649,12 +1947,19 @@ const resolveTableToQuery = (
 		if (field) {
 			return {
 				queryKey: field.name,
-				foreignTables: [{ table, as: table.name }],
+				foreignTables: [getOrSetRootTable(join, table)],
 			};
 		}
 	}

-	let currentTables: JoinTable[] = [{ table, as: alias || table.name }];
+	let currentTables: JoinTable[] = [
+		{
+			table,
+			as: alias || table.name,
+			type: "cross" as const,
+			columns: [],
+		},
+	];
 	let prevTables: JoinTable[] | undefined = undefined;

 	// outer:
@@ -1667,20 +1972,29 @@ const resolveTableToQuery = (
 			if (!field && currentTable.children.length > 0) {
 				// second arg is needed because, with polymorphic fields, we might end up here intentionally to check which tables to query
 				throw new MissingFieldError(
-					`Property with key "${key}" is not found in the schema ${JSON.stringify(schema.fields.map((x) => x.key))}`,
+					`Property with key "${key}" is not found in the schema ${JSON.stringify(schema.fields.map((x) => x.key))} `,
 				);
 			}

 			for (const child of currentTable.children) {
-				const tableNameAs = createTableReferenceName(
+				const tableNameAs = createQueryTableReferenceName(
 					child,
-					alias,
+					/* alias */ /* ,
 					field.type,
-					join.size,
+					join.size, */
 				);
+
 				let isMatching = child.parentPath![child.parentPath!.length - 1] === key;
 				if (isMatching) {
-					const tableWithAlias = { table: child, as: tableNameAs };
+					const tableWithAlias = {
+						columns: [],
+						table: child,
+						as: tableNameAs,
+						type:
+							currentTable.children.length > 1
+								? ("left" as const)
+								: ("cross" as const),
+					};
 					if (child.isSimpleValue) {
 						if (!child.inline) {
 							join.set(tableNameAs, tableWithAlias);
@@ -1744,9 +2058,10 @@ const convertStateFieldQuery = (
 	query: types.StateFieldQuery,
 	tables: Map,
 	table: Table,
-	join: Map,
+	join: Map,
 	path: string[],
-	tableAlias: string | undefined = undefined,
+	tableAlias: string | undefined,
+	skipKeys: number,
 ): { where: string; bindable: any[] } => {
 	// if field is represented as a foreign table, do a join and compare
 	const inlinedName = getInlineTableFieldName(query.key);
 	const tableField = table.fields.find(
 		(x) => x.name === inlinedName,
 	); /* stringArraysEquals(query.key, [...table.parentPath, x.name]) )*/
 	const isForeign = !tableField; // table.fields.find(x => x.name === query.key[query.key.length - 1])
 	if (isForeign) {
+		const tablePath: string[] = [...path];
+		for (let i = skipKeys; i < query.key.length; i++) {
+			tablePath.push(query.key[i]);
+		}
 		const { queryKey, foreignTables } = resolveTableToQuery(
 			table,
 			tables,
 			join,
-			[...path, ...query.key],
+			tablePath,
 			tableAlias,
 			false,
 		);
@@ -1772,6 +2091,7 @@ const convertStateFieldQuery = (
 			if (ftable.table === table) {
 				throw new Error("Unexpected");
 			}
+
 			const { where, bindable } = convertQueryToSQLQuery(
 				query,
 				tables,
 				ftable.table,
 				join,
 				path,
 				ftable.as,
+				skipKeys,
 			);
 			whereBuilder.push(where);
 			bindableBuilder.push(bindable);
@@ -1789,6 +2110,12 @@ const convertStateFieldQuery = (
 		};
 	}

+	const columnAggregator = join.get(createQueryTableReferenceName(table));
+	if (!columnAggregator) {
+		throw new Error("Unexpected: missing root table reference");
+	}
+	columnAggregator.columns.push(inlinedName);
+
 	let bindable: any[] = [];
 	const keyWithTable =
 		(tableAlias || table.name) + "." + escapeColumnName(inlinedName);
@@ -1797,10 +2124,10 @@ const convertStateFieldQuery = (
 	let statement = "";
 	if (query.method === types.StringMatchMethod.contains) {
-		statement = `${keyWithTable} LIKE ?`;
+		statement = `${keyWithTable} LIKE ? `;
 		bindable.push(`%${query.value}%`);
 	} else if (query.method === types.StringMatchMethod.prefix) {
-		statement = `${keyWithTable} LIKE ?`;
+		statement = `${keyWithTable} LIKE ? `;
 		bindable.push(`${query.value}%`);
 	} else if (query.method === types.StringMatchMethod.exact) {
 		statement = `${keyWithTable} = ?`;
@@ -1819,7 +2146,7 @@ const convertStateFieldQuery = (
 	} else if (query instanceof types.IntegerCompare) {
 		if (tableField!.type === "BLOB") {
 			// TODO perf
-			where = `hex(${keyWithTable}) LIKE ?`;
+			where = `hex(${keyWithTable}) LIKE ? `;
 			bindable.push(
 				`%${toHexString(new Uint8Array([Number(query.value.value)]))}%`,
 			);
@@ -1827,15 +2154,15 @@ const convertStateFieldQuery = (
 		if (query.compare === types.Compare.Equal) {
 			where = `${keyWithTable} = ?`;
 		} else if (query.compare === types.Compare.Greater) {
-			where = `${keyWithTable} > ?`;
+			where = `${keyWithTable} > ? `;
 		} else if (query.compare === types.Compare.Less) {
-			where = `${keyWithTable} < ?`;
+			where = `${keyWithTable} < ? `;
 		} else if (query.compare === types.Compare.GreaterOrEqual) {
-			where = `${keyWithTable} >= ?`;
+			where = `${keyWithTable} >= ? `;
 		} else if (query.compare === types.Compare.LessOrEqual) {
-			where = `${keyWithTable} <= ?`;
+			where = `${keyWithTable} <= ? `;
 		} else {
-			throw new Error(`Unsupported compare type: ${query.compare}`);
+			throw new Error(`Unsupported compare type: ${query.compare} `);
 		}

 		if (unwrapNestedType(tableField.from!.type) === "u64") {
@@ -1855,3 +2182,14 @@ const convertStateFieldQuery = (
 	}
 	return { where, bindable };
 };
+
+const removeDuplicatesOrdered = (arr: string[]) => {
+	let seen = new Set();
+	return arr.filter((item) => {
+		if (seen.has(item)) {
+			return false;
+		}
+		seen.add(item);
+		return true;
+	});
+};
diff --git a/packages/utils/indexer/sqlite3/src/sqlite3-messages.worker.ts b/packages/utils/indexer/sqlite3/src/sqlite3-messages.worker.ts
index a382053ce..f2afa9902 100644
--- a/packages/utils/indexer/sqlite3/src/sqlite3-messages.worker.ts
+++ b/packages/utils/indexer/sqlite3/src/sqlite3-messages.worker.ts
@@ -24,6 +24,10 @@ interface Close extends Message {
 	type: "close";
 }

+interface Drop extends Message {
+	type: "drop";
+}
+
 interface Open extends Message {
 	type: "open";
 }
@@ -107,6 +111,7 @@ export type DatabaseMessages =
 	| Exec
 	| Prepare
 	| Close
+	| Drop
 	| Open
 	| Run
 	| Status;
diff --git a/packages/utils/indexer/sqlite3/src/sqlite3.browser.ts b/packages/utils/indexer/sqlite3/src/sqlite3.browser.ts
index f97f11800..5b61bb1e4 100644
--- a/packages/utils/indexer/sqlite3/src/sqlite3.browser.ts
+++ b/packages/utils/indexer/sqlite3/src/sqlite3.browser.ts
@@ -166,6 +166,14 @@ class ProxyDatabase implements IDatabase {
 		});
 	}

+	async drop() {
+		return this.send({
+			type: "drop",
+			id: uuid(),
+			databaseId: this.databaseId,
+		});
+	}
+
 	async status() {
 		return this.send<"open" | "closed">({
 			type: "status",
diff --git a/packages/utils/indexer/sqlite3/src/sqlite3.ts b/packages/utils/indexer/sqlite3/src/sqlite3.ts
index 71aefe58a..f72b4a54f 100644
--- a/packages/utils/indexer/sqlite3/src/sqlite3.ts
+++ b/packages/utils/indexer/sqlite3/src/sqlite3.ts
@@ -8,6 +8,7 @@ import type {
 let create = async (directory?: string) => {
 	let db: DB.Database | undefined = undefined;
 	let statements: Map = new Map();
+	let dbFileName: string;

 	let close = () => {
 		for (const stmt of statements.values()) {
@@ -20,26 +21,35 @@ let create = async (directory?: string) => {
 			db = undefined;
 		}
 	};
+	let drop = () => {
+		if (db && !db?.memory) {
+			fs.rmSync(dbFileName);
+			db = undefined;
+		}
+		return close();
+	};
 	let open = () => {
-		if (db) {
+		if (db?.open) {
 			return db;
 		}
-		let dbFileName: string;
-		if (directory) {
-			// if directory is provided, check if directory exist, if not create it
-			if (!fs.existsSync(directory)) {
-				fs.mkdirSync(directory, { recursive: true });
+		if (!db) {
+			if (directory) {
+				// if a directory is provided, check that it exists and create it if not
+				if (!fs.existsSync(directory)) {
+					fs.mkdirSync(directory, { recursive: true });
+				}
+				dbFileName = `${directory}/db.sqlite`;
+			} else {
+				dbFileName = ":memory:";
 			}
-			dbFileName = `${directory}/db.sqlite`;
-		} else {
-			dbFileName = ":memory:";
+
+			db = new DB(dbFileName, {
+				fileMustExist: false,
+				readonly: false /* , verbose: (message) => console.log(message) */,
+			});
 		}
-		db = new DB(dbFileName, {
-			fileMustExist: false,
-			readonly: false /* , verbose: (message) => console.log(message) */,
-		});

 		// TODO this makes things faster, but for benchmarking it might yield weird results where some runs are faster than others
 		db.pragma("journal_mode = WAL");
 		db.pragma("foreign_keys = on");
@@ -69,6 +79,7 @@ let create = async (directory?: string) => {
 		},
 		statements,
 		close,
+		drop,
 		open,
 		status: () => (db ? "open" : "closed"),
 	} as IDatabase; // TODO fix this
diff --git a/packages/utils/indexer/sqlite3/src/sqlite3.wasm.ts b/packages/utils/indexer/sqlite3/src/sqlite3.wasm.ts
index e5feb3391..cff80dd07 100644
--- a/packages/utils/indexer/sqlite3/src/sqlite3.wasm.ts
+++ b/packages/utils/indexer/sqlite3/src/sqlite3.wasm.ts
@@ -110,6 +110,15 @@ const create = async (directory?: string) => {
 		await sqliteDb?.close();
 		sqliteDb = undefined;
 	};
+	let dbFileName: string;
+
+	let drop = async () => {
+		if (poolUtil && dbFileName != null) {
+			poolUtil.unlink(dbFileName);
+		}
+
+		return close();
+	};
 	let open = async () => {
 		if (sqliteDb) {
 			return sqliteDb;
 		}
@@ -119,7 +128,7 @@ const create = async (directory?: string) => {
 		// TODO show warning if directory is not absolute?
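 		// assumption: the OPFS VFS pool expects a rooted path, which is why the leading "." is stripped below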
 		directory = directory.replace(/^\./, "");

-		let dbFileName = `${directory}/db.sqlite`;
+		dbFileName = `${directory}/db.sqlite`;

 		poolUtil =
 			poolUtil ||
@@ -143,6 +152,7 @@ const create = async (directory?: string) => {
 			return sqliteDb!.exec(sql);
 		},
 		open,
+		drop,
 		prepare: async (sql: string, id?: string) => {
 			if (id == null) {
 				id = uuid();
diff --git a/packages/utils/indexer/sqlite3/src/sqlite3.worker.ts b/packages/utils/indexer/sqlite3/src/sqlite3.worker.ts
index 73e76c016..4e87e62a3 100644
--- a/packages/utils/indexer/sqlite3/src/sqlite3.worker.ts
+++ b/packages/utils/indexer/sqlite3/src/sqlite3.worker.ts
@@ -21,7 +21,9 @@ class SqliteWorkerHandler {
 			if (message.type === "close") {
 				return; // ignore close message if database is not found
 			}
-
+			if (message.type === "drop") {
+				return; // ignore drop message if database is not found
+			}
 			if (message.type === "status") {
 				return "closed";
 			}
@@ -45,6 +47,9 @@ class SqliteWorkerHandler {
 		} else if (message.type === "close") {
 			await db.close();
 			this.databases.delete(message.databaseId);
+		} else if (message.type === "drop") {
+			await db.drop();
+			this.databases.delete(message.databaseId);
 		} else if (message.type === "open") {
 			await db.open();
 			this.databases.set(message.databaseId, db);
diff --git a/packages/utils/indexer/sqlite3/src/types.ts b/packages/utils/indexer/sqlite3/src/types.ts
index 6ae98b0cb..1c18e9c0a 100644
--- a/packages/utils/indexer/sqlite3/src/types.ts
+++ b/packages/utils/indexer/sqlite3/src/types.ts
@@ -8,6 +8,7 @@ export type Database = {
 	exec: (sql: string) => Promise | any;
 	prepare: (sql: string, id?: string) => Promise | Statement;
 	close: (err?: (err: any) => any) => Promise | any;
+	drop: () => Promise | any;
 	open(): Promise | any;
 	statements: {
 		get: (id: string) => Statement | undefined;
diff --git a/packages/utils/indexer/sqlite3/test/array.spec.ts b/packages/utils/indexer/sqlite3/test/array.spec.ts
new file mode 100644
index 000000000..4cceb417c
--- /dev/null
+++ b/packages/utils/indexer/sqlite3/test/array.spec.ts
@@ -0,0 +1,322 @@
+import { field, option, variant, vec } from "@dao-xyz/borsh";
+import { randomBytes } from "@peerbit/crypto";
+import {
+	/* 
+Compare,
+IntegerCompare,
+Or, */
+	StringMatch,
+	id,
+} from "@peerbit/indexer-interface";
+import { expect, use } from "chai";
+import chaiAsPromised from "chai-as-promised";
+import { SQLLiteIndex } from "../src/engine.js";
+import { create } from "../src/index.js";
+import { setup } from "./utils.js";
+
+use(chaiAsPromised);
+
+describe("simple array", () => {
+	// u64 is a special case since we need to shift values to fit into signed 64 bit integers
+
+	let index: Awaited>>;
+
+	afterEach(async () => {
+		await index.store.stop();
+	});
+
+	abstract class ArrayDocumentBase {}
+
+	/* @variant(0)
+	class ArrayDocument extends ArrayDocumentBase {
+		@id({ type: "u64" })
+		id: bigint;
+
+		@field({ type: vec("u64") })
+		value: bigint[];
+
+		constructor(id: bigint, value: bigint[]) {
+			super();
+			this.id = id;
+			this.value = value;
+		}
+	}
+	*/
+	@variant(1)
+	// @ts-ignore
+	class AnotherArrayDocument extends ArrayDocumentBase {
+		@id({ type: "u64" })
+		id: bigint;
+
+		@field({ type: vec("u64") })
+		anotherValue: bigint[];
+
+		constructor(id: bigint, value: bigint[]) {
+			super();
+			this.id = id;
+			this.anotherValue = value;
+		}
+	}
+
+	@variant(0)
+	class ArrayDocumentSingle {
+		@id({ type: "u64" })
+		id: bigint;
+
+		@field({ type: vec("u64") })
+		value: bigint[];
+
+		constructor(id: bigint, value: bigint[]) {
+			this.id = id;
+			this.value = value;
+		}
+	}
+
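+	// fixture for the blob-array test below; vec(Uint8Array) elements presumably end up as BLOB rows in a child table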
@variant(0) + class BlobArrayDocument { + @id({ type: "u64" }) + id: bigint; + + @field({ type: vec(Uint8Array) }) + value: Uint8Array[]; + + constructor(id: bigint, value: Uint8Array[]) { + this.id = id; + this.value = value; + } + } + + /* it("query inner items does not take too long time", async () => { + index = await setup({ schema: ArrayDocumentBase }, create); + const store = index.store as SQLLiteIndex; + expect(store.tables.size).to.equal(4); + let count = 1000; + let itemsToQuery: bigint[] = []; + for (let i = 0; i < count; i++) { + let offset = BigInt(i) * 3n; + if (itemsToQuery.length < 30) { + itemsToQuery.push(offset); + } + await index.store.put( + new ArrayDocument(BigInt(i), [offset + 0n, offset + 1n, offset + 2n]), + ); + } + + const t1 = +new Date(); + const out = await index.store.iterate({}).all(); + const t2 = +new Date(); + expect(out.length).to.equal(count); + + const t3 = +new Date(); + let compares: IntegerCompare[] = itemsToQuery.map( + (x) => + new IntegerCompare({ key: "value", value: x, compare: Compare.Equal }), + ); + const out2 = await index.store.iterate({ query: new Or(compares) }).all(); + const t4 = +new Date(); + + expect(t4 - t3).to.lessThan(t2 - t1); + expect(out2.length).to.equal(itemsToQuery.length); + }); + + it("poly-morphic base resolving many items is sufficiently fast", async () => { + index = await setup({ schema: ArrayDocumentBase }, create); + const store = index.store as SQLLiteIndex; + expect(store.tables.size).to.equal(4); + let count = 1e4; + for (let i = 0; i < count; i++) { + await index.store.put( + new ArrayDocument(BigInt(i), [ + BigInt(Math.round(Math.random() * Number.MAX_SAFE_INTEGER)), + BigInt(Math.round(Math.random() * Number.MAX_SAFE_INTEGER)), + BigInt(Math.round(Math.random() * Number.MAX_SAFE_INTEGER)), + ]), + ); + } + const t1 = +new Date(); + const out = await index.store.iterate({}).all(); + const t2 = +new Date(); + + console.log(`Time to resolve ${count} items: ${t2 - t1} ms`); + expect(out.length).to.equal(count); + expect(t2 - t1).to.lessThan(1000); + }); */ + + it("simple-base resolving many items is sufficiently fast", async () => { + index = await setup({ schema: ArrayDocumentSingle }, create); + let count = 1e4; + for (let i = 0; i < count; i++) { + await index.store.put( + new ArrayDocumentSingle(BigInt(i), [ + BigInt(Math.round(Math.random() * Number.MAX_SAFE_INTEGER)), + BigInt(Math.round(Math.random() * Number.MAX_SAFE_INTEGER)), + BigInt(Math.round(Math.random() * Number.MAX_SAFE_INTEGER)), + ]), + ); + } + const t1 = +new Date(); + const out = await index.store.iterate({}).all(); + const t2 = +new Date(); + + console.log(`Time to resolve ${count} items: ${t2 - t1} ms`); + expect(out.length).to.equal(count); + expect(t2 - t1).to.lessThan(1000); + }); + + it("blob array items is sufficiently fast", async () => { + index = await setup({ schema: BlobArrayDocument }, create); + let count = 1e4; + for (let i = 0; i < count; i++) { + await index.store.put( + new BlobArrayDocument(BigInt(i), [ + randomBytes(32), + randomBytes(32), + randomBytes(32), + ]), + ); + } + const t1 = +new Date(); + const out = await index.store.iterate({}).all(); + const t2 = +new Date(); + + console.log(`Time to resolve ${count} items: ${t2 - t1} ms`); + expect(out.length).to.equal(count); + expect(t2 - t1).to.lessThan(1000); + }); +}); + +describe("document array", () => { + // u64 is a special case since we need to shift values to fit into signed 64 bit integers + + let index: Awaited>>; + + afterEach(async () => { + await 
index.store.stop(); + }); + + abstract class Base {} + + @variant("av0") + class AV0 extends Base { + @field({ type: option("u64") }) + number?: bigint; + + constructor(opts: AV0) { + super(); + this.number = opts.number; + } + } + + @variant("av1") + class AV1 extends Base { + @field({ type: option("string") }) + string?: string; + + constructor(opts: AV1) { + super(); + this.string = opts.string; + } + } + + @variant("PolymorpArrayDocument") + class PolymorpArrayDocument { + @id({ type: "string" }) + id: string; + + @field({ type: vec(Base) }) + array: Base[]; + + constructor(opts: PolymorpArrayDocument) { + this.id = opts.id; + this.array = opts.array; + } + } + + beforeEach(async () => { + index = await setup({ schema: PolymorpArrayDocument }, create); + }); + + it("can query multiple versions at once", async () => { + const store = index.store as SQLLiteIndex; + await store.put( + new PolymorpArrayDocument({ + id: "1", + array: [ + new AV0({ + number: 0n, + }), + new AV1({ + string: "hello", + }), + ], + }), + ); + + const doc2 = new PolymorpArrayDocument({ + id: "2", + array: [ + new AV1({ + string: "world", + }), + new AV0({ + number: 123n, + }), + ], + }); + + await store.put(doc2); + + const response = await store + .iterate({ + query: [ + new StringMatch({ + key: ["array", "string"], + value: "world", + }), + ], + }) + .all(); + + expect(response).to.have.length(1); + expect(response[0].value.id).to.equal("2"); + expect(response[0].value.array).to.have.length(2); + expect(response[0].value.array[0]).to.be.instanceOf(AV1); + expect(response[0].value.array[1]).to.be.instanceOf(AV0); + expect((response[0].value.array[0] as AV1).string).to.equal("world"); + expect((response[0].value.array[1] as AV0).number).to.equal(123n); + }); + + it("all", async () => { + const store = index.store as SQLLiteIndex; + await store.put( + new PolymorpArrayDocument({ + id: "1", + array: [ + new AV0({ + number: 0n, + }), + new AV1({ + string: "hello", + }), + ], + }), + ); + + const doc2 = new PolymorpArrayDocument({ + id: "2", + array: [ + new AV1({ + string: "world", + }), + new AV0({ + number: 123n, + }), + ], + }); + + await store.put(doc2); + + const response = await store.iterate({}).all(); + expect(response).to.have.length(2); + }); +}); diff --git a/packages/utils/indexer/sqlite3/test/basic.spec.ts b/packages/utils/indexer/sqlite3/test/basic.spec.ts new file mode 100644 index 000000000..5ed788ed1 --- /dev/null +++ b/packages/utils/indexer/sqlite3/test/basic.spec.ts @@ -0,0 +1,264 @@ +// @ts-nocheck +import { + deserialize, + field, + fixedArray, + option, + serialize, + variant, + vec, +} from "@dao-xyz/borsh"; +import { randomBytes, sha256Base64Sync } from "@peerbit/crypto"; +import { + And, + BoolQuery, + ByteMatchQuery, + Compare, + type Index, + type IndexEngineInitProperties, + type IndexIterator, + type Indices, + IntegerCompare, + IsNull, + type IterateOptions, + Nested, + Not, + Or, + Query, + type Shape, + Sort, + SortDirection, + StringMatch, + StringMatchMethod, + extractFieldValue, + getIdProperty, + id, + toId, +} from "@peerbit/indexer-interface"; +import { + /* delay, */ + delay, + waitForResolved, +} from "@peerbit/time"; +import { expect } from "chai"; +import sodium from "libsodium-wrappers"; +import { equals } from "uint8arrays"; +import { v4 as uuid } from "uuid"; +import { create } from "../src/index.js"; + +@variant("nested_object") +class NestedValue { + @field({ type: "u32" }) + number: number; + + constructor(properties: { number: number }) { + this.number = 
properties.number; + } +} + +abstract class Base {} + +@variant(0) +class Document extends Base { + @field({ type: "string" }) + id: string; + + @field({ type: option("string") }) + name?: string; + + @field({ type: option("u64") }) + number?: bigint; + + @field({ type: option("bool") }) + bool?: boolean; + + @field({ type: option(Uint8Array) }) + data?: Uint8Array; + + @field({ type: option(fixedArray("u8", 32)) }) + fixedData?: Uint8Array; + + @field({ type: option(NestedValue) }) + nested?: NestedValue; + + @field({ type: vec("string") }) + tags: string[]; + + @field({ type: vec(NestedValue) }) + nestedVec: NestedValue[]; + + constructor(opts: Partial) { + super(); + this.id = opts.id || uuid(); + this.name = opts.name; + this.number = opts.number; + this.tags = opts.tags || []; + this.bool = opts.bool; + this.data = opts.data; + this.fixedData = opts.fixedData; + this.nested = opts.nested; + this.nestedVec = opts.nestedVec || []; + } +} + +// variant 1 (next version for migration testing) +@variant(1) +class DocumentNext extends Base { + @field({ type: "string" }) + id: string; + + @field({ type: "string" }) + name: string; + + @field({ type: "string" }) + anotherField: string; + + constructor(opts: Partial) { + super(); + this.id = opts.id || uuid(); + this.name = opts.name || uuid(); + this.anotherField = opts.anotherField || uuid(); + } +} + +describe("basic", () => { + let store: Index; + let indices: Indices; + let defaultDocs: Document[] = []; + + const setupDefault = async () => { + // Create store + const result = await setup({ schema: Base }); + + const doc = new Document({ + id: "1", + name: "hello", + number: 1n, + tags: [], + }); + + const docEdit = new Document({ + id: "1", + name: "hello world", + number: 1n, + bool: true, + data: new Uint8Array([1]), + fixedData: new Uint8Array(32).fill(1), + tags: [], + }); + + const doc2 = new Document({ + id: "2", + name: "hello world", + number: 4n, + tags: [], + }); + + const doc2Edit = new Document({ + id: "2", + name: "Hello World", + number: 2n, + data: new Uint8Array([2]), + fixedData: new Uint8Array(32).fill(2), + tags: ["Hello", "World"], + }); + + const doc3 = new Document({ + id: "3", + name: "foo", + number: 3n, + data: new Uint8Array([3]), + fixedData: new Uint8Array(32).fill(3), + tags: ["Hello"], + }); + + const doc4 = new Document({ + id: "4", + name: undefined, + number: undefined, + tags: [], + }); + + await store.put(doc); + await waitForResolved(async () => expect(await store.getSize()).equals(1)); + await store.put(docEdit); + await store.put(doc2); + await waitForResolved(async () => expect(await store.getSize()).equals(2)); + + await store.put(doc2Edit); + await store.put(doc3); + await store.put(doc4); + await waitForResolved(async () => expect(await store.getSize()).equal(4)); + + defaultDocs = [docEdit, doc2Edit, doc3, doc4]; + return result; + }; + + const checkDocument = (document: any, ...matchAny: any[]) => { + const match = matchAny.find((x) => + x.id instanceof Uint8Array + ? 
equals(x.id, document.id) + : x.id === document.id, + ); + + expect(match).to.exist; + + const keysMatch = Object.keys(match); + const keysDocument = Object.keys(document); + + expect(keysMatch).to.have.members(keysDocument); + expect(keysDocument).to.have.members(keysMatch); + for (const key of keysMatch) { + const value = document[key]; + const matchValue = match[key]; + if (value instanceof Uint8Array) { + expect(equals(value, matchValue)).to.be.true; + } else { + expect(value).to.deep.equal(matchValue); + } + } + + // expect(document).to.deep.equal(match); + expect(document).to.be.instanceOf(matchAny[0].constructor); + }; + + const setup = async ( + properties: Partial> & { schema: any }, + directory?: string, + ): Promise<{ + indices: Indices; + store: Index; + }> => { + // store && await store.stop() + indices && (await indices.stop()); + + await sodium.ready; + + indices = await create(directory); // TODO add directory testsc + await indices.start(); + const indexProps: IndexEngineInitProperties = { + ...{ + indexBy: getIdProperty(properties.schema) || ["id"], + iterator: { batch: { maxSize: 5e6, sizeProperty: ["__size"] } }, + /* nested: { + match: (obj: any): obj is IndexWrapper => obj instanceof IndexWrapper, + query: (nested: any, query: any) => nested.search(query) + } */ + }, + ...properties, + }; + store = await indices.init(indexProps); // TODO add directory tests + return { indices, store }; + /* return new IndexWrapper(index, indexProps.indexBy, directory); */ + }; + + it("all", async () => { + await setupDefault(); + + const results = await store.iterate().all(); + expect(results).to.have.length(4); + for (const result of results) { + checkDocument(result.value, ...defaultDocs); + } + }); +}); diff --git a/packages/utils/indexer/sqlite3/test/query-planner.spec.ts b/packages/utils/indexer/sqlite3/test/query-planner.spec.ts new file mode 100644 index 000000000..9f10aef90 --- /dev/null +++ b/packages/utils/indexer/sqlite3/test/query-planner.spec.ts @@ -0,0 +1,392 @@ +import { + And, + Compare, + IntegerCompare, + Or, + type Query, + Sort, + SortDirection, + toQuery, +} from "@peerbit/indexer-interface"; +import { delay } from "@peerbit/time"; +import { expect } from "chai"; +import { + PlannableQuery, + QueryPlanner, + flattenQuery, +} from "../src/query-planner.js"; + +describe("PlannableQuery", () => { + describe("IntegerCompare", () => { + it("key same for small change", async () => { + // TODO test all query types + + const plannable = new PlannableQuery({ + query: [ + new IntegerCompare({ + compare: Compare.Equal, + key: "key", + value: 1, + }), + ], + sort: [new Sort({ key: "key", direction: SortDirection.ASC })], + }); + + const plannableOtherValues = new PlannableQuery({ + query: [ + new IntegerCompare({ + compare: Compare.Equal, + key: "key", + value: 2, + }), + ], + sort: [new Sort({ key: "key", direction: SortDirection.ASC })], + }); + + expect(plannable.key).to.eq(plannableOtherValues.key); + }); + + it("key different for large diff", async () => { + const plannable = new PlannableQuery({ + query: [ + new IntegerCompare({ + compare: Compare.Equal, + key: "key", + value: 1, + }), + ], + sort: [new Sort({ key: "key", direction: SortDirection.ASC })], + }); + + const plannableOtherValues = new PlannableQuery({ + query: [ + new IntegerCompare({ + compare: Compare.Equal, + key: "key", + value: 2147483647 + 1, + }), + ], + sort: [new Sort({ key: "key", direction: SortDirection.ASC })], + }); + + expect(plannable.key).to.not.eq(plannableOtherValues.key); + }); + + 
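+		// the planner key is expected to depend on the query shape (keys, compare ops and coarse value ranges) rather than exact values, which the two tests above exercise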
it("can generate key from query with nesting", async () => { + const ors: And[] = []; + for (const point of [1, 2]) { + ors.push( + new And([ + new And([ + new IntegerCompare({ + key: "key1", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "key2", + compare: Compare.Greater, + value: point, + }), + ]), + new And([ + new IntegerCompare({ + key: "key3", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "key4", + compare: Compare.Greater, + value: point, + }), + ]), + ]), + ); + } + let complicatedQuery = [ + new Or(ors), + new IntegerCompare({ + key: "key5", + compare: Compare.Greater, + value: 0, + }), + ]; + + const plannable = new PlannableQuery({ + query: toQuery(complicatedQuery), + sort: [new Sort({ key: "key5", direction: SortDirection.ASC })], + }); + + expect(plannable.key).to.be.a("string"); + }); + }); +}); + +describe("QueryPlanner", () => { + it("can concurrently with same index", async () => { + let executed: string[] = []; + let execDelay = 1000; + + const planner = new QueryPlanner({ + exec: async (query: string) => { + await delay(execDelay); + executed.push(query); + }, + }); + const query = new PlannableQuery({ query: [] }); + const scope1 = planner.scope(query); + const scope2 = planner.scope(query); + + const index1 = scope1.resolveIndex("table", ["field1"]); + const index2 = scope2.resolveIndex("table", ["field1"]); + expect(index1).to.eq(index2); + + const prepare1 = scope1.beforePrepare(); + const prepare2 = scope2.beforePrepare(); + + await prepare2; + await scope2.perform(async () => { + expect(executed).to.have.length(1); + expect(executed[0]).to.contain(index1); + }); + + await prepare1; + }); +}); + +const generatorAsList = (gen: Generator) => { + let result: T[] = []; + for (const value of gen) { + result.push(value); + } + return result; +}; +describe("flattenQuery", () => { + it("or, and", () => { + let or = new Or([ + new IntegerCompare({ + compare: Compare.Equal, + key: "key1", + value: 1, + }), + new IntegerCompare({ + compare: Compare.Equal, + key: "key2", + value: 2, + }), + ]); + + let result = generatorAsList( + flattenQuery({ + query: [ + or, + new IntegerCompare({ + compare: Compare.Equal, + key: "key3", + value: 3, + }), + ], + sort: [], + }), + ); + + expect(result).to.have.length(2); + expect( + result[0]!.query.map((x) => (x as IntegerCompare).key[0]), + ).to.have.members(["key1", "key3"]); + expect( + result[1]!.query.map((x) => (x as IntegerCompare).key[0]), + ).to.have.members(["key2", "key3"]); + }); + + it("only flattens ors less than size 4", async () => { + let or = new Or([ + new IntegerCompare({ + compare: Compare.Equal, + key: "key1", + value: 1, + }), + new IntegerCompare({ + compare: Compare.Equal, + key: "key2", + value: 2, + }), + new IntegerCompare({ + compare: Compare.Equal, + key: "key3", + value: 3, + }), + new IntegerCompare({ + compare: Compare.Equal, + key: "key3", + value: 4, + }), + ]); + + let result = generatorAsList( + flattenQuery({ + query: [ + or, + new IntegerCompare({ + compare: Compare.Equal, + key: "key4", + + value: 4, + }), + ], + sort: [], + }), + ); + + expect(result).to.have.length(1); + }); + + it("and, and", () => { + let result = generatorAsList( + flattenQuery({ + query: [ + new And([ + new IntegerCompare({ + compare: Compare.Equal, + key: "key1", + value: 1, + }), + new IntegerCompare({ + compare: Compare.Equal, + key: "key2", + value: 2, + }), + ]), + new And([ + new IntegerCompare({ + compare: Compare.Equal, + key: "key3", + value: 3, 
+ }), + new IntegerCompare({ + compare: Compare.Equal, + key: "key4", + value: 4, + }), + ]), + ], + sort: [], + }), + ); + + expect(result).to.have.length(1); + expect(result[0]!.query).to.have.length(4); + expect( + result[0]!.query.map((x) => (x as IntegerCompare).key[0]), + ).to.deep.eq(["key1", "key2", "key3", "key4"]); + }); + + it("and(or, and)", () => { + let result = generatorAsList( + flattenQuery({ + query: [ + new And([ + new Or([ + new IntegerCompare({ + compare: Compare.Equal, + key: "key1", + value: 1, + }), + new IntegerCompare({ + compare: Compare.Equal, + key: "key2", + value: 2, + }), + ]), + new IntegerCompare({ + compare: Compare.Equal, + key: "key3", + value: 3, + }), + ]), + new IntegerCompare({ + compare: Compare.Equal, + key: "key4", + value: 4, + }), + ], + sort: [], + }), + ); + + expect(result).to.have.length(2); + expect( + result[0]!.query.map((x) => (x as IntegerCompare).key[0]), + ).to.have.members(["key1", "key3", "key4"]); + expect( + result[1]!.query.map((x) => (x as IntegerCompare).key[0]), + ).to.have.members(["key2", "key3", "key4"]); + }); + + it("or(and(and)), and", () => { + const ors: And[] = []; + for (const point of [1, 2]) { + ors.push( + new And([ + new And([ + new IntegerCompare({ + key: "key1", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "key2", + compare: Compare.Greater, + value: point, + }), + ]), + new And([ + new IntegerCompare({ + key: "key3", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "key4", + compare: Compare.Greater, + value: point, + }), + ]), + ]), + ); + } + let complicatedQuery = [ + new Or(ors), + new IntegerCompare({ + key: "key5", + compare: Compare.Greater, + value: 0, + }), + ]; + + let result = generatorAsList( + flattenQuery({ + query: toQuery(complicatedQuery), + sort: [], + }), + ); + + expect(result).to.have.length(2); + + const checkResult = (result: Query[], value: number) => { + expect(result).to.have.length(2); + expect((result[0] as IntegerCompare).key[0]).to.eq("key5"); + const and = result[1] as And; + const andAnd = and.and.map((x) => (x as And).and).flat(); // stuff inside or is not flattened + expect(andAnd.map((x) => (x as IntegerCompare).value.value)).to.deep.eq([ + value, + value, + value, + value, + ]); + }; + checkResult(result[0]!.query, 1); + checkResult(result[1]!.query, 2); + }); +}); diff --git a/packages/utils/indexer/sqlite3/test/shape.spec.ts b/packages/utils/indexer/sqlite3/test/shape.spec.ts new file mode 100644 index 000000000..75829dd1d --- /dev/null +++ b/packages/utils/indexer/sqlite3/test/shape.spec.ts @@ -0,0 +1,476 @@ +import { field, option, variant, vec } from "@dao-xyz/borsh"; +import { + BoolQuery, + Compare, + IntegerCompare, + Or, + Sort, + SortDirection, + StringMatch, + id, +} from "@peerbit/indexer-interface"; +import { expect, use } from "chai"; +import chaiAsPromised from "chai-as-promised"; +import { v4 as uuid } from "uuid"; +import { SQLLiteIndex } from "../src/engine.js"; +import { create } from "../src/index.js"; +import { setup } from "./utils.js"; + +use(chaiAsPromised); + +@variant(0) +class ArrayDocument /* extends ArrayDocumentBase */ { + @id({ type: "string" }) + id: string; + + // create some extra fields to make the ndex more complicated + @field({ type: "string" }) + a: string; + + // create some extra fields to make the index more complicated + @field({ type: "string" }) + b: string; + + @field({ type: vec("u32") }) + value: number[]; + + constructor(id: string, value: bigint[]) { + 
/* super(); */
+		this.id = id;
+		this.value = value.map((x) => Number(x));
+		this.a = uuid();
+		this.b = uuid();
+	}
+}
+
+class DocumentWithProperties {
+	// create some extra fields to make the index more complicated
+	@field({ type: "string" })
+	a: string;
+
+	// create some extra fields to make the index more complicated
+	@field({ type: "string" })
+	b: string;
+
+	// we will query this field
+	@field({ type: "bool" })
+	bool: boolean;
+
+	// create some extra fields to make the index more complicated
+	@field({ type: "string" })
+	c: string;
+
+	// create some extra fields to make the index more complicated
+	@field({ type: "string" })
+	d: string;
+
+	constructor(properties?: {
+		bool?: boolean;
+		a?: string;
+		b?: string;
+		c?: string;
+		d?: string;
+	}) {
+		this.bool = properties?.bool ?? Math.random() > 0.5;
+		this.a = properties?.a ?? uuid();
+		this.b = properties?.b ?? uuid();
+		this.c = properties?.c ?? uuid();
+		this.d = properties?.d ?? uuid();
+	}
+}
+
+abstract class Base {}
+
+@variant(0)
+// @ts-ignore
+class Type0 extends Base {
+	@id({ type: "string" })
+	id: string;
+
+	@field({ type: "string" })
+	value: string;
+
+	constructor(id: string, value: string) {
+		super();
+		this.id = id;
+		this.value = value;
+	}
+}
+
+@variant(1)
+class NestedBoolQueryDocument extends Base {
+	@id({ type: "string" })
+	id: string;
+
+	@field({ type: vec(DocumentWithProperties) })
+	nested: DocumentWithProperties[];
+
+	constructor(id: string, nested: DocumentWithProperties[]) {
+		super();
+		this.id = id;
+		this.nested = nested;
+	}
+}
+
+describe("shape", () => {
+	let index: Awaited>>;
+
+	afterEach(async () => {
+		await index.store.stop();
+	});
+
+	it("shaped sort when query is split", async () => {
+		index = await setup(
+			{ schema: DocumentWithProperties, indexBy: ["a"] },
+			create,
+		);
+		index.store as SQLLiteIndex;
+		await index.store.put(new DocumentWithProperties({ a: "1" }));
+		await index.store.put(new DocumentWithProperties({ a: "2" }));
+
+		const iterator = index.store.iterate(
+			{
+				query: new Or([
+					new StringMatch({ key: "a", value: "1" }),
+					new StringMatch({ key: "a", value: "2" }),
+				]),
+				sort: new Sort({ key: "b", direction: SortDirection.ASC }),
+			},
+			{ shape: { id: true } },
+		);
+		expect(await iterator.all()).to.have.length(2);
+	});
+
+	describe("simple array", () => {
+		/* abstract class ArrayDocumentBase { } */
+
+		it("shaped queries are faster", async () => {
+			index = await setup({ schema: ArrayDocument }, create);
+			index.store as SQLLiteIndex;
+			let count = 5e4;
+			let itemsToQuery: bigint[] = [];
+			for (let i = 0; i < count; i++) {
+				let offset = BigInt(i) * 3n;
+				if (itemsToQuery.length < 1) {
+					itemsToQuery.push(offset);
+				}
+				await index.store.put(
+					new ArrayDocument(uuid(), [offset + 0n, offset + 1n, offset + 2n]),
+				);
+			}
+
+			const queryCount = 1e4;
+
+			const compares: IntegerCompare[] = itemsToQuery.map(
+				(x) =>
+					new IntegerCompare({
+						key: "value",
+						value: x,
+						compare: Compare.Equal,
+					}),
+			);
+
+			const iterator = index.store.iterate(
+				{ query: new Or(compares) },
+				{ shape: { id: true } },
+			);
+			await iterator.next(1);
+			await iterator.close();
+
+			const iterator2 = index.store.iterate({ query: new Or(compares) });
+			await iterator2.next(1);
+			await iterator2.close();
+
+			const t1 = +new Date();
+
+			let fetch = 30;
+			for (let i = 0; i < queryCount; i++) {
+				const iterator = index.store.iterate(
+					{ query: new Or(compares) },
+					{ shape: { id: true } },
+				);
+				/* const out1 = */ await iterator.next(fetch);
+				await iterator.close();
+
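+				// the shaped iterator only needs to materialize the id column, so each next() should do less work than the unshaped run timed below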
/* if (out1.length !== itemsToQuery.length) { + throw new Error("Expected " + itemsToQuery.length + " but got " + out1.length); + } */ + } + + const t2 = +new Date(); + + const t3 = +new Date(); + for (let i = 0; i < queryCount; i++) { + const iterator = index.store.iterate({ query: new Or(compares) }); + const out2 = await iterator.next(fetch); + await iterator.close(); + if (out2.length !== itemsToQuery.length) { + throw new Error( + "Expected " + itemsToQuery.length + " but got " + out2.length, + ); + } + } + + const t4 = +new Date(); + + console.log(t4 - t3, t2 - t1); + expect(t4 - t3).to.greaterThan(t2 - t1); + }); + }); + + describe("document array", () => { + it("shaped queries are faster", async () => { + index = await setup({ schema: Base }, create); + index.store as SQLLiteIndex; + let count = 1e4; + for (let i = 0; i < count; i++) { + if (i % 5 === 0) { + await index.store.put(new NestedBoolQueryDocument(uuid(), [])); + } else { + await index.store.put( + new NestedBoolQueryDocument(uuid(), [ + new DocumentWithProperties({ bool: i % 2 === 0 ? true : false }), + ]), + ); + } + } + const fetch = 30; + const queryCount = 1e4; + + let iterator = index.store.iterate({ + query: new BoolQuery({ key: ["nested", "bool"], value: true }), + }); + await iterator.next(1); + await iterator.close(); + + let iteratorShaped = index.store.iterate( + { + query: new BoolQuery({ key: ["nested", "bool"], value: true }), + }, + { + shape: { id: true }, + }, + ); + await iteratorShaped.next(1); + await iteratorShaped.close(); + + const t1 = +new Date(); + let allResults = []; + + for (let i = 0; i < queryCount; i++) { + let iterator = index.store.iterate({ + query: new BoolQuery({ key: ["nested", "bool"], value: true }), + }); + const result = await iterator.next(fetch); + if (result.length !== fetch) { + throw new Error( + "Expected to fetch " + fetch + " but got " + result.length, + ); + } + for (const item of result) { + if (item.value.nested[0].bool !== true) { + throw new Error("Expected to fetch only true values"); + } + } + for (const item of result) { + allResults.push(item); + } + await iterator.close(); + } + + const t2 = +new Date(); + const t3 = +new Date(); + + let c = 0; + for (let i = 0; i < queryCount; i++) { + let iteratorShaped = index.store.iterate( + { + query: new BoolQuery({ key: ["nested", "bool"], value: true }), + }, + { + shape: { id: true }, + }, + ); + const result = await iteratorShaped.next(fetch); + if (result.length !== fetch) { + throw new Error( + "Expected to fetch " + fetch + " but got " + result.length, + ); + } + + for (const item of result) { + if (item.id.primitive !== allResults[c].id.primitive) { + throw new Error( + "Mismatch: " + + item.id.primitive + + " !== " + + allResults[c].id.primitive, + ); + } + c++; + } + await iteratorShaped.close(); + } + + const t4 = +new Date(); + expect(t4 - t3).to.lessThan(t2 - t1); + console.log(t4 - t3, t2 - t1); + expect(allResults.length).to.equal(queryCount * fetch); + }); + }); + + describe("nested document", () => { + // u64 is a special case since we need to shift values to fit into signed 64 bit integers + + let index: Awaited>>; + + afterEach(async () => { + await index.store.stop(); + }); + + class Nested { + // create some extra fields to make the ndex more complicated + @field({ type: "string" }) + a: string; + + // create some extra fields to make the index more complicated + @field({ type: "string" }) + b: string; + + // we will query this field + @field({ type: "bool" }) + bool: boolean; + + // create some extra 
fields to make the ndex more complicated + @field({ type: "string" }) + c: string; + + // create some extra fields to make the index more complicated + @field({ type: "string" }) + d: string; + + constructor(bool: boolean) { + this.bool = bool; + this.a = uuid(); + this.b = uuid(); + this.c = uuid(); + this.d = uuid(); + } + } + + abstract class Base {} + + @variant(0) + // @ts-ignore + class Type0 extends Base { + @id({ type: "string" }) + id: string; + + @field({ type: "string" }) + value: string; + + constructor(id: string, value: string) { + super(); + this.id = id; + this.value = value; + } + } + + @variant(1) + class NestedBoolQueryDocument extends Base { + @id({ type: "string" }) + id: string; + + @field({ type: option(Nested) }) + nested?: Nested; + + constructor(id: string, nested?: Nested) { + super(); + this.id = id; + this.nested = nested; + } + } + + it("shaped queries are faster", async () => { + index = await setup({ schema: NestedBoolQueryDocument }, create); + index.store as SQLLiteIndex; + let count = 1e4; + for (let i = 0; i < count; i++) { + if (i % 5 === 0) { + await index.store.put(new NestedBoolQueryDocument(uuid())); + } else { + await index.store.put( + new NestedBoolQueryDocument( + uuid(), + new Nested(i % 2 === 0 ? true : false), + ), + ); + } + } + const fetch = 30; + const queryCount = 1e4; + const t1 = +new Date(); + let allResults = []; + for (let i = 0; i < queryCount; i++) { + let iterator = index.store.iterate({ + query: new BoolQuery({ key: ["nested", "bool"], value: true }), + }); + const result = await iterator.next(fetch); + if (result.length !== fetch) { + throw new Error( + "Expected to fetch " + fetch + " but got " + result.length, + ); + } + for (const item of result) { + if (item.value.nested.bool !== true) { + throw new Error("Expected to fetch only true values"); + } + } + for (const item of result) { + allResults.push(item); + } + await iterator.close(); + } + + const t2 = +new Date(); + + const t3 = +new Date(); + /* let c = 0; */ + for (let i = 0; i < queryCount; i++) { + let iteratorShaped = index.store.iterate( + { + query: new BoolQuery({ key: ["nested", "bool"], value: true }), + }, + { + shape: { id: true }, + }, + ); + const result = await iteratorShaped.next(fetch); + if (result.length !== fetch) { + throw new Error( + "Expected to fetch " + fetch + " but got " + result.length, + ); + } + + /* for (const item of result) { + if (item.id.primitive !== allResults[c].id.primitive) { + throw new Error( + "Mismatch: " + + item.id.primitive + + " !== " + + allResults[c].id.primitive, + ); + } + c++; + } */ + + await iteratorShaped.close(); + } + + const t4 = +new Date(); + expect(t4 - t3).to.lessThan(t2 - t1); + console.log(t4 - t3, t2 - t1); + expect(allResults.length).to.equal(queryCount * fetch); + }); + }); +}); diff --git a/packages/utils/indexer/sqlite3/test/sort.spec.ts b/packages/utils/indexer/sqlite3/test/sort.spec.ts index d3c24fe88..9d8c68a42 100644 --- a/packages/utils/indexer/sqlite3/test/sort.spec.ts +++ b/packages/utils/indexer/sqlite3/test/sort.spec.ts @@ -1,4 +1,10 @@ -import { id } from "@peerbit/indexer-interface"; +import { + Or, + Sort, + SortDirection, + StringMatch, + id, +} from "@peerbit/indexer-interface"; import { expect, use } from "chai"; import chaiAsPromised from "chai-as-promised"; import { SQLLiteIndex } from "../src/engine.js"; @@ -34,14 +40,79 @@ describe("sort", () => { await index.store.put(new Document("2")); await index.store.put(new Document("1")); + const prepare = 
store.properties.db.prepare.bind(store.properties.db);
+		let preparedStatement: string[] = [];
+		store.properties.db.prepare = function (sql: string) {
+			preparedStatement.push(sql);
+			return prepare(sql);
+		};
+
 		const iterator = await index.store.iterate();
 		const [first, second, third] = [
 			...(await iterator.next(1)),
 			...(await iterator.next(1)),
 			...(await iterator.next(1)),
 		];
+
+		expect(preparedStatement).to.have.length(1);
+		expect(preparedStatement[0]).to.contain("ORDER BY");
+
 		expect(first.value.id).to.equal("1");
 		expect(second.value.id).to.equal("2");
 		expect(third.value.id).to.equal("3");
 	});
+
+	it("will not sort by default when fetching all", async () => {
+		// fetching everything does not require a stable page order, so no default sort should be applied
+		index = await setup({ schema: Document }, create);
+		const store = index.store as SQLLiteIndex;
+		expect(store.tables.size).to.equal(1);
+		await index.store.put(new Document("3"));
+		await index.store.put(new Document("2"));
+		await index.store.put(new Document("1"));
+
+		const prepare = store.properties.db.prepare.bind(store.properties.db);
+		let preparedStatement: string[] = [];
+		store.properties.db.prepare = function (sql: string) {
+			preparedStatement.push(sql);
+			return prepare(sql);
+		};
+
+		const iterator = index.store.iterate();
+		const results = await iterator.all();
+
+		expect(preparedStatement).to.have.length(1);
+		expect(preparedStatement[0]).to.not.contain("ORDER BY");
+
+		expect(results.map((x) => x.id.primitive)).to.deep.equal(["3", "2", "1"]); // insertion order (seems to be the default order when not sorting)
+	});
+
+	it("will sort correctly when query is split", async () => {
+		index = await setup({ schema: Document }, create);
+		const store = index.store as SQLLiteIndex;
+		expect(store.tables.size).to.equal(1);
+		await index.store.put(new Document("3"));
+		await index.store.put(new Document("2"));
+		await index.store.put(new Document("1"));
+
+		const prepare = store.properties.db.prepare.bind(store.properties.db);
+		let preparedStatement: string[] = [];
+		store.properties.db.prepare = function (sql: string) {
+			preparedStatement.push(sql);
+			return prepare(sql);
+		};
+		const iterator = index.store.iterate({
+			query: new Or([
+				new StringMatch({ key: "id", value: "1" }),
+				new StringMatch({ key: "id", value: "2" }),
+			]),
+			sort: new Sort({ key: "id", direction: SortDirection.DESC }),
+		});
+		const results = await iterator.all();
+		expect(results).to.have.length(2);
+
+		expect(preparedStatement).to.have.length(1);
+		expect(preparedStatement[0].match(/DESC/g)).to.have.length(1);
+		expect(preparedStatement[0].match(/ASC/g)).to.be.null;
+	});
 });
diff --git a/packages/utils/indexer/sqlite3/test/u64.spec.ts b/packages/utils/indexer/sqlite3/test/u64.spec.ts
index 8a329c3c2..ca39bea43 100644
--- a/packages/utils/indexer/sqlite3/test/u64.spec.ts
+++ b/packages/utils/indexer/sqlite3/test/u64.spec.ts
@@ -30,6 +30,20 @@ describe("u64", () => {
 		}
 	}

+	it("all", async () => {
+		index = await setup({ schema: DocumentWithBigint }, create);
+		await index.store.put(new DocumentWithBigint(0n, 0n));
+		await index.store.put(
+			new DocumentWithBigint(18446744073709551615n, 18446744073709551615n),
+		);
+		await index.store.put(new DocumentWithBigint(123n, 123n));
+
+		const all: IndexedResults = await index.store
+			.iterate()
+			.all();
+		expect(all.length).to.equal(3);
+	});
+
 	it("fetch bounds ", async () => {
 		index = await setup({ schema: DocumentWithBigint }, create);
 		const store = index.store as SQLLiteIndex;
diff --git
a/packages/utils/indexer/tests/src/benchmarks.ts b/packages/utils/indexer/tests/src/benchmarks.ts index 37ec60e2a..e829f6fe4 100644 --- a/packages/utils/indexer/tests/src/benchmarks.ts +++ b/packages/utils/indexer/tests/src/benchmarks.ts @@ -1,16 +1,21 @@ import { field, vec } from "@dao-xyz/borsh"; import { + And, BoolQuery, + Compare, type Index, type IndexEngineInitProperties, type Indices, + IntegerCompare, + Or, + Query, + Sort, StringMatch, getIdProperty, id, } from "@peerbit/indexer-interface"; -import B from "benchmark"; import sodium from "libsodium-wrappers"; -import pDefer from "p-defer"; +import * as B from "tinybench"; import { v4 as uuid } from "uuid"; const setup = async ( @@ -35,7 +40,7 @@ const setup = async ( }; let preFillCount = 2e4; -const strinbBenchmark = async ( +const stringBenchmark = async ( createIndicies: (directory?: string) => Indices | Promise, type: "transient" | "persist" = "transient", ) => { @@ -73,77 +78,45 @@ const strinbBenchmark = async ( type, ); - let done = pDefer(); - const suite = new B.Suite({ delay: 100 }); - suite - .add("string put - " + type, { - fn: async (deferred: any) => { - await stringIndexEmpty.store.put(new StringDocument(uuid(), uuid())); - deferred.resolve(); - }, - defer: true, - maxTime: 5, + const suite = new B.Bench({ warmupIterations: 1000 }); + await suite + .add("string put - " + type, async () => { + await stringIndexEmpty.store.put(new StringDocument(uuid(), uuid())); }) - .add("string query matching - " + type, { - fn: async (deferred: any) => { - const iterator = stringIndexPreFilled.store.iterate({ - query: new StringMatch({ key: "string", value: fixed }), - }); - await iterator.next(10); - await iterator.close(); - deferred.resolve(); - }, - defer: true, - maxTime: 5, + .add("string query matching - " + type, async () => { + const iterator = stringIndexPreFilled.store.iterate({ + query: new StringMatch({ key: "string", value: fixed }), + }); + await iterator.next(10); + await iterator.close(); }) - .add("string count matching - " + type, { - fn: async (deferred: any) => { - await stringIndexPreFilled.store.count({ - query: new StringMatch({ key: "string", value: fixed }), - }); - deferred.resolve(); - }, - defer: true, - maxTime: 5, + .add("string count matching - " + type, async () => { + await stringIndexPreFilled.store.count({ + query: new StringMatch({ key: "string", value: fixed }), + }); }) - .add("string count no-matches - " + type, { - fn: async (deferred: any) => { - const out = Math.random() > 0.5 ? true : false; - await stringIndexPreFilled.store.count({ - query: new StringMatch({ key: "string", value: uuid() }), - }); - deferred.resolve(); - }, - defer: true, - maxTime: 5, - }) - .on("cycle", async (event: any) => { - // eslint-disable-next-line no-console - console.log(String(event.target)); - }) - .on("error", (err: any) => { - throw err; - }) - .on("complete", async () => { - await stringIndexEmpty.indices.stop(); - stringIndexEmpty.directory && - fs.rmSync(stringIndexEmpty.directory, { recursive: true, force: true }); - - await stringIndexPreFilled.indices.stop(); - stringIndexPreFilled.directory && - fs.rmSync(stringIndexPreFilled.directory, { - recursive: true, - force: true, - }); - - done.resolve(); - }) - .on("error", (e) => { - done.reject(e); + .add("string count no-matches - " + type, async () => { + const out = Math.random() > 0.5 ? 
true : false; + await stringIndexPreFilled.store.count({ + query: new StringMatch({ key: "string", value: uuid() }), + }); }) .run(); - return done.promise; + + await stringIndexEmpty.indices.stop(); + stringIndexEmpty.directory && + fs.rmSync(stringIndexEmpty.directory, { recursive: true, force: true }); + + await stringIndexPreFilled.indices.stop(); + stringIndexPreFilled.directory && + fs.rmSync(stringIndexPreFilled.directory, { + recursive: true, + force: true, + }); + + /* eslint-disable no-console */ + console.table(suite.table()); }; const boolQueryBenchmark = async ( @@ -183,58 +156,204 @@ const boolQueryBenchmark = async ( type, ); - let done = pDefer(); - const suite = new B.Suite({ delay: 100 }); - suite - .add("bool query - " + type, { - fn: async (deferred: any) => { - const out = Math.random() > 0.5 ? true : false; - const iterator = await boolIndexPrefilled.store.iterate({ - query: new BoolQuery({ key: "bool", value: out }), - }); - await iterator.next(10); - await iterator.close(); - deferred.resolve(); - }, - defer: true, - maxTime: 5, + const suite = new B.Bench({ warmupIterations: 1000 }); + let fetch = 10; + await suite + .add(`bool query fetch ${fetch} - ${type}`, async () => { + const out = Math.random() > 0.5 ? true : false; + const iterator = await boolIndexPrefilled.store.iterate({ + query: new BoolQuery({ key: "bool", value: out }), + }); + await iterator.next(10); + await iterator.close(); }) - .add("bool put - " + type, { - fn: async (deferred: any) => { - await boolIndexEmpty.store.put( - new BoolQueryDocument(uuid(), Math.random() > 0.5 ? true : false), - ); - deferred.resolve(); - }, - defer: true, - maxTime: 5, + .add(`non bool query fetch ${fetch} - ${type}`, async () => { + const iterator = await boolIndexPrefilled.store.iterate(); + await iterator.next(10); + await iterator.close(); }) - .on("cycle", async (event: any) => { - // eslint-disable-next-line no-console - console.log(String(event.target)); + + .add(`non bool query fetch with sort ${fetch} - ${type}`, async () => { + const iterator = boolIndexPrefilled.store.iterate({ + sort: [new Sort({ key: "id" })], + }); + await iterator.next(10); + await iterator.close(); + }) + .add(`bool put - ${type}`, async () => { + await boolIndexEmpty.store.put( + new BoolQueryDocument(uuid(), Math.random() > 0.5 ? 
true : false), + ); }) - .on("error", (err: any) => { - throw err; + .run(); + + await boolIndexEmpty.indices.stop(); + boolIndexEmpty.directory && + fs.rmSync(boolIndexEmpty.directory, { recursive: true, force: true }); + + await boolIndexPrefilled.indices.stop(); + boolIndexPrefilled.directory && + fs.rmSync(boolIndexPrefilled.directory, { + recursive: true, + force: true, + }); + + /* eslint-disable no-console */ + console.table(suite.table()); +}; + +const inequalityBenchmark = async ( + createIndicies: (directory?: string) => Indices | Promise, + type: "transient" | "persist" = "transient", +) => { + class NumberQueryDocument { + @id({ type: "string" }) + id: string; + + @field({ type: "u32" }) + number: number; + + constructor(id: string, number: number) { + this.id = id; + this.number = number; + } + } + + const fs = await import("fs"); + + const numberIndexPrefilled = await setup( + { schema: NumberQueryDocument }, + createIndicies, + type, + ); + let docCount = 10e4; + for (let i = 0; i < docCount; i++) { + await numberIndexPrefilled.store.put(new NumberQueryDocument(uuid(), i)); + } + + const boolIndexEmpty = await setup( + { schema: NumberQueryDocument }, + createIndicies, + type, + ); + + // warmup + for (let i = 0; i < 1000; i++) { + const iterator = numberIndexPrefilled.store.iterate({ + query: new IntegerCompare({ + key: "number", + compare: Compare.Less, + value: 11, + }), + }); + await iterator.next(10); + await iterator.close(); + } + + const suite = new B.Bench({ warmupIterations: 1000 }); + let fetch = 10; + await suite + .add(`number query fetch ${fetch} - ${type}`, async () => { + const iterator = numberIndexPrefilled.store.iterate({ + query: new IntegerCompare({ + key: "number", + compare: Compare.Less, + value: 11, + }), + }); + await iterator.next(10); + await iterator.close(); }) - .on("complete", async () => { - await boolIndexEmpty.indices.stop(); - boolIndexEmpty.directory && - fs.rmSync(boolIndexEmpty.directory, { recursive: true, force: true }); - - await boolIndexPrefilled.indices.stop(); - boolIndexPrefilled.directory && - fs.rmSync(boolIndexPrefilled.directory, { - recursive: true, - force: true, - }); - - done.resolve(); + + .add(`non number query fetch ${fetch} - ${type}`, async () => { + const iterator = numberIndexPrefilled.store.iterate(); + await iterator.next(10); + await iterator.close(); }) - .on("error", (e) => { - done.reject(e); + + .add(`number put - ${type}`, async () => { + await boolIndexEmpty.store.put( + new NumberQueryDocument(uuid(), Math.round(Math.random() * 0xffffffff)), + ); }) .run(); - return done.promise; + + await boolIndexEmpty.indices.stop(); + boolIndexEmpty.directory && + fs.rmSync(boolIndexEmpty.directory, { recursive: true, force: true }); + + await numberIndexPrefilled.indices.stop(); + numberIndexPrefilled.directory && + fs.rmSync(numberIndexPrefilled.directory, { + recursive: true, + force: true, + }); + + /* eslint-disable no-console */ + console.table(suite.table()); +}; + +const getBenchmark = async ( + createIndicies: (directory?: string) => Indices | Promise, + type: "transient" | "persist" = "transient", +) => { + class BoolQueryDocument { + @id({ type: "string" }) + id: string; + + @field({ type: "bool" }) + bool: boolean; + + constructor(id: string, bool: boolean) { + this.id = id; + this.bool = bool; + } + } + + const fs = await import("fs"); + + const boolIndexPrefilled = await setup( + { schema: BoolQueryDocument }, + createIndicies, + type, + ); + let docCount = preFillCount; + let ids = []; + for (let i = 
0; i < docCount; i++) { + let id = uuid(); + ids.push(id); + await boolIndexPrefilled.store.put( + new BoolQueryDocument(id, Math.random() > 0.5 ? true : false), + ); + } + + const boolIndexEmpty = await setup( + { schema: BoolQueryDocument }, + createIndicies, + type, + ); + + const suite = new B.Bench({ warmupIterations: 1000 }); + await suite + .add("get by id - " + type, async () => { + await boolIndexPrefilled.store.get( + ids[Math.floor(Math.random() * ids.length)], + ); + }) + .run(); + await boolIndexEmpty.indices.stop(); + boolIndexEmpty.directory && + fs.rmSync(boolIndexEmpty.directory, { recursive: true, force: true }); + + await boolIndexPrefilled.indices.stop(); + boolIndexPrefilled.directory && + fs.rmSync(boolIndexPrefilled.directory, { + recursive: true, + force: true, + }); + + /* eslint-disable no-console */ + console.table(suite.table()); }; const nestedBoolQueryBenchmark = async ( @@ -284,60 +403,36 @@ const nestedBoolQueryBenchmark = async ( type, ); - let done = pDefer(); - const suite = new B.Suite({ delay: 100 }); + const suite = new B.Bench({ warmupIterations: 1000 }); - suite - .add("nested bool query - " + type, { - fn: async (deferred: any) => { - const out = Math.random() > 0.5 ? true : false; - const iterator = await boolIndexPrefilled.store.iterate({ - query: new BoolQuery({ key: ["nested", "bool"], value: out }), - }); - await iterator.next(10); - await iterator.close(); - deferred.resolve(); - }, - defer: true, - maxTime: 5, - async: true, + await suite + .add("nested bool query - " + type, async () => { + const out = Math.random() > 0.5 ? true : false; + const iterator = await boolIndexPrefilled.store.iterate({ + query: new BoolQuery({ key: ["nested", "bool"], value: out }), + }); + await iterator.next(10); + await iterator.close(); }) - .add("nested bool put - " + type, { - fn: async (deferred: any) => { - await boolIndexEmpty.store.put( - new NestedBoolQueryDocument( - uuid(), - Math.random() > 0.5 ? true : false, - ), - ); - deferred.resolve(); - }, - defer: true, - maxTime: 5, - async: true, - }) - .on("cycle", async (event: any) => { - // eslint-disable-next-line no-console - console.log(String(event.target)); - }) - .on("error", (err: any) => { - done.reject(err); - }) - .on("complete", async () => { - await boolIndexEmpty.indices.stop(); - boolIndexEmpty.directory && - fs.rmSync(boolIndexEmpty.directory, { recursive: true, force: true }); - - await boolIndexPrefilled.indices.stop(); - boolIndexPrefilled.directory && - fs.rmSync(boolIndexPrefilled.directory, { - recursive: true, - force: true, - }); - done.resolve(); + .add("nested bool put - " + type, async () => { + await boolIndexEmpty.store.put( + new NestedBoolQueryDocument(uuid(), Math.random() > 0.5 ? true : false), + ); }) .run(); - return done.promise; + await boolIndexEmpty.indices.stop(); + boolIndexEmpty.directory && + fs.rmSync(boolIndexEmpty.directory, { recursive: true, force: true }); + + await boolIndexPrefilled.indices.stop(); + boolIndexPrefilled.directory && + fs.rmSync(boolIndexPrefilled.directory, { + recursive: true, + force: true, + }); + + /* eslint-disable no-console */ + console.table(suite.table()); }; const shapedQueryBenchmark = async ( @@ -383,73 +478,272 @@ const shapedQueryBenchmark = async ( ); } - let done = pDefer(); - const suite = new B.Suite({ delay: 100 }); + const suite = new B.Bench({ warmupIterations: 1000 }); let fetch = 10; - suite - .add("unshaped nested query - " + type, { - fn: async (deferred: any) => { - const out = Math.random() > 0.5 ? 
true : false; - let iterator = await boolIndexPrefilled.store.iterate({ - query: new BoolQuery({ key: ["nested", "bool"], value: out }), - }); - const results = await iterator.next(fetch); - await iterator.close(); - if (results.length !== fetch) { - throw new Error("Missing results"); - } - deferred.resolve(); - }, - defer: true, - maxTime: 5, - async: true, + await suite + .add("unshaped nested array query - " + type, async () => { + const out = Math.random() > 0.5 ? true : false; + let iterator = await boolIndexPrefilled.store.iterate({ + query: new BoolQuery({ key: ["nested", "bool"], value: out }), + }); + const results = await iterator.next(fetch); + await iterator.close(); + if (results.length !== fetch) { + throw new Error("Missing results"); + } }) - .add("shaped nested query - " + type, { - fn: async (deferred: any) => { - const out = Math.random() > 0.5 ? true : false; - const iterator = await boolIndexPrefilled.store.iterate( - { - query: new BoolQuery({ key: ["nested", "bool"], value: out }), - }, - { shape: { id: true } }, - ); - const results = await iterator.next(fetch); - await iterator.close(); - if (results.length !== fetch) { - throw new Error("Missing results"); - } - deferred.resolve(); - }, - defer: true, - maxTime: 5, - async: true, - }) - .on("cycle", async (event: any) => { - // eslint-disable-next-line no-console - console.log(String(event.target)); - }) - .on("error", (err: any) => { - done.reject(err); + .add("shaped nested array query - " + type, async () => { + const out = Math.random() > 0.5 ? true : false; + const iterator = boolIndexPrefilled.store.iterate( + { + query: new BoolQuery({ key: ["nested", "bool"], value: out }), + }, + { shape: { id: true } }, + ); + const results = await iterator.next(fetch); + await iterator.close(); + if (results.length !== fetch) { + throw new Error("Missing results"); + } }) - .on("complete", async () => { - await boolIndexPrefilled.indices.stop(); - boolIndexPrefilled.directory && - fs.rmSync(boolIndexPrefilled.directory, { - recursive: true, - force: true, - }); - done.resolve(); + .add("nested fetch without query - " + type, async () => { + const iterator = boolIndexPrefilled.store.iterate( + {}, + { shape: { id: true } }, + ); + const results = await iterator.next(fetch); + await iterator.close(); + if (results.length !== fetch) { + throw new Error("Missing results"); + } }) .run(); - return done.promise; + + await boolIndexPrefilled.indices.stop(); + boolIndexPrefilled.directory && + fs.rmSync(boolIndexPrefilled.directory, { + recursive: true, + force: true, + }); + + /* eslint-disable no-console */ + console.table(suite.table()); +}; + +const multiFieldQueryBenchmark = async ( + createIndicies: (directory?: string) => Indices | Promise, + type: "transient" | "persist" = "transient", +) => { + class ReplicationRangeIndexableU32 { + @id({ type: "string" }) + id: string; + + @field({ type: "string" }) + hash: string; + + @field({ type: "u64" }) + timestamp: bigint; + + @field({ type: "u32" }) + start1!: number; + + @field({ type: "u32" }) + end1!: number; + + @field({ type: "u32" }) + start2!: number; + + @field({ type: "u32" }) + end2!: number; + + @field({ type: "u32" }) + width!: number; + + @field({ type: "u8" }) + mode: number; + + constructor(properties: { + id?: string; + hash: string; + timestamp: bigint; + start1: number; + end1: number; + start2: number; + end2: number; + width: number; + mode: number; + }) { + this.id = properties.id || uuid(); + this.hash = properties.hash; + this.timestamp = 
properties.timestamp; + this.start1 = properties.start1; + this.end1 = properties.end1; + this.start2 = properties.start2; + this.end2 = properties.end2; + this.width = properties.width; + this.mode = properties.mode; + } + } + + const indexPrefilled = await setup( + { schema: ReplicationRangeIndexableU32 }, + createIndicies, + type, + ); + + let docCount = 10e4; // This is very small, so we expect that the ops will be very fast (i.e a few amount to join) + for (let i = 0; i < docCount; i++) { + await indexPrefilled.store.put( + new ReplicationRangeIndexableU32({ + hash: uuid(), + timestamp: BigInt(i), + start1: i, + end1: i + 1, + start2: i + 2, + end2: i + 3, + width: i + 4, + mode: i % 3, + }), + ); + } + + const suite = new B.Bench({ warmupIterations: 500 }); + let fetch = 10; + + const fs = await import("fs"); + const ors: any[] = []; + for (const point of [10, docCount - 4]) { + ors.push( + new And([ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.Greater, + value: point, + }), + ]), + ); + ors.push( + new And([ + new IntegerCompare({ + key: "start2", + compare: Compare.LessOrEqual, + value: point, + }), + new IntegerCompare({ + key: "end2", + compare: Compare.Greater, + value: point, + }), + ]), + ); + } + let complicatedQuery = [ + new Or(ors) /* , + new IntegerCompare({ + key: "timestamp", + compare: Compare.Less, + value: 100 + }) */, + ]; + + const suites: { query: Query[]; name: string }[] = [ + /* { + query: [ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: 5, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.Greater, + value: 5, + }) + ], name: "2-fields query" + }, + { + query: [ + + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: 5, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.Greater, + value: 5, + }), + new IntegerCompare({ + key: "timestamp", + compare: Compare.Less, + value: 10, + }), + ], name: "3-fields query" + }, */ + { query: complicatedQuery, name: "3-fields or query" }, + ]; + suites.forEach(({ query, name }) => { + suite.add(`m-field ${name} query small fetch - ${type}`, async () => { + const iterator = await indexPrefilled.store.iterate({ + query, + }); + const results = await iterator.next(fetch); + await iterator.close(); + + if (results.length === 0) { + throw new Error("No results"); + } + }); + + /* .add(`m-field ${name} query all fetch - ${type}`, async () => { + const iterator = indexPrefilled.store.iterate({ + query, + }); + const results = await iterator.all(); + + if (results.length === 0) { + throw new Error("No results"); + } + }) */ + }); + + /* suite.add("m-field no query small fetch - " + type, async () => { + const iterator = await indexPrefilled.store.iterate(); + const results = await iterator.next(fetch); + await iterator.close(); + + if (results.length === 0) { + throw new Error("No results"); + } + }) */ + await suite.run(); + + await indexPrefilled.indices.stop(); + indexPrefilled.directory && + fs.rmSync(indexPrefilled.directory, { + recursive: true, + force: true, + }); + + /* eslint-disable no-console */ + console.table(suite.table()); }; export const benchmarks = async ( createIndicies: (directory?: string) => Indices | Promise, type: "transient" | "persist" = "transient", ) => { - await strinbBenchmark(createIndicies, type); + await inequalityBenchmark(createIndicies, type); + await multiFieldQueryBenchmark(createIndicies, type); + await 
stringBenchmark(createIndicies, type); await shapedQueryBenchmark(createIndicies, type); + await getBenchmark(createIndicies, type); await boolQueryBenchmark(createIndicies, type); await nestedBoolQueryBenchmark(createIndicies, type); }; diff --git a/packages/utils/indexer/tests/src/tests.ts b/packages/utils/indexer/tests/src/tests.ts index 93188d989..6150f19ce 100644 --- a/packages/utils/indexer/tests/src/tests.ts +++ b/packages/utils/indexer/tests/src/tests.ts @@ -7,7 +7,7 @@ import { variant, vec, } from "@dao-xyz/borsh"; -import { randomBytes } from "@peerbit/crypto"; +import { randomBytes, sha256Base64Sync } from "@peerbit/crypto"; import { And, BoolQuery, @@ -20,7 +20,6 @@ import { IntegerCompare, IsNull, type IterateOptions, - Nested, Not, Or, Query, @@ -289,7 +288,7 @@ export const tests = ( afterEach(async () => { defaultDocs = []; - await indices?.stop?.(); + await indices?.drop?.(); }); describe("indexBy", () => { @@ -848,7 +847,7 @@ export const tests = ( }); }); }); - + it("bool", async () => { await setupDefault(); const responses = await search(store, { @@ -911,38 +911,203 @@ @field({ type: vec(Document) }) documents: Document[]; - constructor(properties?: { documents: Document[] }) { + constructor(properties?: { + id?: Uint8Array; + documents: Document[]; + }) { - this.id = randomBytes(32); + this.id = properties?.id || randomBytes(32); this.documents = properties?.documents || []; } } - it("can search", async () => { - const out = await setup({ schema: DocumentsVec }); - store = out.store; + describe("search", () => { + let d1: DocumentsVec; + let d2: DocumentsVec; + let d3: DocumentsVec; - const d1 = new DocumentsVec({ - documents: [ - new Document({ id: uuid(), number: 123n, tags: [] }), - ], - }); - await store.put(d1); - await store.put( - new DocumentsVec({ + beforeEach(async () => { + const out = await setup({ schema: DocumentsVec }); + store = out.store; + + d1 = new DocumentsVec({ + id: new Uint8Array([0]), + documents: [ + new Document({ id: uuid(), number: 123n, tags: [] }), + ], + }); + await store.put(d1); + + d2 = new DocumentsVec({ + id: new Uint8Array([1]), documents: [ new Document({ id: uuid(), number: 124n, tags: [] }), ], - }), - ); + }); - const results = await search(store, { - query: new IntegerCompare({ - key: ["documents", "number"], - compare: Compare.Equal, - value: d1.documents[0]!.number, - }), + await store.put(d2); + + d3 = new DocumentsVec({ + id: new Uint8Array([2]), + documents: [ + new Document({ id: uuid(), number: 122n, tags: [] }), + new Document({ id: uuid(), number: 125n, tags: [] }), + ], + }); + + await store.put(d3); + }); + + it("match", async () => { + // equality + const results = await search(store, { + query: new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Equal, + value: d1.documents[0]!.number, + }), + }); + expect(results.map((x) => x.value.id)).to.deep.equal([d1.id]); + }); + + describe("logical", () => { + it("and", async () => { + // can not be equal to two different things at once + let results = await search(store, { + query: [ + new And([ + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Equal, + value: 123n, + }), + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Equal, + value: 124n, + }), + ]), + ], + }); + expect(results).to.have.length(0); + + // can not be between two different things at once + results = await search(store, { + query: [ + new And([ + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Less, + value: 
1000n, + }), + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Greater, + value: 1000n, + }), + ]), + ], + }); + expect(results).to.have.length(0); + + // between one value matches + results = await search(store, { + query: [ + new Or([ + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Less, + value: 124n, + }), + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.GreaterOrEqual, + value: 125n, + }), + ]), + ], + }); + + // because each query is applied separately + expect( + results.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([d1.id, d3.id].map(sha256Base64Sync)); + + results = await search(store, { + query: [ + new And([ + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Less, + value: 124n, + }), + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.GreaterOrEqual, + value: 123n, + }), + ]), + ], + }); + + expect(results.map((x) => x.value.id)).to.deep.eq([d1.id]); + }); + + it("or", async () => { + const results3 = await search(store, { + query: [ + new Or([ + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Equal, + value: 123n, + }), + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Equal, + value: 124n, + }), + ]), + ], + }); + + expect( + results3.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([ + sha256Base64Sync(d1.id), + sha256Base64Sync(d2.id), + ]); + }); + + it("or arr, or field", async () => { + const results3 = await search(store, { + query: [ + new Or([ + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Equal, + value: 123n, + }), + new IntegerCompare({ + key: ["documents", "number"], + compare: Compare.Equal, + value: 124n, + }), + new ByteMatchQuery({ + key: "id", + value: new Uint8Array([1]), + }), + ]), + ], + }); + + expect( + results3.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([ + sha256Base64Sync(d1.id), + sha256Base64Sync(d2.id), + ]); + }); }); - expect(results.map((x) => x.value.id)).to.deep.equal([d1.id]); }); it("update array", async () => { @@ -1048,6 +1213,342 @@ export const tests = ( ).to.equal(0); }); }); + + describe("simple value", () => { + class ArrayDocument { + @field({ type: Uint8Array }) + id: Uint8Array; + + @field({ type: vec("u32") }) + array: number[]; + + constructor(properties?: { id?: Uint8Array; array: number[] }) { + this.id = properties.id || randomBytes(32); + this.array = properties?.array || []; + } + } + + describe("search", () => { + let d1: ArrayDocument; + let d2: ArrayDocument; + let d3: ArrayDocument; + + beforeEach(async () => { + const out = await setup({ schema: ArrayDocument }); + store = out.store; + + d1 = new ArrayDocument({ + id: new Uint8Array([0]), + array: [1], + }); + await store.put(d1); + + d2 = new ArrayDocument({ + id: new Uint8Array([1]), + array: [2], + }); + + await store.put(d2); + + d3 = new ArrayDocument({ + id: new Uint8Array([2]), + array: [0, 3], + }); + + await store.put(d3); + }); + + it("match", async () => { + // equality + const results = await search(store, { + query: new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: d1.array[0]!, + }), + }); + expect(results.map((x) => x.value.id)).to.deep.equal([d1.id]); + }); + + describe("logical", () => { + it("and", async () => { + // can not be equal to two different things at once + let results = await search(store, { + query: [ + new And([ + new IntegerCompare({ + key: ["array"], + compare: 
Compare.Equal, + value: d1.array[0]!, + }), + new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: d2.array[0]!, + }), + ]), + ], + }); + expect(results).to.have.length(0); + + // can not be between two different things at once + results = await search(store, { + query: [ + new And([ + new IntegerCompare({ + key: ["array"], + compare: Compare.Less, + value: 1000, + }), + new IntegerCompare({ + key: ["array"], + compare: Compare.Greater, + value: 1000, + }), + ]), + ], + }); + expect(results).to.have.length(0); + + // between one value matches + results = await search(store, { + query: [ + new Or([ + new IntegerCompare({ + key: ["array"], + compare: Compare.Less, + value: 2, + }), + new IntegerCompare({ + key: ["array"], + compare: Compare.GreaterOrEqual, + value: 3, + }), + ]), + ], + }); + + // because each query is applied separately + expect( + results.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([d1.id, d3.id].map(sha256Base64Sync)); + + results = await search(store, { + query: [ + new And([ + new IntegerCompare({ + key: ["array"], + compare: Compare.Less, + value: 2, + }), + new IntegerCompare({ + key: ["array"], + compare: Compare.GreaterOrEqual, + value: 1, + }), + ]), + ], + }); + + // using nested path will apply the queries together + expect( + results.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([d1.id].map(sha256Base64Sync)); + }); + + it("or", async () => { + const results3 = await search(store, { + query: [ + new Or([ + new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: d1.array[0]!, + }), + new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: d3.array[0]!, + }), + ]), + ], + }); + + expect( + results3.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([ + sha256Base64Sync(d1.id), + sha256Base64Sync(d3.id), + ]); + }); + + it("or array, or field", async () => { + const results3 = await search(store, { + query: [ + new Or([ + new And([ + new IntegerCompare({ + key: ["array"], + compare: Compare.LessOrEqual, + value: 0, + }), + new IntegerCompare({ + key: ["array"], + compare: Compare.LessOrEqual, + value: 1, + }), + ]), + new ByteMatchQuery({ + key: "id", + value: new Uint8Array([0]), + }), + ]), + ], + }); + + expect( + results3.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([ + sha256Base64Sync(d1.id), + sha256Base64Sync(d3.id), + ]); + }); + + it("or all", async () => { + const results = await search(store, { + query: [ + new Or([ + new Or([ + new And([ + new IntegerCompare({ + key: ["array"], + compare: Compare.GreaterOrEqual, + value: 0, + }), + ]), + new And([ + new IntegerCompare({ + key: ["array"], + compare: Compare.GreaterOrEqual, + value: 0, + }), + ]), + ]), + new IntegerCompare({ + key: ["array"], + compare: Compare.GreaterOrEqual, + value: 0, + }), + ]), + ], + }); + + expect( + results.map((x) => sha256Base64Sync(x.value.id)), + ).to.have.members([ + sha256Base64Sync(d1.id), + sha256Base64Sync(d2.id), + sha256Base64Sync(d3.id), + ]); + }); + }); + }); + + it("update array", async () => { + const out = await setup({ schema: ArrayDocument }); + store = out.store; + + const d1 = new ArrayDocument({ + array: [123], + }); + await store.put(d1); + + d1.array = [124]; + + await store.put(d1); + + // should have update results + expect( + ( + await search(store, { + query: new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: 123, + }), + }) + ).length, + ).to.equal(0); + + expect( + ( + await search(store, { + query: new 
IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: 124, + }), + }) + ).map((x) => x.value.id), + ).to.deep.equal([d1.id]); + }); + + it("put delete put", async () => { + const { store } = await setup({ schema: ArrayDocument }); + + const d1 = new ArrayDocument({ + array: [123], + }); + + await store.put(d1); + const [deleted] = await store.del({ + query: { + id: d1.id, + }, + }); + + expect(deleted.key).to.deep.equal(d1.id); + + expect( + ( + await search(store, { + query: new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: 123, + }), + }) + ).length, + ).to.equal(0); + + d1.array = [124]; + await store.put(d1); + + expect( + ( + await search(store, { + query: new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: 124, + }), + }) + ).map((x) => x.value.id), + ).to.deep.equal([d1.id]); + + expect( + ( + await search(store, { + query: new IntegerCompare({ + key: ["array"], + compare: Compare.Equal, + value: 123, + }), + }) + ).length, + ).to.equal(0); + }); + }); }); describe("logical", () => { @@ -1132,7 +1633,7 @@ export const tests = ( new IntegerCompare({ key: "number", compare: Compare.Equal, - value: 2n, + value: 2, }), ], }); @@ -1146,7 +1647,7 @@ export const tests = ( new IntegerCompare({ key: "number", compare: Compare.Greater, - value: 2n, + value: 2, }), ], }); @@ -1160,7 +1661,7 @@ export const tests = ( new IntegerCompare({ key: "number", compare: Compare.GreaterOrEqual, - value: 2n, + value: 2, }), ], }); @@ -1178,7 +1679,7 @@ export const tests = ( new IntegerCompare({ key: "number", compare: Compare.Less, - value: 2n, + value: 2, }), ], }); @@ -1203,6 +1704,20 @@ export const tests = ( expect(response[0].value.number).to.be.oneOf([1n, 1]); expect(response[1].value.number).to.be.oneOf([2n, 2]); }); + + it("bigint as compare value", async () => { + const response = await search(store, { + query: [ + new IntegerCompare({ + key: "number", + compare: Compare.Less, + value: 2n, + }), + ], + }); + expect(response).to.have.length(1); + expect(response[0].value.number).to.be.oneOf([1n, 1]); + }); }); describe("bigint", () => { @@ -1313,6 +1828,19 @@ export const tests = ( expect(response[0].value.bigint).to.equal(first); expect(response[1].value.bigint).to.equal(second); }); + + it("number as compare value", async () => { + const response = await search(store, { + query: [ + new IntegerCompare({ + key: "bigint", + compare: Compare.Greater, + value: 1, + }), + ], + }); + expect(response).to.have.length(3); + }); }); describe("nested", () => { @@ -1348,6 +1876,31 @@ export const tests = ( await setup({ schema: DocumentWithNesting }); }); + it("all", async () => { + await store.put( + new DocumentWithNesting({ + id: "1", + nested: new Nested({ number: 1n, bool: false }), + }), + ); + + await store.put( + new DocumentWithNesting({ + id: "2", + nested: undefined, + }), + ); + + const all = await search(store, {}); + expect(all).to.have.length(2); + expect(all.map((x) => x.id.primitive)).to.have.members([ + "1", + "2", + ]); + expect(all[0].value.nested).to.be.instanceOf(Nested); + expect(all[1].value.nested).to.be.undefined; + }); + it("number", async () => { await store.put( new DocumentWithNesting({ @@ -2096,21 +2649,16 @@ export const tests = ( ); const response = await search(store, { - query: [ - new Nested({ - path: "array", - query: new And([ - new StringMatch({ - key: "a", - value: "hello", - }), - new StringMatch({ - key: "b", - value: "world", - }), - ]), + query: new And([ + new StringMatch({ + key: ["array", "a"], + 
value: "hello", }), - ], + new StringMatch({ + key: ["array", "b"], + value: "world", + }), + ]), }); expect(response).to.have.length(1); @@ -2130,24 +2678,40 @@ export const tests = ( }), ], }); + const doc2 = new NestedMultipleFieldsArrayDocument({ + id: "2", + array: [ + new NestedMultipleFieldsDocument({ + a: "hello", + b: "värld", + }), + new NestedMultipleFieldsDocument({ + a: "hej", + b: "world", + }), + ], + }); + + const doc3 = new NestedMultipleFieldsArrayDocument({ + id: "3", + array: [ + new NestedMultipleFieldsDocument({ + a: "_", + b: "_", + }), + new NestedMultipleFieldsDocument({ + a: "_", + b: "_", + }), + ], + }); + await store.put(doc1); - await store.put( - new NestedMultipleFieldsArrayDocument({ - id: "2", - array: [ - new NestedMultipleFieldsDocument({ - a: "hello", - b: "värld", - }), - new NestedMultipleFieldsDocument({ - a: "hej", - b: "world", - }), - ], - }), - ); + await store.put(doc2); + await store.put(doc3); - const response = await search(store, { + // AND will only yield doc 1 since only doc 1 contains the combination below + const responseAnd = await search(store, { query: [ new StringMatch({ key: ["array", "a"], @@ -2160,8 +2724,28 @@ export const tests = ( ], }); + expect(responseAnd).to.have.length(1); + checkDocument(responseAnd[0].value, doc1); + + // OR will only yield doc 1 and doc 2 since both will fulfill one of two conditions + const response = await search(store, { + query: [ + new Or([ + new StringMatch({ + key: ["array", "a"], + value: "hello", + }), + new StringMatch({ + key: ["array", "b"], + value: "world", + }), + ]), + ], + }); expect(response).to.have.length(2); + checkDocument(response[0].value, doc1); + checkDocument(response[1].value, doc2); }); }); }); @@ -2693,11 +3277,14 @@ export const tests = ( }); describe("sort", () => { - const put = async (id: number, stringId?: string) => { + const put = async ( + id: number, + options?: { name?: string; number?: bigint }, + ) => { const doc = new Document({ - id: stringId ?? String(id), - name: String(id), - number: BigInt(id), + id: String(id), + name: options?.name ?? String(id), + number: options?.number ?? 
BigInt(id), tags: [], }); const resp = await store.put(doc); @@ -2866,12 +3453,12 @@ export const tests = ( /* it("no sort is stable", async () => { // TODO this test is actually not a good predictor of stability - + const insertCount = 500; for (let i = 0; i < insertCount; i++) { await put(i, uuid()); } - + const resolvedValues: Set = new Set() const batchSize = 123; const iterator = store.iterate(); @@ -2897,6 +3484,25 @@ export const tests = ( await assertIteratorIsDone(iterator); }); + it("sort by multiple properties", async () => { + await put(0, { name: "a", number: 2n }); + await put(1, { name: "a", number: 1n }); + + await put(2, { name: "b", number: 2n }); + await put(3, { name: "b", number: 1n }); + + const iterator = store.iterate({ + query: [], + sort: [ + new Sort({ direction: SortDirection.DESC, key: "name" }), + new Sort({ direction: SortDirection.ASC, key: "number" }), + ], + }); + + const out = await iterator.all(); + expect(out.map((x) => x.value.id)).to.deep.equal(["3", "2", "1", "0"]); + }); + describe("nested", () => { it("variants", async () => { const doc1 = new Document({ @@ -3395,6 +4001,8 @@ export const tests = ( }); it("indices", async () => { + // TODO make this test more clear and the purpose of each call + let { directory, indices } = await setupDefault(); let subindex = await indices.scope("x"); @@ -3417,7 +4025,11 @@ export const tests = ( await indices.drop(); - await store.start(); + const out = await setup({ schema: Document }, directory); + indices = out.indices; + await indices.start(); + + store = out.store; expect(await store.getSize()).equal(0); @@ -3426,7 +4038,6 @@ export const tests = ( store = (await setup({ schema: Document }, directory)).store; - await store.start(); expect(await store.getSize()).equal(0); }); }); diff --git a/packages/utils/rateless-iblt/.gitignore b/packages/utils/rateless-iblt/.gitignore new file mode 100644 index 000000000..2b77276d4 --- /dev/null +++ b/packages/utils/rateless-iblt/.gitignore @@ -0,0 +1,2 @@ +target +pkg \ No newline at end of file diff --git a/packages/utils/rateless-iblt/Cargo.lock b/packages/utils/rateless-iblt/Cargo.lock new file mode 100644 index 000000000..1abea4b3e --- /dev/null +++ b/packages/utils/rateless-iblt/Cargo.lock @@ -0,0 +1,679 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstyle" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clap" +version = "4.5.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.5.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +dependencies = [ + "anstyle", + "clap_lex", +] + +[[package]] +name = "clap_lex" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" + +[[package]] +name = "cpufeatures" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +dependencies = [ + "libc", +] + +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + 
"clap", + "criterion-plot", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "either" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "is-terminal" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +dependencies = [ + "hermit-abi", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.76" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "libc" +version = "0.2.161" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] +name = "oorandom" +version = "11.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" + +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + +[[package]] +name = "proc-macro2" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.8" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "riblt" +version = "0.1.0" +dependencies = [ + "criterion", + "js-sys", + "sha2", + "wasm-bindgen", +] + +[[package]] +name = "ryu" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "serde" +version = "1.0.214" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.214" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.132" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "syn" +version = "2.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" 
+dependencies = [ + "cfg-if", + "once_cell", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.99" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" + +[[package]] +name = "web-sys" +version = "0.3.72" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/packages/utils/rateless-iblt/Cargo.toml b/packages/utils/rateless-iblt/Cargo.toml new file mode 100644 index 000000000..de406a234 --- /dev/null +++ b/packages/utils/rateless-iblt/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "riblt" +version = "0.1.0" +edition = "2021" +license = "MIT" +description = "Rust port of RIBLT library by yang1996" +repository = "https://github.com/Intersubjective/riblt-rust" +readme = "README.md" +exclude = [ + "TODO", + "mapping_ref.txt", +] + +[lib] +crate-type = ["cdylib", "rlib"] + +[[bench]] +name = "riblt_bench" +harness = false + +[dev-dependencies] +criterion = "0.5.1" +sha2 = "0.10.8" + +[dependencies] +js-sys = "0.3.76" +wasm-bindgen = { version = "0.2.99" } + + +[package.metadata.wasm-pack.profile.release] +wasm-opt = false \ No newline at end of file diff --git a/packages/utils/rateless-iblt/LICENSE b/packages/utils/rateless-iblt/LICENSE new file mode 100644 index 000000000..b71b54f39 --- /dev/null +++ b/packages/utils/rateless-iblt/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Intersubjective + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/packages/utils/rateless-iblt/README.md b/packages/utils/rateless-iblt/README.md new file mode 100644 index 000000000..9800284af --- /dev/null +++ b/packages/utils/rateless-iblt/README.md @@ -0,0 +1,101 @@ +# riblt-rust +Rust port of [RIBLT library](https://github.com/yangl1996/riblt) by yang1996. + +Implementation of Rateless Invertible Bloom Lookup Tables (Rateless IBLTs), as +proposed in paper Practical Rateless Set Reconciliation by Lei Yang, Yossi +Gilad, and Mohammad Alizadeh. Preprint available at +[arxiv.org/abs/2402.02668](https://arxiv.org/abs/2402.02668). 
+ +## Library API + +To use this library, implement the `Symbol` trait, and create `Encoder` or `Decoder` objects to encode and decode symbols. + +### `Symbol` trait +- `fn zero() -> Self` - Create a zero symbol. +- `fn xor(&self, other: &Self) -> Self` - XOR of this symbol and another symbol. +- `fn hash(&self) -> u64` - Calculate a hash of the symbol. + +Example implementation for 64-bit integer symbols: +```rs +use riblt::*; +use std::hash::{SipHasher, Hasher}; + +pub type MyU64 = u64; + +impl Symbol for MyU64 { + fn zero() -> MyU64 { + return 0; + } + + fn xor(&self, other: &MyU64) -> MyU64 { + return self ^ other; + } + + fn hash(&self) -> u64 { + let mut hasher = SipHasher::new_with_keys(123, 456); + hasher.write_u64(*self); + return hasher.finish(); + } +} +``` + +### `Encoder` methods +- `Encoder::<T>::new()` - Create a new Encoder for symbols of type `T`. +- `enc.reset()` - Reset the Encoder state. +- `enc.add_symbol(symbol: &T)` - Add a new symbol to the Encoder. +- `enc.produce_next_coded_symbol() -> CodedSymbol<T>` - Produce the next coded symbol that can be decoded by the Decoder. + +#### Example usage +```rs +use riblt::*; + +fn foo() { + let mut enc = Encoder::<MyU64>::new(); + let symbols : [MyU64; 5] = [ 1, 2, 3, 4, 5 ]; + for x in symbols { + enc.add_symbol(&x); + } + + let coded = enc.produce_next_coded_symbol(); + + // send symbol to the decoder... +} +``` + +### `Decoder` methods +- `Decoder::<T>::new()` - Create a new Decoder for symbols of type `T`. +- `dec.reset()` - Reset the Decoder state. +- `dec.add_symbol(symbol: &T)` - Add a new symbol to the Decoder. +- `dec.add_coded_symbol(symbol: &CodedSymbol<T>)` - Add a new coded symbol to the Decoder. +- `dec.try_decode()` - Try to decode added symbols. May return `Err(InvalidDegree)`. +- `dec.decoded()` - Returns `true` if all added coded symbols were decoded. +- `dec.get_remote_symbols() -> Vec<HashedSymbol<T>>` - Returns an array of decoded remote symbols. +- `dec.get_local_symbols() -> Vec<HashedSymbol<T>>` - Returns an array of local symbols. + +Remote and local symbols can be accessed directly via Decoder properties: +- `dec.remote.symbols`, +- `dec.local.symbols`. + +#### Example usage +```rs +use riblt::*; + +fn foo() { + let symbols : [CodedSymbol<MyU64>; 5] = ...; + + let mut dec = Decoder::<MyU64>::new(); + for i in 0..symbols.len() { + dec.add_coded_symbol(&symbols[i]); + } + + if dec.try_decode().is_err() { + // Decoding error... + } + + if dec.decoded() { + // Success... + } +} +``` + +For the complete example see test `example` in `src/tests.rs`. diff --git a/packages/utils/rateless-iblt/TODO b/packages/utils/rateless-iblt/TODO new file mode 100644 index 000000000..c174b8043 --- /dev/null +++ b/packages/utils/rateless-iblt/TODO @@ -0,0 +1,5 @@ +To-Do list + +1. Get rid of bloated dependencies: criterion, sha2 +2. Sketch code is never used and doesn't have test coverage. Consider removing it or adding tests. +3. Investigate mapping determinism. 
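Item 3 concerns mapping determinism. `RandomMapping` (defined in `src/encoding.rs` below) is pure integer arithmetic over its `prng` state, so a property check is straightforward. A standalone sketch, assuming the crate exposes the `encoding` module publicly, as the Criterion bench below does; the `main` wrapper is illustrative only:

```rs
use riblt::encoding::RandomMapping;

fn main() {
    // Two mappings seeded identically must produce identical index streams.
    let mut a = RandomMapping { prng: 123456789, last_idx: 0 };
    let mut b = RandomMapping { prng: 123456789, last_idx: 0 };
    let stream_a: Vec<u64> = (0..32).map(|_| a.next_index()).collect();
    let stream_b: Vec<u64> = (0..32).map(|_| b.next_index()).collect();
    assert_eq!(stream_a, stream_b);

    // The gaps grow roughly geometrically, which is what keeps each source
    // symbol present in only a few of the first n coded symbols.
    println!("first indices: {:?}", &stream_a[..8]);
}
```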
+
+For a complete example, see the `example` test in `src/tests.rs`.
diff --git a/packages/utils/rateless-iblt/TODO b/packages/utils/rateless-iblt/TODO
new file mode 100644
index 000000000..c174b8043
--- /dev/null
+++ b/packages/utils/rateless-iblt/TODO
@@ -0,0 +1,5 @@
+To-Do list
+
+1. Get rid of bloated dependencies: criterion, sha2
+2. Sketch code is never used and doesn't have test coverage. Consider removing it or adding tests.
+3. Investigate mapping determinism.
diff --git a/packages/utils/rateless-iblt/benches/riblt_bench.rs b/packages/utils/rateless-iblt/benches/riblt_bench.rs
new file mode 100644
index 000000000..d0edf0805
--- /dev/null
+++ b/packages/utils/rateless-iblt/benches/riblt_bench.rs
@@ -0,0 +1,64 @@
+use riblt::encoding::*;
+use riblt::sketch::*;
+use riblt::testing::*;
+
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+use sha2::{Digest, Sha256};
+
+const N: usize = 10000;
+const M: usize = 15000;
+
+pub fn criterion_benchmark(c: &mut Criterion) {
+    let mut mapp = RandomMapping {
+        prng: 123456789,
+        last_idx: 0,
+    };
+
+    c.bench_function("mapping", |b| b.iter(|| black_box(mapp.next_index())));
+
+    let mut enc = Encoder::<TestSymbol>::new();
+
+    let data: [TestSymbol; N] = core::array::from_fn(|i| new_test_symbol(i as u64));
+
+    c.bench_function("encoding", |b| {
+        b.iter(|| {
+            let mut dummy: u64 = 0;
+            enc.reset();
+            for i in 0..N {
+                enc.add_symbol(&data[i]);
+            }
+            for _ in 0..M {
+                dummy ^= enc.produce_next_coded_symbol().hash;
+            }
+            black_box(dummy)
+        })
+    });
+
+    let sketch_benches = [1000, 100000, 10000000];
+    for size in sketch_benches {
+        let mut sketch = Sketch::<TestSymbol>::new(size);
+        let mut n = 0;
+        c.bench_function(format!("sketch_and_symbol_{}", size).as_str(), |b| {
+            b.iter(|| {
+                sketch.add_symbol(&new_test_symbol(n));
+                n += 1;
+                black_box(sketch.v[0].hash)
+            })
+        });
+    }
+
+    let mut k = 0;
+
+    c.bench_function("sha256", |b| {
+        b.iter(|| {
+            let sym = new_test_symbol(k);
+            k += 1;
+            let mut hasher = Sha256::new();
+            hasher.update(sym);
+            black_box(hasher.finalize())
+        })
+    });
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/packages/utils/rateless-iblt/package.json b/packages/utils/rateless-iblt/package.json
new file mode 100644
index 000000000..58c464f94
--- /dev/null
+++ b/packages/utils/rateless-iblt/package.json
@@ -0,0 +1,58 @@
+{
+  "name": "@peerbit/riblt",
+  "version": "0.0.1",
+  "description": "Riblt",
+  "sideEffects": false,
+  "type": "module",
+  "types": "./pkg/riblt.d.ts",
+  "typesVersions": {
+    "*": {
+      "*": [
+        "*",
+        "pkg/*",
+        "pkg/*/index"
+      ],
+      "src/*": [
+        "*",
+        "pkg/*",
+        "pkg/*/index"
+      ]
+    }
+  },
+  "files": [
+    "src",
+    "dist",
+    "!dist/test",
+    "!**/*.tsbuildinfo"
+  ],
+  "exports": {
+    ".": {
+      "types": "./pkg/riblt.d.ts",
+      "import": "./pkg/riblt.js"
+    }
+  },
+  "eslintConfig": {
+    "extends": "peerbit",
+    "parserOptions": {
+      "project": true,
+      "sourceType": "module"
+    },
+    "ignorePatterns": [
+      "!.aegir.js",
+      "test/ts-use",
+      "*.d.ts"
+    ]
+  },
+  "publishConfig": {
+    "access": "public"
+  },
+  "scripts": {
+    "benchmark": "cargo bench",
+    "clean": "cargo clean",
+    "build": "wasm-pack build --target web && shx rm -rf ./pkg/package.json",
+    "test": "cargo test",
+    "lint": "cargo fmt"
+  },
+  "author": "dao.xyz",
+  "license": "MIT"
+}
diff --git a/packages/utils/rateless-iblt/src/encoding.rs b/packages/utils/rateless-iblt/src/encoding.rs
new file mode 100644
index 000000000..73f042a41
--- /dev/null
+++ b/packages/utils/rateless-iblt/src/encoding.rs
@@ -0,0 +1,397 @@
+// NOTE
+// - Investigate static/dynamic dispatch in regard to
+//   the performance when using traits like Symbol.
+// - Hash values are hardcoded to be u64, make it more generic.
+// - SipHasher is deprecated. Maybe replace it with a different hasher.
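+//
+// Overview: `Symbol` is the user-implemented item type. `Encoder` lazily maps
+// every added symbol into an unbounded stream of `CodedSymbol`s through
+// `RandomMapping`; `Decoder` subtracts its own symbols from incoming coded
+// symbols and peels the set difference back out.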
+
+use std::vec::Vec;
+
+pub trait Symbol {
+    fn zero() -> Self;
+    fn xor(&self, other: &Self) -> Self;
+    fn hash(&self) -> u64;
+}
+
+#[derive(Clone, Copy)]
+pub enum Direction {
+    ADD = 1,
+    REMOVE = -1,
+}
+
+#[derive(Clone, Copy)]
+pub enum Error {
+    InvalidDegree = 1,
+    InvalidSize = 2,
+    DecodeFailed = 3,
+}
+
+#[derive(Clone, Copy)]
+pub struct SymbolMapping {
+    pub source_idx: u64,
+    pub coded_idx: u64,
+}
+
+#[derive(Clone, Copy)]
+pub struct RandomMapping {
+    pub prng: u64,
+    pub last_idx: u64,
+}
+
+#[derive(Clone, Copy)]
+pub struct HashedSymbol<T> {
+    pub symbol: T,
+    pub hash: u64,
+}
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub struct CodedSymbol<T> {
+    pub symbol: T,
+    pub hash: u64,
+    pub count: i64,
+}
+
+#[derive(Clone)]
+pub struct Encoder<T> {
+    pub symbols: Vec<HashedSymbol<T>>,
+    pub mappings: Vec<RandomMapping>,
+    pub queue: Vec<SymbolMapping>,
+    pub next_idx: u64,
+}
+
+pub struct Decoder<T> {
+    coded: Vec<CodedSymbol<T>>,
+    pub local: Encoder<T>,
+    pub remote: Encoder<T>,
+    window: Encoder<T>,
+    decodable: Vec<i64>,
+    num_decoded: u64,
+}
+
+impl RandomMapping {
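+    // One multiplicative step of a simple PRNG followed by a jump whose size
+    // grows with `last_idx`: the index sequence thins out so that a symbol
+    // lands in coded symbol j with probability of roughly 1/(1 + j/2), the
+    // decreasing density the rateless construction relies on.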
+    pub fn next_index(&mut self) -> u64 {
+        let r = self.prng.wrapping_mul(0xda942042e4dd58b5);
+        self.prng = r;
+        self.last_idx = self.last_idx.wrapping_add(
+            (((self.last_idx as f64) + 1.5)
+                * (((1i64 << 32) as f64) / f64::sqrt((r as f64) + 1.0) - 1.0))
+            .ceil() as u64,
+        );
+        return self.last_idx;
+    }
+}
+
+impl<T: Symbol + Copy> CodedSymbol<T> {
+    pub fn apply(&mut self, sym: &HashedSymbol<T>, direction: Direction) {
+        self.symbol = self.symbol.xor(&sym.symbol);
+        self.hash ^= sym.hash;
+        self.count += direction as i64;
+    }
+}
+
+impl<T: Symbol + Copy + PartialEq> Encoder<T> {
+    pub fn new() -> Self {
+        return Encoder::<T> {
+            symbols: Vec::<HashedSymbol<T>>::new(),
+            mappings: Vec::<RandomMapping>::new(),
+            queue: Vec::<SymbolMapping>::new(),
+            next_idx: 0,
+        };
+    }
+
+    pub fn reset(&mut self) {
+        self.symbols.clear();
+        self.mappings.clear();
+        self.queue.clear();
+        self.next_idx = 0;
+    }
+
+    pub fn add_hashed_symbol_with_mapping(&mut self, sym: &HashedSymbol<T>, mapp: &RandomMapping) {
+        self.symbols.push(*sym);
+        self.mappings.push(*mapp);
+
+        self.queue.push(SymbolMapping {
+            source_idx: (self.symbols.len() as u64) - 1,
+            coded_idx: mapp.last_idx,
+        });
+
+        // Fix tail
+        //
+        let mut cur: usize = self.queue.len() - 1;
+        while cur > 0 {
+            let parent = (cur - 1) / 2;
+            if cur == parent || self.queue[parent].coded_idx <= self.queue[cur].coded_idx {
+                break;
+            }
+            self.queue.swap(parent, cur);
+            cur = parent;
+        }
+    }
+
+    pub fn add_hashed_symbol(&mut self, sym: &HashedSymbol<T>) {
+        self.add_hashed_symbol_with_mapping(
+            sym,
+            &RandomMapping {
+                prng: sym.hash,
+                last_idx: 0,
+            },
+        );
+    }
+
+    pub fn add_symbol(&mut self, sym: &T) {
+        self.add_hashed_symbol(&HashedSymbol::<T> {
+            symbol: *sym,
+            hash: sym.hash(),
+        });
+    }
+
+    pub fn apply_window(&mut self, sym: &CodedSymbol<T>, direction: Direction) -> CodedSymbol<T> {
+        let mut next_sym = *sym;
+
+        if self.queue.is_empty() {
+            self.next_idx += 1;
+            return next_sym;
+        }
+
+        while self.queue[0].coded_idx == self.next_idx {
+            next_sym.apply(&self.symbols[self.queue[0].source_idx as usize], direction);
+            self.queue[0].coded_idx = self.mappings[self.queue[0].source_idx as usize].next_index();
+
+            // Fix head
+            //
+            let mut cur: usize = 0;
+            loop {
+                let mut child = cur * 2 + 1;
+                if child >= self.queue.len() {
+                    break;
+                }
+                let right_child = child + 1;
+                if right_child < self.queue.len()
+                    && self.queue[right_child].coded_idx < self.queue[child].coded_idx
+                {
+                    child = right_child;
+                }
+                if self.queue[cur].coded_idx <= self.queue[child].coded_idx {
+                    break;
+                }
+                self.queue.swap(cur, child);
+                cur = child;
+            }
+        }
+
+        self.next_idx += 1;
+        return next_sym;
+    }
+
+    pub fn produce_next_coded_symbol(&mut self) -> CodedSymbol<T> {
+        return self.apply_window(
+            &CodedSymbol::<T> {
+                symbol: T::zero(),
+                hash: 0,
+                count: 0,
+            },
+            Direction::ADD,
+        );
+    }
+
+    pub fn remove_symbol(&mut self, sym: &T) {
+        let hash = sym.hash();
+        // Find the position of the symbol to remove
+        if let Some(pos) = self
+            .symbols
+            .iter()
+            .position(|s| s.hash == hash && s.symbol == *sym)
+        {
+            // Remove the symbol and its mapping
+            self.symbols.remove(pos);
+            self.mappings.remove(pos);
+
+            // Update the queue
+            // Collect indices in the queue that need to be removed or adjusted
+            let mut indices_to_remove = Vec::new();
+            for (i, sm) in self.queue.iter_mut().enumerate() {
+                if sm.source_idx == pos as u64 {
+                    // Mark this index for removal
+                    indices_to_remove.push(i);
+                } else if sm.source_idx > pos as u64 {
+                    // Decrement source_idx to account for the removed symbol
+                    sm.source_idx -= 1;
+                }
+            }
+
+            // Remove the SymbolMappings from the queue in reverse order to maintain correct indexing
+            for &i in indices_to_remove.iter().rev() {
+                self.queue.remove(i);
+            }
+
+            // Rebuild the heap property of the queue
+            self.build_queue_heap();
+        } else {
+            // Symbol not found; you may choose to handle this case differently
+            eprintln!("Symbol not found in encoder.");
+        }
+    }
+
+    // Helper method to rebuild the heap property of the queue
+    fn build_queue_heap(&mut self) {
+        let len = self.queue.len();
+        for i in (0..len / 2).rev() {
+            self.heapify_down(i);
+        }
+    }
+
+    // Helper method to restore the heap property from a given index downwards
+    fn heapify_down(&mut self, mut cur: usize) {
+        let len = self.queue.len();
+        loop {
+            let mut child = 2 * cur + 1;
+            if child >= len {
+                break;
+            }
+            let right = child + 1;
+            if right < len && self.queue[right].coded_idx < self.queue[child].coded_idx {
+                child = right;
+            }
+            if self.queue[cur].coded_idx <= self.queue[child].coded_idx {
+                break;
+            }
+            self.queue.swap(cur, child);
+            cur = child;
+        }
+    }
+}
+
+impl<T: Symbol + Copy + PartialEq> Encoder<T> {
+    pub fn to_decoder(&self) -> Decoder<T> {
+        let mut decoder = Decoder::<T>::new();
+        // Clone the current encoder state into the decoder's window encoder
+        decoder.window = self.clone();
+        decoder
+    }
+}
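+
+// The decoder keeps three encoders: `window` holds the symbols added locally
+// via `add_symbol`, while `remote` and `local` accumulate the recovered
+// symbols that only the peer has, respectively that only we have. Every
+// incoming coded symbol is adjusted against all three in `add_coded_symbol`,
+// so `coded` only ever stores the still-unpeeled part of the set difference.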
+impl<T: Symbol + Copy + PartialEq> Decoder<T> {
+    pub fn new() -> Self {
+        return Decoder::<T> {
+            coded: Vec::<CodedSymbol<T>>::new(),
+            local: Encoder::<T>::new(),
+            remote: Encoder::<T>::new(),
+            window: Encoder::<T>::new(),
+            decodable: Vec::<i64>::new(),
+            num_decoded: 0,
+        };
+    }
+
+    pub fn reset(&mut self) {
+        self.coded.clear();
+        self.local.reset();
+        self.remote.reset();
+        self.window.reset();
+        self.decodable.clear();
+        self.num_decoded = 0;
+    }
+
+    pub fn add_symbol(&mut self, sym: &T) {
+        self.window.add_hashed_symbol(&HashedSymbol::<T> {
+            symbol: *sym,
+            hash: sym.hash(),
+        });
+    }
+
+    pub fn add_coded_symbol(&mut self, sym: &CodedSymbol<T>) {
+        let mut next_sym = self.window.apply_window(sym, Direction::REMOVE);
+        next_sym = self.remote.apply_window(&next_sym, Direction::REMOVE);
+        next_sym = self.local.apply_window(&next_sym, Direction::ADD);
+
+        self.coded.push(next_sym);
+
+        if ((next_sym.count == 1 || next_sym.count == -1)
+            && (next_sym.hash == next_sym.symbol.hash()))
+            || (next_sym.count == 0 && next_sym.hash == 0)
+        {
+            self.decodable.push((self.coded.len() as i64) - 1);
+        }
+    }
+
+    fn apply_new_symbol(&mut self, sym: &HashedSymbol<T>, direction: Direction) -> RandomMapping {
+        let mut mapp = RandomMapping {
+            prng: sym.hash,
+            last_idx: 0,
+        };
+
+        while mapp.last_idx < (self.coded.len() as u64) {
+            let n = mapp.last_idx as usize;
+            self.coded[n].apply(&sym, direction);
+
+            if (self.coded[n].count == -1 || self.coded[n].count == 1)
+                && self.coded[n].hash == self.coded[n].symbol.hash()
+            {
+                self.decodable.push(n as i64);
+            }
+
+            mapp.next_index();
+        }
+
+        return mapp;
+    }
+
+    pub fn try_decode(&mut self) -> Result<(), Error> {
+        let mut didx: usize = 0;
+
+        // self.decodable.len() will increase in apply_new_symbol
+        //
+        while didx < self.decodable.len() {
+            let cidx = self.decodable[didx] as usize;
+            let sym = self.coded[cidx];
+
+            match sym.count {
+                1 => {
+                    let new_sym = HashedSymbol::<T> {
+                        symbol: T::zero().xor(&sym.symbol),
+                        hash: sym.hash,
+                    };
+
+                    let mapp = self.apply_new_symbol(&new_sym, Direction::REMOVE);
+                    self.remote.add_hashed_symbol_with_mapping(&new_sym, &mapp);
+                    self.num_decoded += 1;
+                }
+
+                -1 => {
+                    let new_sym = HashedSymbol::<T> {
+                        symbol: T::zero().xor(&sym.symbol),
+                        hash: sym.hash,
+                    };
+
+                    let mapp = self.apply_new_symbol(&new_sym, Direction::ADD);
+                    self.local.add_hashed_symbol_with_mapping(&new_sym, &mapp);
+                    self.num_decoded += 1;
+                }
+
+                0 => {
+                    self.num_decoded += 1;
+                }
+
+                _ => {
+                    return Err(Error::InvalidDegree);
+                }
+            }
+
+            didx += 1;
+        }
+
+        self.decodable.clear();
+
+        return Ok(());
+    }
+
+    pub fn decoded(&self) -> bool {
+        return self.num_decoded == (self.coded.len() as u64);
+    }
+
+    pub fn get_remote_symbols(&self) -> Vec<HashedSymbol<T>> {
+        return self.remote.symbols.clone();
+    }
+
+    pub fn get_local_symbols(&self) -> Vec<HashedSymbol<T>> {
+        return self.local.symbols.clone();
+    }
+}
diff --git a/packages/utils/rateless-iblt/src/lib.rs b/packages/utils/rateless-iblt/src/lib.rs
new file mode 100644
index 000000000..9b32b125f
--- /dev/null
+++ b/packages/utils/rateless-iblt/src/lib.rs
@@ -0,0 +1,7 @@
+#[cfg(test)]
+mod tests;
+
+pub mod encoding;
+pub mod sketch;
+pub mod testing;
+pub mod wasm;
diff --git a/packages/utils/rateless-iblt/src/sketch.rs b/packages/utils/rateless-iblt/src/sketch.rs
new file mode 100644
index 000000000..6238c33f3
--- /dev/null
+++ b/packages/utils/rateless-iblt/src/sketch.rs
@@ -0,0 +1,101 @@
+// FIXME
+// Mostly untested code.
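+//
+// `Sketch` is the fixed-size counterpart of the streaming `Encoder`: every
+// symbol is mapped into a preallocated vector of coded symbols, two sketches
+// of equal size can be subtracted cell by cell, and the difference is then
+// recovered in one shot by feeding the cells to a `Decoder`.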
+
+use super::encoding::*;
+use std::vec::Vec;
+
+pub struct Sketch<T> {
+    pub v: Vec<CodedSymbol<T>>,
+}
+
+pub struct SketchDecodeResult<T> {
+    pub fwd: Vec<HashedSymbol<T>>,
+    pub rev: Vec<HashedSymbol<T>>,
+    pub is_decoded: bool,
+}
+
+impl<T: Symbol + Copy + PartialEq> Sketch<T> {
+    pub fn new(size: usize) -> Sketch<T> {
+        return Sketch::<T> {
+            v: vec![
+                CodedSymbol::<T> {
+                    symbol: T::zero(),
+                    hash: 0,
+                    count: 0,
+                };
+                size
+            ],
+        };
+    }
+
+    pub fn add_hashed_symbol(&mut self, sym: &HashedSymbol<T>) {
+        let mut mapp = RandomMapping {
+            prng: sym.hash,
+            last_idx: 0,
+        };
+
+        while (mapp.last_idx as usize) < self.v.len() {
+            let idx = mapp.last_idx as usize;
+            self.v[idx].symbol = self.v[idx].symbol.xor(&sym.symbol);
+            self.v[idx].count += 1;
+            self.v[idx].hash ^= sym.hash;
+            mapp.next_index();
+        }
+    }
+
+    pub fn remove_hashed_symbol(&mut self, sym: &HashedSymbol<T>) {
+        let mut mapp = RandomMapping {
+            prng: sym.hash,
+            last_idx: 0,
+        };
+
+        while (mapp.last_idx as usize) < self.v.len() {
+            let idx = mapp.last_idx as usize;
+            self.v[idx].symbol = self.v[idx].symbol.xor(&sym.symbol);
+            self.v[idx].count -= 1;
+            self.v[idx].hash ^= sym.hash;
+            mapp.next_index();
+        }
+    }
+
+    pub fn add_symbol(&mut self, sym: &T) {
+        self.add_hashed_symbol(&HashedSymbol::<T> {
+            symbol: *sym,
+            hash: sym.hash(),
+        });
+    }
+
+    pub fn remove_symbol(&mut self, sym: &T) {
+        self.remove_hashed_symbol(&HashedSymbol::<T> {
+            symbol: *sym,
+            hash: sym.hash(),
+        });
+    }
+
+    pub fn subtract(&mut self, other: &Sketch<T>) -> Result<(), Error> {
+        if self.v.len() != other.v.len() {
+            return Err(Error::InvalidSize);
+        }
+        for i in 0..self.v.len() {
+            self.v[i].symbol = self.v[i].symbol.xor(&other.v[i].symbol);
+            self.v[i].count = self.v[i].count - other.v[i].count;
+            self.v[i].hash ^= other.v[i].hash;
+        }
+        return Ok(());
+    }
+
+    pub fn decode(&mut self) -> Result<SketchDecodeResult<T>, Error> {
+        let mut dec = Decoder::<T>::new();
+        for i in 0..self.v.len() {
+            dec.add_coded_symbol(&self.v[i]);
+        }
+        return match dec.try_decode() {
+            Ok(()) => Ok(SketchDecodeResult::<T> {
+                fwd: dec.get_remote_symbols(),
+                rev: dec.get_local_symbols(),
+                is_decoded: dec.decoded(),
+            }),
+            Err(x) => Err(x),
+        };
+    }
+}
diff --git a/packages/utils/rateless-iblt/src/testing.rs b/packages/utils/rateless-iblt/src/testing.rs
new file mode 100644
index 000000000..48a19edcf
--- /dev/null
+++ b/packages/utils/rateless-iblt/src/testing.rs
@@ -0,0 +1,50 @@
+use crate::encoding::Symbol;
+
+#[allow(deprecated)]
+use std::hash::{Hasher, SipHasher};
+
+const TEST_SYMBOL_SIZE: usize = 64;
+
+pub type TestSymbol = [u8; TEST_SYMBOL_SIZE];
+
+pub fn new_test_symbol(x: u64) -> TestSymbol {
+    return core::array::from_fn::<u8, TEST_SYMBOL_SIZE, _>(|i| {
+        x.checked_shr(8 * i as u32).unwrap_or(0) as u8
+    });
+}
+
+impl Symbol for TestSymbol {
+    fn zero() -> TestSymbol {
+        return new_test_symbol(0);
+    }
+
+    fn xor(&self, other: &TestSymbol) -> TestSymbol {
+        return core::array::from_fn(|i| self[i] ^ other[i]);
+    }
+
+    #[allow(deprecated)]
+    fn hash(&self) -> u64 {
+        let mut hasher = SipHasher::new_with_keys(567, 890);
+        hasher.write(self);
+        return hasher.finish();
+    }
+}
+
+pub type TestU64 = u64;
+
+impl Symbol for TestU64 {
+    fn zero() -> TestU64 {
+        return 0;
+    }
+
+    fn xor(&self, other: &TestU64) -> TestU64 {
+        return self ^ other;
+    }
+
+    #[allow(deprecated)]
+    fn hash(&self) -> u64 {
+        let mut hasher = SipHasher::new_with_keys(123, 456);
+        hasher.write_u64(*self);
+        return hasher.finish();
+    }
+}
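The test suite below drives the streaming `Encoder`/`Decoder` pair. For contrast, here is a minimal sketch of the one-shot `Sketch` flow from `sketch.rs`, assembled from the API above with the `TestU64` helper type (an illustrative sketch, not part of the patch):

```rs
use riblt::sketch::Sketch;
use riblt::testing::TestU64;

fn sketch_reconcile() {
    // Both sides build equal-size sketches over their own sets.
    let mut alice = Sketch::<TestU64>::new(64);
    for x in [1u64, 2, 3] {
        alice.add_symbol(&x);
    }
    let mut bob = Sketch::<TestU64>::new(64);
    for x in [1u64, 2, 4] {
        bob.add_symbol(&x);
    }

    // Bob receives Alice's sketch, subtracts his own cells, and decodes.
    assert!(alice.subtract(&bob).is_ok());
    if let Ok(res) = alice.decode() {
        assert!(res.is_decoded);
        // res.fwd holds the Alice-only symbols (3), res.rev the Bob-only ones (4).
    }
}
```

Unlike the streaming pair, the sketch size is fixed up front, so it only decodes if the difference happens to fit; the encoder/decoder tests below avoid that by producing coded symbols until `decoded()` returns true.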
diff --git a/packages/utils/rateless-iblt/src/tests.rs b/packages/utils/rateless-iblt/src/tests.rs
new file mode 100644
index 000000000..bf378d6f8
--- /dev/null
+++ b/packages/utils/rateless-iblt/src/tests.rs
@@ -0,0 +1,2343 @@
+use super::encoding::*;
+use super::testing::*;
+use std::collections::BTreeSet;
+
+#[test]
+fn diff() {
+    let alice: [TestU64; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let bob: [TestU64; 10] = [1, 3, 4, 5, 6, 7, 8, 9, 10, 11];
+
+    let mut enc = Encoder::<TestU64>::new();
+    for x in alice {
+        enc.add_symbol(&x);
+    }
+
+    let mut dec = Decoder::<TestU64>::new();
+    for x in bob {
+        dec.add_symbol(&x);
+    }
+
+    let mut cost = 0;
+
+    loop {
+        let s = enc.produce_next_coded_symbol();
+        cost += 1;
+        dec.add_coded_symbol(&s);
+        assert!(!dec.try_decode().is_err());
+        if dec.decoded() {
+            break;
+        }
+    }
+
+    // 2 is exclusive to Alice
+    assert_eq!(dec.remote.symbols[0].symbol, 2);
+
+    // 11 is exclusive to Bob
+    assert_eq!(dec.local.symbols[0].symbol, 11);
+
+    assert_eq!(cost, 2);
+}
+
+#[test]
+fn encode_and_decode() {
+    let mut enc = Encoder::<TestSymbol>::new();
+    let mut dec = Decoder::<TestSymbol>::new();
+
+    let mut local = BTreeSet::<u64>::new();
+    let mut remote = BTreeSet::<u64>::new();
+
+    let nlocal = 5000; // 50000;
+    let nremote = 5000; // 50000;
+    let ncommon = 10000; // 100000;
+
+    let mut next_id: u64 = 0;
+
+    for _ in 0..nlocal {
+        let s = new_test_symbol(next_id);
+        next_id += 1;
+        dec.add_symbol(&s);
+        local.insert(s.hash());
+    }
+    for _ in 0..nremote {
+        let s = new_test_symbol(next_id);
+        next_id += 1;
+        enc.add_symbol(&s);
+        remote.insert(s.hash());
+    }
+    for _ in 0..ncommon {
+        let s = new_test_symbol(next_id);
+        next_id += 1;
+        enc.add_symbol(&s);
+        dec.add_symbol(&s);
+    }
+
+    let mut ncw = 0;
+
+    loop {
+        dec.add_coded_symbol(&enc.produce_next_coded_symbol());
+        ncw += 1;
+        assert!(!dec.try_decode().is_err());
+        if dec.decoded() {
+            break;
+        }
+    }
+
+    for v in dec.remote.symbols.iter() {
+        remote.remove(&v.hash);
+    }
+
+    for v in dec.local.symbols.iter() {
+        local.remove(&v.hash);
+    }
+
+    assert_eq!(remote.len(), 0);
+    assert_eq!(local.len(), 0);
+    assert!(dec.decoded());
+
+    println!("{} codewords until fully decoded", ncw);
+}
+
+#[test]
+fn encode_to_decoder() {
+    let alice: [TestU64; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let bob: [TestU64; 10] = [1, 3, 4, 5, 6, 7, 8, 9, 10, 11];
+
+    let mut enc = Encoder::<TestU64>::new();
+    for x in alice {
+        enc.add_symbol(&x);
+    }
+
+    let mut enc2: Encoder<TestU64> = Encoder::<TestU64>::new();
+    for x in bob {
+        enc2.add_symbol(&x);
+    }
+
+    let mut dec = enc2.to_decoder();
+
+    let mut cost = 0;
+
+    loop {
+        let s = enc.produce_next_coded_symbol();
+        cost += 1;
+        dec.add_coded_symbol(&s);
+        assert!(!dec.try_decode().is_err());
+        if dec.decoded() {
+            break;
+        }
+    }
+
+    // 2 is exclusive to Alice
+    assert_eq!(dec.remote.symbols[0].symbol, 2);
+
+    // 11 is exclusive to Bob
+    assert_eq!(dec.local.symbols[0].symbol, 11);
+
+    assert_eq!(cost, 2);
+}
+
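+// `cost` counts the coded symbols transferred: a rateless IBLT decodes a set
+// difference of size d after roughly d coded symbols in the good case (plus a
+// modest constant-factor overhead on average), which is why the two-element
+// differences in these tests settle at cost == 2.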
+#[test]
+fn reset() {
+    let alice_0: [TestU64; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let bob_0: [TestU64; 10] = [1, 3, 4, 5, 6, 7, 8, 9, 10, 11];
+    let alice_1: [TestU64; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 10, 11];
+    let bob_1: [TestU64; 10] = [1, 2, 4, 5, 6, 7, 8, 9, 10, 11];
+
+    let mut enc = Encoder::<TestU64>::new();
+    for x in alice_0 {
+        enc.add_symbol(&x);
+    }
+
+    let mut dec = Decoder::<TestU64>::new();
+    for x in bob_0 {
+        dec.add_symbol(&x);
+    }
+
+    let mut cost = 0;
+
+    loop {
+        let s = enc.produce_next_coded_symbol();
+        cost += 1;
+        dec.add_coded_symbol(&s);
+        assert!(!dec.try_decode().is_err());
+        if dec.decoded() {
+            break;
+        }
+    }
+
+    enc.reset();
+    dec.reset();
+
+    for x in alice_1 {
+        enc.add_symbol(&x);
+    }
+
+    for x in bob_1 {
+        dec.add_symbol(&x);
+    }
+
+    cost = 0;
+
+    loop {
+        let s = enc.produce_next_coded_symbol();
+        cost += 1;
+        dec.add_coded_symbol(&s);
+        assert!(!dec.try_decode().is_err());
+        if dec.decoded() {
+            break;
+        }
+    }
+
+    // 3 is exclusive to Alice
+    assert_eq!(dec.remote.symbols[0].symbol, 3);
+
+    // 9 is exclusive to Bob
+    assert_eq!(dec.local.symbols[0].symbol, 9);
+
+    assert_eq!(cost, 2);
+}
+
+#[test]
+fn get_symbols() {
+    let alice: [TestU64; 10] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+    let bob: [TestU64; 10] = [1, 3, 4, 5, 6, 7, 8, 9, 10, 11];
+
+    let mut enc = Encoder::<TestU64>::new();
+    for x in alice {
+        enc.add_symbol(&x);
+    }
+
+    let mut dec = Decoder::<TestU64>::new();
+    for x in bob {
+        dec.add_symbol(&x);
+    }
+
+    let mut cost = 0;
+
+    loop {
+        let s = enc.produce_next_coded_symbol();
+        cost += 1;
+        dec.add_coded_symbol(&s);
+        assert!(!dec.try_decode().is_err());
+        if dec.decoded() {
+            break;
+        }
+    }
+
+    let remote = dec.get_remote_symbols();
+    let local = dec.get_local_symbols();
+
+    // 2 is exclusive to Alice
+    assert_eq!(remote[0].symbol, 2);
+
+    // 11 is exclusive to Bob
+    assert_eq!(local[0].symbol, 11);
+
+    assert_eq!(cost, 2);
+}
+
+#[test]
+fn test_remove_symbol() {
+    let mut encoder = Encoder::<TestU64>::new();
+
+    // Add symbols to the encoder
+    encoder.add_symbol(&1);
+    encoder.add_symbol(&2);
+    encoder.add_symbol(&3);
+
+    assert_eq!(encoder.symbols.len(), 3);
+
+    // Remove a symbol
+    encoder.remove_symbol(&2);
+
+    // Check that the symbol was removed
+    assert_eq!(encoder.symbols.len(), 2);
+    assert!(!encoder.symbols.iter().any(|s| s.symbol == 2));
+
+    // Check that mappings and queue are updated correctly
+    assert_eq!(encoder.mappings.len(), 2);
+    assert_eq!(encoder.queue.len(), 2);
+    assert!(encoder.queue.iter().all(|sm| sm.source_idx < 2));
+
+    // Ensure that the heap property is maintained
+    for i in 1..encoder.queue.len() {
+        let parent = (i - 1) / 2;
+        assert!(
+            encoder.queue[parent].coded_idx <= encoder.queue[i].coded_idx,
+            "Heap property violated at index {}",
+            i
+        );
+    }
+}
+
+#[test]
+fn test_diff_after_removal_of_symbol() {
+    let alice: [TestU64; 5] = [1, 2, 3, 5, 10];
+    let bob: [TestU64; 4] = [1, 2, 3, 6];
+
+    let mut enc = Encoder::<TestU64>::new();
+    for x in alice {
+        enc.add_symbol(&x);
+    }
+
+    enc.remove_symbol(&10);
+
+    let mut dec = Decoder::<TestU64>::new();
+    for x in bob {
+        dec.add_symbol(&x);
+    }
+
+    let mut cost = 0;
+
+    loop {
+        let s = enc.produce_next_coded_symbol();
+        cost += 1;
+        dec.add_coded_symbol(&s);
+        assert!(!dec.try_decode().is_err());
+        if dec.decoded() {
+            break;
+        }
+    }
+
+    // 5 is exclusive to Alice
+    assert_eq!(dec.remote.symbols[0].symbol, 5);
+
+    // 6 is exclusive to Bob
+    assert_eq!(dec.local.symbols[0].symbol, 6);
+
+    assert_eq!(cost, 2);
+}
+
+#[test]
+fn test_clone() {
+    let alice: [TestU64; 5] = [1, 2, 3, 5, 10];
+    let mut enc = Encoder::<TestU64>::new();
+    for x in alice {
+        enc.add_symbol(&x);
+    }
+
+    // Produce some coded symbols, then replay the clone and assert it produces the same symbols
+    let mut enc2 = enc.clone();
+
+    let mut produces: Vec<CodedSymbol<TestU64>> = Vec::new();
+    for _ in 0..10 {
+        produces.push(enc.produce_next_coded_symbol());
+    }
+
+    for i in 0..10 {
+        assert_eq!(enc2.produce_next_coded_symbol(), produces[i]);
+    }
+}
+const MAPPING_REF: [u64; 2000] = [ + 2, + 3, + 9, + 10, + 11, + 16, + 27, + 31, + 42, + 133, + 258, + 271, + 1969, + 2167, + 4018, + 19352, + 25878, + 38498, + 40513, + 64416, + 169690, + 740928, + 1519972, + 1968074, + 8208040, + 10400231, + 11695869, + 12610398, + 13893610, + 17287422, + 18066631, + 23660562, + 25888307, + 179863602, + 215986671, + 801810653, + 3402781043, + 4764885828, + 5866803996, + 7543450409, + 12720236490, + 13661179802, + 13753445813, + 
111830685287, + 195882855340, + 226531037293, + 242336935646, + 1129731321329, + 1294351746820, + 1599276494338, + 5057796363277, + 16408421055474, + 22599230754570, + 59952571173986, + 78564300477335, + 89699944202590, + 157610705439900, + 219719584416704, + 481011332100199, + 769537964478830, + 1951573105410798, + 10085780461085687, + 13787072445283817, + 37342710031612493, + 59480048735071153, + 121481616280401865, + 793200340131224137, + 2412173016519570249, + 3146877963700278857, + 3268001426615912441, + 5674548168104436217, + 5956047874350559353, + 8538589375516646521, + 9187948460596693497, + 9287078302212508649, + 11901191482218757609, + 3039525328105790953, + 3160019853479824921, + 3316894547750845017, + 3360199901558658041, + 3973211352801976185, + 4495634179432494649, + 13839094117823393337, + 1873544722954184249, + 1978397965294508569, + 4106526400364231705, + 4106526400364231704, + 5748900985946378264, + 6413241262779981208, + 7540893577959613720, + 14503551309577267480, + 15808630469550402584, + 1342473340942458392, + 1424762746350639176, + 1684460149527201672, + 2379581915029727240, + 3747569155815616776, + 5592924978515277576, + 6066607445344116232, + 6390483890218260936, + 7400848894829915464, + 9114065277016819016, + 16187285940290710856, + 7849920258579342664, + 11101256641551275848, + 16129462018875097928, + 16129462018875097927, + 4801353112721507143, + 6136779504944517447, + 7503218853735661639, + 2080860561266718791, + 10786716632668951623, + 10786716632668951622, + 16961429938863261766, + 10311279425160263750, + 104292808938934342, + 134112991484844162, + 198471139819754274, + 733115700081127394, + 2089443019692571106, + 3369894856054728418, + 6050017276115829474, + 11936690373413264098, + 15562840077570483938, + 5636000350790340322, + 6351779967804027490, + 12469315387929274978, + 12469315387929274977, + 14633785144808694369, + 2279805332619779681, + 2419514708606640513, + 4704777080346762369, + 5201870862512480833, + 7012842863031724097, + 14150149540635211841, + 1331373460529622081, + 2149758645815525441, + 2329291395182032929, + 2329291395182032928, + 4232619690558051872, + 6056262690198436128, + 6468462729948783456, + 11038122849812092768, + 2591110160050411360, + 7672237843530619744, + 14304804239604551520, + 14304804239604551519, + 15619874751055778399, + 17743347839792802143, + 6058358948741427551, + 9303564995229632863, + 17729416846251748703, + 5369005043210817887, + 8569050157856370015, + 8569050157856370014, + 8569050157856370013, + 3227349021934157149, + 3535287945783948573, + 3935946846676377821, + 4236074163382057309, + 9107024244014938461, + 10914459567294415965, + 11667108085865846109, + 3662179474506839389, + 7967130254775215965, + 8830960836245594589, + 14905851974320035293, + 1007073299401230813, + 2245994131707039453, + 4143338838841905629, + 4337517823382254813, + 4337517823382254812, + 10109133361272261852, + 10134678711284725404, + 10134678711284725403, + 10134678711284725402, + 8941302127777003162, + 17843741186085099162, + 5949065987573128858, + 10547605048042827418, + 13614866558375183514, + 10704794643328619674, + 10704794643328619673, + 11098375608905362777, + 774368807631029593, + 1060479282120162617, + 1156698037664071209, + 1590750824257224489, + 1960183947388709929, + 2204782844626302121, + 5010682958806542505, + 5010682958806542504, + 5996548184666151848, + 10756391040068727720, + 14041383793343730088, + 16597313514341048744, + 9345895310700212648, + 10847562036107868840, + 10875646772256257720, + 10875646772256257719, + 
11013737712730298087, + 16494856621529915111, + 8990302363676042983, + 1212708886654643943, + 1546773102096956071, + 1855797033275182567, + 1867403762412013191, + 2121163876524052615, + 2189469989128491199, + 4048073549205111999, + 4696327277030687039, + 5109128328956650239, + 11032757470468873983, + 11340821017985174975, + 13045245911355948991, + 16294018844911885759, + 15012600996198518207, + 5667489610198021567, + 6987594811386372543, + 11555425923718443967, + 4397077840473162687, + 212688111898520511, + 511935402764606847, + 765931598683386463, + 935962892665704287, + 1074947402708844895, + 2843137294803817311, + 4071635093069174879, + 13450160332418609247, + 17591506076150091871, + 17591506076150091870, + 17591506076150091869, + 17591506076150091868, + 3996360260946153564, + 9251564056549053532, + 12080025123908720220, + 11451311144194158172, + 14287408862835552348, + 14287408862835552347, + 14287408862835552346, + 7697035038012051546, + 10490906337534715482, + 12092945331948120410, + 12092945331948120409, + 17639632411565625689, + 4968025126303783257, + 7583734645732994393, + 10382747139942565209, + 10382747139942565208, + 749351793097401688, + 3115043874914375000, + 3375666296643347192, + 3581616863776224760, + 4073229105489566392, + 7264348951097761976, + 7264348951097761975, + 12460208134977411255, + 12460208134977411254, + 14822130477695273654, + 14896883595007080502, + 13481528102242852918, + 4504929675373891638, + 9059438753399210550, + 15536702655252100662, + 18212473996666107446, + 9735007041578853942, + 16721439083358467638, + 2006208160468298806, + 4121422338237695030, + 4616653305766488054, + 8427675690005462006, + 9297686377550328182, + 9765582082340188982, + 11573330517875114294, + 1648438546803889462, + 1824050446740942806, + 3242486533589436886, + 3455833905520347318, + 5746040762568682678, + 16919766134631230646, + 5495077392503105718, + 6505409277154677814, + 6853867560213600822, + 6872156644204672306, + 7615281762359784882, + 8879680434793755570, + 9146209630066408498, + 14340780210831361074, + 3677427333757067314, + 5936714088789761586, + 5936714088789761585, + 6924509312226656049, + 9298149807732436273, + 11442751604756895281, + 13627510964669988913, + 3242600861773065265, + 4418716695301362481, + 6279770279865943089, + 7080614287168706225, + 8868132673244289457, + 13250878510997881777, + 11140319760833881009, + 1386651027510632369, + 4879455484125002161, + 9807258256661681585, + 15254328444464814513, + 16861185821523256241, + 720561401534259633, + 1213326202632017457, + 1324923267627093009, + 2642886930942882065, + 4351366928436156177, + 10001443333026609937, + 4117712398055246609, + 8041725503837959953, + 5690686515061557009, + 9267760599216976657, + 13145745567287923985, + 535121766751720721, + 1776278538722383889, + 2819231399375208849, + 3156258793919518993, + 3195505683648100521, + 3909151620718254633, + 4041707011506702649, + 4415924873336150137, + 14623990791718021241, + 15304369016570783865, + 15304369016570783864, + 15492985554779062456, + 591712209572007096, + 1141041567186543672, + 6426935400094639160, + 12198487325760820280, + 12198487325760820279, + 1985207128622509111, + 4108397071827430967, + 5285663325754501687, + 8567963281274080311, + 9690436044127668279, + 13822575389986857015, + 16319747630671061047, + 16319747630671061046, + 6838110274107079734, + 17493640444133237814, + 2434946842334960182, + 2748641586803816246, + 8664059778309442358, + 17310887894315394870, + 8061747692493136694, + 10325187897630181174, + 13068215639429351222, + 
14393009318676563254, + 16151308358003295542, + 16151308358003295541, + 16151308358003295540, + 16151308358003295539, + 3279103478365257011, + 7932421138780178739, + 10239360785730409779, + 16269469339889926451, + 17301577628376316595, + 7663399801777203, + 10111713497201638, + 16793534378646062, + 37796564450121254, + 40510573942190703, + 61095404467931503, + 90664919420718003, + 164225949503925331, + 167703546435912332, + 450030985582817068, + 607029827812203628, + 1715165266276769132, + 1982206033663210028, + 3274496668076299820, + 14632154208406124076, + 15193245561180224684, + 15193245561180224683, + 15193245561180224682, + 15193245561180224681, + 14693257013194747049, + 14693257013194747048, + 1260451792652077224, + 1817314206087697320, + 4598541984669108136, + 5065123073334363048, + 5065123073334363047, + 7913466063836160423, + 10034590712668692135, + 11779663320141337767, + 1852993380154852519, + 2029240397134927495, + 2527066433590579655, + 2670066914385354647, + 2794783324967854263, + 6695454003378708151, + 7054920514771564791, + 7392279547809657207, + 8460454710182186999, + 12100326690382434295, + 15502190327429098487, + 15376601188307016695, + 18367304860456649719, + 438745625002594359, + 455737588787321151, + 607876801982513823, + 691584183040083247, + 3782714566037112623, + 5582586217547786543, + 6033908658628388591, + 6263042309660455247, + 8992676503636456271, + 11456094689090022223, + 12253094249166591439, + 5004884290622160335, + 4802888933591418319, + 11602584180111897039, + 2551254008312383951, + 2858000437557115151, + 2893323042672188095, + 3490721800283430335, + 3599211071732072191, + 4146049438355731583, + 7785116432328217215, + 13662863109004246655, + 7434103734628214399, + 7598277463587282431, + 13167997918261143039, + 13649810065555690495, + 16219671385903446015, + 7124076053489528831, + 8095954420648467711, + 9455768188905832959, + 16006891143521369599, + 18304487460635246591, + 5985568248488173567, + 6670492080163209343, + 13006163544356538495, + 558981499886182527, + 734203918081934655, + 1306311136489416575, + 2199080194124775423, + 2204014970168438464, + 2677532920992289792, + 7763619023790427136, + 8061537008911296832, + 7994533451102937408, + 15733937288311780672, + 3645579292257011008, + 7575506083351276352, + 10522321382105718080, + 11627267256141996480, + 15529471346967541696, + 13518346408563013568, + 13789150794918120160, + 18295921206246532832, + 18295921206246532831, + 18295921206246532830, + 18295921206246532829, + 2455860669762837213, + 4863652702128650973, + 12801632327299876573, + 1744041328380590813, + 6120073262166574813, + 6405439158612758749, + 6803574031023280605, + 7332859202147331613, + 16963546894591522333, + 9663176594168378909, + 17289597825083598365, + 10595846411042084381, + 11237742828835274141, + 13521168706418817693, + 13521168706418817692, + 13521168706418817691, + 3734849715481359003, + 7059636390507414171, + 8098477720558839963, + 15655798136345616539, + 9053556749004941467, + 16654286657261163675, + 2438928347775514779, + 3499568405059315099, + 3653023030263055067, + 5734097350188369115, + 1796150891847507163, + 5297480757649161435, + 6737428511491960283, + 6737428511491960282, + 7862932842544314202, + 7862932842544314201, + 12628502756558249817, + 14066101229828399961, + 6506676357252694873, + 13970725504127661913, + 16518868686146604889, + 3182138951096461145, + 3467098968746149241, + 5824296719264797561, + 6299069438895972473, + 10852046434976852601, + 12647288613099902073, + 12647288613099902072, + 12647288613099902071, + 
1197601911237631095, + 2536000324662698103, + 3360806796937775095, + 4264479948225387511, + 5648548827899768823, + 12360501644630577143, + 12575105432591125687, + 15414839324730960567, + 16831236938747147191, + 17386299312283135543, + 5350865444586980919, + 5350865444586980918, + 5846325935525269238, + 8493997117802679030, + 8493997117802679029, + 15233343920734526197, + 15233343920734526196, + 15233343920734526195, + 10402336415933627123, + 13306747317797690099, + 172423886554456819, + 235093768354044755, + 274278928091726155, + 815555173724480971, + 1358224609319414219, + 4114146503939427275, + 4114146503939427274, + 4842798692646627274, + 6073989416414690762, + 6387218905996121674, + 17768348201205147210, + 213827373535185866, + 224999651994780120, + 371661416072733752, + 512788446130206536, + 637004821074715992, + 812400060704099960, + 1010280335095798584, + 2011734429044706360, + 2140189181934856648, + 2540138820600191880, + 3392225995897270792, + 5877175894797278216, + 6444215418135477128, + 9589006231051259272, + 14356492983432860040, + 2141030629822185864, + 10291095590779518344, + 10804897693298627784, + 14434227708324260040, + 17367217914520751304, + 13977268904173001928, + 13977268904173001927, + 14201142646666160295, + 471405179039873191, + 628042997769406695, + 637937620029070897, + 1346442351500603441, + 1897346492384765681, + 9569736632675697393, + 11907002184982130417, + 12603131827748757745, + 12603131827748757744, + 4203489835472029936, + 14909711131972129008, + 13703067797726363888, + 13806918162867601904, + 13806918162867601903, + 17326301439671113199, + 5855221972805893615, + 8135803613048781807, + 2978513161234545647, + 3191627864187428431, + 9956375716763635279, + 10121944667464956687, + 10564601358692830543, + 4864112268851582287, + 1358700452733969743, + 3122688545059995215, + 3184823747257893431, + 5121220522120640567, + 12551354916339080247, + 16087851325076568631, + 1567013784948739127, + 3331402000051755319, + 7108355168516743479, + 7257938556249012503, + 12395575137879764247, + 2412335946019374359, + 2527260729504634535, + 2676081941794603335, + 7509834828231417159, + 12967781164392852807, + 1122689003859337543, + 1738081118764021191, + 2471327875145313863, + 2485305357076994673, + 2599724040082483329, + 2797044173890111617, + 4123438405324609665, + 10722935475910631553, + 11539252334849213825, + 12982136989933200513, + 13063484018031778353, + 14675222137969350961, + 5509222644882821425, + 6097201728669887793, + 7561455278321717809, + 10749712198971170353, + 10749712198971170352, + 11118451738346123312, + 7070221022846707760, + 7850211155200323632, + 7850211155200323631, + 10895131596388989999, + 10895131596388989998, + 3138488928871128110, + 4628097130719688750, + 4705375842631349630, + 6004909730802699134, + 6643109853932156286, + 11767303352613623166, + 3821486401272016254, + 10931270539226161534, + 16933746660186269054, + 8230936049701139838, + 12737286266577481086, + 12737286266577481085, + 17012244533832036733, + 18180890801861117821, + 4929885343274308477, + 6744103390834400125, + 7591044803972387581, + 17676747883797326589, + 1710654960804591357, + 1826830537258005069, + 2851993802345766861, + 4014290189392070605, + 4606220398090404685, + 1527257684351138637, + 1845586286293585805, + 2523793153998591757, + 2577285662229223429, + 4381791019784655109, + 12284476459794040069, + 15622054220470734597, + 3993037773802959621, + 7001729604456251141, + 8972819200975527173, + 14045638883308540165, + 14045638883308540164, + 14106481026365820660, + 
14106481026365820659, + 14106481026365820658, + 18198746188065431282, + 3897442175926695666, + 3954958411078334402, + 1714165145075705794, + 2589591962139182658, + 2682979438433656098, + 6034984240696378658, + 7395936397114999842, + 16473600213173004322, + 4687821345277505570, + 7340934609458844706, + 10505047417595489314, + 16326696246595638306, + 2479502544030752802, + 7386151824240192546, + 9539142366847393314, + 1388848535703850530, + 1953073389858627682, + 3733296676614538594, + 6527091105387786594, + 6527091105387786593, + 6527091105387786592, + 6881586890081991520, + 12464956017750928224, + 677191353117629280, + 718386929126216032, + 1869622718624958688, + 2054581349096977120, + 2443112915582636128, + 2839173060003418464, + 5195927576969998688, + 9826550571260129632, + 12028162567179664736, + 16572473553400646496, + 2825960339134377824, + 5149349389599241056, + 10955254324236911456, + 16188401955194988384, + 2370942892202845024, + 5846634392391080800, + 8185994816344251744, + 14023610316681234784, + 4443301396598636896, + 5930489040908629088, + 10255336387203877984, + 10255336387203877983, + 11433565865794755423, + 1170766804768340831, + 1435283324628909023, + 3953284687349114335, + 4457245622288501727, + 5469474268941294943, + 10441150964351233375, + 10441150964351233374, + 11601251111242418014, + 1763195542727753566, + 1837728948154195678, + 3623280471072208862, + 4105534738515041118, + 4241284085090525790, + 10717726151308439134, + 10717726151308439133, + 4388233340257226333, + 5626040806428601181, + 6991682580491271517, + 8935411939880060765, + 14195493671570302813, + 15864339428889189213, + 15864339428889189212, + 4071698720759076700, + 5763105875874021980, + 5763105875874021979, + 12098150707851634267, + 13043237370565116763, + 3500436186422466395, + 3506542149752768640, + 7878312341747334784, + 17035409291051351680, + 6854293806235519616, + 15999300030099154560, + 7501375900319078016, + 11743171231527586944, + 12290431523090949248, + 1234559374866811008, + 1437766224176868480, + 2453919035704952192, + 2877263367353987584, + 4595086108069975040, + 5143798563027738944, + 6030783298970488384, + 18289787856074464832, + 10040684728529526336, + 12674337089460002880, + 17234889232432624704, + 16629229489473137728, + 7712984413556028480, + 7753663321175646488, + 10531078925226205976, + 14887937489276995864, + 15503293062566559896, + 12755487125347301528, + 18248150475905938584, + 5573294874704658584, + 8073790294759450776, + 9053494746822062744, + 9053494746822062743, + 12284951255820024983, + 3712006074869777559, + 4109618009464899415, + 4323262633161249367, + 7058203876470315095, + 8290772181976291415, + 8421869164889668647, + 10661024789238288679, + 436248196341287207, + 450784978247787933, + 1800543283741090717, + 3099944133626130077, + 5906692513761556125, + 18334543399677120157, + 1009855006753453469, + 1598356742800940061, + 2915969723578597149, + 4313139855520024605, + 10887788444399547421, + 17049939406656320541, + 16569748191391730717, + 3498163336521603101, + 17244879048921204765, + 5712562001479069725, + 7616053037155112477, + 7616053037155112476, + 10352391394980493852, + 12451468793125596700, + 2957476027545890332, + 3097371398935375084, + 3280589456226898444, + 4608344988203708428, + 5328517169964254732, + 8371884177507782668, + 11714007261664256524, + 11714007261664256523, + 12525986711352066955, + 12525986711352066954, + 14270455956347697290, + 2670973179265179786, + 2689591541810142914, + 3746005967071017794, + 4536832856672851906, + 10544004865515061186, + 
706351010424615874, + 1573571788235460546, + 1581582820870426624, + 4152940247817247744, + 4655881756689642944, + 5701926937485794240, + 11026727211824601024, + 1666168095103582144, + 2071546857553993920, + 2326026564669060960, + 3755889887531971936, + 3994454971245137984, + 5094376675792070720, + 6416051241726226240, + 6973051309459127296, + 10816889179287808512, + 10816889179287808511, + 14626133090305324031, + 14626133090305324030, + 15406527937074692990, + 7287832884711306110, + 14429135190850205566, + 15833307679015403134, + 14205279123860727422, + 16874140183841068158, + 17084441953362490078, + 17084441953362490077, + 17084441953362490076, + 127889397896271580, + 252450723139095900, + 439966331021094044, + 449847069835553148, + 560163700073906268, + 1103206687310252828, + 2407368016850245660, + 7431141481035033628, + 9606885333568885020, + 2534543663765920028, + 3726098703991462428, + 4181157371493861660, + 6608480044305049372, + 8191577378584003868, + 1961290141505223964, + 2916086701140409500, + 4540217669290830492, + 6053557941134170524, + 6053557941134170523, + 14544476210220695963, + 16357457233179619739, + 17517284623978993051, + 17517284623978993050, + 3016394599199373722, + 3670588966335771546, + 4924856422828138906, + 6309613834694717594, + 272184248183302298, + 607330080953279642, + 714922197231861546, + 873698777898716106, + 1219573271260265418, + 1292531681867363882, + 1594016567402486634, + 2683576885721744618, + 3751931850710722922, + 521535283699659114, + 708486852023139178, + 1025742567306535914, + 1446138811524226986, + 2389699280010874026, + 6923280280055377066, + 8198842818068085418, + 8198842818068085417, + 12320291559544198313, + 9400853668371817641, + 14040587825164758185, + 14040587825164758184, + 15884176913516887208, + 1899270425208165544, + 4004338500207248040, + 8979276254422019752, + 10444890577497397928, + 15606643062409333416, + 8459523305353280168, + 8862507401590009896, + 10095970725061301544, + 11229941160548655016, + 12280920101255863592, + 8081130273348791592, + 9773011318298942760, + 11341193956986566184, + 1376969343288155688, + 2881554194483245096, + 3089740478265464552, + 3327339550474649160, + 3364397181626040856, + 4893221168889581848, + 3437775516054421784, + 8896002838824390936, + 2686469195368943896, + 4669936156622788888, + 6055702296815669016, + 11724632063987535640, + 13109529812107704856, + 12433935199602995736, + 15301325603370840600, + 17475976749011699480, + 17866445371019528536, + 3625267521999371096, + 4258706793516922072, + 4765357880920806040, + 5863817499257608088, + 5863817499257608087, + 17088763835666046871, + 17088763835666046870, + 17680901380690752278, + 18292610953008499094, + 11148215825253511574, + 7816817600800979350, + 9114247444007469206, + 1511388835463411862, + 2719218459272579990, + 7844460822910522262, + 12975515492048395158, + 6927710224568970134, + 3848479706511250326, + 3969950389959337510, + 4584901265357836070, + 6283991776912298534, + 11045952358039764518, + 15855504234265897510, + 4306872465958984230, + 5201815689055328422, + 6111920562831923878, + 6568196694870762214, + 8571158140569352678, + 10445048960766348262, + 13470277966276812262, + 3786135201332197862, + 9822189696817861094, + 9822189696817861093, + 11644156323941638373, + 11754059577782094037, + 11366906098552948949, + 2118095597433926869, + 5265636449667624661, + 5603343442595632789, + 7079599552202442133, + 9946140429763868053, + 9027435279280299413, + 9027435279280299412, + 9445610049022018004, + 11910097446707630036, + 11910097446707630035, + 
11910097446707630034, + 255798921602212818, + 582875590123583762, + 654432092657539162, + 941825204317154138, + 969197382706799602, + 2101956294464611570, + 3016996606868097650, + 3745172501298307442, + 11296382027599154546, + 2216566037180491122, + 10587862906952171890, + 10587862906952171889, + 10657868832013220185, + 15524148418710711641, + 3392026838324267353, + 16497004712743807321, + 16497004712743807320, + 6705634318082896216, + 7841262213936617688, + 11354720427057428696, + 11794016347989165912, + 12882298425639143128, + 15065232187980461528, + 7360973375854243288, + 10331666052933145048, + 331022285745113560, + 412410177905431784, + 654158848034943848, + 1068648528843927976, + 4619486751907519400, + 5986393043440491176, + 6163231393693450696, + 6226638632947000752, + 8182803978756988592, + 8895079814176428080, + 12521664342870848048, + 13794982234146390320, + 17375865019722643760, + 6954619729724510512, + 14430893314247995696, + 16167225781188536880, + 16167225781188536879, + 18033100615745940271, + 18033100615745940270, + 5166133196801984302, + 13332115378936656686, + 15400649676237426734, + 10997831161920880686, + 10997831161920880685, + 13900956262600556589, + 4442348829623821357, + 6127194445592804141, + 6538002989186488813, + 12386034965187148269, + 997030431326847469, + 1003306887430542907, + 1690378565878190395, + 3003020181478337083, + 3074328476100917771, + 3236902413453164907, + 3971277471748861803, + 4255240429905547467, + 4519426003522297483, + 5865413147941299851, + 7319554201538699403, + 8451400965643207819, + 9834667420713259147, + 11296956657983972491, + 8599624391656256651, + 9922778480456777867, + 9922778480456777866, + 13859994480388296842, + 13859994480388296841, + 14474714474045948169, + 14474714474045948168, + 308875542952938248, + 404523171942102200, + 410070381125591131, + 425260101746810739, + 600586393441650515, + 801442085161952243, + 914610083719041603, + 1608627392804901059, + 1932062379091445699, + 3769647948093315267, + 4679791407024666947, + 6229040244644205123, + 7633030891562655299, + 10308588904585689155, + 10308588904585689154, + 11207595291438600898, + 2769168709690757826, + 4789340567267173826, + 7104429299009222594, + 7104429299009222593, + 11542920603218105281, + 8709531402246501313, + 4241164543833056193, + 4583181457551390913, + 9036754188274833601, + 11239752604911742913, + 11239752604911742912, + 12695432009250846400, + 12821757634110207024, + 501680895386929200, + 843927800961977968, + 925576475388633440, + 1509171386611760736, + 1544348339008032076, + 1766732708154041516, + 2782266194809203500, + 4033111405577832492, + 9502072876975814700, + 12925150649490378284, + 598115884705507884, + 1156471006175156204, + 1176427771173714956, + 2202535343653263372, + 2784182828086505996, + 3038830279116726796, + 5046909343961458188, + 11609062262454383116, + 14846082047594833420, + 15952546044482276492, + 7145632425167846540, + 7629776477658622476, + 11123678116183404556, + 14954971249349444620, + 684017940032738316, + 913920805378072524, + 1720904179991951436, + 1778359420320291884, + 1787356367637573271, + 2670009146636139159, + 3067346807336122839, + 3922551525962438231, + 14235489780412950103, + 9501770271444121175, + 10221580565669939415, + 11861197078883820247, + 2439222362156999383, + 2748558996220749207, + 5922781385036829079, + 12469276121924187543, + 16261091325936184215, + 1065302461586130839, + 1313194656961708183, + 4258499049233723543, + 6150849355889500567, + 7774774181373209751, + 8386447789819160343, + 8390698769112166150, + 
11233626514830742790, + 11566095727518764102, + 11789919516503042982, + 11789919516503042981, + 2844853441099072421, + 8430625674097104805, + 8480202722222333421, + 17732091128504853997, + 17732091128504853996, + 17732091128504853995, + 4414796066977755627, + 4414796066977755626, + 7451487185131929066, + 8555891019940854250, + 11593992013280065002, + 4502633820883023338, + 12979650325278875114, + 15954703408495532522, + 3859618228383229418, + 14311189686992346602, + 4987730461245840874, + 5198082454964545450, + 5327507181074753274, + 5327507181074753273, + 5327507181074753272, + 10468625485069993720, + 17275100207696341752, + 17275100207696341751, + 17778797259624325751, + 1997984705122499191, + 2099971728119725447, + 2224378042729169959, + 4327612543524775975, + 4812435553609650599, + 5962745053204333479, + 8325577189824028583, + 9978972728687877287, + 10077806047741341911, + 12202317357952467415, + 1352910984421435863, + 2338734967841443031, + 2483359055622301271, + 5922378700196194391, + 6328551558072674903, + 10424031416437693015, + 10698781916674329879, + 17169162390675406103, + 17368133612914365751, + 3083368016455255863, + 4832066269266587191, + 7300872927079646775, + 7300872927079646774, + 14750905298401877558, + 15049730970460232246, + 15049730970460232245, + 16661469099975841333, + 16661469099975841332, + 3403833985131825716, + 3422529930114549580, + 3482057606134175868, + 4936449609935147132, + 5391218451900543228, + 8850335501755297532, + 8850335501755297531, + 14080581041774116603, + 17640448369345624827, + 3633710964280852219, + 9088599404451745531, + 12008797913545080571, + 12868687079378866939, + 14424365779403309563, + 15499639983597769467, + 15499639983597769466, + 3136919987983454970, + 3645489439360431290, + 4529474371207286586, + 4806640043543531546, + 5411160074265142554, + 7085377384168862234, + 17802468599890317850, + 2424684770854237210, + 2907974234933496282, + 10751197726656609754, + 10751197726656609753, + 14414888153423757785, + 14636321645467660665, + 1786355786501504377, + 4127333380259304825, + 4929941538268599417, + 12414952944004203641, + 13670081667276227705, + 2120754810724071545, + 3179329737692592377, + 6620442040589215993, + 8490623194097208313, + 11695171555897624057, + 13177275465083618297, + 211546157117899769, + 320749467525961337, + 349213088045767085, + 412131893202623101, + 506856187678209373, + 666784164608719357, + 1303371303855810813, + 1930378421758479229, + 2621188798574144893, + 7600594111377204605, + 12674460081327185277, + 15377802127551755133, + 3006242324990722941, + 5765794533254929789, + 7335224966942379389, + 9623910184685666685, + 9889355523019821437, + 14758752567485514109, + 14758752567485514108, + 4019865820151266684, + 11601797397321334140, + 1869227528538967420, + 1881779672627538978, + 3151804677483153442, + 4133104632852513186, + 5673195104985093794, + 5734861036930332370, + 7222044071019309778, + 13459016231996628690, + 13507261253857721130, + 15706222877530619434, + 1346331933656094250, + 2637946847932604458, + 3543506397364355754, + 12055956501713318570, + 16891105159317728938, + 17246851038874396330, + 7482836825459697322, + 9465180803964927402, + 10007800834391580970, + 10007800834391580969, + 1581693141309634857, + 2166897738768590121, + 2743733508801417513, + 3808062288019684905, + 7165397493723556905, + 7196100967729997273, + 13705246706122409433, + 15739647594445283033, + 15739647594445283032, + 15739647594445283031, + 15739647594445283030, + 2714196810552409814, + 2932290006255658006, + 4772786156303073302, + 
6390622992451065110, + 7068190313846733846, + 8587742380791638806, + 8587742380791638805, + 10326175651737335061, + 14325120451967594261, + 16225511881989943317, + 16622105063286023765, + 2593052609205260373, + 2997012664878212437, + 3393813252265404053, + 6427403231910434965, + 5168247330128691349, + 12047491644632076437, + 1670969891992164501, + 3960144567613356437, + 10107562045706182037, + 15359490314407223701, + 644200674478206357, + 741246148779854245, + 1163957745478030181, + 6408972696640758629, + 8133844190775216741, + 8133844190775216740, + 12081498339891932260, + 18343369839292402788, + 232489412116415396, + 978374700451243428, + 1927710544952613540, + 3303174642789874340, + 9693435142129229476, + 8832448830115059364, + 17368039572107397796, + 10102480269991199396, + 5551937437376123556, + 8960430093689752228, + 10646289463540592036, + 10860742166713500580, + 12077160696708896676, + 12744119942175153060, + 14907604740084203172, + 8649887888469272228, + 16617867066405959332, + 5323522536854555300, + 7127381856165292196, + 8111022323637701284, + 9279382060067231652, + 1859151779839936420, + 2305484865548678500, + 2356392258033422404, + 2577927251842197668, + 6275799879767040164, + 6924685622465520932, + 17430591832560516388, + 2247737942245957924, + 2663982786741361316, + 4380330254404428452, + 4758128829898996452, + 10053967586463790820, + 10492127106585750308, + 11421197087346901924, + 13928905398154182052, + 6320151068089011620, + 8494088921686834852, + 10492404496671416996, + 11770454605043346340, + 11770454605043346339, + 15223109380615005603, + 15223109380615005602, + 9288930292358163874, + 9288930292358163873, + 11726653682520058785, + 7321875519128032161, + 7321875519128032160, + 13301696272522470304, + 10095356829886759840, + 11590790975408607136, + 11590790975408607135, + 10251447202807459743, + 17428114707010782111, + 3232228983672833951, + 8770673548489468831, + 807747085469232031, + 4735243734526799263, + 7562856761840869279, + 9743729014510387871, + 14558973271077953183, + 401258500005249183, + 524250169130960847, + 1555012228561607631, + 4000986154528494031, + 6358303210209832911, + 5298727189516636111, + 5361963089431269567, + 5361963089431269566, + 6629695073173940670, + 3435743172048714174, + 5689623086085965246, + 6029950830251935422, + 9424306115056016574, + 16448660604637728958, + 4941556264197980350, + 6524007462261048766, + 7638470506911088190, + 7638470506911088189, + 7638470506911088188, + 8856308810634172732, + 14330603763862833468, + 7043708083563106620, + 8932319076854850108, + 9301924881839394876, + 11138001737289404988, + 921640518520305212, + 1794724617224308412, + 4152066157713517756, + 10953503744565702844, + 10953503744565702843, + 11820032308391020987, + 14655286063716152763, + 17794332976522127291, + 14146006795330742203, + 15747354331172679867, + 15685435999577026747, + 18016708702998273211, + 10585519808871668923, + 12482290391525052859, + 11955677820789885371, + 17174672636861415867, + 15029372885326199227, + 1222465468443808187, + 8702421068484227515, + 11387021710996459451, + 13700551950374941115, + 8387941606501577147, + 8875606637956156795, + 13371549380212185979, + 14002991779568213371, + 3568478738044616059, + 9070287937134341499, + 15632100548063213947, + 15632100548063213946, + 442866334230488442, + 558554158313330442, + 1302996691653858698, + 3546747871275822986, + 3680926818587527898, + 5306784414155800282, + 14199949967880532698, + 14199949967880532697, + 1404791177162242777, + 2235507341550873433, + 6268617819429951833, + 
9401884506653902169, + 2034999793394283865, + 2252181457612260633, + 2778060460671599705, + 6042849745129092185, + 13565119951965832281, + 14754465551753225561, + 14754465551753225560, + 13681967506433008984, + 17665070055383803224, + 4879272269194343768, + 6136684528084950872, + 8538443186898749272, + 8538443186898749271, + 10768029892441278551, + 11482115893184207063, + 14265006200089748695, + 14452664656623861463, + 17287533201914998487, + 1371745446464836311, + 2133558637981044183, + 2153681521472870951, + 2503513951808293735, + 2917882133442156391, + 9867069420785436519, + 958781497084577639, + 1201104507161837767, + 2898466368431051975, + 4221334129439839431, + 4707085239991906247, + 8618506178202607559, + 4422695886937511879, + 7519849872633012167, + 13280866895139761095, + 13280866895139761094, + 14252825993772534470, + 15699213879152489670, + 259818969537988294, + 511640037076557734, + 1226614568936444198, + 2046068174445526182, + 3052704686056933926, + 3378583527545082022, + 5063977342503669414, + 8073745120997007526, + 8565313973200271910, + 10247639060068986662, + 11146502187011488934, + 10843419173568486566, + 13831780086668959910, + 15140847448621253798, + 17114858490777360294, + 17291232970866818438, + 17291232970866818437, + 17291232970866818436, + 84567754716009604, + 494098864300076484, + 590127124379434468, + 3659946587763744228, + 4020206153039663012, + 4615495522167791780, + 6440205470323309220, + 7605276889695630756, + 9327918279838982564, + 10516949827751674020, + 10516949827751674019, + 6838102259936397475, + 7141596446158566819, + 13866019963326972323, + 14059243927043663683, + 16348380432772085315, + 16348380432772085314, + 16348380432772085313, + 6379674255943484993, + 12807658199701023297, + 12807658199701023296, + 12807658199701023295, + 12807658199701023294, + 7062373452694514238, + 12205080988497083966, + 14480587047806553918, + 14738908233938336414, + 14738908233938336413, + 17319702710730351773, + 581855396173792669, + 695972944588200077, + 1387453093228731405, + 1987138076185600909, + 12729853818227628941, + 14695227127176358541, + 16409664451348105357, + 1873210416656525453, + 3433410203089390477, + 4337437976290478861, + 7370363075372461325, + 7593314921649340365, + 7593314921649340364, + 10287212949426003404, + 15507012771307402700, + 15958276500454567692, + 4688861721972093708, + 4881664833011587244, + 10772108742587263148, + 12122071793340881068, + 4624346157141487788, + 5173397283181926764, + 6280408896845763948, + 6280408896845763947, + 8627670315674580843, + 8627670315674580842, + 9075015608831656106, + 4879145629315989674, + 5470177807681659562, + 5550615217731229178, + 6284954147790471034, + 10090854299603017082, + 16022121977386978682, + 10157003382316303738, + 16980148826027841914, + 3720994189353975162, + 6707816169288890234, + 16078575165736353658, + 10867882734548393850, + 11934292695110172410, + 12648990085327884282, + 7466455601212620794, + 9078219704935058682, + 15344071957755628794, + 17671255303162470138, + 6374870092487508730, + 12311035521920403194, + 12805364828266044922, + 18242178189049358842, + 18242178189049358841, + 18242178189049358840, + 18242178189049358839, + 1124327567097281527, + 2207147541150420215, + 3741186621196412407, + 3815043427441784055, + 3855459055723799791, + 4840550141405099631, + 7197647170333111407, + 8677875041157155183, + 9122531585467108783, + 5221595673768396207, + 6594378754293576879, + 7253768281383678767, + 13021953075503924015, + 15900454656763540271, + 17356042144280264495, + 18004247052927964975, + 
4257688612621039407, + 4489876300225366639, + 5160557940591182191, + 9740413772972283759, + 9740413772972283758, + 13386784365896071534, + 8632156008570515822, + 9304088102779316078, + 17430163026129986414, + 17430163026129986413, + 4595306224968714093, + 5798327345475387245, + 6648538621662849773, + 12261321498280723181, + 5508452710882376429, + 8423421480748001005, + 11039789597233989357, + 12499993227407187693, + 13391457797731906157, + 16509448350931092589, + 478833824647426157, + 718015444867204781, + 737542288563961269, + 1700086806348677301, + 2347978654546164533, + 2579610440953417909, + 3974945285386880949, + 15107213213016868789, + 149851701645731765, + 215813321248023917, + 246470759156772937, + 343795938278501081, + 371257717828579285, + 403074429616759721, + 1078946744346352425, + 1512238714692172713, + 2125876902257547177, + 2715321478029492649, + 3512508678610949929, + 3514389518647856275, + 9456511133309198483, + 13153633325500286099, + 18235477270701114515, + 7073402308713625747, + 12703993286564534419, + 13319365118764914707, + 18360084410494849043, + 18360084410494849042, + 7807800011072812050, + 12315637604711111186, + 12635791962427379154, + 1602387724814269906, + 1719845537303410018, + 4107722057880799586, + 9044027502225527138, + 12810022681072149346, + 6163467844557138786, + 7657064255563407458, + 18357925597176610914, + 14815116201221431394, + 1304898406574300258, + 1919427839577249890, + 3283469148025780322, + 11655222440183109730, + 5728065491134275682, + 7217777316056643426, + 12656760341255337826, + 7843558366485413730, + 8242662580150457250, + 11986166157202762658, + 1291706733552774050, + 4172210926780488610, + 6681825956635170722, + 7540832353365891362, + 8383600753398905250, + 8519493645581589874, + 9808501073647890290, + 12469537751359050610, + 12834667454489574514, + 12834667454489574513, + 4942705830984414321, + 5642113917208456561, + 8242505876032958321, + 8503330049093268561, + 8584486142693059729, + 8584486142693059728, + 11127932761356047504, + 15119290196144686736, + 6855513181771169424, + 6855513181771169423, + 10960823355906153103, + 14860350724431727759, + 11429338504085468303, + 11429338504085468302, + 13791733421747052174, + 16716635464672646798, + 2399815970303436430, + 7120672792328101518, + 8267059781891539854, + 8386609570171070638, + 9700364998640324526, + 6354079465464573870, + 6929635712325204206, + 1735533337817268462, + 8555831323013353710, + 9062745020961657326, + 10112075018622933614, + 4342785169459827310, + 5002188901785258478, + 5698396090230750958, + 10601339481650139886, + 10601339481650139885, + 12099505708495607277, + 1454765363633301997, + 1674577906236206573, + 2192090564544927277, + 3877067180281275437, + 18185385993681859629, + 18185385993681859628, + 8408334277107132460, + 9144615287624441900, + 12232465038946418732, + 9273447683742535724, + 10228546324403593900, + 11083256510429689772, + 18334768575713005484, + 7251026204274046892, + 8484906119836330924, + 9210411474436193708, + 12521294371941167532, + 12521294371941167531, + 610004857444905387, + 913191495210999723, + 1030272043811550747, + 1110236932160471803, + 1999645308739240315, + 2086573428118404683, + 5843347887524072011, + 9243584807752930379, + 5253729860862665803, + 5895785554990510155, + 5945276465452839731, + 7810173142717594675, + 8334823575502698419, + 11129257463275462067, + 17056751797513252275, + 6868942910473446835, + 8877663658463558835, + 8877663658463558834, + 10652687822555051954, + 12007590165892691378, + 15840171347272206770, + 
15840171347272206769, + 15840171347272206768, + 2276554202225405360, + 3178271644043407024, + 4232280875750859440, + 4321245930697321408, + 17289467543257770944, + 1764930096972730304, + 2381172034405432640, + 2455777229121811648, + 4947511763546844864, + 6222279714182143424, + 8294028695873774016, + 8353994993758897808, + 8591965286868291856, + 14579527372061752592, + 14579527372061752591, + 16357124301157118991, + 4251893070815504399, + 4485162970698688303, + 4917041968843694319, + 7306947182534098671, + 8613308793411774703, + 8613308793411774702, + 17036904177263666414, + 2800582427354497774, + 3509775508072424430, + 4969801852504232942, + 5598975449871570030, + 8902647067988073070, + 10636369429694444142, + 10636369429694444141, + 11096051293948462957, + 16634642709528783725, + 6309850189102255981, + 10304687641695390573, + 12332839039226269805, + 1541175507479416941, + 3581567296998624877, + 6823304254315797613, + 7959700875596860269, + 16333204252639423341, + 9732985721335116653, + 14634493685928651629, + 14634493685928651628, + 993087284848275308, + 1052442086119711892, + 1257368203592663412, + 1345538642524794484, + 2174028316317399028, + 2646045148123416884, + 2781990910824851204, + 3749112370258388868, + 5017401491699856516, + 5299852675429381028, + 6222830143629134884, + 7071367287056310948, + 7071367287056310947, + 10874837926877909667, + 14657123343645860515, + 14657123343645860514, + 15955016321180192674, + 2658971102809510818, + 4026596097454930082, + 4559023457166986978, + 14946636852028704482, + 17225779003482939618, + 7161163407777309922, + 8353682879190542050, + 8353682879190542049, + 8353682879190542048, + 9940712480064018912, + 11479367098290396640, + 15058563452332707296, + 15429348797611683872, + 10296550067847241760, + 13251758678795606560, + 853733735716700704, + 1918344670327310752, + 3564054056043504800, + 3817590970594028384, + 4405349867917873888, + 13679578553297138400, + 17377660510317205728, + 2735899076803714784, + 3091079997032393888, + 4467682783310698912, + 4467682783310698911, + 4636778876404072287, + 11209752763783374687, + 14747242783137576287, + 7871910813479572831, + 9199789787130009951, + 14939702503488289119, + 18004343438988200287, + 6150332406255846751, + 6461455146060506975, + 10611581083436961119, + 14447067821038211423, + 14447067821038211422, + 7417783215667159390, + 9489682585806688094, + 12295443428572403038, + 8452515910179614046, + 8452515910179614045, + 8452515910179614044, + 11103636693569337180, + 16125316559694421852, + 325984463423881564, + 663572886879408860, + 2204221409364112092, + 2883618248031593564, + 3535517432033449692, + 3583752879821638884, + 5087769015721161188, + 5216908205732824292, + 5267822082021660372, + 5267822082021660371, + 5973556082137549779, + 10127135586151408083, + 15132238521457916371, + 636216646173196243, + 1416222438070155219, + 1480606782568549883, + 2485172544837393403, + 2913126202474447611, + 3708071420352327675, + 7170218502226462203, + 11670554480557308411, + 11896977379484717051, + 2083558176980763643, + 5200211012084422139, + 6722687888964775419, + 263860744537800187, + 356626634200162043, + 374995728060726799, + 562626396649581455, + 668090710731765631, + 1314920694865210367, + 1565971694795356799, + 1626801138371177143, + 3050399631543600311, + 3252588417982718999, + 3943649882267154711, + 5690223247184898327, + 10500972580993406231, + 16587979528985333015, + 16587979528985333014, + 8435841878647835926, + 8842759330760244694, + 6900278005880804822, + 6913177521579022276, + 17403930227288176580, 
+ 1582598453877586372, + 1767622453382162468, + 4561632501269398052, + 12074023931716663844, + 13462712387529293604, + 3390174784045954852, + 4432628136775169316, + 4942173446888271140, + 6840248833121737252, + 8471563730871628580, + 8584088084519716836, + 13055320889882013156, + 17488325539115324388, + 17488325539115324387, + 193141193810237155, + 217073021245487651, + 246357510466964667, + 375761730363109387, + 1691334113702703627, + 3134845592284406795, + 3940496189983812107, + 4086215182968352939, + 4325590533965633931, + 7343354270724715915, + 7973055256243169163, + 8318138276740459659, + 8604211150130435019, + 11442842369059280331, + 12359841103181922507, + 3490773153013303499, + 5528336751933505227, + 6540783476039867467, + 6540783476039867466, + 7804674356539685450, + 11895908996407579722, + 12826223092255614154, + 13382695616004497738, + 14815700757333594186, + 16439951890618587722, + 16439951890618587721, + 11693579203267162697, + 7218948442838297161, + 16022324428703904329, + 16022324428703904328, + 16022324428703904327, + 2779978180900834887, + 2887174734074284759, + 3197367898534435991, + 4962165147331154583, + 8063214183669008535, + 9395576161783525527, + 9395576161783525526, + 10393682632565385238, + 15604714973635722262, + 16867294769871843862, + 3000940605405179926, + 3936818220251076118, + 6620908367456711702, + 7301930289720303126, + 10229665190399330838, + 10229665190399330837, + 12549601405140259861, + 10040344842128778261, + 13159132507357842453, + 321189401840807957, + 333910489905004547, + 339550278152555729, + 389902709000592145, + 407146608122730327, + 3135218545626629463, + 3270900916826941895, + 7096255531431399367, + 10420104930240492487, + 12135436139587214791, + 12135436139587214790, + 6823062890375009734, + 7613107709739409606, + 7892697010757912838, + 7892697010757912837, + 8550469289887318533, + 16907485337985849861, + 16907485337985849860, + 73749600326477828, + 129358754018433956, + 135210429371105938, + 154749653951984478, + 502664213482999262, + 539434336070337334, + 4535220881640194870, + 4535220881640194869, + 10527845868963820341, + 2758850809430377269, + 3206016028606945589, + 5841644046833004853, + 14380517732157750581, + 17302280880833042229, + 2812794098610766133, + 4938000534506490165, + 6908876623200493621, + 9786215493805186101, + 14203569168050332213, + 2148423167720436277, + 2694516932039285493, + 4825779066626087413, + 7688339857985808373, + 7688339857985808372, + 12122724923918984180, + 12455148497246117812, + 12455148497246117811, + 6921502123845785523, + 7036243606352893939, + 7036243606352893938, + 10018792792569501170, + 10475539572065165554, + 10489419441236018372, + 11793475133879807172, + 13428423999554646212, + 14292726630349214148, + 2159664365246519748, + 4282783759924356548, + 8024508970010077124, + 11977766724815483844, + 13893156378369939140, + 13893156378369939139, +]; + +#[test] +fn mapping() { + let mut m = RandomMapping { + prng: 1234567891, + last_idx: 0, + }; + + for i in 0..2000 { + assert_eq!(m.next_index(), MAPPING_REF[i]); + } +} diff --git a/packages/utils/rateless-iblt/src/wasm.rs b/packages/utils/rateless-iblt/src/wasm.rs new file mode 100644 index 000000000..49d5be93b --- /dev/null +++ b/packages/utils/rateless-iblt/src/wasm.rs @@ -0,0 +1,173 @@ +use crate::encoding::*; +use js_sys::Array; +use wasm_bindgen::prelude::*; +/* +#[wasm_bindgen] +#[derive(Clone, Copy, PartialEq)] +pub struct MyU64(u64); + +impl From<u64> for MyU64 { + fn from(value: u64) -> Self { + MyU64(value) + } +} + +impl Into<u64> for MyU64 { + fn
into(self) -> u64 { + self.0 + } +} + +impl Symbol for MyU64 { + fn zero() -> MyU64 { + MyU64(0) + } + + fn xor(&self, other: &MyU64) -> MyU64 { + MyU64(self.0 ^ other.0) + } + + fn hash(&self) -> u64 { + let mut hasher = DefaultHasher::new(); + hasher.write_u64(self.0); + hasher.finish() + } +} */ + +pub type IdentityU64 = u64; + +#[wasm_bindgen] +pub struct EncoderWrapper { + encoder: Encoder<IdentityU64>, +} + +#[wasm_bindgen] +impl EncoderWrapper { + #[wasm_bindgen(constructor)] + pub fn new() -> EncoderWrapper { + EncoderWrapper { + encoder: Encoder::<IdentityU64>::new(), + } + } + + pub fn add_symbol(&mut self, symbol: u64) { + let my_symbol: IdentityU64 = symbol; + self.encoder.add_symbol(&my_symbol); + } + + pub fn produce_next_coded_symbol(&mut self) -> JsValue { + let coded_symbol = self.encoder.produce_next_coded_symbol(); + let symbol_u64 = coded_symbol.symbol; + let hash_u64 = coded_symbol.hash; + let count_i64 = coded_symbol.count; + + // Create a JavaScript object to hold the coded symbol + let obj = js_sys::Object::new(); + + js_sys::Reflect::set( + &obj, + &JsValue::from_str("symbol"), + &JsValue::from(symbol_u64), + ) + .unwrap(); + js_sys::Reflect::set(&obj, &JsValue::from_str("hash"), &JsValue::from(hash_u64)).unwrap(); + js_sys::Reflect::set(&obj, &JsValue::from_str("count"), &JsValue::from(count_i64)).unwrap(); + + JsValue::from(obj) + } + + pub fn reset(&mut self) { + self.encoder.reset(); + } + + pub fn to_decoder(&self) -> DecoderWrapper { + DecoderWrapper { + decoder: self.encoder.to_decoder(), + } + } + + pub fn clone(&self) -> EncoderWrapper { + EncoderWrapper { + encoder: self.encoder.clone(), + } + } +} + +#[wasm_bindgen] +pub struct DecoderWrapper { + decoder: Decoder<IdentityU64>, +} + +#[wasm_bindgen] +impl DecoderWrapper { + #[wasm_bindgen(constructor)] + pub fn new() -> DecoderWrapper { + DecoderWrapper { + decoder: Decoder::<IdentityU64>::new(), + } + } + + pub fn add_symbol(&mut self, symbol: u64) { + let my_symbol: IdentityU64 = symbol; + self.decoder.add_symbol(&my_symbol); + } + + pub fn add_coded_symbol(&mut self, coded_symbol_js: &JsValue) { + // Extract symbol, hash, and count from JsValue + let symbol_js = + js_sys::Reflect::get(coded_symbol_js, &JsValue::from_str("symbol")).unwrap(); + let hash_js = js_sys::Reflect::get(coded_symbol_js, &JsValue::from_str("hash")).unwrap(); + let count_js = js_sys::Reflect::get(coded_symbol_js, &JsValue::from_str("count")).unwrap(); + + let symbol_u64: u64 = symbol_js.try_into().unwrap(); + let hash_u64: u64 = hash_js.try_into().unwrap(); + let count_i64: i64 = count_js.try_into().unwrap(); + let coded_symbol = CodedSymbol { + symbol: symbol_u64, + hash: hash_u64, + count: count_i64, + }; + + self.decoder.add_coded_symbol(&coded_symbol); + } + + pub fn try_decode(&mut self) -> Result<(), JsValue> { + match self.decoder.try_decode() { + Ok(_) => Ok(()), + // the error is an enum; map each variant to a descriptive JS error string + Err(e) => { + return match e { + Error::InvalidDegree => Err(JsValue::from_str("Invalid degree")), + Error::InvalidSize => Err(JsValue::from_str("Invalid size")), + Error::DecodeFailed => Err(JsValue::from_str("Decode failed")), + }; + } + } + } + + pub fn decoded(&self) -> bool { + self.decoder.decoded() + } + + pub fn get_remote_symbols(&self) -> Array { + let symbols = self.decoder.get_remote_symbols(); + let array = Array::new(); + for sym in symbols { + array.push(&JsValue::from(sym.symbol)); + } + array + } + + pub fn get_local_symbols(&self) -> Array { + let symbols = self.decoder.get_local_symbols(); + let array = Array::new(); + for sym in symbols { +
array.push(&JsValue::from(sym.symbol)); + } + array + } + + pub fn reset(&mut self) { + self.decoder.reset(); + } +} diff --git a/packages/utils/rateless-iblt/test/index.spec.ts b/packages/utils/rateless-iblt/test/index.spec.ts new file mode 100644 index 000000000..cbc6f8102 --- /dev/null +++ b/packages/utils/rateless-iblt/test/index.spec.ts @@ -0,0 +1,65 @@ +import { expect } from "chai"; +import { DecoderWrapper, EncoderWrapper } from "./index.js"; + +describe("riblt", () => { + it("diff", async () => { + const aliceSymbols = [1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n, 10n].map( + (n) => n, + ); + const bobSymbols = [1n, 3n, 4n, 5n, 6n, 7n, 8n, 9n, 10n, 11n].map((n) => n); + + const encoder = new EncoderWrapper(); + aliceSymbols.forEach((sym) => encoder.add_symbol(sym)); + + const decoder = new DecoderWrapper(); + bobSymbols.forEach((sym) => decoder.add_symbol(sym)); + + let cost = 0; + let once = false; + while (!decoder.decoded() || !once) { + once = true; + const codedSymbol = encoder.produce_next_coded_symbol(); + decoder.add_coded_symbol(codedSymbol); + decoder.try_decode(); + cost += 1; + } + + const remoteSymbols = decoder.get_remote_symbols(); + const localSymbols = decoder.get_local_symbols(); + + expect(remoteSymbols.length).to.equal(1); + expect(remoteSymbols[0]).to.equal(2n); + + expect(localSymbols.length).to.equal(1); + expect(localSymbols[0]).to.equal(11n); + expect(cost).to.equal(2); + }); + + it("no diff", async () => { + const aliceSymbols = [1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n, 10n]; + const bobSymbols = [1n, 2n, 3n, 4n, 5n, 6n, 7n, 8n, 9n, 10n]; + + const encoder = new EncoderWrapper(); + aliceSymbols.forEach((sym) => encoder.add_symbol(sym)); + + const decoder = new DecoderWrapper(); + bobSymbols.forEach((sym) => decoder.add_symbol(sym)); + + let cost = 0; + let once = false; + while (!decoder.decoded() || !once) { + once = true; + const codedSymbol = encoder.produce_next_coded_symbol(); + decoder.add_coded_symbol(codedSymbol); + decoder.try_decode(); + cost += 1; + } + + const remoteSymbols = decoder.get_remote_symbols(); + const localSymbols = decoder.get_local_symbols(); + + expect(remoteSymbols.length).to.equal(0); + expect(localSymbols.length).to.equal(0); + expect(cost).to.equal(1); + }); +}); diff --git a/packages/utils/rateless-iblt/test/index.ts b/packages/utils/rateless-iblt/test/index.ts new file mode 100644 index 000000000..427990c67 --- /dev/null +++ b/packages/utils/rateless-iblt/test/index.ts @@ -0,0 +1,7 @@ +import init from "../pkg/riblt.js"; + +const wasmFetch = async (input: any) => + (await (await import("node:fs/promises")).readFile(input)) as any; // TODO fix types. 
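/*
  A minimal usage sketch of the wrappers re-exported below (illustrative values,
  mirroring test/index.spec.ts; run only after `init()` has resolved). The
  encoder streams coded symbols and the decoder consumes them until the set
  difference is recovered; the expected number of coded symbols scales with the
  size of the difference, not with the size of the sets.

      const encoder = new EncoderWrapper();
      [1n, 2n, 3n].forEach((s) => encoder.add_symbol(s)); // Alice's set
      const decoder = new DecoderWrapper();
      [2n, 3n, 4n].forEach((s) => decoder.add_symbol(s)); // Bob's set
      do {
        decoder.add_coded_symbol(encoder.produce_next_coded_symbol());
        decoder.try_decode();
      } while (!decoder.decoded());
      decoder.get_remote_symbols(); // [1n], present only on the encoder side
      decoder.get_local_symbols(); // [4n], present only on the decoder side
*/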
+globalThis.fetch = wasmFetch; // wasm-pack build --target web generates a loader that uses 'fetch', but Node's fetch cannot load wasm from the filesystem yet, so we polyfill it with a file read +await init(); +export { DecoderWrapper, EncoderWrapper } from "../pkg/riblt.js"; diff --git a/yarn.lock b/yarn.lock index 5344dfa0e..ab9a74f8a 100644 --- a/yarn.lock +++ b/yarn.lock @@ -54,525 +54,525 @@ tslib "^2.6.2" "@aws-sdk/client-ec2@^3.390.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-ec2/-/client-ec2-3.679.0.tgz#9f041847fd5fcb1d8d725376fd862afa2c2428ab" - integrity sha512-PbsPiF1/ZNtNc543S1AqTsIhOOsv7LNxyFbFMSFwKM5WYPTU7M1orIwhKE1oesdZIjfAp2EEqaKnugNhAMqLQA== + version "3.703.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-ec2/-/client-ec2-3.703.0.tgz#bb595364eba6470433c962fab093e9b0a54e7746" + integrity sha512-Ykmf40EIDt3XpF+3aVB6n/S/9FqxZ8vg84vARrX/ksnTLDZ99qbHaU7FwmreRd1Mwq+fLlgBdwODC4BiQvc2/g== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/client-sso-oidc" "3.679.0" - "@aws-sdk/client-sts" "3.679.0" - "@aws-sdk/core" "3.679.0" - "@aws-sdk/credential-provider-node" "3.679.0" - "@aws-sdk/middleware-host-header" "3.679.0" - "@aws-sdk/middleware-logger" "3.679.0" - "@aws-sdk/middleware-recursion-detection" "3.679.0" - "@aws-sdk/middleware-sdk-ec2" "3.679.0" - "@aws-sdk/middleware-user-agent" "3.679.0" - "@aws-sdk/region-config-resolver" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@aws-sdk/util-endpoints" "3.679.0" - "@aws-sdk/util-user-agent-browser" "3.679.0" - "@aws-sdk/util-user-agent-node" "3.679.0" - "@smithy/config-resolver" "^3.0.9" - "@smithy/core" "^2.4.8" - "@smithy/fetch-http-handler" "^3.2.9" - "@smithy/hash-node" "^3.0.7" - "@smithy/invalid-dependency" "^3.0.7" - "@smithy/middleware-content-length" "^3.0.9" - "@smithy/middleware-endpoint" "^3.1.4" - "@smithy/middleware-retry" "^3.0.23" - "@smithy/middleware-serde" "^3.0.7" - "@smithy/middleware-stack" "^3.0.7" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/node-http-handler" "^3.2.4" - "@smithy/protocol-http" "^4.1.4" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" - "@smithy/url-parser" "^3.0.7" + "@aws-sdk/client-sso-oidc" "3.699.0" + "@aws-sdk/client-sts" "3.699.0" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/credential-provider-node" "3.699.0" + "@aws-sdk/middleware-host-header" "3.696.0" + "@aws-sdk/middleware-logger" "3.696.0" + "@aws-sdk/middleware-recursion-detection" "3.696.0" + "@aws-sdk/middleware-sdk-ec2" "3.696.0" + "@aws-sdk/middleware-user-agent" "3.696.0" + "@aws-sdk/region-config-resolver" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@aws-sdk/util-endpoints" "3.696.0" + "@aws-sdk/util-user-agent-browser" "3.696.0" + "@aws-sdk/util-user-agent-node" "3.696.0" + "@smithy/config-resolver" "^3.0.12" + "@smithy/core" "^2.5.3" + "@smithy/fetch-http-handler" "^4.1.1" + "@smithy/hash-node" "^3.0.10" + "@smithy/invalid-dependency" "^3.0.10" + "@smithy/middleware-content-length" "^3.0.12" + "@smithy/middleware-endpoint" "^3.2.3" + "@smithy/middleware-retry" "^3.0.27" + "@smithy/middleware-serde" "^3.0.10" + "@smithy/middleware-stack" "^3.0.10" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/node-http-handler" "^3.3.1" + "@smithy/protocol-http" "^4.1.7" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" + "@smithy/url-parser" "^3.0.10" "@smithy/util-base64" "^3.0.0" "@smithy/util-body-length-browser" "^3.0.0" "@smithy/util-body-length-node" "^3.0.0" - "@smithy/util-defaults-mode-browser" "^3.0.23" - "@smithy/util-defaults-mode-node" "^3.0.23" -
"@smithy/util-endpoints" "^2.1.3" - "@smithy/util-middleware" "^3.0.7" - "@smithy/util-retry" "^3.0.7" + "@smithy/util-defaults-mode-browser" "^3.0.27" + "@smithy/util-defaults-mode-node" "^3.0.27" + "@smithy/util-endpoints" "^2.1.6" + "@smithy/util-middleware" "^3.0.10" + "@smithy/util-retry" "^3.0.10" "@smithy/util-utf8" "^3.0.0" - "@smithy/util-waiter" "^3.1.6" + "@smithy/util-waiter" "^3.1.9" "@types/uuid" "^9.0.1" tslib "^2.6.2" uuid "^9.0.1" "@aws-sdk/client-route-53@^3.391.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-route-53/-/client-route-53-3.679.0.tgz#e861f04d7ed392e8183575d35418a60a836747dd" - integrity sha512-xKm/HZ3RALk1+XJdQuK3p99s2RS3bN/lQf3yu4w50nqS4IuXN9L9GNKHsLq3BCbxRI+OVl3wGMjf6UKzRJQFsQ== + version "3.699.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-route-53/-/client-route-53-3.699.0.tgz#d5d9a4a0670f89d48dfb13dbb83952b4384b8d7b" + integrity sha512-GzIJA4/ZhR1WUYA+yy2NRQ+6QOeg/8uioGGvTMAGPpuE5yqjP86etgpURWpR9vyOuQmv7z9mk5X8ShlyJ9bn1A== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/client-sso-oidc" "3.679.0" - "@aws-sdk/client-sts" "3.679.0" - "@aws-sdk/core" "3.679.0" - "@aws-sdk/credential-provider-node" "3.679.0" - "@aws-sdk/middleware-host-header" "3.679.0" - "@aws-sdk/middleware-logger" "3.679.0" - "@aws-sdk/middleware-recursion-detection" "3.679.0" - "@aws-sdk/middleware-sdk-route53" "3.679.0" - "@aws-sdk/middleware-user-agent" "3.679.0" - "@aws-sdk/region-config-resolver" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@aws-sdk/util-endpoints" "3.679.0" - "@aws-sdk/util-user-agent-browser" "3.679.0" - "@aws-sdk/util-user-agent-node" "3.679.0" - "@aws-sdk/xml-builder" "3.679.0" - "@smithy/config-resolver" "^3.0.9" - "@smithy/core" "^2.4.8" - "@smithy/fetch-http-handler" "^3.2.9" - "@smithy/hash-node" "^3.0.7" - "@smithy/invalid-dependency" "^3.0.7" - "@smithy/middleware-content-length" "^3.0.9" - "@smithy/middleware-endpoint" "^3.1.4" - "@smithy/middleware-retry" "^3.0.23" - "@smithy/middleware-serde" "^3.0.7" - "@smithy/middleware-stack" "^3.0.7" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/node-http-handler" "^3.2.4" - "@smithy/protocol-http" "^4.1.4" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" - "@smithy/url-parser" "^3.0.7" + "@aws-sdk/client-sso-oidc" "3.699.0" + "@aws-sdk/client-sts" "3.699.0" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/credential-provider-node" "3.699.0" + "@aws-sdk/middleware-host-header" "3.696.0" + "@aws-sdk/middleware-logger" "3.696.0" + "@aws-sdk/middleware-recursion-detection" "3.696.0" + "@aws-sdk/middleware-sdk-route53" "3.696.0" + "@aws-sdk/middleware-user-agent" "3.696.0" + "@aws-sdk/region-config-resolver" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@aws-sdk/util-endpoints" "3.696.0" + "@aws-sdk/util-user-agent-browser" "3.696.0" + "@aws-sdk/util-user-agent-node" "3.696.0" + "@aws-sdk/xml-builder" "3.696.0" + "@smithy/config-resolver" "^3.0.12" + "@smithy/core" "^2.5.3" + "@smithy/fetch-http-handler" "^4.1.1" + "@smithy/hash-node" "^3.0.10" + "@smithy/invalid-dependency" "^3.0.10" + "@smithy/middleware-content-length" "^3.0.12" + "@smithy/middleware-endpoint" "^3.2.3" + "@smithy/middleware-retry" "^3.0.27" + "@smithy/middleware-serde" "^3.0.10" + "@smithy/middleware-stack" "^3.0.10" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/node-http-handler" "^3.3.1" + "@smithy/protocol-http" "^4.1.7" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" + "@smithy/url-parser" "^3.0.10" "@smithy/util-base64" 
"^3.0.0" "@smithy/util-body-length-browser" "^3.0.0" "@smithy/util-body-length-node" "^3.0.0" - "@smithy/util-defaults-mode-browser" "^3.0.23" - "@smithy/util-defaults-mode-node" "^3.0.23" - "@smithy/util-endpoints" "^2.1.3" - "@smithy/util-middleware" "^3.0.7" - "@smithy/util-retry" "^3.0.7" + "@smithy/util-defaults-mode-browser" "^3.0.27" + "@smithy/util-defaults-mode-node" "^3.0.27" + "@smithy/util-endpoints" "^2.1.6" + "@smithy/util-middleware" "^3.0.10" + "@smithy/util-retry" "^3.0.10" "@smithy/util-utf8" "^3.0.0" - "@smithy/util-waiter" "^3.1.6" + "@smithy/util-waiter" "^3.1.9" tslib "^2.6.2" -"@aws-sdk/client-sso-oidc@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso-oidc/-/client-sso-oidc-3.679.0.tgz#00de670c9ea31c5073f6eed6842795e70bc63fca" - integrity sha512-/dBYWcCwbA/id4sFCIVZvf0UsvzHCC68SryxeNQk/PDkY9N4n5yRcMUkZDaEyQCjowc3kY4JOXp2AdUP037nhA== +"@aws-sdk/client-sso-oidc@3.699.0": + version "3.699.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso-oidc/-/client-sso-oidc-3.699.0.tgz#a35665e681abd518b56330bc7dab63041fbdaf83" + integrity sha512-u8a1GorY5D1l+4FQAf4XBUC1T10/t7neuwT21r0ymrtMFSK2a9QqVHKMoLkvavAwyhJnARSBM9/UQC797PFOFw== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/core" "3.679.0" - "@aws-sdk/credential-provider-node" "3.679.0" - "@aws-sdk/middleware-host-header" "3.679.0" - "@aws-sdk/middleware-logger" "3.679.0" - "@aws-sdk/middleware-recursion-detection" "3.679.0" - "@aws-sdk/middleware-user-agent" "3.679.0" - "@aws-sdk/region-config-resolver" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@aws-sdk/util-endpoints" "3.679.0" - "@aws-sdk/util-user-agent-browser" "3.679.0" - "@aws-sdk/util-user-agent-node" "3.679.0" - "@smithy/config-resolver" "^3.0.9" - "@smithy/core" "^2.4.8" - "@smithy/fetch-http-handler" "^3.2.9" - "@smithy/hash-node" "^3.0.7" - "@smithy/invalid-dependency" "^3.0.7" - "@smithy/middleware-content-length" "^3.0.9" - "@smithy/middleware-endpoint" "^3.1.4" - "@smithy/middleware-retry" "^3.0.23" - "@smithy/middleware-serde" "^3.0.7" - "@smithy/middleware-stack" "^3.0.7" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/node-http-handler" "^3.2.4" - "@smithy/protocol-http" "^4.1.4" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" - "@smithy/url-parser" "^3.0.7" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/credential-provider-node" "3.699.0" + "@aws-sdk/middleware-host-header" "3.696.0" + "@aws-sdk/middleware-logger" "3.696.0" + "@aws-sdk/middleware-recursion-detection" "3.696.0" + "@aws-sdk/middleware-user-agent" "3.696.0" + "@aws-sdk/region-config-resolver" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@aws-sdk/util-endpoints" "3.696.0" + "@aws-sdk/util-user-agent-browser" "3.696.0" + "@aws-sdk/util-user-agent-node" "3.696.0" + "@smithy/config-resolver" "^3.0.12" + "@smithy/core" "^2.5.3" + "@smithy/fetch-http-handler" "^4.1.1" + "@smithy/hash-node" "^3.0.10" + "@smithy/invalid-dependency" "^3.0.10" + "@smithy/middleware-content-length" "^3.0.12" + "@smithy/middleware-endpoint" "^3.2.3" + "@smithy/middleware-retry" "^3.0.27" + "@smithy/middleware-serde" "^3.0.10" + "@smithy/middleware-stack" "^3.0.10" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/node-http-handler" "^3.3.1" + "@smithy/protocol-http" "^4.1.7" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" + "@smithy/url-parser" "^3.0.10" "@smithy/util-base64" "^3.0.0" "@smithy/util-body-length-browser" "^3.0.0" "@smithy/util-body-length-node" "^3.0.0" - 
"@smithy/util-defaults-mode-browser" "^3.0.23" - "@smithy/util-defaults-mode-node" "^3.0.23" - "@smithy/util-endpoints" "^2.1.3" - "@smithy/util-middleware" "^3.0.7" - "@smithy/util-retry" "^3.0.7" + "@smithy/util-defaults-mode-browser" "^3.0.27" + "@smithy/util-defaults-mode-node" "^3.0.27" + "@smithy/util-endpoints" "^2.1.6" + "@smithy/util-middleware" "^3.0.10" + "@smithy/util-retry" "^3.0.10" "@smithy/util-utf8" "^3.0.0" tslib "^2.6.2" -"@aws-sdk/client-sso@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso/-/client-sso-3.679.0.tgz#6d6e96ae4e8c3258793e26bcd127b9f9a621dd1b" - integrity sha512-/0cAvYnpOZTo/Y961F1kx2fhDDLUYZ0SQQ5/75gh3xVImLj7Zw+vp74ieqFbqWLYGMaq8z1Arr9A8zG95mbLdg== +"@aws-sdk/client-sso@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sso/-/client-sso-3.696.0.tgz#a9251e88cdfc91fb14191f760f68baa835e88f1c" + integrity sha512-q5TTkd08JS0DOkHfUL853tuArf7NrPeqoS5UOvqJho8ibV9Ak/a/HO4kNvy9Nj3cib/toHYHsQIEtecUPSUUrQ== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/core" "3.679.0" - "@aws-sdk/middleware-host-header" "3.679.0" - "@aws-sdk/middleware-logger" "3.679.0" - "@aws-sdk/middleware-recursion-detection" "3.679.0" - "@aws-sdk/middleware-user-agent" "3.679.0" - "@aws-sdk/region-config-resolver" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@aws-sdk/util-endpoints" "3.679.0" - "@aws-sdk/util-user-agent-browser" "3.679.0" - "@aws-sdk/util-user-agent-node" "3.679.0" - "@smithy/config-resolver" "^3.0.9" - "@smithy/core" "^2.4.8" - "@smithy/fetch-http-handler" "^3.2.9" - "@smithy/hash-node" "^3.0.7" - "@smithy/invalid-dependency" "^3.0.7" - "@smithy/middleware-content-length" "^3.0.9" - "@smithy/middleware-endpoint" "^3.1.4" - "@smithy/middleware-retry" "^3.0.23" - "@smithy/middleware-serde" "^3.0.7" - "@smithy/middleware-stack" "^3.0.7" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/node-http-handler" "^3.2.4" - "@smithy/protocol-http" "^4.1.4" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" - "@smithy/url-parser" "^3.0.7" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/middleware-host-header" "3.696.0" + "@aws-sdk/middleware-logger" "3.696.0" + "@aws-sdk/middleware-recursion-detection" "3.696.0" + "@aws-sdk/middleware-user-agent" "3.696.0" + "@aws-sdk/region-config-resolver" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@aws-sdk/util-endpoints" "3.696.0" + "@aws-sdk/util-user-agent-browser" "3.696.0" + "@aws-sdk/util-user-agent-node" "3.696.0" + "@smithy/config-resolver" "^3.0.12" + "@smithy/core" "^2.5.3" + "@smithy/fetch-http-handler" "^4.1.1" + "@smithy/hash-node" "^3.0.10" + "@smithy/invalid-dependency" "^3.0.10" + "@smithy/middleware-content-length" "^3.0.12" + "@smithy/middleware-endpoint" "^3.2.3" + "@smithy/middleware-retry" "^3.0.27" + "@smithy/middleware-serde" "^3.0.10" + "@smithy/middleware-stack" "^3.0.10" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/node-http-handler" "^3.3.1" + "@smithy/protocol-http" "^4.1.7" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" + "@smithy/url-parser" "^3.0.10" "@smithy/util-base64" "^3.0.0" "@smithy/util-body-length-browser" "^3.0.0" "@smithy/util-body-length-node" "^3.0.0" - "@smithy/util-defaults-mode-browser" "^3.0.23" - "@smithy/util-defaults-mode-node" "^3.0.23" - "@smithy/util-endpoints" "^2.1.3" - "@smithy/util-middleware" "^3.0.7" - "@smithy/util-retry" "^3.0.7" + "@smithy/util-defaults-mode-browser" "^3.0.27" + "@smithy/util-defaults-mode-node" "^3.0.27" + 
"@smithy/util-endpoints" "^2.1.6" + "@smithy/util-middleware" "^3.0.10" + "@smithy/util-retry" "^3.0.10" "@smithy/util-utf8" "^3.0.0" tslib "^2.6.2" -"@aws-sdk/client-sts@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/client-sts/-/client-sts-3.679.0.tgz#4641c24032ebd69a6e0e4eb28477749e21e69884" - integrity sha512-3CvrT8w1RjFu1g8vKA5Azfr5V83r2/b68Ock43WE003Bq/5Y38mwmYX7vk0fPHzC3qejt4YMAWk/C3fSKOy25g== +"@aws-sdk/client-sts@3.699.0": + version "3.699.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/client-sts/-/client-sts-3.699.0.tgz#9419be6bbf3809008128117afea8b9129b5a959d" + integrity sha512-++lsn4x2YXsZPIzFVwv3fSUVM55ZT0WRFmPeNilYIhZClxHLmVAWKH4I55cY9ry60/aTKYjzOXkWwyBKGsGvQg== dependencies: "@aws-crypto/sha256-browser" "5.2.0" "@aws-crypto/sha256-js" "5.2.0" - "@aws-sdk/client-sso-oidc" "3.679.0" - "@aws-sdk/core" "3.679.0" - "@aws-sdk/credential-provider-node" "3.679.0" - "@aws-sdk/middleware-host-header" "3.679.0" - "@aws-sdk/middleware-logger" "3.679.0" - "@aws-sdk/middleware-recursion-detection" "3.679.0" - "@aws-sdk/middleware-user-agent" "3.679.0" - "@aws-sdk/region-config-resolver" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@aws-sdk/util-endpoints" "3.679.0" - "@aws-sdk/util-user-agent-browser" "3.679.0" - "@aws-sdk/util-user-agent-node" "3.679.0" - "@smithy/config-resolver" "^3.0.9" - "@smithy/core" "^2.4.8" - "@smithy/fetch-http-handler" "^3.2.9" - "@smithy/hash-node" "^3.0.7" - "@smithy/invalid-dependency" "^3.0.7" - "@smithy/middleware-content-length" "^3.0.9" - "@smithy/middleware-endpoint" "^3.1.4" - "@smithy/middleware-retry" "^3.0.23" - "@smithy/middleware-serde" "^3.0.7" - "@smithy/middleware-stack" "^3.0.7" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/node-http-handler" "^3.2.4" - "@smithy/protocol-http" "^4.1.4" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" - "@smithy/url-parser" "^3.0.7" + "@aws-sdk/client-sso-oidc" "3.699.0" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/credential-provider-node" "3.699.0" + "@aws-sdk/middleware-host-header" "3.696.0" + "@aws-sdk/middleware-logger" "3.696.0" + "@aws-sdk/middleware-recursion-detection" "3.696.0" + "@aws-sdk/middleware-user-agent" "3.696.0" + "@aws-sdk/region-config-resolver" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@aws-sdk/util-endpoints" "3.696.0" + "@aws-sdk/util-user-agent-browser" "3.696.0" + "@aws-sdk/util-user-agent-node" "3.696.0" + "@smithy/config-resolver" "^3.0.12" + "@smithy/core" "^2.5.3" + "@smithy/fetch-http-handler" "^4.1.1" + "@smithy/hash-node" "^3.0.10" + "@smithy/invalid-dependency" "^3.0.10" + "@smithy/middleware-content-length" "^3.0.12" + "@smithy/middleware-endpoint" "^3.2.3" + "@smithy/middleware-retry" "^3.0.27" + "@smithy/middleware-serde" "^3.0.10" + "@smithy/middleware-stack" "^3.0.10" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/node-http-handler" "^3.3.1" + "@smithy/protocol-http" "^4.1.7" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" + "@smithy/url-parser" "^3.0.10" "@smithy/util-base64" "^3.0.0" "@smithy/util-body-length-browser" "^3.0.0" "@smithy/util-body-length-node" "^3.0.0" - "@smithy/util-defaults-mode-browser" "^3.0.23" - "@smithy/util-defaults-mode-node" "^3.0.23" - "@smithy/util-endpoints" "^2.1.3" - "@smithy/util-middleware" "^3.0.7" - "@smithy/util-retry" "^3.0.7" + "@smithy/util-defaults-mode-browser" "^3.0.27" + "@smithy/util-defaults-mode-node" "^3.0.27" + "@smithy/util-endpoints" "^2.1.6" + "@smithy/util-middleware" "^3.0.10" + "@smithy/util-retry" "^3.0.10" "@smithy/util-utf8" "^3.0.0" tslib 
"^2.6.2" -"@aws-sdk/core@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/core/-/core-3.679.0.tgz#102aa1d19db5bdcabefc2dcd044f2fb5d0771568" - integrity sha512-CS6PWGX8l4v/xyvX8RtXnBisdCa5+URzKd0L6GvHChype9qKUVxO/Gg6N/y43Hvg7MNWJt9FBPNWIxUB+byJwg== - dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/core" "^2.4.8" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/property-provider" "^3.1.7" - "@smithy/protocol-http" "^4.1.4" - "@smithy/signature-v4" "^4.2.0" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" - "@smithy/util-middleware" "^3.0.7" +"@aws-sdk/core@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/core/-/core-3.696.0.tgz#bdf306bdc019f485738d91d8838eec877861dd26" + integrity sha512-3c9III1k03DgvRZWg8vhVmfIXPG6hAciN9MzQTzqGngzWAELZF/WONRTRQuDFixVtarQatmLHYVw/atGeA2Byw== + dependencies: + "@aws-sdk/types" "3.696.0" + "@smithy/core" "^2.5.3" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/property-provider" "^3.1.9" + "@smithy/protocol-http" "^4.1.7" + "@smithy/signature-v4" "^4.2.2" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" + "@smithy/util-middleware" "^3.0.10" fast-xml-parser "4.4.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-env@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-env/-/credential-provider-env-3.679.0.tgz#abf297714b77197a9da0d3d95a0f5687ae28e5b3" - integrity sha512-EdlTYbzMm3G7VUNAMxr9S1nC1qUNqhKlAxFU8E7cKsAe8Bp29CD5HAs3POc56AVo9GC4yRIS+/mtlZSmrckzUA== +"@aws-sdk/credential-provider-env@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-env/-/credential-provider-env-3.696.0.tgz#afad9e61cd03da404bb03e5bce83c49736b85271" + integrity sha512-T9iMFnJL7YTlESLpVFT3fg1Lkb1lD+oiaIC8KMpepb01gDUBIpj9+Y+pA/cgRWW0yRxmkDXNazAE2qQTVFGJzA== dependencies: - "@aws-sdk/core" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/property-provider" "^3.1.7" - "@smithy/types" "^3.5.0" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@smithy/property-provider" "^3.1.9" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-http@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-http/-/credential-provider-http-3.679.0.tgz#9fc29f4ec7ab52ecf394288c05295823e818d812" - integrity sha512-ZoKLubW5DqqV1/2a3TSn+9sSKg0T8SsYMt1JeirnuLJF0mCoYFUaWMyvxxKuxPoqvUsaycxKru4GkpJ10ltNBw== - dependencies: - "@aws-sdk/core" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/fetch-http-handler" "^3.2.9" - "@smithy/node-http-handler" "^3.2.4" - "@smithy/property-provider" "^3.1.7" - "@smithy/protocol-http" "^4.1.4" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" - "@smithy/util-stream" "^3.1.9" +"@aws-sdk/credential-provider-http@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-http/-/credential-provider-http-3.696.0.tgz#535756f9f427fbe851a8c1db7b0e3aaaf7790ba2" + integrity sha512-GV6EbvPi2eq1+WgY/o2RFA3P7HGmnkIzCNmhwtALFlqMroLYWKE7PSeHw66Uh1dFQeVESn0/+hiUNhu1mB0emA== + dependencies: + "@aws-sdk/core" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@smithy/fetch-http-handler" "^4.1.1" + "@smithy/node-http-handler" "^3.3.1" + "@smithy/property-provider" "^3.1.9" + "@smithy/protocol-http" "^4.1.7" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" + "@smithy/util-stream" "^3.3.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-ini@3.679.0": - version "3.679.0" - resolved 
"https://registry.yarnpkg.com/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.679.0.tgz#0115c9e4813de3fcf0bf20f6156b6bf4b62d8431" - integrity sha512-Rg7t8RwUzKcumpipG4neZqaeJ6DF+Bco1+FHn5BZB68jpvwvjBjcQUuWkxj18B6ctYHr1fkunnzeKEn/+vy7+w== - dependencies: - "@aws-sdk/core" "3.679.0" - "@aws-sdk/credential-provider-env" "3.679.0" - "@aws-sdk/credential-provider-http" "3.679.0" - "@aws-sdk/credential-provider-process" "3.679.0" - "@aws-sdk/credential-provider-sso" "3.679.0" - "@aws-sdk/credential-provider-web-identity" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/credential-provider-imds" "^3.2.4" - "@smithy/property-provider" "^3.1.7" - "@smithy/shared-ini-file-loader" "^3.1.8" - "@smithy/types" "^3.5.0" +"@aws-sdk/credential-provider-ini@3.699.0": + version "3.699.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.699.0.tgz#7919a454b05c5446d04a0d3270807046a029ee30" + integrity sha512-dXmCqjJnKmG37Q+nLjPVu22mNkrGHY8hYoOt3Jo9R2zr5MYV7s/NHsCHr+7E+BZ+tfZYLRPeB1wkpTeHiEcdRw== + dependencies: + "@aws-sdk/core" "3.696.0" + "@aws-sdk/credential-provider-env" "3.696.0" + "@aws-sdk/credential-provider-http" "3.696.0" + "@aws-sdk/credential-provider-process" "3.696.0" + "@aws-sdk/credential-provider-sso" "3.699.0" + "@aws-sdk/credential-provider-web-identity" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@smithy/credential-provider-imds" "^3.2.6" + "@smithy/property-provider" "^3.1.9" + "@smithy/shared-ini-file-loader" "^3.1.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-node@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-node/-/credential-provider-node-3.679.0.tgz#f3012b7e305aa1151c1472ece3f422f66666bc7c" - integrity sha512-E3lBtaqCte8tWs6Rkssc8sLzvGoJ10TLGvpkijOlz43wPd6xCRh1YLwg6zolf9fVFtEyUs/GsgymiASOyxhFtw== - dependencies: - "@aws-sdk/credential-provider-env" "3.679.0" - "@aws-sdk/credential-provider-http" "3.679.0" - "@aws-sdk/credential-provider-ini" "3.679.0" - "@aws-sdk/credential-provider-process" "3.679.0" - "@aws-sdk/credential-provider-sso" "3.679.0" - "@aws-sdk/credential-provider-web-identity" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/credential-provider-imds" "^3.2.4" - "@smithy/property-provider" "^3.1.7" - "@smithy/shared-ini-file-loader" "^3.1.8" - "@smithy/types" "^3.5.0" +"@aws-sdk/credential-provider-node@3.699.0": + version "3.699.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-node/-/credential-provider-node-3.699.0.tgz#6a1e32a49a7fa71d10c85a927267d1782444def1" + integrity sha512-MmEmNDo1bBtTgRmdNfdQksXu4uXe66s0p1hi1YPrn1h59Q605eq/xiWbGL6/3KdkViH6eGUuABeV2ODld86ylg== + dependencies: + "@aws-sdk/credential-provider-env" "3.696.0" + "@aws-sdk/credential-provider-http" "3.696.0" + "@aws-sdk/credential-provider-ini" "3.699.0" + "@aws-sdk/credential-provider-process" "3.696.0" + "@aws-sdk/credential-provider-sso" "3.699.0" + "@aws-sdk/credential-provider-web-identity" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@smithy/credential-provider-imds" "^3.2.6" + "@smithy/property-provider" "^3.1.9" + "@smithy/shared-ini-file-loader" "^3.1.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-process@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-process/-/credential-provider-process-3.679.0.tgz#a06b5193cdad2c14382708bcd44d487af52b11dc" - integrity 
sha512-u/p4TV8kQ0zJWDdZD4+vdQFTMhkDEJFws040Gm113VHa/Xo1SYOjbpvqeuFoz6VmM0bLvoOWjxB9MxnSQbwKpQ== +"@aws-sdk/credential-provider-process@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-process/-/credential-provider-process-3.696.0.tgz#45da7b948aa40987b413c7c0d4a8125bf1433651" + integrity sha512-mL1RcFDe9sfmyU5K1nuFkO8UiJXXxLX4JO1gVaDIOvPqwStpUAwi3A1BoeZhWZZNQsiKI810RnYGo0E0WB/hUA== dependencies: - "@aws-sdk/core" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/property-provider" "^3.1.7" - "@smithy/shared-ini-file-loader" "^3.1.8" - "@smithy/types" "^3.5.0" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@smithy/property-provider" "^3.1.9" + "@smithy/shared-ini-file-loader" "^3.1.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-sso@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.679.0.tgz#ad07de8f9a0c3e5fe7bd660e1847867643ab480e" - integrity sha512-SAtWonhi9asxn0ukEbcE81jkyanKgqpsrtskvYPpO9Z9KOednM4Cqt6h1bfcS9zaHjN2zu815Gv8O7WiV+F/DQ== - dependencies: - "@aws-sdk/client-sso" "3.679.0" - "@aws-sdk/core" "3.679.0" - "@aws-sdk/token-providers" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/property-provider" "^3.1.7" - "@smithy/shared-ini-file-loader" "^3.1.8" - "@smithy/types" "^3.5.0" +"@aws-sdk/credential-provider-sso@3.699.0": + version "3.699.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.699.0.tgz#515e2ecd407bace3141b8b192505631de415667e" + integrity sha512-Ekp2cZG4pl9D8+uKWm4qO1xcm8/MeiI8f+dnlZm8aQzizeC+aXYy9GyoclSf6daK8KfRPiRfM7ZHBBL5dAfdMA== + dependencies: + "@aws-sdk/client-sso" "3.696.0" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/token-providers" "3.699.0" + "@aws-sdk/types" "3.696.0" + "@smithy/property-provider" "^3.1.9" + "@smithy/shared-ini-file-loader" "^3.1.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/credential-provider-web-identity@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.679.0.tgz#5871c44e5846e7c93810fd033224c00493db65a3" - integrity sha512-a74tLccVznXCaBefWPSysUcLXYJiSkeUmQGtalNgJ1vGkE36W5l/8czFiiowdWdKWz7+x6xf0w+Kjkjlj42Ung== +"@aws-sdk/credential-provider-web-identity@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.696.0.tgz#3f97c00bd3bc7cfd988e098af67ff7c8392ce188" + integrity sha512-XJ/CVlWChM0VCoc259vWguFUjJDn/QwDqHwbx+K9cg3v6yrqXfK5ai+p/6lx0nQpnk4JzPVeYYxWRpaTsGC9rg== dependencies: - "@aws-sdk/core" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/property-provider" "^3.1.7" - "@smithy/types" "^3.5.0" + "@aws-sdk/core" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@smithy/property-provider" "^3.1.9" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/middleware-host-header@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-host-header/-/middleware-host-header-3.679.0.tgz#1eabe42250c57a9e28742dd04786781573faad1a" - integrity sha512-y176HuQ8JRY3hGX8rQzHDSbCl9P5Ny9l16z4xmaiLo+Qfte7ee4Yr3yaAKd7GFoJ3/Mhud2XZ37fR015MfYl2w== +"@aws-sdk/middleware-host-header@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-host-header/-/middleware-host-header-3.696.0.tgz#20aae0efeb973ca1a6db1b1014acbcdd06ad472e" + integrity 
sha512-zELJp9Ta2zkX7ELggMN9qMCgekqZhFC5V2rOr4hJDEb/Tte7gpfKSObAnw/3AYiVqt36sjHKfdkoTsuwGdEoDg== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/protocol-http" "^4.1.4" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/protocol-http" "^4.1.7" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/middleware-logger@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-logger/-/middleware-logger-3.679.0.tgz#cb0f205ddb5341d8327fc9ca1897bf06526c1896" - integrity sha512-0vet8InEj7nvIvGKk+ch7bEF5SyZ7Us9U7YTEgXPrBNStKeRUsgwRm0ijPWWd0a3oz2okaEwXsFl7G/vI0XiEA== +"@aws-sdk/middleware-logger@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-logger/-/middleware-logger-3.696.0.tgz#79d68b7e5ba181511ade769b11165bfb7527181e" + integrity sha512-KhkHt+8AjCxcR/5Zp3++YPJPpFQzxpr+jmONiT/Jw2yqnSngZ0Yspm5wGoRx2hS1HJbyZNuaOWEGuJoxLeBKfA== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/middleware-recursion-detection@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.679.0.tgz#3542de5baa466abffbfe5ee485fd87f60d5f917e" - integrity sha512-sQoAZFsQiW/LL3DfKMYwBoGjYDEnMbA9WslWN8xneCmBAwKo6IcSksvYs23PP8XMIoBGe2I2J9BSr654XWygTQ== +"@aws-sdk/middleware-recursion-detection@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-recursion-detection/-/middleware-recursion-detection-3.696.0.tgz#aa437d645d74cb785905162266741125c18f182a" + integrity sha512-si/maV3Z0hH7qa99f9ru2xpS5HlfSVcasRlNUXKSDm611i7jFMWwGNLUOXFAOLhXotPX5G3Z6BLwL34oDeBMug== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/protocol-http" "^4.1.4" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/protocol-http" "^4.1.7" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/middleware-sdk-ec2@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-sdk-ec2/-/middleware-sdk-ec2-3.679.0.tgz#34cdb975a7b7b5e7e998b08543816d2175d29ee2" - integrity sha512-29mxCB6avpcDluuLorL4Fd1QoawaqpGRPgiG2cDE29lwyg50WPNl3NOugvkcNxYqd3ZHHz4fpyPf/3qKCBmMMQ== - dependencies: - "@aws-sdk/types" "3.679.0" - "@aws-sdk/util-format-url" "3.679.0" - "@smithy/middleware-endpoint" "^3.1.4" - "@smithy/protocol-http" "^4.1.4" - "@smithy/signature-v4" "^4.2.0" - "@smithy/smithy-client" "^3.4.0" - "@smithy/types" "^3.5.0" +"@aws-sdk/middleware-sdk-ec2@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-sdk-ec2/-/middleware-sdk-ec2-3.696.0.tgz#5cffe9a06ae9205a733b4ebff759f043b9c00aa4" + integrity sha512-HVMpblaaTQ1Ysku2nR6+N22aEgT7CDot+vsFutHNJCBPl+eEON5exp7IvsFC7sFCWmSfnMHTHtmmj5YIYHO1gQ== + dependencies: + "@aws-sdk/types" "3.696.0" + "@aws-sdk/util-format-url" "3.696.0" + "@smithy/middleware-endpoint" "^3.2.3" + "@smithy/protocol-http" "^4.1.7" + "@smithy/signature-v4" "^4.2.2" + "@smithy/smithy-client" "^3.4.4" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/middleware-sdk-route53@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-sdk-route53/-/middleware-sdk-route53-3.679.0.tgz#99e4440cac1d9425d24937ca30d76e9fd0aba156" - integrity sha512-jm7NMl2hRJz9o18+ayMfvGSvlUMeYsfwM0UXKHg9mFUABGH6VUQ7Je2hwaG8inyNHBe5iAqZSenvrikSQVg98g== +"@aws-sdk/middleware-sdk-route53@3.696.0": + version "3.696.0" + resolved 
"https://registry.yarnpkg.com/@aws-sdk/middleware-sdk-route53/-/middleware-sdk-route53-3.696.0.tgz#12d01e5351f2c4f18dadfafec6a16fd6d51e9b34" + integrity sha512-7pWE/5LSuIiL7z9YcVOi86mPiuIiNPqo7IdKuw8xckw8WK8bLtWQy4i9nRNxOQilKdZzVVxziJOyqLPfLOc2VQ== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/middleware-user-agent@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.679.0.tgz#11e410967405139dee2bf69ca728be76f4e617ef" - integrity sha512-4hdeXhPDURPqQLPd9jCpUEo9fQITXl3NM3W1MwcJpE0gdUM36uXkQOYsTPeeU/IRCLVjK8Htlh2oCaM9iJrLCA== - dependencies: - "@aws-sdk/core" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@aws-sdk/util-endpoints" "3.679.0" - "@smithy/core" "^2.4.8" - "@smithy/protocol-http" "^4.1.4" - "@smithy/types" "^3.5.0" +"@aws-sdk/middleware-user-agent@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/middleware-user-agent/-/middleware-user-agent-3.696.0.tgz#626c89300f6b3af5aefc1cb6d9ac19eebf8bc97d" + integrity sha512-Lvyj8CTyxrHI6GHd2YVZKIRI5Fmnugt3cpJo0VrKKEgK5zMySwEZ1n4dqPK6czYRWKd5+WnYHYAuU+Wdk6Jsjw== + dependencies: + "@aws-sdk/core" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@aws-sdk/util-endpoints" "3.696.0" + "@smithy/core" "^2.5.3" + "@smithy/protocol-http" "^4.1.7" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/region-config-resolver@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/region-config-resolver/-/region-config-resolver-3.679.0.tgz#d205dbaea8385aaf05e637fb7cb095c60bc708be" - integrity sha512-Ybx54P8Tg6KKq5ck7uwdjiKif7n/8g1x+V0V9uTjBjRWqaIgiqzXwKWoPj6NCNkE7tJNtqI4JrNxp/3S3HvmRw== +"@aws-sdk/region-config-resolver@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/region-config-resolver/-/region-config-resolver-3.696.0.tgz#146c428702c09db75df5234b5d40ce49d147d0cf" + integrity sha512-7EuH142lBXjI8yH6dVS/CZeiK/WZsmb/8zP6bQbVYpMrppSTgB3MzZZdxVZGzL5r8zPQOU10wLC4kIMy0qdBVQ== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/types" "^3.7.1" "@smithy/util-config-provider" "^3.0.0" - "@smithy/util-middleware" "^3.0.7" + "@smithy/util-middleware" "^3.0.10" tslib "^2.6.2" -"@aws-sdk/token-providers@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/token-providers/-/token-providers-3.679.0.tgz#7ec462d93941dd3cfdc245104ad32971f6ebc4f6" - integrity sha512-1/+Zso/x2jqgutKixYFQEGli0FELTgah6bm7aB+m2FAWH4Hz7+iMUsazg6nSWm714sG9G3h5u42Dmpvi9X6/hA== +"@aws-sdk/token-providers@3.699.0": + version "3.699.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/token-providers/-/token-providers-3.699.0.tgz#354990dd52d651c1f7a64c4c0894c868cdc81de2" + integrity sha512-kuiEW9DWs7fNos/SM+y58HCPhcIzm1nEZLhe2/7/6+TvAYLuEWURYsbK48gzsxXlaJ2k/jGY3nIsA7RptbMOwA== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/property-provider" "^3.1.7" - "@smithy/shared-ini-file-loader" "^3.1.8" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/property-provider" "^3.1.9" + "@smithy/shared-ini-file-loader" "^3.1.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/types@3.679.0", "@aws-sdk/types@^3.222.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/types/-/types-3.679.0.tgz#3737bb0f190add9e788b838a24cd5d8106dbed4f" - integrity 
sha512-NwVq8YvInxQdJ47+zz4fH3BRRLC6lL+WLkvr242PVBbUOLRyK/lkwHlfiKUoeVIMyK5NF+up6TRg71t/8Bny6Q== +"@aws-sdk/types@3.696.0", "@aws-sdk/types@^3.222.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/types/-/types-3.696.0.tgz#559c3df74dc389b6f40ba6ec6daffeab155330cd" + integrity sha512-9rTvUJIAj5d3//U5FDPWGJ1nFJLuWb30vugGOrWk7aNZ6y9tuA3PI7Cc9dP8WEXKVyK1vuuk8rSFP2iqXnlgrw== dependencies: - "@smithy/types" "^3.5.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/util-endpoints@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-endpoints/-/util-endpoints-3.679.0.tgz#b249ad8b4289e634cb5dfb3873a70b7aecbf323f" - integrity sha512-YL6s4Y/1zC45OvddvgE139fjeWSKKPgLlnfrvhVL7alNyY9n7beR4uhoDpNrt5mI6sn9qiBF17790o+xLAXjjg== +"@aws-sdk/util-endpoints@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-endpoints/-/util-endpoints-3.696.0.tgz#79e18714419a423a64094381b849214499f00577" + integrity sha512-T5s0IlBVX+gkb9g/I6CLt4yAZVzMSiGnbUqWihWsHvQR1WOoIcndQy/Oz/IJXT9T2ipoy7a80gzV6a5mglrioA== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/types" "^3.5.0" - "@smithy/util-endpoints" "^2.1.3" + "@aws-sdk/types" "3.696.0" + "@smithy/types" "^3.7.1" + "@smithy/util-endpoints" "^2.1.6" tslib "^2.6.2" -"@aws-sdk/util-format-url@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-format-url/-/util-format-url-3.679.0.tgz#5defda8e1601d5d4c4afe694348ad6d7e2420a9b" - integrity sha512-pqV1b/hJ/kumtF8AwObJ7bsGgs/2zuAdZtalSD8Pu4jdjOji3IBwP79giAHyhVwoXaMjkpG3mG4ldn9CVtzZJA== +"@aws-sdk/util-format-url@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-format-url/-/util-format-url-3.696.0.tgz#9d38fc9c49009631b181b7547604ba8dc222053c" + integrity sha512-R6yK1LozUD1GdAZRPhNsIow6VNFJUTyyoIar1OCWaknlucBMcq7musF3DN3TlORBwfFMj5buHc2ET9OtMtzvuA== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/querystring-builder" "^3.0.7" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/querystring-builder" "^3.0.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" "@aws-sdk/util-locate-window@^3.0.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-locate-window/-/util-locate-window-3.679.0.tgz#8d5898624691e12ccbad839e103562002bbec85e" - integrity sha512-zKTd48/ZWrCplkXpYDABI74rQlbR0DNHs8nH95htfSLj9/mWRSwaGptoxwcihaq/77vi/fl2X3y0a1Bo8bt7RA== + version "3.693.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-locate-window/-/util-locate-window-3.693.0.tgz#1160f6d055cf074ca198eb8ecf89b6311537ad6c" + integrity sha512-ttrag6haJLWABhLqtg1Uf+4LgHWIMOVSYL+VYZmAp2v4PUGOwWmWQH0Zk8RM7YuQcLfH/EoR72/Yxz6A4FKcuw== dependencies: tslib "^2.6.2" -"@aws-sdk/util-user-agent-browser@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.679.0.tgz#bbaa5a8771c8a16388cd3cd934bb84a641ce907d" - integrity sha512-CusSm2bTBG1kFypcsqU8COhnYc6zltobsqs3nRrvYqYaOqtMnuE46K4XTWpnzKgwDejgZGOE+WYyprtAxrPvmQ== +"@aws-sdk/util-user-agent-browser@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-browser/-/util-user-agent-browser-3.696.0.tgz#2034765c81313d5e50783662332d35ec041755a0" + integrity sha512-Z5rVNDdmPOe6ELoM5AhF/ja5tSjbe6ctSctDPb0JdDf4dT0v2MfwhJKzXju2RzX8Es/77Glh7MlaXLE0kCB9+Q== dependencies: - "@aws-sdk/types" "3.679.0" - "@smithy/types" "^3.5.0" + "@aws-sdk/types" "3.696.0" + "@smithy/types" "^3.7.1" bowser "^2.11.0" tslib "^2.6.2" 
-"@aws-sdk/util-user-agent-node@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.679.0.tgz#0d1cd6eba18bfe6d0106d78fc7aa9b74889c462b" - integrity sha512-Bw4uXZ+NU5ed6TNfo4tBbhBSW+2eQxXYjYBGl5gLUNUpg2pDFToQAP6rXBFiwcG52V2ny5oLGiD82SoYuYkAVg== +"@aws-sdk/util-user-agent-node@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.696.0.tgz#3267119e2be02185f3b4e0beb0cc495d392260b4" + integrity sha512-KhKqcfyXIB0SCCt+qsu4eJjsfiOrNzK5dCV7RAW2YIpp+msxGUUX0NdRE9rkzjiv+3EMktgJm3eEIS+yxtlVdQ== dependencies: - "@aws-sdk/middleware-user-agent" "3.679.0" - "@aws-sdk/types" "3.679.0" - "@smithy/node-config-provider" "^3.1.8" - "@smithy/types" "^3.5.0" + "@aws-sdk/middleware-user-agent" "3.696.0" + "@aws-sdk/types" "3.696.0" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@aws-sdk/xml-builder@3.679.0": - version "3.679.0" - resolved "https://registry.yarnpkg.com/@aws-sdk/xml-builder/-/xml-builder-3.679.0.tgz#96ccb7a4a4d4faa881d1fec5fc0554dc726843b5" - integrity sha512-nPmhVZb39ty5bcQ7mAwtjezBcsBqTYZ9A2D9v/lE92KCLdu5RhSkPH7O71ZqbZx1mUSg9fAOxHPiG79U5VlpLQ== +"@aws-sdk/xml-builder@3.696.0": + version "3.696.0" + resolved "https://registry.yarnpkg.com/@aws-sdk/xml-builder/-/xml-builder-3.696.0.tgz#ff3e37ee23208f9986f20d326cc278c0ee341164" + integrity sha512-dn1mX+EeqivoLYnY7p2qLrir0waPnCgS/0YdRCAVU2x14FgfUYCH6Im3w3oi2dMwhxfKY5lYVB5NKvZu7uI9lQ== dependencies: - "@smithy/types" "^3.5.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" "@babel/code-frame@^7.0.0", "@babel/code-frame@^7.22.13", "@babel/code-frame@^7.25.9", "@babel/code-frame@^7.26.0": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.0.tgz#9374b5cd068d128dac0b94ff482594273b1c2815" - integrity sha512-INCKxTtbXtcNbUZ3YXutwMpEleqttcswhAdee7dhuoVrD2cnuc3PqtERBtxkX5nziX9vnBL8WXmSGwv8CuPV6g== + version "7.26.2" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" + integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== dependencies: "@babel/helper-validator-identifier" "^7.25.9" js-tokens "^4.0.0" picocolors "^1.0.0" "@babel/compat-data@^7.20.5", "@babel/compat-data@^7.22.6", "@babel/compat-data@^7.25.9": - version "7.26.0" - resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.0.tgz#f02ba6d34e88fadd5e8861e8b38902f43cc1c819" - integrity sha512-qETICbZSLe7uXv9VE8T/RWOdIE5qqyTucOt4zLYMafj2MRO271VGgLd4RACJMeBO37UPWhXiKMBk7YlJ0fOzQA== + version "7.26.2" + resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.26.2.tgz#278b6b13664557de95b8f35b90d96785850bb56e" + integrity sha512-Z0WgzSEa+aUcdiJuCIqgujCshpMWgUpgOxXotrYPSA53hA3qopNaqcJpyr0hVb1FeWdnqFA35/fUtXgBK8srQg== -"@babel/core@^7.0.0", "@babel/core@^7.23.9", "@babel/core@^7.25.2": +"@babel/core@^7.0.0", "@babel/core@^7.23.9", "@babel/core@^7.26.0": version "7.26.0" resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.26.0.tgz#d78b6023cc8f3114ccf049eb219613f74a747b40" integrity sha512-i1SLeK+DzNnQ3LL/CswPCa/E5u4lh1k6IAEphON8F+cXt0t9euTshDru0q7/IqMa1PMPz5RnHuHscF8/ZJsStg== @@ -603,11 +603,11 @@ source-map "^0.5.0" "@babel/generator@^7.23.0", "@babel/generator@^7.25.9", "@babel/generator@^7.26.0": - version "7.26.0" - resolved 
"https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.0.tgz#505cc7c90d92513f458a477e5ef0703e7c91b8d7" - integrity sha512-/AIkAmInnWwgEAJGQr9vY0c66Mj6kjkE2ZPB1PurTRaRAh3U+J45sAQMjQDJdh4WbR3l0x5xkimXBKyBXXAu2w== + version "7.26.2" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.26.2.tgz#87b75813bec87916210e5e01939a4c823d6bb74f" + integrity sha512-zevQbhbau95nkoxSq3f/DC/SC+EEOUZd3DYqfSkMhY2/wfSeaHV1Ew4vk8e+x8lja31IbyuUa2uQ3JONqKbysw== dependencies: - "@babel/parser" "^7.26.0" + "@babel/parser" "^7.26.2" "@babel/types" "^7.26.0" "@jridgewell/gen-mapping" "^0.3.5" "@jridgewell/trace-mapping" "^0.3.25" @@ -661,10 +661,10 @@ regexpu-core "^6.1.1" semver "^6.3.1" -"@babel/helper-define-polyfill-provider@^0.6.2": - version "0.6.2" - resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.2.tgz#18594f789c3594acb24cfdb4a7f7b7d2e8bd912d" - integrity sha512-LV76g+C502biUK6AyZ3LK10vDpDyCzZnhZFXkH1L75zHPj68+qc8Zfpx2th+gzwA2MzyK+1g/3EPl62yFnVttQ== +"@babel/helper-define-polyfill-provider@^0.6.2", "@babel/helper-define-polyfill-provider@^0.6.3": + version "0.6.3" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.3.tgz#f4f2792fae2ef382074bc2d713522cf24e6ddb21" + integrity sha512-HK7Bi+Hj6H+VTHA3ZvBis7V/6hu9QuTrnMXNybfUf2iiuU/N97I8VjB+KbhFF8Rld/Lx5MzoCwPCpPjfK+n8Cg== dependencies: "@babel/helper-compilation-targets" "^7.22.6" "@babel/helper-plugin-utils" "^7.22.5" @@ -804,10 +804,10 @@ "@babel/template" "^7.25.9" "@babel/types" "^7.26.0" -"@babel/parser@^7.1.0", "@babel/parser@^7.20.5", "@babel/parser@^7.20.7", "@babel/parser@^7.23.0", "@babel/parser@^7.23.9", "@babel/parser@^7.25.3", "@babel/parser@^7.25.9", "@babel/parser@^7.26.0": - version "7.26.1" - resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.1.tgz#44e02499960df2cdce2c456372a3e8e0c3c5c975" - integrity sha512-reoQYNiAJreZNsJzyrDNzFQ+IQ5JFiIzAHJg9bn94S3l+4++J7RsIhNMoB+lgP/9tpmiAQqspv+xfdxTSzREOw== +"@babel/parser@^7.1.0", "@babel/parser@^7.20.5", "@babel/parser@^7.20.7", "@babel/parser@^7.23.0", "@babel/parser@^7.23.9", "@babel/parser@^7.25.3", "@babel/parser@^7.25.9", "@babel/parser@^7.26.0", "@babel/parser@^7.26.2": + version "7.26.2" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.26.2.tgz#fd7b6f487cfea09889557ef5d4eeb9ff9a5abd11" + integrity sha512-DWMCZH9WA4Maitz2q21SRKHo9QXZxkDsbNZoVD62gusNtNBBqDg9i7uOhASfTfIGNzW+O+r7+jAlM8dwphcJKQ== dependencies: "@babel/types" "^7.26.0" @@ -1053,14 +1053,14 @@ dependencies: "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-react-jsx-self@^7.0.0", "@babel/plugin-transform-react-jsx-self@^7.24.7": +"@babel/plugin-transform-react-jsx-self@^7.0.0", "@babel/plugin-transform-react-jsx-self@^7.25.9": version "7.25.9" resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.25.9.tgz#c0b6cae9c1b73967f7f9eb2fca9536ba2fad2858" integrity sha512-y8quW6p0WHkEhmErnfe58r7x0A70uKphQm8Sp8cV7tjNQwK56sNVK0M73LK3WuYmsuyrftut4xAkjjgU0twaMg== dependencies: "@babel/helper-plugin-utils" "^7.25.9" -"@babel/plugin-transform-react-jsx-source@^7.0.0", "@babel/plugin-transform-react-jsx-source@^7.24.7": +"@babel/plugin-transform-react-jsx-source@^7.0.0", "@babel/plugin-transform-react-jsx-source@^7.25.9": version "7.25.9" resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.25.9.tgz#4c6b8daa520b5f155b5fb55547d7c9fa91417503" integrity sha512-+iqjT8xmXhhYv4/uiYd8FNQsraMFZIfxVSqxxVSZP0WbbSAWvBXAul0m/zu+7Vv4O/3WtApy9pmaTMiumEZgfg== @@ -1305,16 +1305,16 @@ optionalDependencies: global-agent "^3.0.0" -"@emotion/babel-plugin@^11.12.0": - version "11.12.0" - resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.12.0.tgz#7b43debb250c313101b3f885eba634f1d723fcc2" - integrity sha512-y2WQb+oP8Jqvvclh8Q55gLUyb7UFvgv7eJfsj7td5TToBrIUtPay2kMrZi4xjq9qw2vD0ZR5fSho0yqoFgX7Rw== +"@emotion/babel-plugin@^11.13.5": + version "11.13.5" + resolved "https://registry.yarnpkg.com/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz#eab8d65dbded74e0ecfd28dc218e75607c4e7bc0" + integrity sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ== dependencies: "@babel/helper-module-imports" "^7.16.7" "@babel/runtime" "^7.18.3" "@emotion/hash" "^0.9.2" "@emotion/memoize" "^0.9.0" - "@emotion/serialize" "^1.2.0" + "@emotion/serialize" "^1.3.3" babel-plugin-macros "^3.1.0" convert-source-map "^1.5.0" escape-string-regexp "^4.0.0" @@ -1322,14 +1322,14 @@ source-map "^0.5.7" stylis "4.2.0" -"@emotion/cache@^11.11.0", "@emotion/cache@^11.13.0": - version "11.13.1" - resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.13.1.tgz#fecfc54d51810beebf05bf2a161271a1a91895d7" - integrity sha512-iqouYkuEblRcXmylXIwwOodiEK5Ifl7JcX7o6V4jI3iW4mLXX3dmt5xwBtIkJiQEXFAI+pC8X0i67yiPkH9Ucw== +"@emotion/cache@^11.11.0", "@emotion/cache@^11.13.5": + version "11.13.5" + resolved "https://registry.yarnpkg.com/@emotion/cache/-/cache-11.13.5.tgz#e78dad0489e1ed7572507ba8ed9d2130529e4266" + integrity sha512-Z3xbtJ+UcK76eWkagZ1onvn/wAVb1GOMuR15s30Fm2wrMgC7jzpnO2JZXr4eujTTqoQFUrZIw/rT0c6Zzjca1g== dependencies: "@emotion/memoize" "^0.9.0" "@emotion/sheet" "^1.4.0" - "@emotion/utils" "^1.4.0" + "@emotion/utils" "^1.4.2" "@emotion/weak-memoize" "^0.4.0" stylis "4.2.0" @@ -1351,28 +1351,28 @@ integrity sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ== "@emotion/react@^11.10.5": - version "11.13.3" - resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.13.3.tgz#a69d0de2a23f5b48e0acf210416638010e4bd2e4" - integrity sha512-lIsdU6JNrmYfJ5EbUCf4xW1ovy5wKQ2CkPRM4xogziOxH1nXxBSjpC9YqbFAP7circxMfYp+6x676BqWcEiixg== + version "11.13.5" + resolved "https://registry.yarnpkg.com/@emotion/react/-/react-11.13.5.tgz#fc818ff5b13424f86501ba4d0740f343ae20b8d9" + integrity sha512-6zeCUxUH+EPF1s+YF/2hPVODeV/7V07YU5x+2tfuRL8MdW6rv5vb2+CBEGTGwBdux0OIERcOS+RzxeK80k2DsQ== dependencies: "@babel/runtime" "^7.18.3" - "@emotion/babel-plugin" "^11.12.0" - "@emotion/cache" "^11.13.0" - "@emotion/serialize" "^1.3.1" + "@emotion/babel-plugin" "^11.13.5" + "@emotion/cache" "^11.13.5" + "@emotion/serialize" "^1.3.3" "@emotion/use-insertion-effect-with-fallbacks" "^1.1.0" - "@emotion/utils" "^1.4.0" + "@emotion/utils" "^1.4.2" "@emotion/weak-memoize" "^0.4.0" hoist-non-react-statics "^3.3.1" -"@emotion/serialize@^1.2.0", "@emotion/serialize@^1.3.0", "@emotion/serialize@^1.3.1": - version "1.3.2" - resolved "https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.3.2.tgz#e1c1a2e90708d5d85d81ccaee2dfeb3cc0cccf7a" - integrity sha512-grVnMvVPK9yUVE6rkKfAJlYZgo0cu3l9iMC77V7DW6E1DUIrU68pSEXRmFZFOFB1QFo57TncmOcvcbMDWsL4yA== +"@emotion/serialize@^1.3.3": + version "1.3.3" + resolved 
"https://registry.yarnpkg.com/@emotion/serialize/-/serialize-1.3.3.tgz#d291531005f17d704d0463a032fe679f376509e8" + integrity sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA== dependencies: "@emotion/hash" "^0.9.2" "@emotion/memoize" "^0.9.0" "@emotion/unitless" "^0.10.0" - "@emotion/utils" "^1.4.1" + "@emotion/utils" "^1.4.2" csstype "^3.0.2" "@emotion/sheet@^1.4.0": @@ -1381,16 +1381,16 @@ integrity sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg== "@emotion/styled@^11.10.5": - version "11.13.0" - resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.13.0.tgz#633fd700db701472c7a5dbef54d6f9834e9fb190" - integrity sha512-tkzkY7nQhW/zC4hztlwucpT8QEZ6eUzpXDRhww/Eej4tFfO0FxQYWRyg/c5CCXa4d/f174kqeXYjuQRnhzf6dA== + version "11.13.5" + resolved "https://registry.yarnpkg.com/@emotion/styled/-/styled-11.13.5.tgz#0fa6602227414c5e42cf267506e3c35bae655df9" + integrity sha512-gnOQ+nGLPvDXgIx119JqGalys64lhMdnNQA9TMxhDA4K0Hq5+++OE20Zs5GxiCV9r814xQ2K5WmtofSpHVW6BQ== dependencies: "@babel/runtime" "^7.18.3" - "@emotion/babel-plugin" "^11.12.0" + "@emotion/babel-plugin" "^11.13.5" "@emotion/is-prop-valid" "^1.3.0" - "@emotion/serialize" "^1.3.0" + "@emotion/serialize" "^1.3.3" "@emotion/use-insertion-effect-with-fallbacks" "^1.1.0" - "@emotion/utils" "^1.4.0" + "@emotion/utils" "^1.4.2" "@emotion/unitless@^0.10.0": version "0.10.0" @@ -1402,10 +1402,10 @@ resolved "https://registry.yarnpkg.com/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.1.0.tgz#1a818a0b2c481efba0cf34e5ab1e0cb2dcb9dfaf" integrity sha512-+wBOcIV5snwGgI2ya3u99D7/FJquOIniQT1IKyDsBmEgwvpxMNeS65Oib7OnE2d2aY+3BU4OiH+0Wchf8yk3Hw== -"@emotion/utils@^1.4.0", "@emotion/utils@^1.4.1": - version "1.4.1" - resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.4.1.tgz#b3adbb43de12ee2149541c4f1337d2eb7774f0ad" - integrity sha512-BymCXzCG3r72VKJxaYVwOXATqXIZ85cuvg0YOUDxMGNrKc1DJRZk8MgV5wyXRyEayIMd4FuXJIUgTBXvDNW5cA== +"@emotion/utils@^1.4.2": + version "1.4.2" + resolved "https://registry.yarnpkg.com/@emotion/utils/-/utils-1.4.2.tgz#6df6c45881fcb1c412d6688a311a98b7f59c1b52" + integrity sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA== "@emotion/weak-memoize@^0.4.0": version "0.4.0" @@ -1651,7 +1651,7 @@ resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz#acad351d582d157bb145535db2a6ff53dd514b5c" integrity sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw== -"@eslint-community/eslint-utils@^4.1.2", "@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0": +"@eslint-community/eslint-utils@^4.1.2", "@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0", "@eslint-community/eslint-utils@^4.4.1": version "4.4.1" resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.1.tgz#d1145bf2c20132d6400495d6df4bf59362fd9d56" integrity sha512-s3O3waFUrMV8P/XaF/+ZTp1X9XBZW1a4B97ZnjQF2KYWaFD2A8KyFBsrsfSjEmjn3RGWAIuvlneuZm3CUK3jbA== @@ -1967,9 +1967,9 @@ integrity sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA== "@ipld/dag-cbor@^9.2.1": - version "9.2.1" - resolved "https://registry.yarnpkg.com/@ipld/dag-cbor/-/dag-cbor-9.2.1.tgz#e61f413770bb0fb27ffafa9577049869272d2056" - integrity sha512-nyY48yE7r3dnJVlxrdaimrbloh4RokQaNRdI//btfTkcTEZbpmSrbYcBQ4VKTf8ZxXAOUJy4VsRpkJo+y9RTnA== + 
version "9.2.2" + resolved "https://registry.yarnpkg.com/@ipld/dag-cbor/-/dag-cbor-9.2.2.tgz#e6f5f5bd1e4f290f2285b51fc969ef806484603a" + integrity sha512-uIEOuruCqKTP50OBWwgz4Js2+LhiBQaxc57cnP71f45b1mHEAo1OCR1Zn/TbvSW/mV1x+JqhacIktkKyaYqhCw== dependencies: cborg "^4.0.0" multiformats "^13.1.0" @@ -2059,18 +2059,18 @@ resolved "https://registry.yarnpkg.com/@leichtgewicht/ip-codec/-/ip-codec-2.0.5.tgz#4fc56c15c580b9adb7dc3c333a134e540b44bfb1" integrity sha512-Vo+PSpZG2/fmgmiNzYK9qWRh8h/CHrwD0mo1h1DzL4yzHNSfWYujGTYsWGreD000gcgmZ7K4Ys6Tx9TxtsKdDw== -"@libp2p/circuit-relay-v2@^3.1.0": - version "3.1.0" - resolved "https://registry.yarnpkg.com/@libp2p/circuit-relay-v2/-/circuit-relay-v2-3.1.0.tgz#bc5b10f19aa3ee1349c3c7f7e482c4ea0a0cd54b" - integrity sha512-g9AdFhT93P8Uc7sOKeAdULDKF+Tf/aGwnECWZMRo3GFIsvpbd06VdmnjqGmF9xSdll0NWPe8EwhI098rMRd7OQ== - dependencies: - "@libp2p/crypto" "^5.0.6" - "@libp2p/interface" "^2.2.0" - "@libp2p/interface-internal" "^2.0.10" - "@libp2p/peer-collections" "^6.0.10" - "@libp2p/peer-id" "^5.0.7" - "@libp2p/peer-record" "^8.0.10" - "@libp2p/utils" "^6.1.3" +"@libp2p/circuit-relay-v2@^3.1.3": + version "3.1.3" + resolved "https://registry.yarnpkg.com/@libp2p/circuit-relay-v2/-/circuit-relay-v2-3.1.3.tgz#08617b17c5fa002cf3dace58de3ed0f1c1c4f4fd" + integrity sha512-tdPaNK4ut9FU5uwABf+lj01eul3uVDUyAdlcpnIRgkRTOMZ02C06nyaIFSeHD5ykpyZpDIbLPHHBLd5kRYRfYg== + dependencies: + "@libp2p/crypto" "^5.0.7" + "@libp2p/interface" "^2.2.1" + "@libp2p/interface-internal" "^2.1.1" + "@libp2p/peer-collections" "^6.0.12" + "@libp2p/peer-id" "^5.0.8" + "@libp2p/peer-record" "^8.0.12" + "@libp2p/utils" "^6.2.1" "@multiformats/multiaddr" "^12.2.3" "@multiformats/multiaddr-matcher" "^1.3.0" any-signal "^4.1.1" @@ -2084,12 +2084,12 @@ uint8arraylist "^2.4.8" uint8arrays "^5.1.0" -"@libp2p/crypto@^5.0.0", "@libp2p/crypto@^5.0.5", "@libp2p/crypto@^5.0.6": - version "5.0.6" - resolved "https://registry.yarnpkg.com/@libp2p/crypto/-/crypto-5.0.6.tgz#3141bec0e59eea51c729e7bcc6f06d09118f5e6b" - integrity sha512-5mD/riNxUuSOerk3aPXUUMN96lwZsrU33lp97ySfffloh2WhLZcjVJszibBgIP7DP5nqmSOWY9++rqrBuYHvnQ== +"@libp2p/crypto@^5.0.0", "@libp2p/crypto@^5.0.5", "@libp2p/crypto@^5.0.7": + version "5.0.7" + resolved "https://registry.yarnpkg.com/@libp2p/crypto/-/crypto-5.0.7.tgz#24c9576fb8754f4d199ab47849e8eed923d48abf" + integrity sha512-hv0rv/BPBsmSV5GBtaLZpOEv1LsA+Ub0BEDnEvSdB0ZbZ3Fcdlt5HTaJ2jYz4lx2T7KWTFQa9i1elmlGxwuJNg== dependencies: - "@libp2p/interface" "^2.2.0" + "@libp2p/interface" "^2.2.1" "@noble/curves" "^1.4.0" "@noble/hashes" "^1.4.0" asn1js "^3.0.5" @@ -2098,17 +2098,17 @@ uint8arraylist "^2.4.8" uint8arrays "^5.1.0" -"@libp2p/identify@^3.0.10": - version "3.0.10" - resolved "https://registry.yarnpkg.com/@libp2p/identify/-/identify-3.0.10.tgz#d7c2bb423ab484ee2fd5d7ea8610ab67e2e64de3" - integrity sha512-IeFUojzx90j0M7/WjxLHnoaPKG5AksRQrIzLkpJtMeBL+TA9rMLW1n2HM8SD3EGsHV1vDTlkx0e0PHbFVtTnnA== - dependencies: - "@libp2p/crypto" "^5.0.6" - "@libp2p/interface" "^2.2.0" - "@libp2p/interface-internal" "^2.0.10" - "@libp2p/peer-id" "^5.0.7" - "@libp2p/peer-record" "^8.0.10" - "@libp2p/utils" "^6.1.3" +"@libp2p/identify@^3.0.12": + version "3.0.12" + resolved "https://registry.yarnpkg.com/@libp2p/identify/-/identify-3.0.12.tgz#91d386a50581eac3892a7cd2a62f7324678713d8" + integrity sha512-Z1MjdaGMsLPEEpEvlCJOsOgZ2q4FOPqO7W9ep6Kemnc0suuB6wk+8XLDvnZKHS80OdZopGQwm7z8Az06cxrLAA== + dependencies: + "@libp2p/crypto" "^5.0.7" + "@libp2p/interface" "^2.2.1" + "@libp2p/interface-internal" "^2.1.1" + "@libp2p/peer-id" 
"^5.0.8" + "@libp2p/peer-record" "^8.0.12" + "@libp2p/utils" "^6.2.1" "@multiformats/multiaddr" "^12.2.3" "@multiformats/multiaddr-matcher" "^1.2.1" it-drain "^3.0.7" @@ -2119,21 +2119,21 @@ uint8arrays "^5.1.0" wherearewe "^2.0.1" -"@libp2p/interface-internal@^2.0.10": - version "2.0.10" - resolved "https://registry.yarnpkg.com/@libp2p/interface-internal/-/interface-internal-2.0.10.tgz#d91854304324d467ab10e785a735a543bf822efc" - integrity sha512-LRnn6w5rtvMQlEukihDI5NhSZXZj7ITFT1Hbo3Dn3HGo1oxZe7oWh7ERc5LwZw835QHGzFKZYerBFKdqxoWsFQ== +"@libp2p/interface-internal@^2.1.1": + version "2.1.1" + resolved "https://registry.yarnpkg.com/@libp2p/interface-internal/-/interface-internal-2.1.1.tgz#16a85975ded00df93b6a977b812b219cdf122184" + integrity sha512-7rw7p5wZry9ZPfdhYi4zXRjsgrJ8y/X5M7iWIzUBSJdJP2Zd0ZVStlgyqYm1YAbb8V0mwo5BI/kxd2o9R/9TJQ== dependencies: - "@libp2p/interface" "^2.2.0" - "@libp2p/peer-collections" "^6.0.10" + "@libp2p/interface" "^2.2.1" + "@libp2p/peer-collections" "^6.0.12" "@multiformats/multiaddr" "^12.2.3" progress-events "^1.0.0" uint8arraylist "^2.4.8" -"@libp2p/interface@^2.0.0", "@libp2p/interface@^2.2.0": - version "2.2.0" - resolved "https://registry.yarnpkg.com/@libp2p/interface/-/interface-2.2.0.tgz#8718c29a0cf8c82b00d2ff9b140bcec9185578a2" - integrity sha512-Pn3P5ixDggBjDyuULT0GvwdgD3JA426OqZ0e521mI7ysS+/M9Z9fp4Qcy8JrkJ45bLmIi9cgrNrefuU/Zu+bAQ== +"@libp2p/interface@^2.0.0", "@libp2p/interface@^2.2.1": + version "2.2.1" + resolved "https://registry.yarnpkg.com/@libp2p/interface/-/interface-2.2.1.tgz#4f046cbeea6e2712af7ebd9b4724bdf8f113f10e" + integrity sha512-5dvsnf9+S5DoXCk5H3HNpe8lKzuXTi0k2On8Cdqr6YrkmrhCimow63AxtaUOVkH7GVBTTi8Q1jSx3aleX7KcEA== dependencies: "@multiformats/multiaddr" "^12.2.3" it-pushable "^3.2.3" @@ -2142,23 +2142,23 @@ progress-events "^1.0.0" uint8arraylist "^2.4.8" -"@libp2p/logger@^5.0.1", "@libp2p/logger@^5.1.3": - version "5.1.3" - resolved "https://registry.yarnpkg.com/@libp2p/logger/-/logger-5.1.3.tgz#fca69a5de0b3a80cfc1ec039bb76f30e9e26eab7" - integrity sha512-NUVWEWGbXlBDgDE5ntdm51+ZICmaKYI8mor6KrlPeB1WXDyIFxRWIBw6uzt+HgprQJWzLTojeUEGv6OPsj95Dg== +"@libp2p/logger@^5.0.1", "@libp2p/logger@^5.1.4": + version "5.1.4" + resolved "https://registry.yarnpkg.com/@libp2p/logger/-/logger-5.1.4.tgz#1c8cc8594aad6e3bd3dcf4056104784900004aaf" + integrity sha512-pVQ2odi6rcOR412wM0dg7eZ1+wPHPo5D7W8vIn3YyB2FLodQD7CZXXfg7Z9Yaqlc4BVbkNXDWL/jlUss9wL2Ow== dependencies: - "@libp2p/interface" "^2.2.0" + "@libp2p/interface" "^2.2.1" "@multiformats/multiaddr" "^12.2.3" interface-datastore "^8.3.0" multiformats "^13.1.0" weald "^1.0.2" -"@libp2p/multistream-select@^6.0.8": - version "6.0.8" - resolved "https://registry.yarnpkg.com/@libp2p/multistream-select/-/multistream-select-6.0.8.tgz#9a40f10cb0fa76918d29d48719e4800d9025a7aa" - integrity sha512-CSgTXvuw5ObZs/EIa4mtynsYEO+BxyZTNz3AEgjsPyZKxLJ9usrZ8lGxn4sK4g65CKcTI2mVJBmh0duz+sXxBw== +"@libp2p/multistream-select@^6.0.9": + version "6.0.9" + resolved "https://registry.yarnpkg.com/@libp2p/multistream-select/-/multistream-select-6.0.9.tgz#cb2bac034b4805a2cf0705952b73c3836477c160" + integrity sha512-yU+K4/jtXwt1WXMXSJTuhGnn6F97v/P0IOdMALMQlgmvSeGICDBNllX/i0r9y/DDwI/Hh61phB15aUgc/6pX8Q== dependencies: - "@libp2p/interface" "^2.2.0" + "@libp2p/interface" "^2.2.1" it-length-prefixed "^9.0.4" it-length-prefixed-stream "^1.1.7" it-stream-types "^2.0.1" @@ -2168,35 +2168,35 @@ uint8arraylist "^2.4.8" uint8arrays "^5.1.0" -"@libp2p/peer-collections@^6.0.10": - version "6.0.10" - resolved 
"https://registry.yarnpkg.com/@libp2p/peer-collections/-/peer-collections-6.0.10.tgz#93f42d21c0e273d3270f7df49dad2fe566e4fd5d" - integrity sha512-KQQiBZ2Y3+wvxjfIWbUCL0suCRVn5ylLuQ2r+OGXLA7LtgRw1RLQnUHHFVoY+CE9pvfIfamwTFlkZhWtvi271w== +"@libp2p/peer-collections@^6.0.12": + version "6.0.12" + resolved "https://registry.yarnpkg.com/@libp2p/peer-collections/-/peer-collections-6.0.12.tgz#fb35f7f5ff6c0577d66197967b95af9ed5f3a11e" + integrity sha512-JQvnCZ5rUiFkznQTOblNF+xE0ddmETn1f3FyYP9vHALOPrgdQkoZeY1b1W3Gz7gA8CXZ//cluHE+ZBiavDbNIg== dependencies: - "@libp2p/interface" "^2.2.0" - "@libp2p/peer-id" "^5.0.7" - "@libp2p/utils" "^6.1.3" + "@libp2p/interface" "^2.2.1" + "@libp2p/peer-id" "^5.0.8" + "@libp2p/utils" "^6.2.1" multiformats "^13.2.2" -"@libp2p/peer-id@^5.0.0", "@libp2p/peer-id@^5.0.5", "@libp2p/peer-id@^5.0.7": - version "5.0.7" - resolved "https://registry.yarnpkg.com/@libp2p/peer-id/-/peer-id-5.0.7.tgz#bcde5224ec3bc97b826efadebd52489f518bb326" - integrity sha512-ecF0Mu4Nxy8IHUMBYVNIEihjUlx52DM+X3CIfBItvGqvnhrUSkJJjkska2dJX3yf2J8wufzCT3jCg4NZWmndYg== +"@libp2p/peer-id@^5.0.0", "@libp2p/peer-id@^5.0.5", "@libp2p/peer-id@^5.0.8": + version "5.0.8" + resolved "https://registry.yarnpkg.com/@libp2p/peer-id/-/peer-id-5.0.8.tgz#96464cf5654963dbb0c06fabfecc9bd19c5e2f34" + integrity sha512-vil9cch+qtqchSlrgG0Zw82uCW8XsyeOJc6DaIiS2hI01cMOIChT4CKjTn0iV5k2yw/niycQPjLrYQzy7tBIYg== dependencies: - "@libp2p/crypto" "^5.0.6" - "@libp2p/interface" "^2.2.0" + "@libp2p/crypto" "^5.0.7" + "@libp2p/interface" "^2.2.1" multiformats "^13.1.0" uint8arrays "^5.1.0" -"@libp2p/peer-record@^8.0.10": - version "8.0.10" - resolved "https://registry.yarnpkg.com/@libp2p/peer-record/-/peer-record-8.0.10.tgz#e32b204d0f01ac77d3ab207bb39959502509d529" - integrity sha512-k5A5YFhx7xGgFjiFWp0j8Cbw5kUYLJoBY9I3YTIHrieusLUUkMtUkYeuWeagNL1JYcXr06gguoIaYBRNCMQAow== +"@libp2p/peer-record@^8.0.12": + version "8.0.12" + resolved "https://registry.yarnpkg.com/@libp2p/peer-record/-/peer-record-8.0.12.tgz#7e923b61b33d4ca5c94ce0fa67027b31952e5997" + integrity sha512-N8OyAAgQwBCUB7AtSlI0AQun45SeBS5UWMnhO9JLAzzNUOZiMk+IfBwEu8dpJ0E311QK2vGY1suoxTsauqMSjg== dependencies: - "@libp2p/crypto" "^5.0.6" - "@libp2p/interface" "^2.2.0" - "@libp2p/peer-id" "^5.0.7" - "@libp2p/utils" "^6.1.3" + "@libp2p/crypto" "^5.0.7" + "@libp2p/interface" "^2.2.1" + "@libp2p/peer-id" "^5.0.8" + "@libp2p/utils" "^6.2.1" "@multiformats/multiaddr" "^12.2.3" multiformats "^13.2.2" protons-runtime "^5.4.0" @@ -2204,15 +2204,15 @@ uint8arraylist "^2.4.8" uint8arrays "^5.1.0" -"@libp2p/peer-store@^11.0.10": - version "11.0.10" - resolved "https://registry.yarnpkg.com/@libp2p/peer-store/-/peer-store-11.0.10.tgz#320e208c653dbe0530ae669d1c69036aae1678f5" - integrity sha512-yUkIAKrk2XAJt01SVOvxpsaT/FZ9ju7j67TJhvh0NUon/dMYSQKVHwykK8SI/dxZi/7cDslSKIbIKv7eU5ZUTQ== +"@libp2p/peer-store@^11.0.12": + version "11.0.12" + resolved "https://registry.yarnpkg.com/@libp2p/peer-store/-/peer-store-11.0.12.tgz#22bf77c6fd34e60ec2c036b4d3fd93c904702386" + integrity sha512-wCPvrmdm+fua28xY6THVskawNDhKxo9O9suif9MAy6Nb9Drr+WiOGucHasOrs/ELvkuU3nc/zxvyWjk8MlTEfw== dependencies: - "@libp2p/crypto" "^5.0.6" - "@libp2p/interface" "^2.2.0" - "@libp2p/peer-id" "^5.0.7" - "@libp2p/peer-record" "^8.0.10" + "@libp2p/crypto" "^5.0.7" + "@libp2p/interface" "^2.2.1" + "@libp2p/peer-id" "^5.0.8" + "@libp2p/peer-record" "^8.0.12" "@multiformats/multiaddr" "^12.2.3" interface-datastore "^8.3.0" it-all "^3.0.6" @@ -2222,13 +2222,13 @@ uint8arraylist "^2.4.8" uint8arrays "^5.1.0" -"@libp2p/tcp@^10.0.11": - 
version "10.0.11" - resolved "https://registry.yarnpkg.com/@libp2p/tcp/-/tcp-10.0.11.tgz#dc1ae5ba5bb8434292f0fc462fe018b85dd29530" - integrity sha512-E4xy6G6o7XuS/rQGrlkWeIFoGoFd8BLBzuQaSXGfSkimFe9ym7vczNtSzS+MPSyCzFdl4RZrhG7odbBPt7hTnw== +"@libp2p/tcp@^10.0.13": + version "10.0.13" + resolved "https://registry.yarnpkg.com/@libp2p/tcp/-/tcp-10.0.13.tgz#2f0a4c1cdc36d4203aebf33ab41444161af76f19" + integrity sha512-1oxkfMGJdnACSeq75k+K1JM9KkPUFgp7U/YH6+4f884MC7crciV28+FtrIoxS+79gX+EZeUOzvCPLWbuQddk9Q== dependencies: - "@libp2p/interface" "^2.2.0" - "@libp2p/utils" "^6.1.3" + "@libp2p/interface" "^2.2.1" + "@libp2p/utils" "^6.2.1" "@multiformats/mafmt" "^12.1.6" "@multiformats/multiaddr" "^12.2.3" "@types/sinon" "^17.0.3" @@ -2238,15 +2238,15 @@ race-event "^1.3.0" stream-to-it "^1.0.1" -"@libp2p/utils@^6.0.0", "@libp2p/utils@^6.1.3": - version "6.1.3" - resolved "https://registry.yarnpkg.com/@libp2p/utils/-/utils-6.1.3.tgz#cd9c56d02ad7284c4ba58f1902cca2566d789bf7" - integrity sha512-n1D6phOXGkqE3tuvmZwm5gaHKcGanlKwCWEBlrZqx9SSCyd5U5C58BcyQ8YH5/nb4kYMI7HyjomfQAVs2S2R9Q== +"@libp2p/utils@^6.0.0", "@libp2p/utils@^6.2.1": + version "6.2.1" + resolved "https://registry.yarnpkg.com/@libp2p/utils/-/utils-6.2.1.tgz#c90408f6b073752002b2315c5b53c0a5af21caba" + integrity sha512-uORuQLB75MgbrDA6i0rCw/fqNFgCs1dO3c21Z5WNdRCA4Lcvhgi6wnUf45DwgNCqtljDAnBR0FYI+UbEp5yAuA== dependencies: "@chainsafe/is-ip" "^2.0.2" - "@libp2p/crypto" "^5.0.6" - "@libp2p/interface" "^2.2.0" - "@libp2p/logger" "^5.1.3" + "@libp2p/crypto" "^5.0.7" + "@libp2p/interface" "^2.2.1" + "@libp2p/logger" "^5.1.4" "@multiformats/multiaddr" "^12.2.3" "@sindresorhus/fnv1a" "^3.1.0" "@types/murmurhash3js-revisited" "^3.0.3" @@ -2266,17 +2266,16 @@ uint8arraylist "^2.4.8" uint8arrays "^5.1.0" -"@libp2p/webrtc@^5.0.16": - version "5.0.16" - resolved "https://registry.yarnpkg.com/@libp2p/webrtc/-/webrtc-5.0.16.tgz#b53cad3632afff057776bb747f9344eeb2d499d5" - integrity sha512-+o82zclk0d8HVGfI1pNiypCvhC0boubRVIJ/5+JSyUbZFpTNDelBb6DziI7s6NPVn0OnNvlfS6xWO3cetw9qag== +"@libp2p/webrtc@^5.0.19": + version "5.0.19" + resolved "https://registry.yarnpkg.com/@libp2p/webrtc/-/webrtc-5.0.19.tgz#2a67d0976ff820317f3a8be732abac6db819ea45" + integrity sha512-WicT2mraZf4ZKqt73MYvfAUvOQehZDPT673GThA3yK02eR6B/c9MenMuUbNb3qGsE4b5yhMjioYjZPahWpxcBw== dependencies: "@chainsafe/libp2p-noise" "^16.0.0" - "@libp2p/interface" "^2.2.0" - "@libp2p/interface-internal" "^2.0.10" - "@libp2p/peer-id" "^5.0.7" - "@libp2p/utils" "^6.1.3" - "@multiformats/mafmt" "^12.1.6" + "@libp2p/interface" "^2.2.1" + "@libp2p/interface-internal" "^2.1.1" + "@libp2p/peer-id" "^5.0.8" + "@libp2p/utils" "^6.2.1" "@multiformats/multiaddr" "^12.2.3" "@multiformats/multiaddr-matcher" "^1.2.1" detect-browser "^5.3.0" @@ -2297,13 +2296,13 @@ uint8arraylist "^2.4.8" uint8arrays "^5.1.0" -"@libp2p/websockets@^9.0.11": - version "9.0.11" - resolved "https://registry.yarnpkg.com/@libp2p/websockets/-/websockets-9.0.11.tgz#69dcd6425b3f5163b1d0b350e3857fa16cb18c40" - integrity sha512-GoX323NMnbOwPu4Cq49XD9AwKLJfOr/R4H5b21ZCpgPIryVGmKUUzS8tWVdBu8RlWcRwCqz7rTUGZ95TKJeVwQ== +"@libp2p/websockets@^9.0.13": + version "9.0.13" + resolved "https://registry.yarnpkg.com/@libp2p/websockets/-/websockets-9.0.13.tgz#d9e9397e9a91b154d22f88535f85e1926e3ae8e1" + integrity sha512-e1Lukn8kzJC7YbfufT8rOy/BXUyl213srb+zizFu4JkTRloChEjq7VzARPo9lrMQye71JrTi2eBg+O/XadERsg== dependencies: - "@libp2p/interface" "^2.2.0" - "@libp2p/utils" "^6.1.3" + "@libp2p/interface" "^2.2.1" + "@libp2p/utils" "^6.2.1" "@multiformats/multiaddr" 
"^12.2.3" "@multiformats/multiaddr-matcher" "^1.4.0" "@multiformats/multiaddr-to-uri" "^10.0.1" @@ -2315,21 +2314,21 @@ wherearewe "^2.0.1" ws "^8.17.0" -"@mui/core-downloads-tracker@^5.16.7": - version "5.16.7" - resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-5.16.7.tgz#182a325a520f7ebd75de051fceabfc0314cfd004" - integrity sha512-RtsCt4Geed2/v74sbihWzzRs+HsIQCfclHeORh5Ynu2fS4icIKozcSubwuG7vtzq2uW3fOR1zITSP84TNt2GoQ== +"@mui/core-downloads-tracker@^5.16.8": + version "5.16.8" + resolved "https://registry.yarnpkg.com/@mui/core-downloads-tracker/-/core-downloads-tracker-5.16.8.tgz#b83316d14dad08fac7cd0574f2643b01959b4464" + integrity sha512-DARxShbBsWz6azwkTmv05rR7rJfcd9cXFsQtbcr24A+5esQBSnK2N3cbg/izlvuMGxonfWaysz/ae+6Ij9RAHQ== "@mui/material@^5.10.13": - version "5.16.7" - resolved "https://registry.yarnpkg.com/@mui/material/-/material-5.16.7.tgz#6e814e2eefdaf065a769cecf549c3569e107a50b" - integrity sha512-cwwVQxBhK60OIOqZOVLFt55t01zmarKJiJUWbk0+8s/Ix5IaUzAShqlJchxsIQ4mSrWqgcKCCXKtIlG5H+/Jmg== + version "5.16.8" + resolved "https://registry.yarnpkg.com/@mui/material/-/material-5.16.8.tgz#cf6a3784910fae05cbcd0a871eaca2821639d867" + integrity sha512-amnDx385shdQasC7wAk/oQjfti8N4wm0fLEcD7n5KXDZxn9Y+0VKEC+ieVseOUGuY5B2FqzdNfeVwUPTmUBszQ== dependencies: "@babel/runtime" "^7.23.9" - "@mui/core-downloads-tracker" "^5.16.7" - "@mui/system" "^5.16.7" + "@mui/core-downloads-tracker" "^5.16.8" + "@mui/system" "^5.16.8" "@mui/types" "^7.2.15" - "@mui/utils" "^5.16.6" + "@mui/utils" "^5.16.8" "@popperjs/core" "^2.11.8" "@types/react-transition-group" "^4.4.10" clsx "^2.1.0" @@ -2338,48 +2337,48 @@ react-is "^18.3.1" react-transition-group "^4.4.5" -"@mui/private-theming@^5.16.6": - version "5.16.6" - resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-5.16.6.tgz#547671e7ae3f86b68d1289a0b90af04dfcc1c8c9" - integrity sha512-rAk+Rh8Clg7Cd7shZhyt2HGTTE5wYKNSJ5sspf28Fqm/PZ69Er9o6KX25g03/FG2dfpg5GCwZh/xOojiTfm3hw== +"@mui/private-theming@^5.16.8": + version "5.16.8" + resolved "https://registry.yarnpkg.com/@mui/private-theming/-/private-theming-5.16.8.tgz#7914996caaf6eedc59914aeab83dcd2d4e4da1ec" + integrity sha512-3Vl9yFVLU6T3CFtxRMQTcJ60Ijv7wxQi4yjH92+9YXcsqvVspeIYoocqNoIV/1bXGYfyWu5zrCmwQVHaGY7bug== dependencies: "@babel/runtime" "^7.23.9" - "@mui/utils" "^5.16.6" + "@mui/utils" "^5.16.8" prop-types "^15.8.1" -"@mui/styled-engine@^5.16.6": - version "5.16.6" - resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-5.16.6.tgz#60110c106dd482dfdb7e2aa94fd6490a0a3f8852" - integrity sha512-zaThmS67ZmtHSWToTiHslbI8jwrmITcN93LQaR2lKArbvS7Z3iLkwRoiikNWutx9MBs8Q6okKvbZq1RQYB3v7g== +"@mui/styled-engine@^5.16.8": + version "5.16.8" + resolved "https://registry.yarnpkg.com/@mui/styled-engine/-/styled-engine-5.16.8.tgz#b8ca35f93f503a51d0759a05475bfd28e10757ea" + integrity sha512-OFdgFf8JczSRs0kvWGdSn0ZeXxWrY0LITDPJ/nAtLEvUUTyrlFaO4il3SECX8ruzvf1VnAxHx4M/4mX9oOn9yA== dependencies: "@babel/runtime" "^7.23.9" "@emotion/cache" "^11.11.0" csstype "^3.1.3" prop-types "^15.8.1" -"@mui/system@^5.16.7": - version "5.16.7" - resolved "https://registry.yarnpkg.com/@mui/system/-/system-5.16.7.tgz#4583ca5bf3b38942e02c15a1e622ba869ac51393" - integrity sha512-Jncvs/r/d/itkxh7O7opOunTqbbSSzMTHzZkNLM+FjAOg+cYAZHrPDlYe1ZGKUYORwwb2XexlWnpZp0kZ4AHuA== +"@mui/system@^5.16.8": + version "5.16.8" + resolved "https://registry.yarnpkg.com/@mui/system/-/system-5.16.8.tgz#e5010d76cd2fdcc403ad3f98abfba99d330055ad" + integrity 
sha512-L32TaFDFpGIi1g6ysRtmhc9zDgrlxDXu3NlrGE8gAsQw/ziHrPdr0PNr20O0POUshA1q14W4dNZ/z0Nx2F9lhA== dependencies: "@babel/runtime" "^7.23.9" - "@mui/private-theming" "^5.16.6" - "@mui/styled-engine" "^5.16.6" + "@mui/private-theming" "^5.16.8" + "@mui/styled-engine" "^5.16.8" "@mui/types" "^7.2.15" - "@mui/utils" "^5.16.6" + "@mui/utils" "^5.16.8" clsx "^2.1.0" csstype "^3.1.3" prop-types "^15.8.1" "@mui/types@^7.2.15": - version "7.2.18" - resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.18.tgz#4b6385ed2f7828ef344113cdc339d6fdf8e4bc23" - integrity sha512-uvK9dWeyCJl/3ocVnTOS6nlji/Knj8/tVqVX03UVTpdmTJYu/s4jtDd9Kvv0nRGE0CUSNW1UYAci7PYypjealg== + version "7.2.19" + resolved "https://registry.yarnpkg.com/@mui/types/-/types-7.2.19.tgz#c941954dd24393fdce5f07830d44440cf4ab6c80" + integrity sha512-6XpZEM/Q3epK9RN8ENoXuygnqUQxE+siN/6rGRi2iwJPgBUR25mphYQ9ZI87plGh58YoZ5pp40bFvKYOCDJ3tA== -"@mui/utils@^5.16.6": - version "5.16.6" - resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.16.6.tgz#905875bbc58d3dcc24531c3314a6807aba22a711" - integrity sha512-tWiQqlhxAt3KENNiSRL+DIn9H5xNVK6Jjf70x3PnfQPz1MPBdh7yyIcAyVBT9xiw7hP3SomRhPR7hzBMBCjqEA== +"@mui/utils@^5.16.8": + version "5.16.8" + resolved "https://registry.yarnpkg.com/@mui/utils/-/utils-5.16.8.tgz#e44acf38d446d361347c46b3e81ae366f615f37b" + integrity sha512-P/yb7BSWallQUeiNGxb+TM8epHteIUC8gzNTdPV2VfKhVY/EnGliHgt5np0GPkjQ7EzwDi/+gBevrAJtf+K94A== dependencies: "@babel/runtime" "^7.23.9" "@mui/types" "^7.2.15" @@ -2409,9 +2408,9 @@ "@multiformats/multiaddr" "^12.0.0" "@multiformats/multiaddr-matcher@^1.2.1", "@multiformats/multiaddr-matcher@^1.3.0", "@multiformats/multiaddr-matcher@^1.4.0": - version "1.4.0" - resolved "https://registry.yarnpkg.com/@multiformats/multiaddr-matcher/-/multiaddr-matcher-1.4.0.tgz#d4934eb6350320fc05044850974ce558b1d212b4" - integrity sha512-Riu+JbTolhzAEgZH3xexLKVn2Oe+xUEPCNHuURqKcE9Pa3RxwsuhldykUWmbsDifXOV4TJCc1LGADFHmpS1y5w== + version "1.6.0" + resolved "https://registry.yarnpkg.com/@multiformats/multiaddr-matcher/-/multiaddr-matcher-1.6.0.tgz#1086b37731296da41fc964df7ec8fdbc05ac999e" + integrity sha512-E77lLvQR+50kTAfvjV3g4wr9qCu77Z+6yT0s1hgfh8B4sAXZ8u/YdQJGhjgstgW1kmGy7BXPppROKYijqQsesQ== dependencies: "@chainsafe/is-ip" "^2.0.1" "@multiformats/multiaddr" "^12.0.0" @@ -2425,9 +2424,9 @@ "@multiformats/multiaddr" "^12.3.0" "@multiformats/multiaddr@^12.0.0", "@multiformats/multiaddr@^12.2.3", "@multiformats/multiaddr@^12.3.0": - version "12.3.1" - resolved "https://registry.yarnpkg.com/@multiformats/multiaddr/-/multiaddr-12.3.1.tgz#953ceb4ae3b39125b7b2c721230ea7b398cf49fe" - integrity sha512-yoGODQY4nIj41ENJClucS8FtBoe8w682bzbKldEQr9lSlfdHqAsRC+vpJAOBpiMwPps1tHua4kxrDmvprdhoDQ== + version "12.3.3" + resolved "https://registry.yarnpkg.com/@multiformats/multiaddr/-/multiaddr-12.3.3.tgz#eddd0a61a542fc0120da7b5f6fc250177762e6d4" + integrity sha512-3POIUN7myk8JbO8oi/FEyZoLQW2XMhwM/uB7hG5Zl1PgdXJR8UTH9QdQEp1jM358kd3yn+vtS4fFJdqdRk+O7A== dependencies: "@chainsafe/is-ip" "^2.0.1" "@chainsafe/netmask" "^2.0.0" @@ -2442,16 +2441,21 @@ integrity sha512-mIbq/R9QXk5/cTfESb1OKtyFnk7oc1Om/8onA1158K9/OZUQFDEVy55jVTato+xmp3XX6F6Qh0zz0Nc1AxAlRQ== "@noble/curves@^1.1.0", "@noble/curves@^1.4.0": - version "1.6.0" - resolved "https://registry.yarnpkg.com/@noble/curves/-/curves-1.6.0.tgz#be5296ebcd5a1730fccea4786d420f87abfeb40b" - integrity sha512-TlaHRXDehJuRNR9TfZDNQ45mMEd5dwUwmicsafcIX4SsNiqnCHKjE/1alYPd/lDRVhxdhUAlv8uEhMCI5zjIJQ== + version "1.7.0" + resolved 
"https://registry.yarnpkg.com/@noble/curves/-/curves-1.7.0.tgz#0512360622439256df892f21d25b388f52505e45" + integrity sha512-UTMhXK9SeDhFJVrHeUJ5uZlI6ajXg10O6Ddocf9S6GjbSBVZsJo88HzKwXznNfGpMTRDyJkqMjNDPYgf0qFWnw== dependencies: - "@noble/hashes" "1.5.0" + "@noble/hashes" "1.6.0" -"@noble/hashes@1.5.0", "@noble/hashes@^1.3.1", "@noble/hashes@^1.4.0": - version "1.5.0" - resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.5.0.tgz#abadc5ca20332db2b1b2aa3e496e9af1213570b0" - integrity sha512-1j6kQFb7QRru7eKN3ZDvRcP13rugwdxZqCjbiAVZfIJwgj2A65UmT4TgARXGlXgnRkORLTDTrO19ZErt7+QXgA== +"@noble/hashes@1.6.0": + version "1.6.0" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.6.0.tgz#d4bfb516ad6e7b5111c216a5cc7075f4cf19e6c5" + integrity sha512-YUULf0Uk4/mAA89w+k3+yUYh6NrEvxZa5T6SY3wlMvE2chHkxFUUIDI8/XW1QSC357iA5pSnqt7XEhvFOqmDyQ== + +"@noble/hashes@^1.3.1", "@noble/hashes@^1.4.0": + version "1.6.1" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.6.1.tgz#df6e5943edcea504bac61395926d6fd67869a0d5" + integrity sha512-pq5D8h10hHBjyqX+cfBm0i8JUXJ0UhczFc4r74zbuT9XgewFo2E3J1cOaGtdZynILNmQ685YWGzGE1Zv6io50w== "@nodelib/fs.scandir@2.1.5": version "2.1.5" @@ -2589,9 +2593,9 @@ npm-normalize-package-bin "^4.0.0" "@npmcli/map-workspaces@^4.0.1": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/map-workspaces/-/map-workspaces-4.0.1.tgz#ff1a7d6f643264617c0769ac0f36e507743d5a81" - integrity sha512-g5H8ljH7Z+4T1ASsfcL09gZl4YGw6M4GbjzPt6HgE+pCRSKC4nlNc4nY75zshi88eEHcdoh3Q8XgWFkGKoVOPw== + version "4.0.2" + resolved "https://registry.yarnpkg.com/@npmcli/map-workspaces/-/map-workspaces-4.0.2.tgz#d02c5508bf55624f60aaa58fe413748a5c773802" + integrity sha512-mnuMuibEbkaBTYj9HQ3dMe6L0ylYW+s/gfz7tBDMFY/la0w9Kf44P9aLn4/+/t3aTR3YUHKoT6XQL9rlicIe3Q== dependencies: "@npmcli/name-from-folder" "^3.0.0" "@npmcli/package-json" "^6.0.0" @@ -2620,9 +2624,9 @@ integrity sha512-+t5DZ6mO/QFh78PByMq1fGSAub/agLJZDRfJRMeOSNCt8s9YVlTjmGpIPwPhvXTGUIJk+WszlT0rQa1W33yzNA== "@npmcli/package-json@^6.0.0", "@npmcli/package-json@^6.0.1": - version "6.0.1" - resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-6.0.1.tgz#550a8eb3e0ae9ad8577cb7a3f2d677a04a3bcee9" - integrity sha512-YW6PZ99sc1Q4DINEY2td5z9Z3rwbbsx7CyCnOc7UXUUdePXh5gPi1UeaoQVmKQMVbIU7aOwX2l1OG5ZfjgGi5g== + version "6.1.0" + resolved "https://registry.yarnpkg.com/@npmcli/package-json/-/package-json-6.1.0.tgz#34f0875da178b04df1a7746c02bdc26479819afb" + integrity sha512-t6G+6ZInT4X+tqj2i+wlLIeCKnKOTuz9/VFYDtj+TGTur5q7sp/OYrQA19LdBbWfXDOi0Y4jtedV6xtB8zQ9ug== dependencies: "@npmcli/git" "^6.0.0" glob "^10.2.2" @@ -2632,7 +2636,7 @@ proc-log "^5.0.0" semver "^7.5.3" -"@npmcli/promise-spawn@^8.0.0", "@npmcli/promise-spawn@^8.0.1": +"@npmcli/promise-spawn@^8.0.0", "@npmcli/promise-spawn@^8.0.2": version "8.0.2" resolved "https://registry.yarnpkg.com/@npmcli/promise-spawn/-/promise-spawn-8.0.2.tgz#053688f8bc2b4ecc036d2d52c691fd82af58ea5e" integrity sha512-/bNJhjc+o6qL+Dwz/bqfTQClkEO5nTQ1ZEcdCkAQjhkZMHIh22LPG7fNh1enJP1NKWDqYiiABnjFCY7E0zHYtQ== @@ -2704,11 +2708,11 @@ integrity sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg== "@octokit/plugin-paginate-rest@^11.0.0": - version "11.3.5" - resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-11.3.5.tgz#a1929b3ba3dc7b63bc73bb6d3c7a3faf2a9c7649" - integrity sha512-cgwIRtKrpwhLoBi0CUNuY83DPGRMaWVjqVI/bGKsLJ4PzyWZNaEmhHroI2xlrVXkk6nFv0IsZpOp+ZWSWUS2AQ== + version "11.3.6" + resolved 
"https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-11.3.6.tgz#82f33c87464202423c2a89d5cc8c38761f4aa86b" + integrity sha512-zcvqqf/+TicbTCa/Z+3w4eBJcAxCFymtc0UAIsR3dEVoNilWld4oXdscQ3laXamTszUZdusw97K8+DrbFiOwjw== dependencies: - "@octokit/types" "^13.6.0" + "@octokit/types" "^13.6.2" "@octokit/plugin-retry@^7.0.0": version "7.1.2" @@ -2744,10 +2748,10 @@ "@octokit/types" "^13.1.0" universal-user-agent "^7.0.2" -"@octokit/types@^13.0.0", "@octokit/types@^13.1.0", "@octokit/types@^13.6.0": - version "13.6.1" - resolved "https://registry.yarnpkg.com/@octokit/types/-/types-13.6.1.tgz#432fc6c0aaae54318e5b2d3e15c22ac97fc9b15f" - integrity sha512-PHZE9Z+kWXb23Ndik8MKPirBPziOc0D2/3KH1P+6jK5nGWe96kadZuE4jev2/Jq7FvIfTlT2Ltg8Fv2x1v0a5g== +"@octokit/types@^13.0.0", "@octokit/types@^13.1.0", "@octokit/types@^13.6.2": + version "13.6.2" + resolved "https://registry.yarnpkg.com/@octokit/types/-/types-13.6.2.tgz#e10fc4d2bdd65d836d1ced223b03ad4cfdb525bd" + integrity sha512-WpbZfZUcZU77DrSW4wbsSgTPfKcp286q3ItaIgvSbBpZJlu6mnYXAkjZz6LVZPXkEvLIM8McanyZejKTYUHipA== dependencies: "@octokit/openapi-types" "^22.2.0" @@ -2769,11 +2773,11 @@ integrity sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA== "@playwright/test@^1.47.0": - version "1.48.2" - resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.48.2.tgz#87dd40633f980872283404c8142a65744d3f13d6" - integrity sha512-54w1xCWfXuax7dz4W2M9uw0gDyh+ti/0K/MxcCUxChFh37kkdxPdfZDw5QBbuPUJHr1CiHJ1hXgSs+GgeQc5Zw== + version "1.49.0" + resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.49.0.tgz#74227385b58317ee076b86b56d0e1e1b25cff01e" + integrity sha512-DMulbwQURa8rNIQrf94+jPJQ4FmOVdpE5ZppRNvWVjvhC+6sOeo28r8MgIpQRYouXRtt/FCCXU7zn20jnHR4Qw== dependencies: - playwright "1.48.2" + playwright "1.49.0" "@pnpm/config.env-replace@^1.1.0": version "1.1.0" @@ -2826,95 +2830,95 @@ resolved "https://registry.yarnpkg.com/@protobufjs/utf8/-/utf8-1.1.0.tgz#a777360b5b39a1a2e5106f8e858f2fd2d060c570" integrity sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw== -"@rollup/rollup-android-arm-eabi@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.24.2.tgz#07db37fcd9d401aae165f662c0069efd61d4ffcc" - integrity sha512-ufoveNTKDg9t/b7nqI3lwbCG/9IJMhADBNjjz/Jn6LxIZxD7T5L8l2uO/wD99945F1Oo8FvgbbZJRguyk/BdzA== - -"@rollup/rollup-android-arm64@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.24.2.tgz#160975402adf85ecd58a0721ad60ae1779a68147" - integrity sha512-iZoYCiJz3Uek4NI0J06/ZxUgwAfNzqltK0MptPDO4OR0a88R4h0DSELMsflS6ibMCJ4PnLvq8f7O1d7WexUvIA== - -"@rollup/rollup-darwin-arm64@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.24.2.tgz#2b126f0aa4349694fe2941bcbcc4b0982b7f1a49" - integrity sha512-/UhrIxobHYCBfhi5paTkUDQ0w+jckjRZDZ1kcBL132WeHZQ6+S5v9jQPVGLVrLbNUebdIRpIt00lQ+4Z7ys4Rg== - -"@rollup/rollup-darwin-x64@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.24.2.tgz#3f4987eff6195532037c50b8db92736e326b5bb2" - integrity sha512-1F/jrfhxJtWILusgx63WeTvGTwE4vmsT9+e/z7cZLKU8sBMddwqw3UV5ERfOV+H1FuRK3YREZ46J4Gy0aP3qDA== - -"@rollup/rollup-freebsd-arm64@4.24.2": - version "4.24.2" - resolved 
"https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.24.2.tgz#15fe184ecfafc635879500f6985c954e57697c44" - integrity sha512-1YWOpFcGuC6iGAS4EI+o3BV2/6S0H+m9kFOIlyFtp4xIX5rjSnL3AwbTBxROX0c8yWtiWM7ZI6mEPTI7VkSpZw== - -"@rollup/rollup-freebsd-x64@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.24.2.tgz#c72d37315d36b6e0763b7aabb6ae53c361b45e05" - integrity sha512-3qAqTewYrCdnOD9Gl9yvPoAoFAVmPJsBvleabvx4bnu1Kt6DrB2OALeRVag7BdWGWLhP1yooeMLEi6r2nYSOjg== - -"@rollup/rollup-linux-arm-gnueabihf@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.24.2.tgz#f274f81abf845dcca5f1f40d434a09a79a3a73a0" - integrity sha512-ArdGtPHjLqWkqQuoVQ6a5UC5ebdX8INPuJuJNWRe0RGa/YNhVvxeWmCTFQ7LdmNCSUzVZzxAvUznKaYx645Rig== - -"@rollup/rollup-linux-arm-musleabihf@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.24.2.tgz#9edaeb1a9fa7d4469917cb0614f665f1cf050625" - integrity sha512-B6UHHeNnnih8xH6wRKB0mOcJGvjZTww1FV59HqJoTJ5da9LCG6R4SEBt6uPqzlawv1LoEXSS0d4fBlHNWl6iYw== - -"@rollup/rollup-linux-arm64-gnu@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.24.2.tgz#6eb6851f594336bfa00f074f58a00a61e9751493" - integrity sha512-kr3gqzczJjSAncwOS6i7fpb4dlqcvLidqrX5hpGBIM1wtt0QEVtf4wFaAwVv8QygFU8iWUMYEoJZWuWxyua4GQ== - -"@rollup/rollup-linux-arm64-musl@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.24.2.tgz#9d8dc8e80df8f156d2888ecb8d6c96d653580731" - integrity sha512-TDdHLKCWgPuq9vQcmyLrhg/bgbOvIQ8rtWQK7MRxJ9nvaxKx38NvY7/Lo6cYuEnNHqf6rMqnivOIPIQt6H2AoA== - -"@rollup/rollup-linux-powerpc64le-gnu@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.24.2.tgz#358e3e7dda2d60c46ff7c74f7075045736df5b50" - integrity sha512-xv9vS648T3X4AxFFZGWeB5Dou8ilsv4VVqJ0+loOIgDO20zIhYfDLkk5xoQiej2RiSQkld9ijF/fhLeonrz2mw== - -"@rollup/rollup-linux-riscv64-gnu@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.24.2.tgz#b08461ace599c3f0b5f27051f1756b6cf1c78259" - integrity sha512-tbtXwnofRoTt223WUZYiUnbxhGAOVul/3StZ947U4A5NNjnQJV5irKMm76G0LGItWs6y+SCjUn/Q0WaMLkEskg== - -"@rollup/rollup-linux-s390x-gnu@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.24.2.tgz#daab36c9b5c8ac4bfe5a9c4c39ad711464b7dfee" - integrity sha512-gc97UebApwdsSNT3q79glOSPdfwgwj5ELuiyuiMY3pEWMxeVqLGKfpDFoum4ujivzxn6veUPzkGuSYoh5deQ2Q== - -"@rollup/rollup-linux-x64-gnu@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.24.2.tgz#4cc3a4f31920bdb028dbfd7ce0e972a17424a63c" - integrity sha512-jOG/0nXb3z+EM6SioY8RofqqmZ+9NKYvJ6QQaa9Mvd3RQxlH68/jcB/lpyVt4lCiqr04IyaC34NzhUqcXbB5FQ== - -"@rollup/rollup-linux-x64-musl@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.24.2.tgz#59800e26c538517ee05f4645315d9e1aded93200" - integrity sha512-XAo7cJec80NWx9LlZFEJQxqKOMz/lX3geWs2iNT5CHIERLFfd90f3RYLLjiCBm1IMaQ4VOX/lTC9lWfzzQm14Q== - -"@rollup/rollup-win32-arm64-msvc@4.24.2": - version "4.24.2" - 
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.24.2.tgz#c80e2c33c952b6b171fa6ad9a97dfbb2e4ebee44" - integrity sha512-A+JAs4+EhsTjnPQvo9XY/DC0ztaws3vfqzrMNMKlwQXuniBKOIIvAAI8M0fBYiTCxQnElYu7mLk7JrhlQ+HeOw== - -"@rollup/rollup-win32-ia32-msvc@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.24.2.tgz#a1e9d275cb16f6d5feb9c20aee7e897b1e193359" - integrity sha512-ZhcrakbqA1SCiJRMKSU64AZcYzlZ/9M5LaYil9QWxx9vLnkQ9Vnkve17Qn4SjlipqIIBFKjBES6Zxhnvh0EAEw== - -"@rollup/rollup-win32-x64-msvc@4.24.2": - version "4.24.2" - resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.24.2.tgz#0610af0fb8fec52be779d5b163bbbd6930150467" - integrity sha512-2mLH46K1u3r6uwc95hU+OR9q/ggYMpnS7pSp83Ece1HUQgF9Nh/QwTK5rcgbFnV9j+08yBrU5sA/P0RK2MSBNA== +"@rollup/rollup-android-arm-eabi@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.28.0.tgz#462e7ecdd60968bc9eb95a20d185e74f8243ec1b" + integrity sha512-wLJuPLT6grGZsy34g4N1yRfYeouklTgPhH1gWXCYspenKYD0s3cR99ZevOGw5BexMNywkbV3UkjADisozBmpPQ== + +"@rollup/rollup-android-arm64@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.28.0.tgz#78a2b8a8a55f71a295eb860a654ae90a2b168f40" + integrity sha512-eiNkznlo0dLmVG/6wf+Ifi/v78G4d4QxRhuUl+s8EWZpDewgk7PX3ZyECUXU0Zq/Ca+8nU8cQpNC4Xgn2gFNDA== + +"@rollup/rollup-darwin-arm64@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.28.0.tgz#5b783af714f434f1e66e3cdfa3817e0b99216d84" + integrity sha512-lmKx9yHsppblnLQZOGxdO66gT77bvdBtr/0P+TPOseowE7D9AJoBw8ZDULRasXRWf1Z86/gcOdpBrV6VDUY36Q== + +"@rollup/rollup-darwin-x64@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.28.0.tgz#f72484e842521a5261978034e18e20f778a2850d" + integrity sha512-8hxgfReVs7k9Js1uAIhS6zq3I+wKQETInnWQtgzt8JfGx51R1N6DRVy3F4o0lQwumbErRz52YqwjfvuwRxGv1w== + +"@rollup/rollup-freebsd-arm64@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.28.0.tgz#3c919dff72b2fe344811a609c674a8347b033f62" + integrity sha512-lA1zZB3bFx5oxu9fYud4+g1mt+lYXCoch0M0V/xhqLoGatbzVse0wlSQ1UYOWKpuSu3gyN4qEc0Dxf/DII1bhQ== + +"@rollup/rollup-freebsd-x64@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.28.0.tgz#b62a3a8365b363b3fdfa6da11a9188b6ab4dca7c" + integrity sha512-aI2plavbUDjCQB/sRbeUZWX9qp12GfYkYSJOrdYTL/C5D53bsE2/nBPuoiJKoWp5SN78v2Vr8ZPnB+/VbQ2pFA== + +"@rollup/rollup-linux-arm-gnueabihf@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.28.0.tgz#0d02cc55bd229bd8ca5c54f65f916ba5e0591c94" + integrity sha512-WXveUPKtfqtaNvpf0iOb0M6xC64GzUX/OowbqfiCSXTdi/jLlOmH0Ba94/OkiY2yTGTwteo4/dsHRfh5bDCZ+w== + +"@rollup/rollup-linux-arm-musleabihf@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.28.0.tgz#c51d379263201e88a60e92bd8e90878f0c044425" + integrity sha512-yLc3O2NtOQR67lI79zsSc7lk31xjwcaocvdD1twL64PK1yNaIqCeWI9L5B4MFPAVGEVjH5k1oWSGuYX1Wutxpg== + +"@rollup/rollup-linux-arm64-gnu@4.28.0": + version "4.28.0" + resolved 
"https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.28.0.tgz#93ce2addc337b5cfa52b84f8e730d2e36eb4339b" + integrity sha512-+P9G9hjEpHucHRXqesY+3X9hD2wh0iNnJXX/QhS/J5vTdG6VhNYMxJ2rJkQOxRUd17u5mbMLHM7yWGZdAASfcg== + +"@rollup/rollup-linux-arm64-musl@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.28.0.tgz#730af6ddc091a5ba5baac28a3510691725dc808b" + integrity sha512-1xsm2rCKSTpKzi5/ypT5wfc+4bOGa/9yI/eaOLW0oMs7qpC542APWhl4A37AENGZ6St6GBMWhCCMM6tXgTIplw== + +"@rollup/rollup-linux-powerpc64le-gnu@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.28.0.tgz#b5565aac20b4de60ca1e557f525e76478b5436af" + integrity sha512-zgWxMq8neVQeXL+ouSf6S7DoNeo6EPgi1eeqHXVKQxqPy1B2NvTbaOUWPn/7CfMKL7xvhV0/+fq/Z/J69g1WAQ== + +"@rollup/rollup-linux-riscv64-gnu@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.28.0.tgz#d488290bf9338bad4ae9409c4aa8a1728835a20b" + integrity sha512-VEdVYacLniRxbRJLNtzwGt5vwS0ycYshofI7cWAfj7Vg5asqj+pt+Q6x4n+AONSZW/kVm+5nklde0qs2EUwU2g== + +"@rollup/rollup-linux-s390x-gnu@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.28.0.tgz#eb2e3f3a06acf448115045c11a5a96868c95a556" + integrity sha512-LQlP5t2hcDJh8HV8RELD9/xlYtEzJkm/aWGsauvdO2ulfl3QYRjqrKW+mGAIWP5kdNCBheqqqYIGElSRCaXfpw== + +"@rollup/rollup-linux-x64-gnu@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.28.0.tgz#065952ef2aea7e837dc7e02aa500feeaff4fc507" + integrity sha512-Nl4KIzteVEKE9BdAvYoTkW19pa7LR/RBrT6F1dJCV/3pbjwDcaOq+edkP0LXuJ9kflW/xOK414X78r+K84+msw== + +"@rollup/rollup-linux-x64-musl@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.28.0.tgz#3435d484d05f5c4d1ffd54541b4facce2887103a" + integrity sha512-eKpJr4vBDOi4goT75MvW+0dXcNUqisK4jvibY9vDdlgLx+yekxSm55StsHbxUsRxSTt3JEQvlr3cGDkzcSP8bw== + +"@rollup/rollup-win32-arm64-msvc@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.28.0.tgz#69682a2a10d9fedc334f87583cfca83c39c08077" + integrity sha512-Vi+WR62xWGsE/Oj+mD0FNAPY2MEox3cfyG0zLpotZdehPFXwz6lypkGs5y38Jd/NVSbOD02aVad6q6QYF7i8Bg== + +"@rollup/rollup-win32-ia32-msvc@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.28.0.tgz#b64470f9ac79abb386829c56750b9a4711be3332" + integrity sha512-kN/Vpip8emMLn/eOza+4JwqDZBL6MPNpkdaEsgUtW1NYN3DZvZqSQrbKzJcTL6hd8YNmFTn7XGWMwccOcJBL0A== + +"@rollup/rollup-win32-x64-msvc@4.28.0": + version "4.28.0" + resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.28.0.tgz#cb313feef9ac6e3737067fdf34f42804ac65a6f2" + integrity sha512-Bvno2/aZT6usSa7lRDL2+hMjVAGjuqaymF1ApZm31JXzniR/hvr14jpU+/z4X6Gt5BPlzosscyJZGUvguXIqeQ== "@rtsao/scc@^1.1.0": version "1.1.0" @@ -3004,9 +3008,9 @@ url-join "^5.0.0" "@semantic-release/github@^11.0.0": - version "11.0.0" - resolved "https://registry.yarnpkg.com/@semantic-release/github/-/github-11.0.0.tgz#1cbfed43b96282994a1c480a204179b89b73a387" - integrity sha512-Uon6G6gJD8U1JNvPm7X0j46yxNRJ8Ui6SgK4Zw5Ktu8RgjEft3BGn+l/RX1TTzhhO3/uUcKuqM+/9/ETFxWS/Q== + version "11.0.1" + resolved 
"https://registry.yarnpkg.com/@semantic-release/github/-/github-11.0.1.tgz#127579aa77ddd8586de6f4f57d0e66db3453a876" + integrity sha512-Z9cr0LgU/zgucbT9cksH0/pX9zmVda9hkDPcgIE0uvjMQ8w/mElDivGjx1w1pEQ+MuQJ5CBq3VCF16S6G4VH3A== dependencies: "@octokit/core" "^6.0.0" "@octokit/plugin-paginate-rest" "^11.0.0" @@ -3060,13 +3064,6 @@ lodash-es "^4.17.21" read-package-up "^11.0.0" -"@sigstore/bundle@^2.3.2": - version "2.3.2" - resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-2.3.2.tgz#ad4dbb95d665405fd4a7a02c8a073dbd01e4e95e" - integrity sha512-wueKWDk70QixNLB363yHc2D2ItTgYiMTdPwK8D9dKQMR3ZQ0c35IxP5xnwQ8cNLoCgCRcHf14kE+CLIvNX1zmA== - dependencies: - "@sigstore/protobuf-specs" "^0.3.2" - "@sigstore/bundle@^3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@sigstore/bundle/-/bundle-3.0.0.tgz#ffffc750436c6eb8330ead1ca65bc892f893a7c5" @@ -3074,11 +3071,6 @@ dependencies: "@sigstore/protobuf-specs" "^0.3.2" -"@sigstore/core@^1.0.0", "@sigstore/core@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@sigstore/core/-/core-1.1.0.tgz#5583d8f7ffe599fa0a89f2bf289301a5af262380" - integrity sha512-JzBqdVIyqm2FRQCulY6nbQzMpJJpSiJ8XXWMhtOX9eKgaXXpfNOF53lzQEjIydlStnd/eFtuC1dW4VYdD93oRg== - "@sigstore/core@^2.0.0": version "2.0.0" resolved "https://registry.yarnpkg.com/@sigstore/core/-/core-2.0.0.tgz#f888a8e4c8fdaa27848514a281920b6fd8eca955" @@ -3089,18 +3081,6 @@ resolved "https://registry.yarnpkg.com/@sigstore/protobuf-specs/-/protobuf-specs-0.3.2.tgz#5becf88e494a920f548d0163e2978f81b44b7d6f" integrity sha512-c6B0ehIWxMI8wiS/bj6rHMPqeFvngFV7cDU/MY+B16P9Z3Mp9k8L93eYZ7BYzSickzuqAQqAq0V956b3Ju6mLw== -"@sigstore/sign@^2.3.2": - version "2.3.2" - resolved "https://registry.yarnpkg.com/@sigstore/sign/-/sign-2.3.2.tgz#d3d01e56d03af96fd5c3a9b9897516b1233fc1c4" - integrity sha512-5Vz5dPVuunIIvC5vBb0APwo7qKA4G9yM48kPWJT+OEERs40md5GoUR1yedwpekWZ4m0Hhw44m6zU+ObsON+iDA== - dependencies: - "@sigstore/bundle" "^2.3.2" - "@sigstore/core" "^1.0.0" - "@sigstore/protobuf-specs" "^0.3.2" - make-fetch-happen "^13.0.1" - proc-log "^4.2.0" - promise-retry "^2.0.1" - "@sigstore/sign@^3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@sigstore/sign/-/sign-3.0.0.tgz#70752aaa54dfeafa0b0fbe1f58ebe9fe3d621f8f" @@ -3113,14 +3093,6 @@ proc-log "^5.0.0" promise-retry "^2.0.1" -"@sigstore/tuf@^2.3.4": - version "2.3.4" - resolved "https://registry.yarnpkg.com/@sigstore/tuf/-/tuf-2.3.4.tgz#da1d2a20144f3b87c0172920cbc8dcc7851ca27c" - integrity sha512-44vtsveTPUpqhm9NCrbU8CWLe3Vck2HO1PNLw7RIajbB7xhtn5RBPm1VNSCMwqGYHhDsBJG8gDF0q4lgydsJvw== - dependencies: - "@sigstore/protobuf-specs" "^0.3.2" - tuf-js "^2.2.1" - "@sigstore/tuf@^3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@sigstore/tuf/-/tuf-3.0.0.tgz#5f657e3052e93cb09e1735ee7f52b7938351278d" @@ -3129,15 +3101,6 @@ "@sigstore/protobuf-specs" "^0.3.2" tuf-js "^3.0.1" -"@sigstore/verify@^1.2.1": - version "1.2.1" - resolved "https://registry.yarnpkg.com/@sigstore/verify/-/verify-1.2.1.tgz#c7e60241b432890dcb8bd8322427f6062ef819e1" - integrity sha512-8iKx79/F73DKbGfRf7+t4dqrc0bRr0thdPrxAtCKWRm/F0tG71i6O1rvlnScncJLLBZHn3h8M3c1BSUAb9yu8g== - dependencies: - "@sigstore/bundle" "^2.3.2" - "@sigstore/core" "^1.1.0" - "@sigstore/protobuf-specs" "^0.3.2" - "@sigstore/verify@^2.0.0": version "2.0.0" resolved "https://registry.yarnpkg.com/@sigstore/verify/-/verify-2.0.0.tgz#4ad96e9234b71b57622c3c446b63bad805351030" @@ -3182,9 +3145,9 @@ "@sinonjs/commons" "^3.0.0" "@sinonjs/fake-timers@^13.0.1": - version "13.0.4" - resolved 
"https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-13.0.4.tgz#cacb89257e650f3214f9da5d9236f72c9658a607" - integrity sha512-wpUq+QiKxrWk7U2pdvNSY9fNX62/k+7eEdlQMO0A3rU8tQ+vvzY/WzBhMz+GbQlATXZlXWYQqFWNFcn1SVvThA== + version "13.0.5" + resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-13.0.5.tgz#36b9dbc21ad5546486ea9173d6bea063eb1717d5" + integrity sha512-36/hTbH2uaWuGVERyC6da9YwGWnzUZXuPro/F2LfsdOsLnCojz/iSH8MxUt/FD2S5XBSVPhmArFUXcpCQ2Hkiw== dependencies: "@sinonjs/commons" "^3.0.1" @@ -3202,88 +3165,77 @@ resolved "https://registry.yarnpkg.com/@sinonjs/text-encoding/-/text-encoding-0.7.3.tgz#282046f03e886e352b2d5f5da5eb755e01457f3f" integrity sha512-DE427ROAphMQzU4ENbliGYrBSYPXF+TtLg9S8vzeA+OF4ZKzoDdzfL8sxuMUGS/lgRhM6j1URSk9ghf7Xo1tyA== -"@smithy/abort-controller@^3.1.6": - version "3.1.6" - resolved "https://registry.yarnpkg.com/@smithy/abort-controller/-/abort-controller-3.1.6.tgz#d9de97b85ca277df6ffb9ee7cd83d5da793ee6de" - integrity sha512-0XuhuHQlEqbNQZp7QxxrFTdVWdwxch4vjxYgfInF91hZFkPxf9QDrdQka0KfxFMPqLNzSw0b95uGTrLliQUavQ== +"@smithy/abort-controller@^3.1.8": + version "3.1.8" + resolved "https://registry.yarnpkg.com/@smithy/abort-controller/-/abort-controller-3.1.8.tgz#ce0c10ddb2b39107d70b06bbb8e4f6e368bc551d" + integrity sha512-+3DOBcUn5/rVjlxGvUPKc416SExarAQ+Qe0bqk30YSUjbepwpS7QN0cyKUSifvLJhdMZ0WPzPP5ymut0oonrpQ== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/config-resolver@^3.0.10", "@smithy/config-resolver@^3.0.9": - version "3.0.10" - resolved "https://registry.yarnpkg.com/@smithy/config-resolver/-/config-resolver-3.0.10.tgz#d9529d9893e5fae1f14cb1ffd55517feb6d7e50f" - integrity sha512-Uh0Sz9gdUuz538nvkPiyv1DZRX9+D15EKDtnQP5rYVAzM/dnYk3P8cg73jcxyOitPgT3mE3OVj7ky7sibzHWkw== +"@smithy/config-resolver@^3.0.12": + version "3.0.12" + resolved "https://registry.yarnpkg.com/@smithy/config-resolver/-/config-resolver-3.0.12.tgz#f355f95fcb5ee932a90871a488a4f2128e8ad3ac" + integrity sha512-YAJP9UJFZRZ8N+UruTeq78zkdjUHmzsY62J4qKWZ4SXB4QXJ/+680EfXXgkYA2xj77ooMqtUY9m406zGNqwivQ== dependencies: - "@smithy/node-config-provider" "^3.1.9" - "@smithy/types" "^3.6.0" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/types" "^3.7.1" "@smithy/util-config-provider" "^3.0.0" - "@smithy/util-middleware" "^3.0.8" + "@smithy/util-middleware" "^3.0.10" tslib "^2.6.2" -"@smithy/core@^2.4.8", "@smithy/core@^2.5.1": - version "2.5.1" - resolved "https://registry.yarnpkg.com/@smithy/core/-/core-2.5.1.tgz#7f635b76778afca845bcb401d36f22fa37712f15" - integrity sha512-DujtuDA7BGEKExJ05W5OdxCoyekcKT3Rhg1ZGeiUWaz2BJIWXjZmsG/DIP4W48GHno7AQwRsaCb8NcBgH3QZpg== +"@smithy/core@^2.5.3", "@smithy/core@^2.5.4": + version "2.5.4" + resolved "https://registry.yarnpkg.com/@smithy/core/-/core-2.5.4.tgz#b9eb9c3a8f47d550dcdea19cc95434e66e5556cf" + integrity sha512-iFh2Ymn2sCziBRLPuOOxRPkuCx/2gBdXtBGuCUFLUe6bWYjKnhHyIPqGeNkLZ5Aco/5GjebRTBFiWID3sDbrKw== dependencies: - "@smithy/middleware-serde" "^3.0.8" - "@smithy/protocol-http" "^4.1.5" - "@smithy/types" "^3.6.0" + "@smithy/middleware-serde" "^3.0.10" + "@smithy/protocol-http" "^4.1.7" + "@smithy/types" "^3.7.1" "@smithy/util-body-length-browser" "^3.0.0" - "@smithy/util-middleware" "^3.0.8" - "@smithy/util-stream" "^3.2.1" + "@smithy/util-middleware" "^3.0.10" + "@smithy/util-stream" "^3.3.1" "@smithy/util-utf8" "^3.0.0" tslib "^2.6.2" -"@smithy/credential-provider-imds@^3.2.4", "@smithy/credential-provider-imds@^3.2.5": - version "3.2.5" - resolved 
"https://registry.yarnpkg.com/@smithy/credential-provider-imds/-/credential-provider-imds-3.2.5.tgz#dbfd849a4a7ebd68519cd9fc35f78d091e126d0a" - integrity sha512-4FTQGAsuwqTzVMmiRVTn0RR9GrbRfkP0wfu/tXWVHd2LgNpTY0uglQpIScXK4NaEyXbB3JmZt8gfVqO50lP8wg== - dependencies: - "@smithy/node-config-provider" "^3.1.9" - "@smithy/property-provider" "^3.1.8" - "@smithy/types" "^3.6.0" - "@smithy/url-parser" "^3.0.8" - tslib "^2.6.2" - -"@smithy/fetch-http-handler@^3.2.9": - version "3.2.9" - resolved "https://registry.yarnpkg.com/@smithy/fetch-http-handler/-/fetch-http-handler-3.2.9.tgz#8d5199c162a37caa37a8b6848eefa9ca58221a0b" - integrity sha512-hYNVQOqhFQ6vOpenifFME546f0GfJn2OiQ3M0FDmuUu8V/Uiwy2wej7ZXxFBNqdx0R5DZAqWM1l6VRhGz8oE6A== +"@smithy/credential-provider-imds@^3.2.6", "@smithy/credential-provider-imds@^3.2.7": + version "3.2.7" + resolved "https://registry.yarnpkg.com/@smithy/credential-provider-imds/-/credential-provider-imds-3.2.7.tgz#6eedf87ba0238723ec46d8ce0f18e276685a702d" + integrity sha512-cEfbau+rrWF8ylkmmVAObOmjbTIzKyUC5TkBL58SbLywD0RCBC4JAUKbmtSm2w5KUJNRPGgpGFMvE2FKnuNlWQ== dependencies: - "@smithy/protocol-http" "^4.1.4" - "@smithy/querystring-builder" "^3.0.7" - "@smithy/types" "^3.5.0" - "@smithy/util-base64" "^3.0.0" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/property-provider" "^3.1.10" + "@smithy/types" "^3.7.1" + "@smithy/url-parser" "^3.0.10" tslib "^2.6.2" -"@smithy/fetch-http-handler@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@smithy/fetch-http-handler/-/fetch-http-handler-4.0.0.tgz#3763cb5178745ed630ed5bc3beb6328abdc31f36" - integrity sha512-MLb1f5tbBO2X6K4lMEKJvxeLooyg7guq48C2zKr4qM7F2Gpkz4dc+hdSgu77pCJ76jVqFBjZczHYAs6dp15N+g== +"@smithy/fetch-http-handler@^4.1.1": + version "4.1.1" + resolved "https://registry.yarnpkg.com/@smithy/fetch-http-handler/-/fetch-http-handler-4.1.1.tgz#cead80762af4cdea11e7eeb627ea1c4835265dfa" + integrity sha512-bH7QW0+JdX0bPBadXt8GwMof/jz0H28I84hU1Uet9ISpzUqXqRQ3fEZJ+ANPOhzSEczYvANNl3uDQDYArSFDtA== dependencies: - "@smithy/protocol-http" "^4.1.5" - "@smithy/querystring-builder" "^3.0.8" - "@smithy/types" "^3.6.0" + "@smithy/protocol-http" "^4.1.7" + "@smithy/querystring-builder" "^3.0.10" + "@smithy/types" "^3.7.1" "@smithy/util-base64" "^3.0.0" tslib "^2.6.2" -"@smithy/hash-node@^3.0.7": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/hash-node/-/hash-node-3.0.8.tgz#f451cc342f74830466b0b39bf985dc3022634065" - integrity sha512-tlNQYbfpWXHimHqrvgo14DrMAgUBua/cNoz9fMYcDmYej7MAmUcjav/QKQbFc3NrcPxeJ7QClER4tWZmfwoPng== +"@smithy/hash-node@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/hash-node/-/hash-node-3.0.10.tgz#93c857b4bff3a48884886440fd9772924887e592" + integrity sha512-3zWGWCHI+FlJ5WJwx73Mw2llYR8aflVyZN5JhoqLxbdPZi6UyKSdCeXAWJw9ja22m6S6Tzz1KZ+kAaSwvydi0g== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" "@smithy/util-buffer-from" "^3.0.0" "@smithy/util-utf8" "^3.0.0" tslib "^2.6.2" -"@smithy/invalid-dependency@^3.0.7": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/invalid-dependency/-/invalid-dependency-3.0.8.tgz#4d381a4c24832371ade79e904a72c173c9851e5f" - integrity sha512-7Qynk6NWtTQhnGTTZwks++nJhQ1O54Mzi7fz4PqZOiYXb4Z1Flpb2yRvdALoggTS8xjtohWUM+RygOtB30YL3Q== +"@smithy/invalid-dependency@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/invalid-dependency/-/invalid-dependency-3.0.10.tgz#8616dee555916c24dec3e33b1e046c525efbfee3" + integrity 
sha512-Lp2L65vFi+cj0vFMu2obpPW69DU+6O5g3086lmI4XcnRCG8PxvpWC7XyaVwJCxsZFzueHjXnrOH/E0pl0zikfA== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" "@smithy/is-array-buffer@^2.2.0": @@ -3300,170 +3252,170 @@ dependencies: tslib "^2.6.2" -"@smithy/middleware-content-length@^3.0.9": - version "3.0.10" - resolved "https://registry.yarnpkg.com/@smithy/middleware-content-length/-/middleware-content-length-3.0.10.tgz#738266f6d81436d7e3a86bea931bc64e04ae7dbf" - integrity sha512-T4dIdCs1d/+/qMpwhJ1DzOhxCZjZHbHazEPJWdB4GDi2HjIZllVzeBEcdJUN0fomV8DURsgOyrbEUzg3vzTaOg== +"@smithy/middleware-content-length@^3.0.12": + version "3.0.12" + resolved "https://registry.yarnpkg.com/@smithy/middleware-content-length/-/middleware-content-length-3.0.12.tgz#3b248ed1e8f1e0ae67171abb8eae9da7ab7ca613" + integrity sha512-1mDEXqzM20yywaMDuf5o9ue8OkJ373lSPbaSjyEvkWdqELhFMyNNgKGWL/rCSf4KME8B+HlHKuR8u9kRj8HzEQ== dependencies: - "@smithy/protocol-http" "^4.1.5" - "@smithy/types" "^3.6.0" + "@smithy/protocol-http" "^4.1.7" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/middleware-endpoint@^3.1.4", "@smithy/middleware-endpoint@^3.2.1": - version "3.2.1" - resolved "https://registry.yarnpkg.com/@smithy/middleware-endpoint/-/middleware-endpoint-3.2.1.tgz#b9ee42d29d8f3a266883d293c4d6a586f7b60979" - integrity sha512-wWO3xYmFm6WRW8VsEJ5oU6h7aosFXfszlz3Dj176pTij6o21oZnzkCLzShfmRaaCHDkBXWBdO0c4sQAvLFP6zA== - dependencies: - "@smithy/core" "^2.5.1" - "@smithy/middleware-serde" "^3.0.8" - "@smithy/node-config-provider" "^3.1.9" - "@smithy/shared-ini-file-loader" "^3.1.9" - "@smithy/types" "^3.6.0" - "@smithy/url-parser" "^3.0.8" - "@smithy/util-middleware" "^3.0.8" +"@smithy/middleware-endpoint@^3.2.3", "@smithy/middleware-endpoint@^3.2.4": + version "3.2.4" + resolved "https://registry.yarnpkg.com/@smithy/middleware-endpoint/-/middleware-endpoint-3.2.4.tgz#aaded88e3848e56edc99797d71069817fe20cb44" + integrity sha512-TybiW2LA3kYVd3e+lWhINVu1o26KJbBwOpADnf0L4x/35vLVica77XVR5hvV9+kWeTGeSJ3IHTcYxbRxlbwhsg== + dependencies: + "@smithy/core" "^2.5.4" + "@smithy/middleware-serde" "^3.0.10" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/shared-ini-file-loader" "^3.1.11" + "@smithy/types" "^3.7.1" + "@smithy/url-parser" "^3.0.10" + "@smithy/util-middleware" "^3.0.10" tslib "^2.6.2" -"@smithy/middleware-retry@^3.0.23": - version "3.0.25" - resolved "https://registry.yarnpkg.com/@smithy/middleware-retry/-/middleware-retry-3.0.25.tgz#a6b1081fc1a0991ffe1d15e567e76198af01f37c" - integrity sha512-m1F70cPaMBML4HiTgCw5I+jFNtjgz5z5UdGnUbG37vw6kh4UvizFYjqJGHvicfgKMkDL6mXwyPp5mhZg02g5sg== - dependencies: - "@smithy/node-config-provider" "^3.1.9" - "@smithy/protocol-http" "^4.1.5" - "@smithy/service-error-classification" "^3.0.8" - "@smithy/smithy-client" "^3.4.2" - "@smithy/types" "^3.6.0" - "@smithy/util-middleware" "^3.0.8" - "@smithy/util-retry" "^3.0.8" +"@smithy/middleware-retry@^3.0.27": + version "3.0.28" + resolved "https://registry.yarnpkg.com/@smithy/middleware-retry/-/middleware-retry-3.0.28.tgz#92ef5a446bf232fc170c92a460e8af827b0e43bb" + integrity sha512-vK2eDfvIXG1U64FEUhYxoZ1JSj4XFbYWkK36iz02i3pFwWiDz1Q7jKhGTBCwx/7KqJNk4VS7d7cDLXFOvP7M+g== + dependencies: + "@smithy/node-config-provider" "^3.1.11" + "@smithy/protocol-http" "^4.1.7" + "@smithy/service-error-classification" "^3.0.10" + "@smithy/smithy-client" "^3.4.5" + "@smithy/types" "^3.7.1" + "@smithy/util-middleware" "^3.0.10" + "@smithy/util-retry" "^3.0.10" tslib "^2.6.2" uuid "^9.0.1" -"@smithy/middleware-serde@^3.0.7", 
"@smithy/middleware-serde@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/middleware-serde/-/middleware-serde-3.0.8.tgz#a46d10dba3c395be0d28610d55c89ff8c07c0cd3" - integrity sha512-Xg2jK9Wc/1g/MBMP/EUn2DLspN8LNt+GMe7cgF+Ty3vl+Zvu+VeZU5nmhveU+H8pxyTsjrAkci8NqY6OuvZnjA== +"@smithy/middleware-serde@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/middleware-serde/-/middleware-serde-3.0.10.tgz#5f6c0b57b10089a21d355bd95e9b7d40378454d7" + integrity sha512-MnAuhh+dD14F428ubSJuRnmRsfOpxSzvRhaGVTvd/lrUDE3kxzCCmH8lnVTvoNQnV2BbJ4c15QwZ3UdQBtFNZA== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/middleware-stack@^3.0.7", "@smithy/middleware-stack@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/middleware-stack/-/middleware-stack-3.0.8.tgz#f1c7d9c7fe8280c6081141c88f4a76875da1fc43" - integrity sha512-d7ZuwvYgp1+3682Nx0MD3D/HtkmZd49N3JUndYWQXfRZrYEnCWYc8BHcNmVsPAp9gKvlurdg/mubE6b/rPS9MA== +"@smithy/middleware-stack@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/middleware-stack/-/middleware-stack-3.0.10.tgz#73e2fde5d151440844161773a17ee13375502baf" + integrity sha512-grCHyoiARDBBGPyw2BeicpjgpsDFWZZxptbVKb3CRd/ZA15F/T6rZjCCuBUjJwdck1nwUuIxYtsS4H9DDpbP5w== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/node-config-provider@^3.1.8", "@smithy/node-config-provider@^3.1.9": - version "3.1.9" - resolved "https://registry.yarnpkg.com/@smithy/node-config-provider/-/node-config-provider-3.1.9.tgz#d27ba8e4753f1941c24ed0af824dbc6c492f510a" - integrity sha512-qRHoah49QJ71eemjuS/WhUXB+mpNtwHRWQr77J/m40ewBVVwvo52kYAmb7iuaECgGTTcYxHS4Wmewfwy++ueew== +"@smithy/node-config-provider@^3.1.11": + version "3.1.11" + resolved "https://registry.yarnpkg.com/@smithy/node-config-provider/-/node-config-provider-3.1.11.tgz#95feba85a5cb3de3fe9adfff1060b35fd556d023" + integrity sha512-URq3gT3RpDikh/8MBJUB+QGZzfS7Bm6TQTqoh4CqE8NBuyPkWa5eUXj0XFcFfeZVgg3WMh1u19iaXn8FvvXxZw== dependencies: - "@smithy/property-provider" "^3.1.8" - "@smithy/shared-ini-file-loader" "^3.1.9" - "@smithy/types" "^3.6.0" + "@smithy/property-provider" "^3.1.10" + "@smithy/shared-ini-file-loader" "^3.1.11" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/node-http-handler@^3.2.4", "@smithy/node-http-handler@^3.2.5": - version "3.2.5" - resolved "https://registry.yarnpkg.com/@smithy/node-http-handler/-/node-http-handler-3.2.5.tgz#ad9d9ba1528bf0d4a655135e978ecc14b3df26a2" - integrity sha512-PkOwPNeKdvX/jCpn0A8n9/TyoxjGZB8WVoJmm9YzsnAgggTj4CrjpRHlTQw7dlLZ320n1mY1y+nTRUDViKi/3w== +"@smithy/node-http-handler@^3.3.1": + version "3.3.1" + resolved "https://registry.yarnpkg.com/@smithy/node-http-handler/-/node-http-handler-3.3.1.tgz#788fc1c22c21a0cf982f4025ccf9f64217f3164f" + integrity sha512-fr+UAOMGWh6bn4YSEezBCpJn9Ukp9oR4D32sCjCo7U81evE11YePOQ58ogzyfgmjIO79YeOdfXXqr0jyhPQeMg== dependencies: - "@smithy/abort-controller" "^3.1.6" - "@smithy/protocol-http" "^4.1.5" - "@smithy/querystring-builder" "^3.0.8" - "@smithy/types" "^3.6.0" + "@smithy/abort-controller" "^3.1.8" + "@smithy/protocol-http" "^4.1.7" + "@smithy/querystring-builder" "^3.0.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/property-provider@^3.1.7", "@smithy/property-provider@^3.1.8": - version "3.1.8" - resolved "https://registry.yarnpkg.com/@smithy/property-provider/-/property-provider-3.1.8.tgz#b1c5a3949effbb9772785ad7ddc5b4b235b10fbe" - integrity 
sha512-ukNUyo6rHmusG64lmkjFeXemwYuKge1BJ8CtpVKmrxQxc6rhUX0vebcptFA9MmrGsnLhwnnqeH83VTU9hwOpjA== +"@smithy/property-provider@^3.1.10", "@smithy/property-provider@^3.1.9": + version "3.1.10" + resolved "https://registry.yarnpkg.com/@smithy/property-provider/-/property-provider-3.1.10.tgz#ae00447c1060c194c3e1b9475f7c8548a70f8486" + integrity sha512-n1MJZGTorTH2DvyTVj+3wXnd4CzjJxyXeOgnTlgNVFxaaMeT4OteEp4QrzF8p9ee2yg42nvyVK6R/awLCakjeQ== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/protocol-http@^4.1.4", "@smithy/protocol-http@^4.1.5": - version "4.1.5" - resolved "https://registry.yarnpkg.com/@smithy/protocol-http/-/protocol-http-4.1.5.tgz#a1f397440f299b6a5abeed6866957fecb1bf5013" - integrity sha512-hsjtwpIemmCkm3ZV5fd/T0bPIugW1gJXwZ/hpuVubt2hEUApIoUTrf6qIdh9MAWlw0vjMrA1ztJLAwtNaZogvg== +"@smithy/protocol-http@^4.1.7": + version "4.1.7" + resolved "https://registry.yarnpkg.com/@smithy/protocol-http/-/protocol-http-4.1.7.tgz#5c67e62beb5deacdb94f2127f9a344bdf1b2ed6e" + integrity sha512-FP2LepWD0eJeOTm0SjssPcgqAlDFzOmRXqXmGhfIM52G7Lrox/pcpQf6RP4F21k0+O12zaqQt5fCDOeBtqY6Cg== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/querystring-builder@^3.0.7", "@smithy/querystring-builder@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/querystring-builder/-/querystring-builder-3.0.8.tgz#0d845be53aa624771c518d1412881236ce12ed4f" - integrity sha512-btYxGVqFUARbUrN6VhL9c3dnSviIwBYD9Rz1jHuN1hgh28Fpv2xjU1HeCeDJX68xctz7r4l1PBnFhGg1WBBPuA== +"@smithy/querystring-builder@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/querystring-builder/-/querystring-builder-3.0.10.tgz#db8773af85ee3977c82b8e35a5cdd178c621306d" + integrity sha512-nT9CQF3EIJtIUepXQuBFb8dxJi3WVZS3XfuDksxSCSn+/CzZowRLdhDn+2acbBv8R6eaJqPupoI/aRFIImNVPQ== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" "@smithy/util-uri-escape" "^3.0.0" tslib "^2.6.2" -"@smithy/querystring-parser@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/querystring-parser/-/querystring-parser-3.0.8.tgz#057a8e2d301eea8eac7071923100ba38a824d7df" - integrity sha512-BtEk3FG7Ks64GAbt+JnKqwuobJNX8VmFLBsKIwWr1D60T426fGrV2L3YS5siOcUhhp6/Y6yhBw1PSPxA5p7qGg== +"@smithy/querystring-parser@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/querystring-parser/-/querystring-parser-3.0.10.tgz#62db744a1ed2cf90f4c08d2c73d365e033b4a11c" + integrity sha512-Oa0XDcpo9SmjhiDD9ua2UyM3uU01ZTuIrNdZvzwUTykW1PM8o2yJvMh1Do1rY5sUQg4NDV70dMi0JhDx4GyxuQ== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/service-error-classification@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/service-error-classification/-/service-error-classification-3.0.8.tgz#265ad2573b972f6c7bdd1ad6c5155a88aeeea1c4" - integrity sha512-uEC/kCCFto83bz5ZzapcrgGqHOh/0r69sZ2ZuHlgoD5kYgXJEThCoTuw/y1Ub3cE7aaKdznb+jD9xRPIfIwD7g== +"@smithy/service-error-classification@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/service-error-classification/-/service-error-classification-3.0.10.tgz#941c549daf0e9abb84d3def1d9e1e3f0f74f5ba6" + integrity sha512-zHe642KCqDxXLuhs6xmHVgRwy078RfqxP2wRDpIyiF8EmsWXptMwnMwbVa50lw+WOGNrYm9zbaEg0oDe3PTtvQ== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" -"@smithy/shared-ini-file-loader@^3.1.8", "@smithy/shared-ini-file-loader@^3.1.9": - version "3.1.9" - resolved 
"https://registry.yarnpkg.com/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-3.1.9.tgz#1b77852b5bb176445e1d80333fa3f739313a4928" - integrity sha512-/+OsJRNtoRbtsX0UpSgWVxFZLsJHo/4sTr+kBg/J78sr7iC+tHeOvOJrS5hCpVQ6sWBbhWLp1UNiuMyZhE6pmA== +"@smithy/shared-ini-file-loader@^3.1.10", "@smithy/shared-ini-file-loader@^3.1.11": + version "3.1.11" + resolved "https://registry.yarnpkg.com/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-3.1.11.tgz#0b4f98c4a66480956fbbefc4627c5dc09d891aea" + integrity sha512-AUdrIZHFtUgmfSN4Gq9nHu3IkHMa1YDcN+s061Nfm+6pQ0mJy85YQDB0tZBCmls0Vuj22pLwDPmL92+Hvfwwlg== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/signature-v4@^4.2.0": - version "4.2.1" - resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-4.2.1.tgz#a918fd7d99af9f60aa07617506fa54be408126ee" - integrity sha512-NsV1jF4EvmO5wqmaSzlnTVetemBS3FZHdyc5CExbDljcyJCEEkJr8ANu2JvtNbVg/9MvKAWV44kTrGS+Pi4INg== +"@smithy/signature-v4@^4.2.2": + version "4.2.3" + resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-4.2.3.tgz#abbca5e5fe9158422b3125b2956791a325a27f22" + integrity sha512-pPSQQ2v2vu9vc8iew7sszLd0O09I5TRc5zhY71KA+Ao0xYazIG+uLeHbTJfIWGO3BGVLiXjUr3EEeCcEQLjpWQ== dependencies: "@smithy/is-array-buffer" "^3.0.0" - "@smithy/protocol-http" "^4.1.5" - "@smithy/types" "^3.6.0" + "@smithy/protocol-http" "^4.1.7" + "@smithy/types" "^3.7.1" "@smithy/util-hex-encoding" "^3.0.0" - "@smithy/util-middleware" "^3.0.8" + "@smithy/util-middleware" "^3.0.10" "@smithy/util-uri-escape" "^3.0.0" "@smithy/util-utf8" "^3.0.0" tslib "^2.6.2" -"@smithy/smithy-client@^3.4.0", "@smithy/smithy-client@^3.4.2": - version "3.4.2" - resolved "https://registry.yarnpkg.com/@smithy/smithy-client/-/smithy-client-3.4.2.tgz#a6e3ed98330ce170cf482e765bd0c21e0fde8ae4" - integrity sha512-dxw1BDxJiY9/zI3cBqfVrInij6ShjpV4fmGHesGZZUiP9OSE/EVfdwdRz0PgvkEvrZHpsj2htRaHJfftE8giBA== - dependencies: - "@smithy/core" "^2.5.1" - "@smithy/middleware-endpoint" "^3.2.1" - "@smithy/middleware-stack" "^3.0.8" - "@smithy/protocol-http" "^4.1.5" - "@smithy/types" "^3.6.0" - "@smithy/util-stream" "^3.2.1" +"@smithy/smithy-client@^3.4.4", "@smithy/smithy-client@^3.4.5": + version "3.4.5" + resolved "https://registry.yarnpkg.com/@smithy/smithy-client/-/smithy-client-3.4.5.tgz#b90fe15d80e2dca5aa9cf3bd24bd73359ad1ef61" + integrity sha512-k0sybYT9zlP79sIKd1XGm4TmK0AS1nA2bzDHXx7m0nGi3RQ8dxxQUs4CPkSmQTKAo+KF9aINU3KzpGIpV7UoMw== + dependencies: + "@smithy/core" "^2.5.4" + "@smithy/middleware-endpoint" "^3.2.4" + "@smithy/middleware-stack" "^3.0.10" + "@smithy/protocol-http" "^4.1.7" + "@smithy/types" "^3.7.1" + "@smithy/util-stream" "^3.3.1" tslib "^2.6.2" -"@smithy/types@^3.5.0", "@smithy/types@^3.6.0": - version "3.6.0" - resolved "https://registry.yarnpkg.com/@smithy/types/-/types-3.6.0.tgz#03a52bfd62ee4b7b2a1842c8ae3ada7a0a5ff3a4" - integrity sha512-8VXK/KzOHefoC65yRgCn5vG1cysPJjHnOVt9d0ybFQSmJgQj152vMn4EkYhGuaOmnnZvCPav/KnYyE6/KsNZ2w== +"@smithy/types@^3.7.1": + version "3.7.1" + resolved "https://registry.yarnpkg.com/@smithy/types/-/types-3.7.1.tgz#4af54c4e28351e9101996785a33f2fdbf93debe7" + integrity sha512-XKLcLXZY7sUQgvvWyeaL/qwNPp6V3dWcUjqrQKjSb+tzYiCy340R/c64LV5j+Tnb2GhmunEX0eou+L+m2hJNYA== dependencies: tslib "^2.6.2" -"@smithy/url-parser@^3.0.7", "@smithy/url-parser@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/url-parser/-/url-parser-3.0.8.tgz#8057d91d55ba8df97d74576e000f927b42da9e18" - integrity 
sha512-4FdOhwpTW7jtSFWm7SpfLGKIBC9ZaTKG5nBF0wK24aoQKQyDIKUw3+KFWCQ9maMzrgTJIuOvOnsV2lLGW5XjTg== +"@smithy/url-parser@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/url-parser/-/url-parser-3.0.10.tgz#f389985a79766cff4a99af14979f01a17ce318da" + integrity sha512-j90NUalTSBR2NaZTuruEgavSdh8MLirf58LoGSk4AtQfyIymogIhgnGUU2Mga2bkMkpSoC9gxb74xBXL5afKAQ== dependencies: - "@smithy/querystring-parser" "^3.0.8" - "@smithy/types" "^3.6.0" + "@smithy/querystring-parser" "^3.0.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" "@smithy/util-base64@^3.0.0": @@ -3512,37 +3464,37 @@ dependencies: tslib "^2.6.2" -"@smithy/util-defaults-mode-browser@^3.0.23": - version "3.0.25" - resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-3.0.25.tgz#ef9b84272d1db23503ff155f9075a4543ab6dab7" - integrity sha512-fRw7zymjIDt6XxIsLwfJfYUfbGoO9CmCJk6rjJ/X5cd20+d2Is7xjU5Kt/AiDt6hX8DAf5dztmfP5O82gR9emA== +"@smithy/util-defaults-mode-browser@^3.0.27": + version "3.0.28" + resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-3.0.28.tgz#c443b9ae2784b5621def0541a98fc9704c846bfc" + integrity sha512-6bzwAbZpHRFVJsOztmov5PGDmJYsbNSoIEfHSJJyFLzfBGCCChiO3od9k7E/TLgrCsIifdAbB9nqbVbyE7wRUw== dependencies: - "@smithy/property-provider" "^3.1.8" - "@smithy/smithy-client" "^3.4.2" - "@smithy/types" "^3.6.0" + "@smithy/property-provider" "^3.1.10" + "@smithy/smithy-client" "^3.4.5" + "@smithy/types" "^3.7.1" bowser "^2.11.0" tslib "^2.6.2" -"@smithy/util-defaults-mode-node@^3.0.23": - version "3.0.25" - resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-3.0.25.tgz#c16fe3995c8e90ae318e336178392173aebe1e37" - integrity sha512-H3BSZdBDiVZGzt8TG51Pd2FvFO0PAx/A0mJ0EH8a13KJ6iUCdYnw/Dk/MdC1kTd0eUuUGisDFaxXVXo4HHFL1g== - dependencies: - "@smithy/config-resolver" "^3.0.10" - "@smithy/credential-provider-imds" "^3.2.5" - "@smithy/node-config-provider" "^3.1.9" - "@smithy/property-provider" "^3.1.8" - "@smithy/smithy-client" "^3.4.2" - "@smithy/types" "^3.6.0" +"@smithy/util-defaults-mode-node@^3.0.27": + version "3.0.28" + resolved "https://registry.yarnpkg.com/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-3.0.28.tgz#d6d742d62c2f678938b7a378224e79fca587458b" + integrity sha512-78ENJDorV1CjOQselGmm3+z7Yqjj5HWCbjzh0Ixuq736dh1oEnD9sAttSBNSLlpZsX8VQnmERqA2fEFlmqWn8w== + dependencies: + "@smithy/config-resolver" "^3.0.12" + "@smithy/credential-provider-imds" "^3.2.7" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/property-provider" "^3.1.10" + "@smithy/smithy-client" "^3.4.5" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/util-endpoints@^2.1.3": - version "2.1.4" - resolved "https://registry.yarnpkg.com/@smithy/util-endpoints/-/util-endpoints-2.1.4.tgz#a29134c2b1982442c5fc3be18d9b22796e8eb964" - integrity sha512-kPt8j4emm7rdMWQyL0F89o92q10gvCUa6sBkBtDJ7nV2+P7wpXczzOfoDJ49CKXe5CCqb8dc1W+ZdLlrKzSAnQ== +"@smithy/util-endpoints@^2.1.6": + version "2.1.6" + resolved "https://registry.yarnpkg.com/@smithy/util-endpoints/-/util-endpoints-2.1.6.tgz#720cbd1a616ad7c099b77780f0cb0f1f9fc5d2df" + integrity sha512-mFV1t3ndBh0yZOJgWxO9J/4cHZVn5UG1D8DeCc6/echfNkeEJWu9LD7mgGH5fHrEdR7LDoWw7PQO6QiGpHXhgA== dependencies: - "@smithy/node-config-provider" "^3.1.9" - "@smithy/types" "^3.6.0" + "@smithy/node-config-provider" "^3.1.11" + "@smithy/types" "^3.7.1" tslib "^2.6.2" "@smithy/util-hex-encoding@^3.0.0": @@ -3552,31 +3504,31 @@ dependencies: tslib "^2.6.2" 
-"@smithy/util-middleware@^3.0.7", "@smithy/util-middleware@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/util-middleware/-/util-middleware-3.0.8.tgz#372bc7a2845408ad69da039d277fc23c2734d0c6" - integrity sha512-p7iYAPaQjoeM+AKABpYWeDdtwQNxasr4aXQEA/OmbOaug9V0odRVDy3Wx4ci8soljE/JXQo+abV0qZpW8NX0yA== +"@smithy/util-middleware@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/util-middleware/-/util-middleware-3.0.10.tgz#ab8be99f1aaafe5a5490c344f27a264b72b7592f" + integrity sha512-eJO+/+RsrG2RpmY68jZdwQtnfsxjmPxzMlQpnHKjFPwrYqvlcT+fHdT+ZVwcjlWSrByOhGr9Ff2GG17efc192A== dependencies: - "@smithy/types" "^3.6.0" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/util-retry@^3.0.7", "@smithy/util-retry@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@smithy/util-retry/-/util-retry-3.0.8.tgz#9c607c175a4d8a87b5d8ebaf308f6b849e4dc4d0" - integrity sha512-TCEhLnY581YJ+g1x0hapPz13JFqzmh/pMWL2KEFASC51qCfw3+Y47MrTmea4bUE5vsdxQ4F6/KFbUeSz22Q1ow== +"@smithy/util-retry@^3.0.10": + version "3.0.10" + resolved "https://registry.yarnpkg.com/@smithy/util-retry/-/util-retry-3.0.10.tgz#fc13e1b30e87af0cbecadf29ca83b171e2040440" + integrity sha512-1l4qatFp4PiU6j7UsbasUHL2VU023NRB/gfaa1M0rDqVrRN4g3mCArLRyH3OuktApA4ye+yjWQHjdziunw2eWA== dependencies: - "@smithy/service-error-classification" "^3.0.8" - "@smithy/types" "^3.6.0" + "@smithy/service-error-classification" "^3.0.10" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@smithy/util-stream@^3.1.9", "@smithy/util-stream@^3.2.1": - version "3.2.1" - resolved "https://registry.yarnpkg.com/@smithy/util-stream/-/util-stream-3.2.1.tgz#f3055dc4c8caba8af4e47191ea7e773d0e5a429d" - integrity sha512-R3ufuzJRxSJbE58K9AEnL/uSZyVdHzud9wLS8tIbXclxKzoe09CRohj2xV8wpx5tj7ZbiJaKYcutMm1eYgz/0A== +"@smithy/util-stream@^3.3.1": + version "3.3.1" + resolved "https://registry.yarnpkg.com/@smithy/util-stream/-/util-stream-3.3.1.tgz#a2636f435637ef90d64df2bb8e71cd63236be112" + integrity sha512-Ff68R5lJh2zj+AUTvbAU/4yx+6QPRzg7+pI7M1FbtQHcRIp7xvguxVsQBKyB3fwiOwhAKu0lnNyYBaQfSW6TNw== dependencies: - "@smithy/fetch-http-handler" "^4.0.0" - "@smithy/node-http-handler" "^3.2.5" - "@smithy/types" "^3.6.0" + "@smithy/fetch-http-handler" "^4.1.1" + "@smithy/node-http-handler" "^3.3.1" + "@smithy/types" "^3.7.1" "@smithy/util-base64" "^3.0.0" "@smithy/util-buffer-from" "^3.0.0" "@smithy/util-hex-encoding" "^3.0.0" @@ -3606,19 +3558,19 @@ "@smithy/util-buffer-from" "^3.0.0" tslib "^2.6.2" -"@smithy/util-waiter@^3.1.6": - version "3.1.7" - resolved "https://registry.yarnpkg.com/@smithy/util-waiter/-/util-waiter-3.1.7.tgz#e94f7b9fb8e3b627d78f8886918c76030cf41815" - integrity sha512-d5yGlQtmN/z5eoTtIYgkvOw27US2Ous4VycnXatyoImIF9tzlcpnKqQ/V7qhvJmb2p6xZne1NopCLakdTnkBBQ== +"@smithy/util-waiter@^3.1.9": + version "3.1.9" + resolved "https://registry.yarnpkg.com/@smithy/util-waiter/-/util-waiter-3.1.9.tgz#1330ce2e79b58419d67755d25bce7a226e32dc6d" + integrity sha512-/aMXPANhMOlMPjfPtSrDfPeVP8l56SJlz93xeiLmhLe5xvlXA5T3abZ2ilEsDEPeY9T/wnN/vNGn9wa1SbufWA== dependencies: - "@smithy/abort-controller" "^3.1.6" - "@smithy/types" "^3.6.0" + "@smithy/abort-controller" "^3.1.8" + "@smithy/types" "^3.7.1" tslib "^2.6.2" -"@sqlite.org/sqlite-wasm@^3.47.0-build1": - version "3.47.0-build1" - resolved "https://registry.yarnpkg.com/@sqlite.org/sqlite-wasm/-/sqlite-wasm-3.47.0-build1.tgz#047977d44981067902314fd14226328b8f60dc17" - integrity sha512-n2lLez+PwcN+WQdgRIm6OCaGvQUHOx+kEmlL7pq0G4xuK+NaKxnbF4wI6XhfZ4HbyG2sla1Qt4ImaD6hpwoMtg== 
+"@sqlite.org/sqlite-wasm@^3.47.1-build1": + version "3.47.1-build1" + resolved "https://registry.yarnpkg.com/@sqlite.org/sqlite-wasm/-/sqlite-wasm-3.47.1-build1.tgz#d1ec9ed8be6131e8cbd42e66c2e8c65713d1f928" + integrity sha512-3qnVGab3sjJ8ov0ce1rQGZrMkglYEQ/q5fnq1s1BpRWFMYsiJVaLD1zKepcI9E9r3qx7929+2G27Hfsasvrm6Q== "@stablelib/binary@^2.0.0": version "2.0.0" @@ -3695,14 +3647,6 @@ resolved "https://registry.yarnpkg.com/@tufjs/canonical-json/-/canonical-json-2.0.0.tgz#a52f61a3d7374833fca945b2549bc30a2dd40d0a" integrity sha512-yVtV8zsdo8qFHe+/3kw81dSLyF7D576A5cCFCi4X7B39tWT7SekaEFUnvnWJHz+9qO7qJTah1JbrDjWKqFtdWA== -"@tufjs/models@2.0.1": - version "2.0.1" - resolved "https://registry.yarnpkg.com/@tufjs/models/-/models-2.0.1.tgz#e429714e753b6c2469af3212e7f320a6973c2812" - integrity sha512-92F7/SFyufn4DXsha9+QfKnN03JGqtMFMXgSHbZOo8JG59WkTni7UzAouNQDf7AuP9OAMxVOPQcqG3sB7w+kkg== - dependencies: - "@tufjs/canonical-json" "2.0.0" - minimatch "^9.0.4" - "@tufjs/models@3.0.1": version "3.0.1" resolved "https://registry.yarnpkg.com/@tufjs/models/-/models-3.0.1.tgz#5aebb782ebb9e06f071ae7831c1f35b462b0319c" @@ -3749,10 +3693,10 @@ resolved "https://registry.yarnpkg.com/@types/benchmark/-/benchmark-2.1.5.tgz#940c1850c18fdfdaee3fd6ed29cd92ae0d445b45" integrity sha512-cKio2eFB3v7qmKcvIHLUMw/dIx/8bhWPuzpzRT4unCPRTD8VdA9Zb0afxpcxOqR4PixRS7yT42FqGS8BYL8g1w== -"@types/better-sqlite3@^7.6.11": - version "7.6.11" - resolved "https://registry.yarnpkg.com/@types/better-sqlite3/-/better-sqlite3-7.6.11.tgz#95acf22fcf5577624eea202058e26ba239760b9f" - integrity sha512-i8KcD3PgGtGBLl3+mMYA8PdKkButvPyARxA7IQAd6qeslht13qxb1zzO8dRCtE7U3IoJS782zDBAeoKiM695kg== +"@types/better-sqlite3@^7.6.12": + version "7.6.12" + resolved "https://registry.yarnpkg.com/@types/better-sqlite3/-/better-sqlite3-7.6.12.tgz#e5712d46d71097dcc2775c0b068072eadc15deb7" + integrity sha512-fnQmj8lELIj7BSrZQAdBMHEHX8OZLYIHXqAKT1O7tDfLxaINzf00PMjw22r3N/xXh0w/sGHlO6SVaCQ2mj78lg== dependencies: "@types/node" "*" @@ -3892,9 +3836,9 @@ integrity sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag== "@types/mocha@^10.0.0", "@types/mocha@^10.0.8": - version "10.0.9" - resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-10.0.9.tgz#101e9da88d2c02e5ac8952982c23b224524d662a" - integrity sha512-sicdRoWtYevwxjOHNMPTl3vSfJM6oyW8o1wXeI7uww6b6xHg8eBznQDNSGBCDJmsE8UMxP05JgZRtsKbTqt//Q== + version "10.0.10" + resolved "https://registry.yarnpkg.com/@types/mocha/-/mocha-10.0.10.tgz#91f62905e8d23cbd66225312f239454a23bebfa0" + integrity sha512-xPyYSz1cMPnJQhl0CLMH68j3gprKZaTjG3s5Vi+fDgx+uhG9NOXwbVt52eFS8ECyXhyKcjDLCBEqBExKuiZb7Q== "@types/ms@*": version "0.7.34" @@ -3907,16 +3851,16 @@ integrity sha512-QvlqvYtGBYIDeO8dFdY4djkRubcrc+yTJtBc7n8VZPlJDUS/00A+PssbvERM8f9bYRmcaSEHPZgZojeQj7kzAA== "@types/node@*", "@types/node@^22.5.4": - version "22.8.2" - resolved "https://registry.yarnpkg.com/@types/node/-/node-22.8.2.tgz#8e82bb8201c0caf751dcdc61b0a262d2002d438b" - integrity sha512-NzaRNFV+FZkvK/KLCsNdTvID0SThyrs5SHB6tsD/lajr22FGC73N2QeDPM2wHtVde8mgcXuSsHQkH5cX1pbPLw== + version "22.10.1" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.10.1.tgz#41ffeee127b8975a05f8c4f83fb89bcb2987d766" + integrity sha512-qKgsUwfHZV2WCWLAnVP1JqnpE6Im6h3Y0+fYgMTasNQ7V++CBX5OT1as0g0f+OyubbFqhf6XVNIsmN4IIhEgGQ== dependencies: - undici-types "~6.19.8" + undici-types "~6.20.0" "@types/node@^20.4.2": - version "20.17.2" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.17.2.tgz#3ca40ef7d776c85a1db3df23cbb5bfb3c384a92e" - 
integrity sha512-OOHK4sjXqkL7yQ7VEEHcf6+0jSvKjWqwnaCtY7AKD/VLEvRHMsxxu7eI8ErnjxHS8VwmekD4PeVCpu4qZEZSxg== + version "20.17.9" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.17.9.tgz#5f141d4b7ee125cdee5faefe28de095398865bab" + integrity sha512-0JOXkRyLanfGPE2QRCwgxhzlBAvaRdCNMcvbd7jFfpmD4eEXll7LRwy5ymJmyeZqk7Nh7eD2LeUyQ68BbndmXw== dependencies: undici-types "~6.19.2" @@ -4039,9 +3983,9 @@ integrity sha512-e9kZO9kCdLqT2h9Tw38oGv9UNzBBWaR1MzuAavxPcsV/7FJ3tWbU6RI3uB+yKIDPGLkGVbplS52ub0AcRLvrhA== "@types/ws@^8.2.2", "@types/ws@^8.5.10": - version "8.5.12" - resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.12.tgz#619475fe98f35ccca2a2f6c137702d85ec247b7e" - integrity sha512-3tPRkv1EtkDpzlgyKyI8pGsGZAGPEaXeu0DOj5DI25Ja91bdAYddYHbADRYVrZMRbfW+1l5YwXVDKohDJNQxkQ== + version "8.5.13" + resolved "https://registry.yarnpkg.com/@types/ws/-/ws-8.5.13.tgz#6414c280875e2691d0d1e080b05addbf5cb91e20" + integrity sha512-osM/gWBTPKgHV8XkTunnegTRIsvF6owmf5w+JtAfOw472dptdm0dlGv4xCt6GwQRcC2XVOvvRE/0bAoQcL2QkA== dependencies: "@types/node" "*" @@ -4223,62 +4167,62 @@ integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ== "@vitejs/plugin-react@^4.2.1": - version "4.3.3" - resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.3.3.tgz#28301ac6d7aaf20b73a418ee5c65b05519b4836c" - integrity sha512-NooDe9GpHGqNns1i8XDERg0Vsg5SSYRhRxxyTGogUdkdNt47jal+fbuYi+Yfq6pzRCKXyoPcWisfxE6RIM3GKA== + version "4.3.4" + resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.3.4.tgz#c64be10b54c4640135a5b28a2432330e88ad7c20" + integrity sha512-SCCPBJtYLdE8PX/7ZQAs1QAZ8Jqwih+0VBLum1EGqmCCQal+MIUqLCzj3ZUy8ufbC0cAM4LRlSTm7IQJwWT4ug== dependencies: - "@babel/core" "^7.25.2" - "@babel/plugin-transform-react-jsx-self" "^7.24.7" - "@babel/plugin-transform-react-jsx-source" "^7.24.7" + "@babel/core" "^7.26.0" + "@babel/plugin-transform-react-jsx-self" "^7.25.9" + "@babel/plugin-transform-react-jsx-source" "^7.25.9" "@types/babel__core" "^7.20.5" react-refresh "^0.14.2" -"@vue/compiler-core@3.5.12": - version "3.5.12" - resolved "https://registry.yarnpkg.com/@vue/compiler-core/-/compiler-core-3.5.12.tgz#bd70b7dabd12b0b6f31bc53418ba3da77994c437" - integrity sha512-ISyBTRMmMYagUxhcpyEH0hpXRd/KqDU4ymofPgl2XAkY9ZhQ+h0ovEZJIiPop13UmR/54oA2cgMDjgroRelaEw== +"@vue/compiler-core@3.5.13": + version "3.5.13" + resolved "https://registry.yarnpkg.com/@vue/compiler-core/-/compiler-core-3.5.13.tgz#b0ae6c4347f60c03e849a05d34e5bf747c9bda05" + integrity sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q== dependencies: "@babel/parser" "^7.25.3" - "@vue/shared" "3.5.12" + "@vue/shared" "3.5.13" entities "^4.5.0" estree-walker "^2.0.2" source-map-js "^1.2.0" -"@vue/compiler-dom@3.5.12": - version "3.5.12" - resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.5.12.tgz#456d631d11102535b7ee6fd954cf2c93158d0354" - integrity sha512-9G6PbJ03uwxLHKQ3P42cMTi85lDRvGLB2rSGOiQqtXELat6uI4n8cNz9yjfVHRPIu+MsK6TE418Giruvgptckg== +"@vue/compiler-dom@3.5.13": + version "3.5.13" + resolved "https://registry.yarnpkg.com/@vue/compiler-dom/-/compiler-dom-3.5.13.tgz#bb1b8758dbc542b3658dda973b98a1c9311a8a58" + integrity sha512-ZOJ46sMOKUjO3e94wPdCzQ6P1Lx/vhp2RSvfaab88Ajexs0AHeV0uasYhi99WPaogmBlRHNRuly8xV75cNTMDA== dependencies: - "@vue/compiler-core" "3.5.12" - "@vue/shared" "3.5.12" + "@vue/compiler-core" "3.5.13" + "@vue/shared" "3.5.13" "@vue/compiler-sfc@^3.3.4": - version "3.5.12" - resolved 
"https://registry.yarnpkg.com/@vue/compiler-sfc/-/compiler-sfc-3.5.12.tgz#6688120d905fcf22f7e44d3cb90f8dabc4dd3cc8" - integrity sha512-2k973OGo2JuAa5+ZlekuQJtitI5CgLMOwgl94BzMCsKZCX/xiqzJYzapl4opFogKHqwJk34vfsaKpfEhd1k5nw== + version "3.5.13" + resolved "https://registry.yarnpkg.com/@vue/compiler-sfc/-/compiler-sfc-3.5.13.tgz#461f8bd343b5c06fac4189c4fef8af32dea82b46" + integrity sha512-6VdaljMpD82w6c2749Zhf5T9u5uLBWKnVue6XWxprDobftnletJ8+oel7sexFfM3qIxNmVE7LSFGTpv6obNyaQ== dependencies: "@babel/parser" "^7.25.3" - "@vue/compiler-core" "3.5.12" - "@vue/compiler-dom" "3.5.12" - "@vue/compiler-ssr" "3.5.12" - "@vue/shared" "3.5.12" + "@vue/compiler-core" "3.5.13" + "@vue/compiler-dom" "3.5.13" + "@vue/compiler-ssr" "3.5.13" + "@vue/shared" "3.5.13" estree-walker "^2.0.2" magic-string "^0.30.11" - postcss "^8.4.47" + postcss "^8.4.48" source-map-js "^1.2.0" -"@vue/compiler-ssr@3.5.12": - version "3.5.12" - resolved "https://registry.yarnpkg.com/@vue/compiler-ssr/-/compiler-ssr-3.5.12.tgz#5f1a3fbd5c44b79a6dbe88729f7801d9c9218bde" - integrity sha512-eLwc7v6bfGBSM7wZOGPmRavSWzNFF6+PdRhE+VFJhNCgHiF8AM7ccoqcv5kBXA2eWUfigD7byekvf/JsOfKvPA== +"@vue/compiler-ssr@3.5.13": + version "3.5.13" + resolved "https://registry.yarnpkg.com/@vue/compiler-ssr/-/compiler-ssr-3.5.13.tgz#e771adcca6d3d000f91a4277c972a996d07f43ba" + integrity sha512-wMH6vrYHxQl/IybKJagqbquvxpWCuVYpoUJfCqFZwa/JY1GdATAQ+TgVtgrwwMZ0D07QhA99rs/EAAWfvG6KpA== dependencies: - "@vue/compiler-dom" "3.5.12" - "@vue/shared" "3.5.12" + "@vue/compiler-dom" "3.5.13" + "@vue/shared" "3.5.13" -"@vue/shared@3.5.12": - version "3.5.12" - resolved "https://registry.yarnpkg.com/@vue/shared/-/shared-3.5.12.tgz#f9e45b7f63f2c3f40d84237b1194b7f67de192e3" - integrity sha512-L2RPSAwUFbgZH20etwrXyVyCBu9OxRSi8T/38QsvnkJyvq2LufW2lDCOzm7t/U9C1mkhJGWYfCuFBCmIuNivrg== +"@vue/shared@3.5.13": + version "3.5.13" + resolved "https://registry.yarnpkg.com/@vue/shared/-/shared-3.5.13.tgz#87b309a6379c22b926e696893237826f64339b6f" + integrity sha512-/hnE/qP5ZoGpol0a5mDi45bOd7t3tjYJBjsgCsivow7D48cJeV5l05RD82lPqi7gRiphZM37rnhW1l6ZoCNNnQ== "@yarnpkg/lockfile@^1.1.0": version "1.1.0" @@ -4342,7 +4286,7 @@ acorn-walk@^8.1.1: dependencies: acorn "^8.11.0" -acorn@^8.11.0, acorn@^8.12.0, acorn@^8.4.1, acorn@^8.9.0: +acorn@^8.11.0, acorn@^8.14.0, acorn@^8.4.1, acorn@^8.9.0: version "8.14.0" resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.14.0.tgz#063e2c70cac5fb4f6467f0b11152e04c682795b0" integrity sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA== @@ -4524,7 +4468,7 @@ ansi-regex@^5.0.1: resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== -ansi-regex@^6.0.1: +ansi-regex@^6.0.1, ansi-regex@^6.1.0: version "6.1.0" resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== @@ -4793,9 +4737,9 @@ available-typed-arrays@^1.0.7: possible-typed-array-names "^1.0.0" aws-sdk@^2.1259.0: - version "2.1691.0" - resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.1691.0.tgz#9d6ccdcbae03c806fc62667b76eb3e33e5294dcc" - integrity sha512-/F2YC+DlsY3UBM2Bdnh5RLHOPNibS/+IcjUuhP8XuctyrN+MlL+fWDAiela32LTDk7hMy4rx8MTgvbJ+0blO5g== + version "2.1692.0" + resolved 
"https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.1692.0.tgz#9dac5f7bfcc5ab45825cc8591b12753aa7d2902c" + integrity sha512-x511uiJ/57FIsbgUe5csJ13k3uzu25uWQE+XqfBis/sB0SFoiElJWXRkgEAUh0U6n40eT3ay5Ue4oPkRMu1LYw== dependencies: buffer "4.9.2" events "1.1.1" @@ -4809,9 +4753,9 @@ aws-sdk@^2.1259.0: xml2js "0.6.2" axios@^1.4.0: - version "1.7.7" - resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.7.tgz#2f554296f9892a72ac8d8e4c5b79c14a91d0a47f" - integrity sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q== + version "1.7.8" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.8.tgz#1997b1496b394c21953e68c14aaa51b7b5de3d6e" + integrity sha512-Uu0wb7KNqK2t5K+YQyVCLM76prD5sRFjKHbJYCP1J7JFGEQ6nN7HWn9+04LAeiJ3ji54lgS/gZCH1oxyrf1SPw== dependencies: follow-redirects "^1.15.6" form-data "^4.0.0" @@ -4832,12 +4776,12 @@ babel-plugin-macros@^3.1.0: resolve "^1.19.0" babel-plugin-polyfill-corejs2@^0.4.10: - version "0.4.11" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.11.tgz#30320dfe3ffe1a336c15afdcdafd6fd615b25e33" - integrity sha512-sMEJ27L0gRHShOh5G54uAAPaiCOygY/5ratXuiyb2G46FmlSpc9eFCzYVyDiPxfNbwzA7mYahmjQc5q+CZQ09Q== + version "0.4.12" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.12.tgz#ca55bbec8ab0edeeef3d7b8ffd75322e210879a9" + integrity sha512-CPWT6BwvhrTO2d8QVorhTCQw9Y43zOu7G9HigcfxvepOU6b8o3tcWad6oVgZIsZCTt42FFv97aA7ZJsbM4+8og== dependencies: "@babel/compat-data" "^7.22.6" - "@babel/helper-define-polyfill-provider" "^0.6.2" + "@babel/helper-define-polyfill-provider" "^0.6.3" semver "^6.3.1" babel-plugin-polyfill-corejs3@^0.10.6: @@ -4849,11 +4793,11 @@ babel-plugin-polyfill-corejs3@^0.10.6: core-js-compat "^3.38.0" babel-plugin-polyfill-regenerator@^0.6.1: - version "0.6.2" - resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.2.tgz#addc47e240edd1da1058ebda03021f382bba785e" - integrity sha512-2R25rQZWP63nGwaAswvDazbPXfrM3HwVoBXK6HcqeKrSrL/JqcC/rDcf95l4r7LXLyxDXc8uQDa064GubtCABg== + version "0.6.3" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.3.tgz#abeb1f3f1c762eace37587f42548b08b57789bc8" + integrity sha512-LiWSbl4CRSIa5x/JAU6jZiG9eit9w6mz+yVMFwDE83LAWvt0AfGBoZ7HS/mkhrKuh2ZlzfVZYKoLjXdqw6Yt7Q== dependencies: - "@babel/helper-define-polyfill-provider" "^0.6.2" + "@babel/helper-define-polyfill-provider" "^0.6.3" babel-plugin-transform-inline-environment-variables@^0.4.3: version "0.4.4" @@ -4888,10 +4832,10 @@ benchmark@^2.1.4: lodash "^4.17.4" platform "^1.3.3" -better-sqlite3@^11.5.0: - version "11.5.0" - resolved "https://registry.yarnpkg.com/better-sqlite3/-/better-sqlite3-11.5.0.tgz#58faa51e02845a578dd154f0083487132ead0695" - integrity sha512-e/6eggfOutzoK0JWiU36jsisdWoHOfN9iWiW/SieKvb7SAa6aGNmBM/UKyp+/wWSXpLlWNN8tCPwoDNPhzUvuQ== +better-sqlite3@^11.6.0: + version "11.6.0" + resolved "https://registry.yarnpkg.com/better-sqlite3/-/better-sqlite3-11.6.0.tgz#e50736956e6fe1c30dc94f1bc94a9c15d63b7b6b" + integrity sha512-2J6k/eVxcFYY2SsTxsXrj6XylzHWPxveCn4fKPKZFv/Vqn/Cd7lOuX4d7rGQXT5zL+97MkNL3nSbCrIoe3LkgA== dependencies: bindings "^1.5.0" prebuild-install "^7.1.1" @@ -4938,9 +4882,9 @@ bl@^5.0.0: readable-stream "^3.4.0" bn.js@^4.11.9: - version "4.12.0" - resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.12.0.tgz#775b3f278efbb9718eec7361f483fb36fbbfea88" - integrity 
sha512-c98Bf3tPniI+scsdk237ku1Dc3ujXQTSgyiPUDEOe7tRkhrqridvh8klBv0HCEso1OLOYcHuCv/cS6DNxKH+ZA== + version "4.12.1" + resolved "https://registry.yarnpkg.com/bn.js/-/bn.js-4.12.1.tgz#215741fe3c9dba2d7e12c001d0cfdbae43975ba7" + integrity sha512-k8TVBiPkPJT9uHLdOKfFpqcfprwBFOAAXXozRubr7R7PfIuKvQlzcI4M0pALeqXN09vdaMbUdUj+pass+uULAg== bn.js@^5.2.1: version "5.2.1" @@ -5011,7 +4955,7 @@ browser-stdout@1.3.1, browser-stdout@^1.3.1: resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== -browserslist@^4.23.3, browserslist@^4.24.0: +browserslist@^4.24.0, browserslist@^4.24.2: version "4.24.2" resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.24.2.tgz#f5845bc91069dbd55ee89faf9822e1d885d16580" integrity sha512-ZIc+Q62revdMcqC6aChtW4jz3My3klmCO1fEmINZY/8J3EpBg5/A/D0AKmBveUh6pgoeycoMkVMko84tuYS+Gg== @@ -5206,9 +5150,9 @@ camelcase@^8.0.0: integrity sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA== caniuse-lite@^1.0.30001669: - version "1.0.30001673" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001673.tgz#5aa291557af1c71340e809987367410aab7a5a9e" - integrity sha512-WTrjUCSMp3LYX0nE12ECkV0a+e6LC85E0Auz75555/qr78Oc8YWhEPNfDd6SHdtlCMSzqtuXY0uyEMNRcsKpKw== + version "1.0.30001685" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001685.tgz#2d10d36c540a9a5d47ad6ab9e1ed5f61fdeadd8c" + integrity sha512-e/kJN1EMyHQzgcMEEgoo+YTCO1NGCmIYHk5Qk8jT6AazWemS5QFKJ5ShCJlH3GZrNIdZofcNCEwZqbMjjKzmnA== catering@^2.1.0, catering@^2.1.1: version "2.1.1" @@ -5370,10 +5314,10 @@ ci-info@^3.7.0: resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" integrity sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ== -ci-info@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-4.0.0.tgz#65466f8b280fc019b9f50a5388115d17a63a44f2" - integrity sha512-TdHqgGf9odd8SXNuxtUBVx8Nv+qZOejE6qyqiy5NtbYYQOeFa6zmHkxlPzmaLxWWHsU6nJmB7AETdVPi+2NBUg== +ci-info@^4.0.0, ci-info@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-4.1.0.tgz#92319d2fa29d2620180ea5afed31f589bc98cf83" + integrity sha512-HutrvTNsF48wnxkzERIXOe5/mlcfFcbfCmwcg6CJnizbSue78AbDt+1cgl26zwn61WFxhcPykPfZrbqjGmBb4A== cidr-regex@^4.1.1: version "4.1.1" @@ -5665,11 +5609,11 @@ copy-file@^11.0.0: p-event "^6.0.0" core-js-compat@^3.38.0: - version "3.38.1" - resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.38.1.tgz#2bc7a298746ca5a7bcb9c164bcb120f2ebc09a09" - integrity sha512-JRH6gfXxGmrzF3tZ57lFx97YARxCXPaMzPo6jELZhv88pBH5VXpQ+y0znKGlFnzuaihqhLbefxSJxWJMPtfDzw== + version "3.39.0" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.39.0.tgz#b12dccb495f2601dc860bdbe7b4e3ffa8ba63f61" + integrity sha512-VgEUx3VwlExr5no0tXlBt+silBvhTryPwCXRI2Id1PN8WTKu7MreethvddqOubrYxkFdv/RnYrqlv1sFNAUelw== dependencies: - browserslist "^4.23.3" + browserslist "^4.24.2" core-util-is@~1.0.0: version "1.0.3" @@ -5733,9 +5677,9 @@ create-require@^1.1.0: integrity sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ== cross-spawn@^6.0.5: - version "6.0.5" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.5.tgz#4a5ec7c64dfae22c3a14124dbacdee846d80cbc4" - 
integrity sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ== + version "6.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-6.0.6.tgz#30d0efa0712ddb7eb5a76e1e8721bffafa6b5d57" + integrity sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw== dependencies: nice-try "^1.0.4" path-key "^2.0.1" @@ -5744,9 +5688,9 @@ cross-spawn@^6.0.5: which "^1.2.9" cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" @@ -6165,9 +6109,9 @@ electron-mocha-main@^11.0.3: yargs "^16.2.0" electron-to-chromium@^1.5.41: - version "1.5.48" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.48.tgz#c4611d1ae36eaf943f94d384b62ec3d121167829" - integrity sha512-FXULnNK7ACNI9MTMOVAzUGiz/YrK9Kcb0s/JT4aJgsam7Eh6XYe7Y6q95lPq+VdBe1DpT2eTnfXFtnuPGCks4w== + version "1.5.67" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.67.tgz#66ebd2be4a77469ac2760ef5e9e460ba9a43a845" + integrity sha512-nz88NNBsD7kQSAGGJyp8hS6xSPtWwqNogA0mjtc2nUYeEf3nURK9qpV18TuBdDmEDgVWotS8Wkzf+V52dSQ/LQ== electron-window@^0.8.0: version "0.8.1" @@ -6233,7 +6177,7 @@ end-of-stream@^1.1.0, end-of-stream@^1.4.1: dependencies: once "^1.4.0" -enhanced-resolve@^5.17.0: +enhanced-resolve@^5.17.1: version "5.17.1" resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15" integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg== @@ -6281,10 +6225,10 @@ error-ex@^1.3.1: dependencies: is-arrayish "^0.2.1" -es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23.2: - version "1.23.3" - resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.3.tgz#8f0c5a35cd215312573c5a27c87dfd6c881a0aa0" - integrity sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A== +es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23.2, es-abstract@^1.23.5: + version "1.23.5" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.5.tgz#f4599a4946d57ed467515ed10e4f157289cd52fb" + integrity sha512-vlmniQ0WNPwXqA0BnmwV3Ng7HxiGlh6r5U6JcTMNx8OilcAGqVJBHJcPjqOMaczU9fRuRK5Px2BdVyPRnKMMVQ== dependencies: array-buffer-byte-length "^1.0.1" arraybuffer.prototype.slice "^1.0.3" @@ -6301,7 +6245,7 @@ es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23 function.prototype.name "^1.1.6" get-intrinsic "^1.2.4" get-symbol-description "^1.0.2" - globalthis "^1.0.3" + globalthis "^1.0.4" gopd "^1.0.1" has-property-descriptors "^1.0.2" has-proto "^1.0.3" @@ -6317,10 +6261,10 @@ es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0, es-abstract@^1.23 is-string "^1.0.7" is-typed-array "^1.1.13" is-weakref "^1.0.2" - object-inspect "^1.13.1" + object-inspect "^1.13.3" object-keys "^1.1.1" object.assign "^4.1.5" - 
regexp.prototype.flags "^1.5.2" + regexp.prototype.flags "^1.5.3" safe-array-concat "^1.1.2" safe-regex-test "^1.0.3" string.prototype.trim "^1.2.9" @@ -6374,13 +6318,13 @@ es-shim-unscopables@^1.0.0, es-shim-unscopables@^1.0.2: hasown "^2.0.0" es-to-primitive@^1.2.1: - version "1.2.1" - resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a" - integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA== + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.3.0.tgz#96c89c82cc49fd8794a24835ba3e1ff87f214e18" + integrity sha512-w+5mJ3GuFL+NjVtJlvydShqE1eN3h3PbI7/5LAsYJP/2qtuMXjfL2LpHSRqo4b4eSF5K/DH1JXKUAHSB2UW50g== dependencies: - is-callable "^1.1.4" - is-date-object "^1.0.1" - is-symbol "^1.0.2" + is-callable "^1.2.7" + is-date-object "^1.0.5" + is-symbol "^1.0.4" es6-error@^4.0.1, es6-error@^4.1.1: version "4.1.1" @@ -6551,7 +6495,7 @@ eslint-module-utils@^2.12.0: dependencies: debug "^3.2.7" -eslint-plugin-es-x@^7.5.0: +eslint-plugin-es-x@^7.8.0: version "7.8.0" resolved "https://registry.yarnpkg.com/eslint-plugin-es-x/-/eslint-plugin-es-x-7.8.0.tgz#a207aa08da37a7923f2a9599e6d3eb73f3f92b74" integrity sha512-7Ds8+wAAoV3T+LAKeu39Y5BzXCrGKrcISfgKEqTS4BDN8SFEDQd0S43jiQ8vIa3wUKD07qitZdfzlenSi8/0qQ== @@ -6623,18 +6567,18 @@ eslint-plugin-jsdoc@^48.0.2, eslint-plugin-jsdoc@^48.2.2, eslint-plugin-jsdoc@^4 synckit "^0.9.1" eslint-plugin-n@^17.10.2: - version "17.11.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-n/-/eslint-plugin-n-17.11.1.tgz#c5eeabef598e20751b4dcf31b64e69eb3ee9ae6b" - integrity sha512-93IUD82N6tIEgjztVI/l3ElHtC2wTa9boJHrD8iN+NyDxjxz/daZUZKfkedjBZNdg6EqDk4irybUsiPwDqXAEA== - dependencies: - "@eslint-community/eslint-utils" "^4.4.0" - enhanced-resolve "^5.17.0" - eslint-plugin-es-x "^7.5.0" - get-tsconfig "^4.7.0" - globals "^15.8.0" - ignore "^5.2.4" + version "17.14.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-n/-/eslint-plugin-n-17.14.0.tgz#162a7c17a7ce7e3834af537bca68ab8b6aa26edc" + integrity sha512-maxPLMEA0rPmRpoOlxEclKng4UpDe+N5BJS4t24I3UKnN109Qcivnfs37KMy84G0af3bxjog5lKctP5ObsvcTA== + dependencies: + "@eslint-community/eslint-utils" "^4.4.1" + enhanced-resolve "^5.17.1" + eslint-plugin-es-x "^7.8.0" + get-tsconfig "^4.8.1" + globals "^15.11.0" + ignore "^5.3.2" minimatch "^9.0.5" - semver "^7.5.3" + semver "^7.6.3" eslint-plugin-no-only-tests@^3.1.0: version "3.3.0" @@ -6691,10 +6635,10 @@ eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4 resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== -eslint-visitor-keys@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-4.1.0.tgz#1f785cc5e81eb7534523d85922248232077d2f8c" - integrity sha512-Q7lok0mqMUSf5a/AdAZkA5a/gHcO6snwQClVNNvFKCAVlxXucdU8pKydU5ZVZjBx5xr37vGbFFWtLQYreLzrZg== +eslint-visitor-keys@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-4.2.0.tgz#687bacb2af884fcdda8a6e7d65c606f46a14cd45" + integrity sha512-UyLnSehNt62FFhSwjZlHmeokpRK59rcz29j+F1/aDgbkbRTk7wIc9XzdoasMUbRNKDM0qQt/+BJ4BrpFeABemw== eslint@^8.31.0: version "8.57.1" @@ -6741,13 +6685,13 @@ eslint@^8.31.0: text-table "^0.2.0" espree@^10.1.0: - version "10.2.0" 
- resolved "https://registry.yarnpkg.com/espree/-/espree-10.2.0.tgz#f4bcead9e05b0615c968e85f83816bc386a45df6" - integrity sha512-upbkBJbckcCNBDBDXEbuhjbP68n+scUd3k/U2EkyM9nw+I/jPiL4cLF/Al06CF96wRltFda16sxDFrxsI1v0/g== + version "10.3.0" + resolved "https://registry.yarnpkg.com/espree/-/espree-10.3.0.tgz#29267cf5b0cb98735b65e64ba07e0ed49d1eed8a" + integrity sha512-0QYC8b24HWY8zjRnDTL6RiHfDbAWn63qb4LMj1Z4b076A4une81+z03Kg7l7mn/48PUTqoLptSXez8oknU8Clg== dependencies: - acorn "^8.12.0" + acorn "^8.14.0" acorn-jsx "^5.3.2" - eslint-visitor-keys "^4.1.0" + eslint-visitor-keys "^4.2.0" espree@^9.6.0, espree@^9.6.1: version "9.6.1" @@ -6873,9 +6817,9 @@ execa@^8.0.0, execa@^8.0.1: strip-final-newline "^3.0.0" execa@^9.0.0: - version "9.5.0" - resolved "https://registry.yarnpkg.com/execa/-/execa-9.5.0.tgz#b4437553fdd084f65184b5537a9bc38eac26c59a" - integrity sha512-t7vvYt+oKnMbF3O+S5+HkylsPrsUatwJSe4Cv+4017R0MCySjECxnVJ2eyDXVD/Xpj5H29YzyYn6eEpugG7GJA== + version "9.5.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-9.5.1.tgz#ab9b68073245e1111bba359962a34fcdb28deef2" + integrity sha512-QY5PPtSonnGwhhHDNI7+3RvY285c7iuJFFB+lU+oEzMY/gEGJ808owqJsrr8Otd1E/x07po1LkUBmdAc5duPAg== dependencies: "@sindresorhus/merge-streams" "^4.0.0" cross-spawn "^7.0.3" @@ -7139,9 +7083,9 @@ flat@^5.0.2: integrity sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ== flatted@^3.2.9: - version "3.3.1" - resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.1.tgz#21db470729a6734d4997002f439cb308987f567a" - integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw== + version "3.3.2" + resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.2.tgz#adba1448a9841bec72b42c532ea23dbbedef1a27" + integrity sha512-AiwGJM8YcNOaobumgtng+6NHuOqC3A7MixFeDafM3X9cIUM+xUXoS5Vfgf+OihAYe20fxqNM9yPBXJzRtZ/4eA== follow-redirects@^1.15.6: version "1.15.9" @@ -7309,7 +7253,7 @@ get-func-name@^2.0.1, get-func-name@^2.0.2: resolved "https://registry.yarnpkg.com/get-func-name/-/get-func-name-2.0.2.tgz#0d7cf20cd13fda808669ffa88f4ffc7a3943fc41" integrity sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ== -get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4: +get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4: version "1.2.4" resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== @@ -7369,7 +7313,7 @@ get-symbol-description@^1.0.2: es-errors "^1.3.0" get-intrinsic "^1.2.4" -get-tsconfig@^4.7.0: +get-tsconfig@^4.8.1: version "4.8.1" resolved "https://registry.yarnpkg.com/get-tsconfig/-/get-tsconfig-4.8.1.tgz#8995eb391ae6e1638d251118c7b56de7eb425471" integrity sha512-k9PN+cFBmaLWtVz29SkUoqU5O0slLuHJXt/2P+tMVFT+phsSGXGkp9t3rQIqdz0e+06EHNGs3oM6ZX1s2zHxRg== @@ -7524,12 +7468,12 @@ globals@^13.19.0: dependencies: type-fest "^0.20.2" -globals@^15.8.0: - version "15.11.0" - resolved "https://registry.yarnpkg.com/globals/-/globals-15.11.0.tgz#b96ed4c6998540c6fb824b24b5499216d2438d6e" - integrity sha512-yeyNSjdbyVaWurlwCpcA6XNBrHTMIeDdj0/hnvX/OLJ9ekOXYbLsLinH/MucQyGvNnXhidTdNhTtJaffL2sMfw== +globals@^15.11.0: + version "15.13.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-15.13.0.tgz#bbec719d69aafef188ecd67954aae76a696010fc" + integrity 
sha512-49TewVEz0UxZjr1WYYsWpPrhyC/B/pA8Bq0fUmet2n+eR7yn0IvNzNaoBwnK6mdkzcN+se7Ez9zUgULTz2QH4g== -globalthis@^1.0.1, globalthis@^1.0.3: +globalthis@^1.0.1, globalthis@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236" integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ== @@ -7572,12 +7516,12 @@ globby@^6.1.0: pify "^2.0.0" pinkie-promise "^2.0.0" -gopd@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" - integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== +gopd@^1.0.1, gopd@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.1.0.tgz#df8f0839c2d48caefc32a025a49294d39606c912" + integrity sha512-FQoVQnqcdk4hVM4JN1eromaun4iuS34oStkdlLENLdpULsuQcTyXj8w7ayhuUfPwEYZ1ZOooOTT6fdA9Vmx/RA== dependencies: - get-intrinsic "^1.1.3" + get-intrinsic "^1.2.4" got@^11.8.5: version "11.8.6" @@ -7668,9 +7612,11 @@ has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: es-define-property "^1.0.0" has-proto@^1.0.1, has-proto@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" - integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== + version "1.1.0" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.1.0.tgz#deb10494cbbe8809bce168a3b961f42969f5ed43" + integrity sha512-QLdzI9IIO1Jg7f9GT1gXpPpXArAn6cS31R1eEZqz08Gc+uQ8/XiqHWt17Fiw+2p6oTTIq5GXEpQkAlA88YRl/Q== + dependencies: + call-bind "^1.0.7" has-symbols@^1.0.2, has-symbols@^1.0.3: version "1.0.3" @@ -7769,10 +7715,10 @@ hosted-git-info@^7.0.0: dependencies: lru-cache "^10.0.1" -hosted-git-info@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-8.0.0.tgz#b20f1d55d492eb18d70a252d456a2158aab9f244" - integrity sha512-4nw3vOVR+vHUOT8+U4giwe2tcGv+R3pwwRidUe67DoMBTjhrfr6rZYJVVwdkBE+Um050SG+X9tf0Jo4fOpn01w== +hosted-git-info@^8.0.0, hosted-git-info@^8.0.2: + version "8.0.2" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-8.0.2.tgz#5bd7d8b5395616e41cc0d6578381a32f669b14b2" + integrity sha512-sYKnA7eGln5ov8T8gnYlkSOxFJvywzEx9BueN6xo/GKO8PGiI6uK6xx+DIGe45T3bdVjLAQDQW1aicT8z8JwQg== dependencies: lru-cache "^10.0.1" @@ -7854,7 +7800,7 @@ ignore-walk@^7.0.0: dependencies: minimatch "^9.0.0" -ignore@^5.1.1, ignore@^5.2.0, ignore@^5.2.4, ignore@^5.3.1: +ignore@^5.1.1, ignore@^5.2.0, ignore@^5.2.4, ignore@^5.3.1, ignore@^5.3.2: version "5.3.2" resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== @@ -8020,6 +7966,13 @@ is-arrayish@^0.2.1: resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== +is-async-function@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-async-function/-/is-async-function-2.0.0.tgz#8e4418efd3e5d3a6ebb0164c05ef5afb69aa9646" + integrity sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA== + dependencies: + has-tostringtag "^1.0.0" + 
is-bigint@^1.0.1: version "1.0.4" resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3" @@ -8035,19 +7988,19 @@ is-binary-path@~2.1.0: binary-extensions "^2.0.0" is-boolean-object@^1.1.0: - version "1.1.2" - resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719" - integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA== + version "1.2.0" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.2.0.tgz#9743641e80a62c094b5941c5bb791d66a88e497a" + integrity sha512-kR5g0+dXf/+kXnqI+lu0URKYPKgICtHGGNCDSB10AaUFj3o/HkB3u7WfpRBJGFopxxY0oH3ux7ZsDjLtK7xqvw== dependencies: - call-bind "^1.0.2" - has-tostringtag "^1.0.0" + call-bind "^1.0.7" + has-tostringtag "^1.0.2" is-buffer@^2.0.5: version "2.0.5" resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191" integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ== -is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7: +is-callable@^1.1.3, is-callable@^1.2.7: version "1.2.7" resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055" integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== @@ -8080,7 +8033,7 @@ is-data-view@^1.0.1: dependencies: is-typed-array "^1.1.13" -is-date-object@^1.0.1: +is-date-object@^1.0.5: version "1.0.5" resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ== @@ -8107,6 +8060,13 @@ is-extglob@^2.1.1: resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== +is-finalizationregistry@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-finalizationregistry/-/is-finalizationregistry-1.1.0.tgz#d74a7d0c5f3578e34a20729e69202e578d495dc2" + integrity sha512-qfMdqbAQEwBw78ZyReKnlA8ezmPdb9BemzIIip/JkjaZUhitfXDkkr+3QTboW0JrSXT1QWyYShpvnNHGZ4c4yA== + dependencies: + call-bind "^1.0.7" + is-fullwidth-code-point@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" @@ -8124,7 +8084,7 @@ is-fullwidth-code-point@^3.0.0: resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== -is-generator-function@^1.0.7: +is-generator-function@^1.0.10, is-generator-function@^1.0.7: version "1.0.10" resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.10.tgz#f1558baf1ac17e0deea7c0415c438351ff2b3c72" integrity sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A== @@ -8158,6 +8118,11 @@ is-loopback-addr@^2.0.2: resolved "https://registry.yarnpkg.com/is-loopback-addr/-/is-loopback-addr-2.0.2.tgz#70a6668fa3555d47caebdcee045745ab80adf5e4" integrity 
sha512-26POf2KRCno/KTNL5Q0b/9TYnL00xEsSaLfiFRmjM7m7Lw7ZMmFybzzuX4CcsLAluZGd+niLUiMRxEooVE3aqg== +is-map@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.3.tgz#ede96b7fe1e270b3c4465e3a465658764926d62e" + integrity sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw== + is-nan@^1.3.2: version "1.3.2" resolved "https://registry.yarnpkg.com/is-nan/-/is-nan-1.3.2.tgz#043a54adea31748b55b6cd4e09aadafa69bd9e1d" @@ -8177,11 +8142,12 @@ is-network-error@^1.0.0: integrity sha512-tUdRRAnhT+OtCZR/LxZelH/C7QtjtFrTu5tXCA8pl55eTUElUHT+GPYV8MBMBvea/j+NxQqVt3LbWMRir7Gx9g== is-number-object@^1.0.4: - version "1.0.7" - resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc" - integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ== + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.1.0.tgz#5a867e9ecc3d294dda740d9f127835857af7eb05" + integrity sha512-KVSZV0Dunv9DTPkhXwcZ3Q+tUc9TsaE1ZwX5J2WMvsSGS6Md8TFPun5uwh0yRdrNerI6vf/tbJxqSx4c1ZI1Lw== dependencies: - has-tostringtag "^1.0.0" + call-bind "^1.0.7" + has-tostringtag "^1.0.2" is-number@^7.0.0: version "7.0.0" @@ -8236,12 +8202,19 @@ is-promise@^2.1.0: integrity sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ== is-regex@^1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958" - integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg== + version "1.2.0" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.2.0.tgz#41b9d266e7eb7451312c64efc37e8a7d453077cf" + integrity sha512-B6ohK4ZmoftlUe+uvenXSbPJFo6U37BH7oO1B3nQH8f/7h27N56s85MhUtbFJAziz5dcmuR3i8ovUl35zp8pFA== dependencies: - call-bind "^1.0.2" - has-tostringtag "^1.0.0" + call-bind "^1.0.7" + gopd "^1.1.0" + has-tostringtag "^1.0.2" + hasown "^2.0.2" + +is-set@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.3.tgz#8ab209ea424608141372ded6e0cb200ef1d9d01d" + integrity sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg== is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3: version "1.0.3" @@ -8271,13 +8244,14 @@ is-stream@^4.0.1: integrity sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A== is-string@^1.0.5, is-string@^1.0.7: - version "1.0.7" - resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" - integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg== + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.1.0.tgz#8cb83c5d57311bf8058bc6c8db294711641da45d" + integrity sha512-PlfzajuF9vSo5wErv3MJAKD/nqf9ngAs1NFQYm16nUYFO2IzxJ2hcm+IOCg+EEopdykNNUhVq5cz35cAUxU8+g== dependencies: - has-tostringtag "^1.0.0" + call-bind "^1.0.7" + has-tostringtag "^1.0.2" -is-symbol@^1.0.2, is-symbol@^1.0.3: +is-symbol@^1.0.3, is-symbol@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c" integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg== @@ -8316,6 +8290,11 @@ is-uuid@^1.0.2: resolved 
"https://registry.yarnpkg.com/is-uuid/-/is-uuid-1.0.2.tgz#ad1898ddf154947c25c8e54966f48604e9caecc4" integrity sha512-tCByphFcJgf2qmiMo5hMCgNAquNSagOetVetDvBXswGkNfoyEMvGH1yDlF8cbZbKnbVBr4Y5/rlpMz9umxyBkQ== +is-weakmap@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.2.tgz#bf72615d649dfe5f699079c54b83e47d1ae19cfd" + integrity sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w== + is-weakref@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2" @@ -8323,6 +8302,14 @@ is-weakref@^1.0.2: dependencies: call-bind "^1.0.2" +is-weakset@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-weakset/-/is-weakset-2.0.3.tgz#e801519df8c0c43e12ff2834eead84ec9e624007" + integrity sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ== + dependencies: + call-bind "^1.0.7" + get-intrinsic "^1.2.4" + is-windows@^1.0.1, is-windows@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d" @@ -8913,10 +8900,10 @@ libnpmpack@^8.0.0: npm-package-arg "^12.0.0" pacote "^19.0.0" -libnpmpublish@^10.0.0: - version "10.0.0" - resolved "https://registry.yarnpkg.com/libnpmpublish/-/libnpmpublish-10.0.0.tgz#328fafc94fc0a735c9581207db57dbec590d594c" - integrity sha512-keixAKMGMegm9HHY0stnNtjPHI7J79sMLlRQf0V1n1iFrWspLTGmoA5VMyzX27L3gC8prl+wLizwNWOZlGpcWw== +libnpmpublish@^10.0.1: + version "10.0.1" + resolved "https://registry.yarnpkg.com/libnpmpublish/-/libnpmpublish-10.0.1.tgz#7a284565be164c2f8605225213316a0c1d0a9827" + integrity sha512-xNa1DQs9a8dZetNRV0ky686MNzv1MTqB3szgOlRR3Fr24x1gWRu7aB9OpLZsml0YekmtppgHBkyZ+8QZlzmEyw== dependencies: ci-info "^4.0.0" normalize-package-data "^7.0.0" @@ -8924,7 +8911,7 @@ libnpmpublish@^10.0.0: npm-registry-fetch "^18.0.1" proc-log "^5.0.0" semver "^7.3.7" - sigstore "^2.2.0" + sigstore "^3.0.0" ssri "^12.0.0" libnpmsearch@^8.0.0: @@ -8953,20 +8940,20 @@ libnpmversion@^7.0.0: proc-log "^5.0.0" semver "^7.3.7" -libp2p@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/libp2p/-/libp2p-2.2.1.tgz#eef3ffc80ae6d8b7ec75fe0d6ac562e1ec5ece32" - integrity sha512-xxmaCAfpOKCgYuxLzA87RZBf2lzA2DwuLUB7kFB3MHw6FbGGeb10YEUaM4V/XCgIDDZs4DOCgXnKOMqN+BhjRw== - dependencies: - "@libp2p/crypto" "^5.0.6" - "@libp2p/interface" "^2.2.0" - "@libp2p/interface-internal" "^2.0.10" - "@libp2p/logger" "^5.1.3" - "@libp2p/multistream-select" "^6.0.8" - "@libp2p/peer-collections" "^6.0.10" - "@libp2p/peer-id" "^5.0.7" - "@libp2p/peer-store" "^11.0.10" - "@libp2p/utils" "^6.1.3" +libp2p@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/libp2p/-/libp2p-2.3.1.tgz#eebf945919f3942deb76cf6ce8c2f367e56f813c" + integrity sha512-b8SydqWzScHXiS5A+c29w2JGbkYBajW+AGFmWmtaF5r53ZpMTetnPmlczkT7D2Zd9+k4yKC7plGBWBNku/KNXQ== + dependencies: + "@libp2p/crypto" "^5.0.7" + "@libp2p/interface" "^2.2.1" + "@libp2p/interface-internal" "^2.1.1" + "@libp2p/logger" "^5.1.4" + "@libp2p/multistream-select" "^6.0.9" + "@libp2p/peer-collections" "^6.0.12" + "@libp2p/peer-id" "^5.0.8" + "@libp2p/peer-store" "^11.0.12" + "@libp2p/utils" "^6.2.1" "@multiformats/dns" "^1.0.6" "@multiformats/multiaddr" "^12.2.3" "@multiformats/multiaddr-matcher" "^1.2.1" @@ -9232,9 +9219,9 @@ lunr@^2.3.9: integrity sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow== magic-string@^0.30.11: 
- version "0.30.12" - resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.30.12.tgz#9eb11c9d072b9bcb4940a5b2c2e1a217e4ee1a60" - integrity sha512-Ea8I3sQMVXr8JhN4z+H/d8zwo+tYDgHE9+5G4Wnrwhs0gaK9fXTKx0Tw5Xwsd/bCPTTZNRAdpyzvoeORe9LYpw== + version "0.30.14" + resolved "https://registry.yarnpkg.com/magic-string/-/magic-string-0.30.14.tgz#e9bb29870b81cfc1ec3cc656552f5a7fcbf19077" + integrity sha512-5c99P1WKTed11ZC0HMJOj6CDIue6F8ySu+bJL+85q1zBEIY8IklrJ1eiKC2NDRh3Ct3FcvmJPyQHb9erXMTJNw== dependencies: "@jridgewell/sourcemap-codec" "^1.5.0" @@ -9257,7 +9244,7 @@ make-error@^1.1.1: resolved "https://registry.yarnpkg.com/make-error/-/make-error-1.3.6.tgz#2eb2e37ea9b67c4891f684a1394799af484cf7a2" integrity sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw== -make-fetch-happen@^13.0.0, make-fetch-happen@^13.0.1: +make-fetch-happen@^13.0.0: version "13.0.1" resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-13.0.1.tgz#273ba2f78f45e1f3a6dca91cede87d9fa4821e36" integrity sha512-cKTUFc/rbKUd/9meOvgrpJ2WrNzymt6jfRDdwg5UCnVzv9dTpEj9JS5m3wtziXVCjluIXyL8pcaukYqezIzZQA== @@ -9275,7 +9262,7 @@ make-fetch-happen@^13.0.0, make-fetch-happen@^13.0.1: promise-retry "^2.0.1" ssri "^10.0.0" -make-fetch-happen@^14.0.0, make-fetch-happen@^14.0.1: +make-fetch-happen@^14.0.0, make-fetch-happen@^14.0.1, make-fetch-happen@^14.0.3: version "14.0.3" resolved "https://registry.yarnpkg.com/make-fetch-happen/-/make-fetch-happen-14.0.3.tgz#d74c3ecb0028f08ab604011e0bc6baed483fcdcd" integrity sha512-QMjGbFTP0blj97EeidG5hk/QhKQ3T4ICckQGLgz38QF7Vgbk6e6FTARN8KhKxyBbWn8R0HU+bnw8aSoFPD4qtQ== @@ -9308,16 +9295,17 @@ markdown-table@^3.0.0: integrity sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw== marked-terminal@^7.0.0: - version "7.1.0" - resolved "https://registry.yarnpkg.com/marked-terminal/-/marked-terminal-7.1.0.tgz#f0ed9b9231f954d9920d38eae3cf10b0f589fad0" - integrity sha512-+pvwa14KZL74MVXjYdPR3nSInhGhNvPce/3mqLVZT2oUvt654sL1XImFuLZ1pkA866IYZ3ikDTOFUIC7XzpZZg== + version "7.2.1" + resolved "https://registry.yarnpkg.com/marked-terminal/-/marked-terminal-7.2.1.tgz#9c1ae073a245a03c6a13e3eeac6f586f29856068" + integrity sha512-rQ1MoMFXZICWNsKMiiHwP/Z+92PLKskTPXj+e7uwXmuMPkNn7iTqC+IvDekVm1MPeC9wYQeLxeFaOvudRR/XbQ== dependencies: ansi-escapes "^7.0.0" + ansi-regex "^6.1.0" chalk "^5.3.0" cli-highlight "^2.1.11" cli-table3 "^0.6.5" node-emoji "^2.1.3" - supports-hyperlinks "^3.0.0" + supports-hyperlinks "^3.1.0" marked@^12.0.0: version "12.0.2" @@ -9445,15 +9433,16 @@ mdast-util-phrasing@^4.0.0: unist-util-is "^6.0.0" mdast-util-to-markdown@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.0.tgz#9813f1d6e0cdaac7c244ec8c6dabfdb2102ea2b4" - integrity sha512-SR2VnIEdVNCJbP6y7kVTJgPLifdr8WEU440fQec7qHoHOUz/oJ2jmNRqdDQ3rbiStOXb2mCDGTuwsK5OPUgYlQ== + version "2.1.2" + resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz#f910ffe60897f04bb4b7e7ee434486f76288361b" + integrity sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA== dependencies: "@types/mdast" "^4.0.0" "@types/unist" "^3.0.0" longest-streak "^3.0.0" mdast-util-phrasing "^4.0.0" mdast-util-to-string "^4.0.0" + micromark-util-classify-character "^2.0.0" micromark-util-decode-string "^2.0.0" unist-util-visit "^5.0.0" zwitch "^2.0.0" @@ -9577,9 +9566,9 @@ metro-react-native-babel-preset@^0.64.0: 
react-refresh "^0.4.0" micromark-core-commonmark@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-2.0.1.tgz#9a45510557d068605c6e9a80f282b2bb8581e43d" - integrity sha512-CUQyKr1e///ZODyD1U3xit6zXwy1a8q2a1S1HKtIlmgvurrEpaw/Y9y6KSIbF8P59cn/NjzHyO+Q2fAyYLQrAA== + version "2.0.2" + resolved "https://registry.yarnpkg.com/micromark-core-commonmark/-/micromark-core-commonmark-2.0.2.tgz#6a45bbb139e126b3f8b361a10711ccc7c6e15e93" + integrity sha512-FKjQKbxd1cibWMM1P9N+H8TwlgGgSkWZMmfuVucLCHaYqeSvJ0hFeHsIa65pA2nYbes0f8LDHPMrd9X7Ujxg9w== dependencies: decode-named-character-reference "^1.0.0" devlop "^1.0.0" @@ -9678,18 +9667,18 @@ micromark-extension-gfm@^3.0.0: micromark-util-types "^2.0.0" micromark-factory-destination@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-factory-destination/-/micromark-factory-destination-2.0.0.tgz#857c94debd2c873cba34e0445ab26b74f6a6ec07" - integrity sha512-j9DGrQLm/Uhl2tCzcbLhy5kXsgkHUrjJHg4fFAeoMRwJmJerT9aw4FEhIbZStWN8A3qMwOp1uzHr4UL8AInxtA== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz#8fef8e0f7081f0474fbdd92deb50c990a0264639" + integrity sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA== dependencies: micromark-util-character "^2.0.0" micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" micromark-factory-label@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-factory-label/-/micromark-factory-label-2.0.0.tgz#17c5c2e66ce39ad6f4fc4cbf40d972f9096f726a" - integrity sha512-RR3i96ohZGde//4WSe/dJsxOX6vxIg9TimLAS3i4EhBAFx8Sm5SmqVfR8E87DPSR31nEAjZfbt91OMZWcNgdZw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz#5267efa97f1e5254efc7f20b459a38cb21058ba1" + integrity sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg== dependencies: devlop "^1.0.0" micromark-util-character "^2.0.0" @@ -9697,17 +9686,17 @@ micromark-factory-label@^2.0.0: micromark-util-types "^2.0.0" micromark-factory-space@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-factory-space/-/micromark-factory-space-2.0.0.tgz#5e7afd5929c23b96566d0e1ae018ae4fcf81d030" - integrity sha512-TKr+LIDX2pkBJXFLzpyPyljzYK3MtmllMUMODTQJIUfDGncESaqB90db9IAUcz4AZAJFdd8U9zOp9ty1458rxg== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz#36d0212e962b2b3121f8525fc7a3c7c029f334fc" + integrity sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg== dependencies: micromark-util-character "^2.0.0" micromark-util-types "^2.0.0" micromark-factory-title@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-factory-title/-/micromark-factory-title-2.0.0.tgz#726140fc77892af524705d689e1cf06c8a83ea95" - integrity sha512-jY8CSxmpWLOxS+t8W+FG3Xigc0RDQA9bKMY/EwILvsesiRniiVMejYTE4wumNc2f4UbAa4WsHqe3J1QS1sli+A== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz#237e4aa5d58a95863f01032d9ee9b090f1de6e94" + integrity sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw== dependencies: micromark-factory-space "^2.0.0" micromark-util-character "^2.0.0" @@ -9715,9 +9704,9 @@ micromark-factory-title@^2.0.0: 
micromark-util-types "^2.0.0" micromark-factory-whitespace@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.0.tgz#9e92eb0f5468083381f923d9653632b3cfb5f763" - integrity sha512-28kbwaBjc5yAI1XadbdPYHX/eDnqaUFVikLwrO7FDnKG7lpgxnvk/XGRhX/PN0mOZ+dBSZ+LgunHS+6tYQAzhA== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz#06b26b2983c4d27bfcc657b33e25134d4868b0b1" + integrity sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ== dependencies: micromark-factory-space "^2.0.0" micromark-util-character "^2.0.0" @@ -9725,48 +9714,48 @@ micromark-factory-whitespace@^2.0.0: micromark-util-types "^2.0.0" micromark-util-character@^2.0.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/micromark-util-character/-/micromark-util-character-2.1.0.tgz#31320ace16b4644316f6bf057531689c71e2aee1" - integrity sha512-KvOVV+X1yLBfs9dCBSopq/+G1PcgT3lAK07mC4BzXi5E7ahzMAF8oIupDDJ6mievI6F+lAATkbQQlQixJfT3aQ== + version "2.1.1" + resolved "https://registry.yarnpkg.com/micromark-util-character/-/micromark-util-character-2.1.1.tgz#2f987831a40d4c510ac261e89852c4e9703ccda6" + integrity sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q== dependencies: micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" micromark-util-chunked@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-chunked/-/micromark-util-chunked-2.0.0.tgz#e51f4db85fb203a79dbfef23fd41b2f03dc2ef89" - integrity sha512-anK8SWmNphkXdaKgz5hJvGa7l00qmcaUQoMYsBwDlSKFKjc6gjGXPDw3FNL3Nbwq5L8gE+RCbGqTw49FK5Qyvg== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz#47fbcd93471a3fccab86cff03847fc3552db1051" + integrity sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA== dependencies: micromark-util-symbol "^2.0.0" micromark-util-classify-character@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-classify-character/-/micromark-util-classify-character-2.0.0.tgz#8c7537c20d0750b12df31f86e976d1d951165f34" - integrity sha512-S0ze2R9GH+fu41FA7pbSqNWObo/kzwf8rN/+IGlW/4tC6oACOs8B++bh+i9bVyNnwCcuksbFwsBme5OCKXCwIw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz#d399faf9c45ca14c8b4be98b1ea481bced87b629" + integrity sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q== dependencies: micromark-util-character "^2.0.0" micromark-util-symbol "^2.0.0" micromark-util-types "^2.0.0" micromark-util-combine-extensions@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.0.tgz#75d6ab65c58b7403616db8d6b31315013bfb7ee5" - integrity sha512-vZZio48k7ON0fVS3CUgFatWHoKbbLTK/rT7pzpJ4Bjp5JjkZeasRfrS9wsBdDJK2cJLHMckXZdzPSSr1B8a4oQ== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz#2a0f490ab08bff5cc2fd5eec6dd0ca04f89b30a9" + integrity sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg== dependencies: micromark-util-chunked "^2.0.0" micromark-util-types "^2.0.0" micromark-util-decode-numeric-character-reference@^2.0.0: - version 
"2.0.1" - resolved "https://registry.yarnpkg.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.1.tgz#2698bbb38f2a9ba6310e359f99fcb2b35a0d2bd5" - integrity sha512-bmkNc7z8Wn6kgjZmVHOX3SowGmVdhYS7yBpMnuMnPzDq/6xwVA604DuOXMZTO1lvq01g+Adfa0pE2UKGlxL1XQ== + version "2.0.2" + resolved "https://registry.yarnpkg.com/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz#fcf15b660979388e6f118cdb6bf7d79d73d26fe5" + integrity sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw== dependencies: micromark-util-symbol "^2.0.0" micromark-util-decode-string@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-decode-string/-/micromark-util-decode-string-2.0.0.tgz#7dfa3a63c45aecaa17824e656bcdb01f9737154a" - integrity sha512-r4Sc6leeUTn3P6gk20aFMj2ntPwn6qpDZqWvYmAG6NgvFTIlj4WtrAudLi65qYoaGdXYViXYw2pkmn7QnIFasA== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz#6cb99582e5d271e84efca8e61a807994d7161eb2" + integrity sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ== dependencies: decode-named-character-reference "^1.0.0" micromark-util-character "^2.0.0" @@ -9774,42 +9763,42 @@ micromark-util-decode-string@^2.0.0: micromark-util-symbol "^2.0.0" micromark-util-encode@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-2.0.0.tgz#0921ac7953dc3f1fd281e3d1932decfdb9382ab1" - integrity sha512-pS+ROfCXAGLWCOc8egcBvT0kf27GoWMqtdarNfDcjb6YLuV5cM3ioG45Ys2qOVqeqSbjaKg72vU+Wby3eddPsA== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz#0d51d1c095551cfaac368326963cf55f15f540b8" + integrity sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw== micromark-util-html-tag-name@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.0.tgz#ae34b01cbe063363847670284c6255bb12138ec4" - integrity sha512-xNn4Pqkj2puRhKdKTm8t1YHC/BAjx6CEwRFXntTaRf/x16aqka6ouVoutm+QdkISTlT7e2zU7U4ZdlDLJd2Mcw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz#e40403096481986b41c106627f98f72d4d10b825" + integrity sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA== micromark-util-normalize-identifier@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.0.tgz#91f9a4e65fe66cc80c53b35b0254ad67aa431d8b" - integrity sha512-2xhYT0sfo85FMrUPtHcPo2rrp1lwbDEEzpx7jiH2xXJLqBuy4H0GgXk5ToU8IEwoROtXuL8ND0ttVa4rNqYK3w== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz#c30d77b2e832acf6526f8bf1aa47bc9c9438c16d" + integrity sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q== dependencies: micromark-util-symbol "^2.0.0" micromark-util-resolve-all@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.0.tgz#189656e7e1a53d0c86a38a652b284a252389f364" - integrity 
sha512-6KU6qO7DZ7GJkaCgwBNtplXCvGkJToU86ybBAUdavvgsCiG8lSSvYxr9MhwmQ+udpzywHsl4RpGJsYWG1pDOcA== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz#e1a2d62cdd237230a2ae11839027b19381e31e8b" + integrity sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg== dependencies: micromark-util-types "^2.0.0" micromark-util-sanitize-uri@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.0.tgz#ec8fbf0258e9e6d8f13d9e4770f9be64342673de" - integrity sha512-WhYv5UEcZrbAtlsnPuChHUAsu/iBPOVaEVsntLBIdpibO0ddy8OzavZz3iL2xVvBZOpolujSliP65Kq0/7KIYw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz#ab89789b818a58752b73d6b55238621b7faa8fd7" + integrity sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ== dependencies: micromark-util-character "^2.0.0" micromark-util-encode "^2.0.0" micromark-util-symbol "^2.0.0" micromark-util-subtokenize@^2.0.0: - version "2.0.1" - resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.1.tgz#76129c49ac65da6e479c09d0ec4b5f29ec6eace5" - integrity sha512-jZNtiFl/1aY73yS3UGQkutD0UbhTt68qnRpw2Pifmz5wV9h8gOVsN70v+Lq/f1rKaU/W8pxRe8y8Q9FX1AOe1Q== + version "2.0.3" + resolved "https://registry.yarnpkg.com/micromark-util-subtokenize/-/micromark-util-subtokenize-2.0.3.tgz#70ffb99a454bd8c913c8b709c3dc97baefb65f96" + integrity sha512-VXJJuNxYWSoYL6AJ6OQECCFGhIU2GGHMw8tahogePBrjkG8aCCas3ibkp7RnVOSTClg2is05/R7maAhF1XyQMg== dependencies: devlop "^1.0.0" micromark-util-chunked "^2.0.0" @@ -9817,19 +9806,19 @@ micromark-util-subtokenize@^2.0.0: micromark-util-types "^2.0.0" micromark-util-symbol@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-symbol/-/micromark-util-symbol-2.0.0.tgz#12225c8f95edf8b17254e47080ce0862d5db8044" - integrity sha512-8JZt9ElZ5kyTnO94muPxIGS8oyElRJaiJO8EzV6ZSyGQ1Is8xwl4Q45qU5UOg+bGH4AikWziz0iN4sFLWs8PGw== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz#e5da494e8eb2b071a0d08fb34f6cefec6c0a19b8" + integrity sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q== micromark-util-types@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-2.0.0.tgz#63b4b7ffeb35d3ecf50d1ca20e68fc7caa36d95e" - integrity sha512-oNh6S2WMHWRZrmutsRmDDfkzKtxF+bc2VxLC9dvtrDIRFln627VsFP6fLMgTryGDljgLPjkrzQSDcPrjPyDJ5w== + version "2.0.1" + resolved "https://registry.yarnpkg.com/micromark-util-types/-/micromark-util-types-2.0.1.tgz#a3edfda3022c6c6b55bfb049ef5b75d70af50709" + integrity sha512-534m2WhVTddrcKVepwmVEVnUAmtrx9bfIjNoQHRqfnvdaHQiFytEhJoTgpWJvDEXCO5gLTQh3wYC1PgOJA4NSQ== micromark@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/micromark/-/micromark-4.0.0.tgz#84746a249ebd904d9658cfabc1e8e5f32cbc6249" - integrity sha512-o/sd0nMof8kYff+TqcDx3VSrgBTcZpSvYcAHIfHhv5VAuNmisCxjhx6YmxS8PFEpb9z5WKWKPdzf0jM23ro3RQ== + version "4.0.1" + resolved "https://registry.yarnpkg.com/micromark/-/micromark-4.0.1.tgz#294c2f12364759e5f9e925a767ae3dfde72223ff" + integrity sha512-eBPdkcoCNvYcxQOAKAlceo5SNdzZWfF+FcSupREAzdAh9rRmE239CEQAiTwIgblwnoM8zzj35sZ5ZwvSEOF6Kw== dependencies: "@types/debug" "^4.0.0" debug "^4.0.0" @@ -10067,9 +10056,9 @@ 
mkdirp@^3.0.1: integrity sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg== mocha@^10.0.0, mocha@^10.7.3: - version "10.7.3" - resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.7.3.tgz#ae32003cabbd52b59aece17846056a68eb4b0752" - integrity sha512-uQWxAu44wwiACGqjbPYmjo7Lg8sFrS3dQe7PP2FQI+woptP4vZXSMcfMyFL/e1yFEeEpV4RtyTpZROOKmxis+A== + version "10.8.2" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.8.2.tgz#8d8342d016ed411b12a429eb731b825f961afb96" + integrity sha512-VZlYo/WE8t1tstuRmqgeyBgCbJc/lEdopaa+axcKzTBJ+UIdlAB9XnmvTCAH4pwR4ElNInaedhEBmZD8iCSVEg== dependencies: ansi-colors "^4.1.3" browser-stdout "^1.3.1" @@ -10162,9 +10151,9 @@ ms@^3.0.0-canary.1: integrity sha512-kh8ARjh8rMN7Du2igDRO9QJnqCb2xYTJxyQYK7vJJS4TvLLmsbyhiKpSW+t+y26gyOyMd0riphX0GeWKU3ky5g== multiformats@^13.0.0, multiformats@^13.0.1, multiformats@^13.1.0, multiformats@^13.2.2: - version "13.3.0" - resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-13.3.0.tgz#1f5188bc7c4fe08ff829ae1c18dc33409042fb71" - integrity sha512-CBiqvsufgmpo01VT5ze94O+uc+Pbf6f/sThlvWss0sBZmAOu6GQn5usrYV2sf2mr17FWYc0rO8c/CNe2T90QAA== + version "13.3.1" + resolved "https://registry.yarnpkg.com/multiformats/-/multiformats-13.3.1.tgz#ea30d134b5697dcf2036ac819a17948f8a1775be" + integrity sha512-QxowxTNwJ3r5RMctoGA5p13w5RbRT2QDkoM+yFlqfLiioBp78nhDjnRLvmSBI9+KAqN4VdgOVWM9c0CHd86m3g== multimatch@^5.0.0: version "5.0.0" @@ -10207,14 +10196,14 @@ nanoid@3.3.1: integrity sha512-n6Vs/3KGyxPQd6uO0eH4Bv0ojGSUvuLlIHtC3Y0kEO23YRge8H9x1GCzLn28YX0H66pMkxuaeESFq4tKISKwdw== nanoid@^3.3.7: - version "3.3.7" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" - integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== + version "3.3.8" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-3.3.8.tgz#b1be3030bee36aaff18bacb375e5cce521684baf" + integrity sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w== nanoid@^5.0.4, nanoid@^5.0.7: - version "5.0.8" - resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-5.0.8.tgz#7610003f6b3b761b5c244bb342c112c5312512bf" - integrity sha512-TcJPw+9RV9dibz1hHUzlLVy8N4X9TnwirAjrU08Juo6BNKggzVfP2ZJ/3ZUSq15Xl5i85i+Z89XBO90pB2PghQ== + version "5.0.9" + resolved "https://registry.yarnpkg.com/nanoid/-/nanoid-5.0.9.tgz#977dcbaac055430ce7b1e19cf0130cea91a20e50" + integrity sha512-Aooyr6MXU6HpvvWXKoVoXwKMs/KyVakWwg7xQfv5/S/RIgJMy0Ifa45H9qqYy7pTCszrHzP21Uk4PZq2HpEM8Q== napi-build-utils@^1.0.1: version "1.0.2" @@ -10303,9 +10292,9 @@ node-emoji@^2.1.3: skin-tone "^2.0.0" node-gyp-build@^4.3.0: - version "4.8.2" - resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.8.2.tgz#4f802b71c1ab2ca16af830e6c1ea7dd1ad9496fa" - integrity sha512-IRUxE4BVsHWXkV/SFOut4qTlagw2aM8T5/vnTsmrHJvVoKueJHRc/JaFND7QDDc61kLYUJ6qlZM3sqTSyx2dTw== + version "4.8.4" + resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.8.4.tgz#8a70ee85464ae52327772a90d66c6077a900cfc8" + integrity sha512-LA4ZjwlnUblHVgq0oBF3Jl/6h/Nvs5fzBLwdEF4nuxnFdsfajde4WfxtJr3CaiH+F6ewcIB/q4jQ4UzPyid+CQ== node-gyp@^10.0.0, node-gyp@^10.2.0: version "10.2.0" @@ -10414,10 +10403,10 @@ npm-bundled@^4.0.0: dependencies: npm-normalize-package-bin "^4.0.0" -npm-install-checks@^7.1.0: - version "7.1.0" - resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-7.1.0.tgz#e365040c95d59571aaed3d6ebb83f7d9ddd347b8" - integrity 
sha512-bkTildVlofeMX7wiOaWk3PlW7YcBXAuEc7TWpOxwUgalG5ZvgT/ms+6OX9zt7iGLv4+VhKbRZhpOfgQJzk1YAw== +npm-install-checks@^7.1.0, npm-install-checks@^7.1.1: + version "7.1.1" + resolved "https://registry.yarnpkg.com/npm-install-checks/-/npm-install-checks-7.1.1.tgz#e9d679fc8a1944c75cdcc96478a22f9d0f763632" + integrity sha512-u6DCwbow5ynAX5BdiHQ9qvexme4U3qHW3MWe5NqH+NeBm0LbiH6zvGjNNew1fY+AZZUtVHbOPF3j7mJxbUzpXg== dependencies: semver "^7.1.1" @@ -10484,7 +10473,7 @@ npm-profile@^11.0.1: npm-registry-fetch "^18.0.0" proc-log "^5.0.0" -npm-registry-fetch@^18.0.0, npm-registry-fetch@^18.0.1: +npm-registry-fetch@^18.0.0, npm-registry-fetch@^18.0.1, npm-registry-fetch@^18.0.2: version "18.0.2" resolved "https://registry.yarnpkg.com/npm-registry-fetch/-/npm-registry-fetch-18.0.2.tgz#340432f56b5a8b1af068df91aae0435d2de646b5" integrity sha512-LeVMZBBVy+oQb5R6FDV9OlJCcWDU+al10oKpe+nsvcHnG24Z3uM3SvJYKfGJlfGjVU8v9liejCrUR/M5HO5NEQ== @@ -10526,9 +10515,9 @@ npm-user-validate@^3.0.0: integrity sha512-9xi0RdSmJ4mPYTC393VJPz1Sp8LyCx9cUnm/L9Qcb3cFO8gjT4mN20P9FAsea8qDHdQ7LtcN8VLh2UT47SdKCw== npm@^10.5.0: - version "10.9.0" - resolved "https://registry.yarnpkg.com/npm/-/npm-10.9.0.tgz#46903bc06cb8a5fd51372647920fe4b830efb8d8" - integrity sha512-ZanDioFylI9helNhl2LNd+ErmVD+H5I53ry41ixlLyCBgkuYb+58CvbAp99hW+zr5L9W4X7CchSoeqKdngOLSw== + version "10.9.1" + resolved "https://registry.yarnpkg.com/npm/-/npm-10.9.1.tgz#ab141c1229765c11c8c59060fc9cf450a2207bd6" + integrity sha512-yJUw03xLqjiv1D52oHeoS5qmOEC5hkJlhP1cWlSrCgshuxWVyFEEK3M3hLC0NwbTaklLTYrhoIanYsuNP5WUKg== dependencies: "@isaacs/string-locale-compare" "^1.1.0" "@npmcli/arborist" "^8.0.0" @@ -10536,21 +10525,21 @@ npm@^10.5.0: "@npmcli/fs" "^4.0.0" "@npmcli/map-workspaces" "^4.0.1" "@npmcli/package-json" "^6.0.1" - "@npmcli/promise-spawn" "^8.0.1" + "@npmcli/promise-spawn" "^8.0.2" "@npmcli/redact" "^3.0.0" "@npmcli/run-script" "^9.0.1" - "@sigstore/tuf" "^2.3.4" + "@sigstore/tuf" "^3.0.0" abbrev "^3.0.0" archy "~1.0.0" cacache "^19.0.1" chalk "^5.3.0" - ci-info "^4.0.0" + ci-info "^4.1.0" cli-columns "^4.0.0" fastest-levenshtein "^1.0.16" fs-minipass "^3.0.3" glob "^10.4.5" graceful-fs "^4.2.11" - hosted-git-info "^8.0.0" + hosted-git-info "^8.0.2" ini "^5.0.0" init-package-json "^7.0.1" is-cidr "^5.1.0" @@ -10562,11 +10551,11 @@ npm@^10.5.0: libnpmhook "^11.0.0" libnpmorg "^7.0.0" libnpmpack "^8.0.0" - libnpmpublish "^10.0.0" + libnpmpublish "^10.0.1" libnpmsearch "^8.0.0" libnpmteam "^7.0.0" libnpmversion "^7.0.0" - make-fetch-happen "^14.0.1" + make-fetch-happen "^14.0.3" minimatch "^9.0.5" minipass "^7.1.1" minipass-pipeline "^1.2.4" @@ -10575,14 +10564,14 @@ npm@^10.5.0: nopt "^8.0.0" normalize-package-data "^7.0.0" npm-audit-report "^6.0.0" - npm-install-checks "^7.1.0" + npm-install-checks "^7.1.1" npm-package-arg "^12.0.0" npm-pick-manifest "^10.0.0" npm-profile "^11.0.1" - npm-registry-fetch "^18.0.1" + npm-registry-fetch "^18.0.2" npm-user-validate "^3.0.0" p-map "^4.0.0" - pacote "^19.0.0" + pacote "^19.0.1" parse-conflict-json "^4.0.0" proc-log "^5.0.0" qrcode-terminal "^0.12.0" @@ -10642,10 +10631,10 @@ object-assign@^4, object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1 resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== -object-inspect@^1.13.1: - version "1.13.2" - resolved 
"https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff" - integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g== +object-inspect@^1.13.1, object-inspect@^1.13.3: + version "1.13.3" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.3.tgz#f14c183de51130243d6d18ae149375ff50ea488a" + integrity sha512-kDCGIbxkDSXE3euJZZXzc6to7fCrKHNI/hSRQnRuQ+BWjFNzZwiFF8fj/6o2t2G9/jTj8PSIYTfCLelLZEeRpA== object-is@^1.1.5: version "1.1.6" @@ -10935,9 +10924,9 @@ p-retry@^4.2.0: retry "^0.13.1" p-retry@^6.0.0, p-retry@^6.2.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-6.2.0.tgz#8d6df01af298750009691ce2f9b3ad2d5968f3bd" - integrity sha512-JA6nkq6hKyWLLasXQXUrO4z8BUZGUt/LjlJxx8Gb2+2ntodU/SS63YZ8b0LUTbQ8ZB9iwOfhEPhg4ykKnn2KsA== + version "6.2.1" + resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-6.2.1.tgz#81828f8dc61c6ef5a800585491572cc9892703af" + integrity sha512-hEt02O4hUct5wtwg4H4KcWgDdm+l1bOaEy/hWzd8xtXB9BqxTWBBhb+2ImAtH4Cv4rPjV76xN3Zumqk3k3AhhQ== dependencies: "@types/retry" "0.12.2" is-network-error "^1.0.0" @@ -10988,7 +10977,7 @@ package-json@^10.0.0: registry-url "^6.0.1" semver "^7.6.0" -pacote@^19.0.0: +pacote@^19.0.0, pacote@^19.0.1: version "19.0.1" resolved "https://registry.yarnpkg.com/pacote/-/pacote-19.0.1.tgz#66d22dbd274ed8a7c30029d70eb8030f5151e6fc" integrity sha512-zIpxWAsr/BvhrkSruspG8aqCQUUrWtpwx0GjiRZQhEM/pZXrigA32ElN3vTcCPUDOFmHr6SFxwYrvVUs5NTEUg== @@ -11246,7 +11235,7 @@ pico-signals@^1.0.0: resolved "https://registry.yarnpkg.com/pico-signals/-/pico-signals-1.0.0.tgz#3df41f192c76d5baf9232bb3845fec73a7e50d9b" integrity sha512-Av5eg3cMtXbQVxVoIpP+dzHMBisRZuZy3htFWyaGGScT94AdfeT0On/QVhFNQhIMiY7aLi21W4pD+5KdWbEBUw== -picocolors@^1.0.0, picocolors@^1.1.0: +picocolors@^1.0.0, picocolors@^1.1.0, picocolors@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== @@ -11342,10 +11331,10 @@ platform@^1.3.3: resolved "https://registry.yarnpkg.com/platform/-/platform-1.3.6.tgz#48b4ce983164b209c2d45a107adb31f473a6e7a7" integrity sha512-fnWVljUchTro6RiCFvCXBbNhJc2NijN7oIQxbwsyL0buWJPG85v81ehlHI9fXrJsMNgTofEoWIQeClKpgxFLrg== -playwright-core@1.48.2, playwright-core@^1.45.2: - version "1.48.2" - resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.48.2.tgz#cd76ed8af61690edef5c05c64721c26a8db2f3d7" - integrity sha512-sjjw+qrLFlriJo64du+EK0kJgZzoQPsabGF4lBvsid+3CNIZIYLgnMj9V6JY5VhM2Peh20DJWIVpVljLLnlawA== +playwright-core@1.49.0, playwright-core@^1.45.2: + version "1.49.0" + resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.49.0.tgz#8e69ffed3f41855b854982f3632f2922c890afcb" + integrity sha512-R+3KKTQF3npy5GTiKH/T+kdhoJfJojjHESR1YEWhYuEKRVfVaxH3+4+GvXE5xyCngCxhxnykk0Vlah9v8fs3jA== "playwright-test@github:marcus-pousette/playwright-test#master": version "14.1.3" @@ -11386,12 +11375,12 @@ playwright-core@1.48.2, playwright-core@^1.45.2: util "^0.12.5" v8-to-istanbul "^9.2.0" -playwright@1.48.2: - version "1.48.2" - resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.48.2.tgz#fca45ae8abdc34835c715718072aaff7e305167e" - integrity sha512-NjYvYgp4BPmiwfe31j4gHLa3J7bD2WiBz8Lk2RoSsmX38SVIARZ18VYjxLjAcDsAhA+F4iSEXTSGgjua0rrlgQ== +playwright@1.49.0: + version "1.49.0" + resolved 
"https://registry.yarnpkg.com/playwright/-/playwright-1.49.0.tgz#df6b9e05423377a99658202844a294a8afb95d0a" + integrity sha512-eKpmys0UFDnfNb3vfsf8Vx2LEOtflgRebl0Im2eQQnYMA4Aqd+Zw8bEOB+7ZKvN76901mRnqdsiOGKxzVTbi7A== dependencies: - playwright-core "1.48.2" + playwright-core "1.49.0" optionalDependencies: fsevents "2.3.2" @@ -11430,13 +11419,13 @@ postcss-selector-parser@^6.1.2: cssesc "^3.0.0" util-deprecate "^1.0.2" -postcss@^8.4.43, postcss@^8.4.47: - version "8.4.47" - resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.47.tgz#5bf6c9a010f3e724c503bf03ef7947dcb0fea365" - integrity sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ== +postcss@^8.4.43, postcss@^8.4.48: + version "8.4.49" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.49.tgz#4ea479048ab059ab3ae61d082190fabfd994fe19" + integrity sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA== dependencies: nanoid "^3.3.7" - picocolors "^1.1.0" + picocolors "^1.1.1" source-map-js "^1.2.1" prebuild-install@^7.0.1, prebuild-install@^7.1.1: @@ -11468,14 +11457,14 @@ premove@^4.0.0: integrity sha512-zim/Hr4+FVdCIM7zL9b9Z0Wfd5Ya3mnKtiuDv7L5lzYzanSq6cOcVJ7EFcgK4I0pt28l8H0jX/x3nyog380XgQ== prettier@^3.3.3: - version "3.3.3" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.3.3.tgz#30c54fe0be0d8d12e6ae61dbb10109ea00d53105" - integrity sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew== + version "3.4.1" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.4.1.tgz#e211d451d6452db0a291672ca9154bc8c2579f7b" + integrity sha512-G+YdqtITVZmOJje6QkXQWzl3fSfMxFwm1tjTyo9exhkmWSqC4Yhd1+lug++IlR2mvRVAxEDDWYkQdeSztajqgg== pretty-ms@^9.0.0: - version "9.1.0" - resolved "https://registry.yarnpkg.com/pretty-ms/-/pretty-ms-9.1.0.tgz#0ad44de6086454f48a168e5abb3c26f8db1b3253" - integrity sha512-o1piW0n3tgKIKCwk2vpM/vOV13zjJzvP37Ioze54YlTHE06m4tjEbzg9WsKkvTuyYln2DHjo5pY4qrZGI0otpw== + version "9.2.0" + resolved "https://registry.yarnpkg.com/pretty-ms/-/pretty-ms-9.2.0.tgz#e14c0aad6493b69ed63114442a84133d7e560ef0" + integrity sha512-4yf0QO/sllf/1zbZWYnvWw3NxCQwLXKzIj0G849LSufP15BXKM0rbD2Z3wVnkMfjdn/CB0Dpp444gYAACdsplg== dependencies: parse-ms "^4.0.0" @@ -11495,9 +11484,9 @@ process-nextick-args@~2.0.0: integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== process-on-spawn@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/process-on-spawn/-/process-on-spawn-1.0.0.tgz#95b05a23073d30a17acfdc92a440efd2baefdc93" - integrity sha512-1WsPDsUSMmZH5LeMLegqkPDrsGgsWwk1Exipy2hvB0o/F0ASzbpIctSCcZIK1ykJvtTJULEH+20WOFjMvGnCTg== + version "1.1.0" + resolved "https://registry.yarnpkg.com/process-on-spawn/-/process-on-spawn-1.1.0.tgz#9d5999ba87b3bf0a8acb05322d69f2f5aa4fb763" + integrity sha512-JOnOPQ/8TZgjs1JIH/m9ni7FfimjNa/PRx7y/Wb5qdItsnhO0jE4AT7fC0HjC28DUQWDr50dwSYZLdRMlqDq3Q== dependencies: fromentries "^1.2.0" @@ -11623,11 +11612,11 @@ punycode@^2.1.0: integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== pvtsutils@^1.3.2: - version "1.3.5" - resolved "https://registry.yarnpkg.com/pvtsutils/-/pvtsutils-1.3.5.tgz#b8705b437b7b134cd7fd858f025a23456f1ce910" - integrity sha512-ARvb14YB9Nm2Xi6nBq1ZX6dAM0FsJnuk+31aUp4TrcZEdKUlSqOqsxJHUPJDNE3qiIp+iUPEIeR6Je/tgV7zsA== + version "1.3.6" + resolved 
"https://registry.yarnpkg.com/pvtsutils/-/pvtsutils-1.3.6.tgz#ec46e34db7422b9e4fdc5490578c1883657d6001" + integrity sha512-PLgQXQ6H2FWCaeRak8vvk1GW462lMxB5s3Jm673N82zI4vqtVUPuZdffdZbPDFRoU8kAhItWFtPCWiPpp4/EDg== dependencies: - tslib "^2.6.1" + tslib "^2.8.1" pvutils@^1.1.3: version "1.1.3" @@ -11918,6 +11907,19 @@ redent@^3.0.0: indent-string "^4.0.0" strip-indent "^3.0.0" +reflect.getprototypeof@^1.0.6: + version "1.0.7" + resolved "https://registry.yarnpkg.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.7.tgz#04311b33a1b713ca5eb7b5aed9950a86481858e5" + integrity sha512-bMvFGIUKlc/eSfXNX+aZ+EL95/EgZzuwA0OBPTbZZDEJw/0AkentjMuM1oiRfwHrshqk4RzdgiTg5CcDalXN5g== + dependencies: + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.23.5" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + gopd "^1.0.1" + which-builtin-type "^1.1.4" + regenerate-unicode-properties@^10.2.0: version "10.2.0" resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz#626e39df8c372338ea9b8028d1f99dc3fd9c3db0" @@ -11942,7 +11944,7 @@ regenerator-transform@^0.15.2: dependencies: "@babel/runtime" "^7.8.4" -regexp.prototype.flags@^1.5.2: +regexp.prototype.flags@^1.5.3: version "1.5.3" resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.3.tgz#b3ae40b1d2499b8350ab2c3fe6ef3845d3a96f42" integrity sha512-vqlC04+RQoFalODCbCumG2xIOvapzVMHwsyIGM/SIE8fRhFFsXeH8/QQ+s0T0kDAhKc4k30s73/0ydkHQz6HlQ== @@ -11958,21 +11960,21 @@ regexpp@^3.0.0: integrity sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg== regexpu-core@^6.1.1: - version "6.1.1" - resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-6.1.1.tgz#b469b245594cb2d088ceebc6369dceb8c00becac" - integrity sha512-k67Nb9jvwJcJmVpw0jPttR1/zVfnKf8Km0IPatrU/zJ5XeG3+Slx0xLXs9HByJSzXzrlz5EDvN6yLNMDc2qdnw== + version "6.2.0" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-6.2.0.tgz#0e5190d79e542bf294955dccabae04d3c7d53826" + integrity sha512-H66BPQMrv+V16t8xtmq+UC0CBpiTBA60V8ibS1QVReIp8T1z8hwFxqcGzm9K6lgsN7sB5edVH8a+ze6Fqm4weA== dependencies: regenerate "^1.4.2" regenerate-unicode-properties "^10.2.0" regjsgen "^0.8.0" - regjsparser "^0.11.0" + regjsparser "^0.12.0" unicode-match-property-ecmascript "^2.0.0" unicode-match-property-value-ecmascript "^2.1.0" registry-auth-token@^5.0.0, registry-auth-token@^5.0.2: - version "5.0.2" - resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-5.0.2.tgz#8b026cc507c8552ebbe06724136267e63302f756" - integrity sha512-o/3ikDxtXaA59BmZuZrJZDJv8NMDGSj+6j6XaeBmHw8eY1i1qd9+6H+LjVvQXx3HN6aRCGa1cUdJ9RaJZUugnQ== + version "5.0.3" + resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-5.0.3.tgz#417d758c8164569de8cf5cabff16cc937902dcc6" + integrity sha512-1bpc9IyC+e+CNFRaWyn77tk4xGG4PPUyfakSmA6F6cvUDjrm58dfyJ3II+9yb10EDkHoy1LaPSmHaWLOH3m6HA== dependencies: "@pnpm/npm-conf" "^2.1.0" @@ -11988,10 +11990,10 @@ regjsgen@^0.8.0: resolved "https://registry.yarnpkg.com/regjsgen/-/regjsgen-0.8.0.tgz#df23ff26e0c5b300a6470cad160a9d090c3a37ab" integrity sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q== -regjsparser@^0.11.0: - version "0.11.2" - resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.11.2.tgz#7404ad42be00226d72bcf1f003f1f441861913d8" - integrity sha512-3OGZZ4HoLJkkAZx/48mTXJNlmqTGOzc0o9OWQPuWpkOlXXPbyN6OafCcoXUnBqE2D3f/T5L+pWc1kdEmnfnRsA== +regjsparser@^0.12.0: 
+ version "0.12.0" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.12.0.tgz#0e846df6c6530586429377de56e0475583b088dc" + integrity sha512-cnE+y8bz4NhMjISKbgeVJtqNbtf5QpjZP+Bslo+UqkIt9QPnX9q095eiRRASJG1/tz6dlNr6Z5NsBiWYokp6EQ== dependencies: jsesc "~3.0.2" @@ -12149,30 +12151,30 @@ roarr@^2.15.3: sprintf-js "^1.1.2" rollup@^4.20.0: - version "4.24.2" - resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.24.2.tgz#04bbe819c1a0cd933533b79687f5dc43efb7a7f0" - integrity sha512-do/DFGq5g6rdDhdpPq5qb2ecoczeK6y+2UAjdJ5trjQJj5f1AiVdLRWRc9A9/fFukfvJRgM0UXzxBIYMovm5ww== + version "4.28.0" + resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.28.0.tgz#eb8d28ed43ef60a18f21d0734d230ee79dd0de77" + integrity sha512-G9GOrmgWHBma4YfCcX8PjH0qhXSdH8B4HDE2o4/jaxj93S4DPCIDoLcXz99eWMji4hB29UFCEd7B2gwGJDR9cQ== dependencies: "@types/estree" "1.0.6" optionalDependencies: - "@rollup/rollup-android-arm-eabi" "4.24.2" - "@rollup/rollup-android-arm64" "4.24.2" - "@rollup/rollup-darwin-arm64" "4.24.2" - "@rollup/rollup-darwin-x64" "4.24.2" - "@rollup/rollup-freebsd-arm64" "4.24.2" - "@rollup/rollup-freebsd-x64" "4.24.2" - "@rollup/rollup-linux-arm-gnueabihf" "4.24.2" - "@rollup/rollup-linux-arm-musleabihf" "4.24.2" - "@rollup/rollup-linux-arm64-gnu" "4.24.2" - "@rollup/rollup-linux-arm64-musl" "4.24.2" - "@rollup/rollup-linux-powerpc64le-gnu" "4.24.2" - "@rollup/rollup-linux-riscv64-gnu" "4.24.2" - "@rollup/rollup-linux-s390x-gnu" "4.24.2" - "@rollup/rollup-linux-x64-gnu" "4.24.2" - "@rollup/rollup-linux-x64-musl" "4.24.2" - "@rollup/rollup-win32-arm64-msvc" "4.24.2" - "@rollup/rollup-win32-ia32-msvc" "4.24.2" - "@rollup/rollup-win32-x64-msvc" "4.24.2" + "@rollup/rollup-android-arm-eabi" "4.28.0" + "@rollup/rollup-android-arm64" "4.28.0" + "@rollup/rollup-darwin-arm64" "4.28.0" + "@rollup/rollup-darwin-x64" "4.28.0" + "@rollup/rollup-freebsd-arm64" "4.28.0" + "@rollup/rollup-freebsd-x64" "4.28.0" + "@rollup/rollup-linux-arm-gnueabihf" "4.28.0" + "@rollup/rollup-linux-arm-musleabihf" "4.28.0" + "@rollup/rollup-linux-arm64-gnu" "4.28.0" + "@rollup/rollup-linux-arm64-musl" "4.28.0" + "@rollup/rollup-linux-powerpc64le-gnu" "4.28.0" + "@rollup/rollup-linux-riscv64-gnu" "4.28.0" + "@rollup/rollup-linux-s390x-gnu" "4.28.0" + "@rollup/rollup-linux-x64-gnu" "4.28.0" + "@rollup/rollup-linux-x64-musl" "4.28.0" + "@rollup/rollup-win32-arm64-msvc" "4.28.0" + "@rollup/rollup-win32-ia32-msvc" "4.28.0" + "@rollup/rollup-win32-x64-msvc" "4.28.0" fsevents "~2.3.2" run-parallel-limit@^1.1.0: @@ -12483,18 +12485,6 @@ signale@^1.2.1: figures "^2.0.0" pkg-conf "^2.1.0" -sigstore@^2.2.0: - version "2.3.1" - resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-2.3.1.tgz#0755dd2cc4820f2e922506da54d3d628e13bfa39" - integrity sha512-8G+/XDU8wNsJOQS5ysDVO0Etg9/2uA5gR9l4ZwijjlwxBcrU6RPfwi2+jJmbP+Ap1Hlp/nVAaEO4Fj22/SL2gQ== - dependencies: - "@sigstore/bundle" "^2.3.2" - "@sigstore/core" "^1.0.0" - "@sigstore/protobuf-specs" "^0.3.2" - "@sigstore/sign" "^2.3.2" - "@sigstore/tuf" "^2.3.4" - "@sigstore/verify" "^1.2.1" - sigstore@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/sigstore/-/sigstore-3.0.0.tgz#d6eadcc6590185a7f1c16184078ce8a9ef6db937" @@ -12768,9 +12758,9 @@ stream-transform@^2.1.3: mixme "^0.5.1" streamx@^2.15.0: - version "2.20.1" - resolved "https://registry.yarnpkg.com/streamx/-/streamx-2.20.1.tgz#471c4f8b860f7b696feb83d5b125caab2fdbb93c" - integrity sha512-uTa0mU6WUC65iUvzKH4X9hEdvSW7rbPxPtwfWiLMSj3qTdQbAiUboZTxauKfpFuGIGa1C2BYijZ7wgdUXICJhA== + version "2.20.2" + resolved 
"https://registry.yarnpkg.com/streamx/-/streamx-2.20.2.tgz#6a8911959d6f307c19781a1d19ecd94b5f042d78" + integrity sha512-aDGDLU+j9tJcUdPGOaHmVF1u/hhI+CsGkT02V3OKlHDV7IukOI+nTWAGkiZEKCO35rWN1wIr4tS7YFr1f4qSvA== dependencies: fast-fifo "^1.3.2" queue-tick "^1.0.1" @@ -13016,7 +13006,7 @@ supports-color@^9.4.0: resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-9.4.0.tgz#17bfcf686288f531db3dea3215510621ccb55954" integrity sha512-VL+lNrEoIXww1coLPOmiEmK/0sGigko5COxI09KzHc2VJXJsQ37UaQ+8quuxjDeA7+KnLGTWRyOXSLLR2Wb4jw== -supports-hyperlinks@^3.0.0: +supports-hyperlinks@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/supports-hyperlinks/-/supports-hyperlinks-3.1.0.tgz#b56150ff0173baacc15f21956450b61f2b18d3ac" integrity sha512-2rn0BZ+/f7puLOHZm1HOJfwBggfaHXUpPUSSG/SWM4TWp5KCfmNYwnC3hruy2rZlMnmWZ+QAGpZfchu3f3695A== @@ -13206,6 +13196,11 @@ tiny-relative-date@^1.3.0: resolved "https://registry.yarnpkg.com/tiny-relative-date/-/tiny-relative-date-1.3.0.tgz#fa08aad501ed730f31cc043181d995c39a935e07" integrity sha512-MOQHpzllWxDCHHaDno30hhLfbouoYlOI8YlMNtvKe1zXbjEVhbcEovQxvZrPvtiYW630GQDoMMarCnjfyfHA+A== +tinybench@^3: + version "3.0.7" + resolved "https://registry.yarnpkg.com/tinybench/-/tinybench-3.0.7.tgz#71500258fa98ddcaf75063330b6c570d39b2ad6d" + integrity sha512-soxV7Dp8eDKvPDv3c4qPJbUjLm1cZxFlsTaIH+FqalsazJzFrLG59dpiIN8OfgVcl11Hfj2b7apD73inCB67Mw== + tmp@^0.0.33: version "0.0.33" resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" @@ -13265,9 +13260,9 @@ trouter@^2.0.1: matchit "^1.0.0" ts-api-utils@^1.3.0: - version "1.3.0" - resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.3.0.tgz#4b490e27129f1e8e686b45cc4ab63714dc60eea1" - integrity sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ== + version "1.4.3" + resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.4.3.tgz#bfc2215fe6528fecab2b0fba570a2e8a4263b064" + integrity sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw== ts-node@^10.8.1: version "10.9.2" @@ -13313,10 +13308,10 @@ tslib@^1.8.1, tslib@^1.9.0: resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== -tslib@^2.0.0, tslib@^2.4.0, tslib@^2.6.1, tslib@^2.6.2: - version "2.8.0" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.0.tgz#d124c86c3c05a40a91e6fdea4021bd31d377971b" - integrity sha512-jWVzBLplnCmoaTr13V9dYbiQ99wvZRd0vNWaDRg+aVYRcjDF3nDksxFDE/+fkXnKhpnUUkmx5pK/v8mCtLVqZA== +tslib@^2.0.0, tslib@^2.4.0, tslib@^2.6.2, tslib@^2.8.1: + version "2.8.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" + integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== tsutils-etc@^1.4.1: version "1.4.2" @@ -13346,15 +13341,6 @@ tty-table@^4.2.1: wcwidth "^1.0.1" yargs "^17.7.1" -tuf-js@^2.2.1: - version "2.2.1" - resolved "https://registry.yarnpkg.com/tuf-js/-/tuf-js-2.2.1.tgz#fdd8794b644af1a75c7aaa2b197ddffeb2911b56" - integrity sha512-GwIJau9XaA8nLVbUXsN3IlFi7WmQ48gBUrl3FTkkL/XLu/POhBzfmX9hd33FNMX1qAsfl6ozO1iMmW9NC8YniA== - dependencies: - "@tufjs/models" "2.0.1" - debug "^4.3.4" - make-fetch-happen "^13.0.1" - tuf-js@^3.0.1: version "3.0.1" resolved 
"https://registry.yarnpkg.com/tuf-js/-/tuf-js-3.0.1.tgz#e3f07ed3d8e87afaa70607bd1ef801d5c1f57177" @@ -13429,9 +13415,9 @@ type-fest@^2.12.2: integrity sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA== type-fest@^4.20.0, type-fest@^4.6.0, type-fest@^4.7.1: - version "4.26.1" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.26.1.tgz#a4a17fa314f976dd3e6d6675ef6c775c16d7955e" - integrity sha512-yOGpmOAL7CkKe/91I5O3gPICmJNLJ1G4zFYVAsRHg7M64biSnPtRj0WNQt++bRkjYOqjWXrhnUw1utzmVErAdg== + version "4.29.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-4.29.1.tgz#9c125cb7c0cef6695f3c0b9d15d520c5dbadfcba" + integrity sha512-Y1zUveI92UYM/vo1EFlQSsNf74+hfKH+7saZJslF0Fw92FRaiTAnHPIvo9d7SLxXt/gAYqA4RXyDTioMQCCp0A== typed-array-buffer@^1.0.2: version "1.0.2" @@ -13454,9 +13440,9 @@ typed-array-byte-length@^1.0.1: is-typed-array "^1.1.13" typed-array-byte-offset@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz#f9ec1acb9259f395093e4567eb3c28a580d02063" - integrity sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA== + version "1.0.3" + resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.3.tgz#3fa9f22567700cc86aaf86a1e7176f74b59600f2" + integrity sha512-GsvTyUHTriq6o/bHcTd0vM7OQ9JEdlvluu9YISaA7+KzDzPaIzEeDFNkTfhdE3MYcNhNi0vq/LlegYgIs5yPAw== dependencies: available-typed-arrays "^1.0.7" call-bind "^1.0.7" @@ -13464,18 +13450,19 @@ typed-array-byte-offset@^1.0.2: gopd "^1.0.1" has-proto "^1.0.3" is-typed-array "^1.1.13" + reflect.getprototypeof "^1.0.6" typed-array-length@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.6.tgz#57155207c76e64a3457482dfdc1c9d1d3c4c73a3" - integrity sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g== + version "1.0.7" + resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.7.tgz#ee4deff984b64be1e118b0de8c9c877d5ce73d3d" + integrity sha512-3KS2b+kL7fsuk/eJZ7EQdnEmQoaho/r6KUef7hxvltNA5DR8NAUM+8wJMbJyZ4G9/7i3v5zPBIMN5aybAh2/Jg== dependencies: call-bind "^1.0.7" for-each "^0.3.3" gopd "^1.0.1" - has-proto "^1.0.3" is-typed-array "^1.1.13" possible-typed-array-names "^1.0.0" + reflect.getprototypeof "^1.0.6" typedarray-to-buffer@^3.1.5: version "3.1.5" @@ -13485,9 +13472,9 @@ typedarray-to-buffer@^3.1.5: is-typedarray "^1.0.0" typedoc-plugin-mdn-links@^3.0.3: - version "3.3.5" - resolved "https://registry.yarnpkg.com/typedoc-plugin-mdn-links/-/typedoc-plugin-mdn-links-3.3.5.tgz#c8e0cde2e7682ea653be1c715c9612c8fe3be04b" - integrity sha512-EsOmQ23eBYqFFEkjo/prud/h2O2QIPQwdVvpyocwn3SWWFCP1YfuTCs94/dDQG6Ikte7gik88ic7Md8fDvEmtw== + version "3.3.8" + resolved "https://registry.yarnpkg.com/typedoc-plugin-mdn-links/-/typedoc-plugin-mdn-links-3.3.8.tgz#415b16556ce08711733054a3ba4249f540651033" + integrity sha512-Aewg+SW7hBdffRpT6WnpRwWthoaF9irlzXDKRyvcDVekPZSFujOlh690SV6eCgqrtP7GBJmN0TVeJUq6+6rb1w== typedoc-plugin-missing-exports@^2.0.0: version "2.3.0" @@ -13518,9 +13505,9 @@ typescript-docs-verifier@^2.5.0: yargs "^17.5.1" typescript@^5.1.6, typescript@^5.6.3: - version "5.6.3" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.6.3.tgz#5f3449e31c9d94febb17de03cc081dd56d81db5b" - integrity sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw== + version "5.7.2" + 
resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.7.2.tgz#3169cf8c4c8a828cde53ba9ecb3d2b1d5dd67be6" + integrity sha512-i5t66RHxDvVN40HfDd1PsEThGNnlMCMT3jMUuoh9/0TaqWevNontacunWyN02LA9/fIbEWlcHZcgTKb9QoaLfg== uglify-js@^3.1.4: version "3.19.3" @@ -13559,11 +13546,16 @@ unbox-primitive@^1.0.2: has-symbols "^1.0.3" which-boxed-primitive "^1.0.2" -undici-types@~6.19.2, undici-types@~6.19.8: +undici-types@~6.19.2: version "6.19.8" resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== +undici-types@~6.20.0: + version "6.20.0" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.20.0.tgz#8171bf22c1f588d1554d55bf204bc624af388433" + integrity sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg== + unicode-canonical-property-names-ecmascript@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz#cb3173fe47ca743e228216e4a3ddc4c84d628cc2" @@ -13809,9 +13801,9 @@ vite-plugin-static-copy@^1.0.0: picocolors "^1.0.0" vite@^5.0.10: - version "5.4.10" - resolved "https://registry.yarnpkg.com/vite/-/vite-5.4.10.tgz#d358a7bd8beda6cf0f3b7a450a8c7693a4f80c18" - integrity sha512-1hvaPshuPUtxeQ0hsVH3Mud0ZanOLwVTneA1EgbAM5LhaZEqyPWGRQ7BtaMvUrTDeEaC8pxtj6a6jku3x4z6SQ== + version "5.4.11" + resolved "https://registry.yarnpkg.com/vite/-/vite-5.4.11.tgz#3b415cd4aed781a356c1de5a9ebafb837715f6e5" + integrity sha512-c7jFQRklXua0mTzneGW9QVyxFjUgwcihC4bXEtujIo2ouWCe1Ajt/amn2PCxYnhYfd5k09JX3SB7OYWFKYqj8Q== dependencies: esbuild "^0.21.3" postcss "^8.4.43" @@ -13874,15 +13866,44 @@ which-boxed-primitive@^1.0.2: is-string "^1.0.5" is-symbol "^1.0.3" +which-builtin-type@^1.1.4: + version "1.2.0" + resolved "https://registry.yarnpkg.com/which-builtin-type/-/which-builtin-type-1.2.0.tgz#58042ac9602d78a6d117c7e811349df1268ba63c" + integrity sha512-I+qLGQ/vucCby4tf5HsLmGueEla4ZhwTBSqaooS+Y0BuxN4Cp+okmGuV+8mXZ84KDI9BA+oklo+RzKg0ONdSUA== + dependencies: + call-bind "^1.0.7" + function.prototype.name "^1.1.6" + has-tostringtag "^1.0.2" + is-async-function "^2.0.0" + is-date-object "^1.0.5" + is-finalizationregistry "^1.1.0" + is-generator-function "^1.0.10" + is-regex "^1.1.4" + is-weakref "^1.0.2" + isarray "^2.0.5" + which-boxed-primitive "^1.0.2" + which-collection "^1.0.2" + which-typed-array "^1.1.15" + +which-collection@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/which-collection/-/which-collection-1.0.2.tgz#627ef76243920a107e7ce8e96191debe4b16c2a0" + integrity sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw== + dependencies: + is-map "^2.0.3" + is-set "^2.0.3" + is-weakmap "^2.0.2" + is-weakset "^2.0.3" + which-module@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.1.tgz#776b1fe35d90aebe99e8ac15eb24093389a4a409" integrity sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ== which-typed-array@^1.1.14, which-typed-array@^1.1.15, which-typed-array@^1.1.2: - version "1.1.15" - resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d" - integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA== + version 
"1.1.16" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.16.tgz#db4db429c4706feca2f01677a144278e4a8c216b" + integrity sha512-g+N+GAWiRj66DngFwHvISJd+ITsyphZvD1vChfVg6cEdnzy53GzB3oy0fUNlvhz7H7+MiqhYr26qxQShCpKTTQ== dependencies: available-typed-arrays "^1.0.7" call-bind "^1.0.7" @@ -14062,9 +14083,9 @@ yaml@^1.10.0, yaml@^1.10.2: integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== yaml@^2.2.2: - version "2.6.0" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.6.0.tgz#14059ad9d0b1680d0f04d3a60fe00f3a857303c3" - integrity sha512-a6ae//JvKDEra2kdi1qzCyrJW/WZCgFi8ydDV+eXExl95t+5R+ijnqHJbz9tmMh8FUjx3iv2fCQ4dclAQlO2UQ== + version "2.6.1" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.6.1.tgz#42f2b1ba89203f374609572d5349fb8686500773" + integrity sha512-7r0XPzioN/Q9kXBro/XPnA6kznR73DHq+GXh5ON7ZozRO6aMjbmiBuKste2wslTFkC5d1dw0GooOCepZXJ2SAg== yargs-parser@20.2.4: version "20.2.4"