From 40bfb0fc0730112f1c16da01ea8b4bde8e95bc37 Mon Sep 17 00:00:00 2001
From: Marcus Pousette
Date: Mon, 4 Nov 2024 18:58:17 +0100
Subject: [PATCH] wip

---
 .prettierignore | 3 +-
 package.json | 3 +-
 packages/clients/peerbit/src/libp2p.ts | 5 +-
 packages/clients/peerbit/src/peer.ts | 42 +-
 .../test/{dial.spec.ts => connect.spec.ts} | 35 +-
 .../data/shared-log/benchmark/get-samples.ts | 142 +-
 .../data/shared-log/benchmark/index.ts | 10 +-
 .../shared-log/benchmark/replication-prune.ts | 10 +-
 .../data/shared-log/benchmark/replication.ts | 6 +-
 .../programs/data/shared-log/package.json | 3 +-
 .../programs/data/shared-log/src/index.ts | 554 ++--
 .../programs/data/shared-log/src/integers.ts | 97 +
 .../programs/data/shared-log/src/ranges.ts | 1128 +++++--
 .../shared-log/src/replication-domain-hash.ts | 62 +-
 .../shared-log/src/replication-domain-time.ts | 20 +-
 .../data/shared-log/src/replication-domain.ts | 48 +-
 .../data/shared-log/src/replication.ts | 19 +-
 packages/programs/data/shared-log/src/role.ts | 10 +-
 packages/programs/data/shared-log/src/sync.ts | 115 +
 .../programs/data/shared-log/src/utils.ts | 14 +-
 .../data/shared-log/test/append.spec.ts | 4 +-
 .../data/shared-log/test/domain-time.spec.ts | 4 +-
 .../data/shared-log/test/encryption.spec.ts | 2 +-
 .../data/shared-log/test/join.spec.ts | 53 +-
 .../data/shared-log/test/leader.spec.ts | 88 +-
 .../data/shared-log/test/load.spec.ts | 14 +-
 .../shared-log/test/migration-8-9.spec.ts | 4 +-
 .../data/shared-log/test/network.spec.ts | 6 +-
 .../data/shared-log/test/observer.spec.ts | 8 +-
 .../data/shared-log/test/open-close.spec.ts | 2 +-
 .../data/shared-log/test/ranges.spec.ts | 2648 +++++++++++------
 .../data/shared-log/test/replicate.spec.ts | 85 +-
 .../data/shared-log/test/replication.spec.ts | 540 ++--
 .../data/shared-log/test/sharding.spec.ts | 237 +-
 .../data/shared-log/test/sync.spec.ts | 283 ++
 .../programs/data/shared-log/test/utils.ts | 14 +-
 .../data/shared-log/test/utils/access.ts | 4 +-
 .../test/utils/stores/event-store.ts | 26 +-
 packages/programs/program/src/client.ts | 9 +-
 packages/programs/program/src/handler.ts | 4 +
 .../programs/program/test/handler.spec.ts | 26 +-
 packages/programs/program/test/utils.ts | 1 +
 .../libp2p-test-utils/src/session.ts | 19 +-
 packages/transport/stream/test/stream.spec.ts | 19 +
 packages/utils/indexer/interface/src/id.ts | 29 +-
 .../utils/indexer/simple/test/index.spec.ts | 5 +-
 packages/utils/indexer/sqlite3/src/engine.ts | 23 +-
 packages/utils/indexer/sqlite3/src/schema.ts | 54 +-
 .../utils/indexer/sqlite3/test/index.spec.ts | 7 +-
 .../indexer/sqlite3/test/statement.spec.ts | 22 +-
 .../utils/indexer/sqlite3/test/table.spec.ts | 26 +-
 .../utils/indexer/sqlite3/test/u64.spec.ts | 65 +
 packages/utils/indexer/sqlite3/test/utils.ts | 22 +
 packages/utils/indexer/tests/src/tests.ts | 99 +-
 packages/utils/riblt | 1 +
 55 files changed, 4678 insertions(+), 2101 deletions(-)
 rename packages/clients/peerbit/test/{dial.spec.ts => connect.spec.ts} (63%)
 create mode 100644 packages/programs/data/shared-log/src/integers.ts
 create mode 100644 packages/programs/data/shared-log/src/sync.ts
 create mode 100644 packages/programs/data/shared-log/test/sync.spec.ts
 create mode 100644 packages/utils/indexer/sqlite3/test/u64.spec.ts
 create mode 100644 packages/utils/indexer/sqlite3/test/utils.ts
 create mode 160000 packages/utils/riblt

diff --git a/.prettierignore b/.prettierignore
index c692685ea..1decc158c 100644
--- a/.prettierignore
+++ b/.prettierignore
@@ -1 +1,2 @@
-**/public/peerbit/**
\ No newline at end of file
+**/public/peerbit/**
+**/target/**
\ No newline at end of file
diff --git a/package.json b/package.json
index edfe0c638..848ddac60 100644
--- a/package.json
+++ b/package.json
@@ -43,7 +43,8 @@
 		"packages/utils/cache",
 		"packages/utils/logger",
 		"packages/utils/keychain",
-		"packages/utils/indexer/*"
+		"packages/utils/indexer/*",
+		"packages/utils/riblt/riblt-rust"
 	],
 	"engines": {
 		"node": ">=18"
diff --git a/packages/clients/peerbit/src/libp2p.ts b/packages/clients/peerbit/src/libp2p.ts
index c57a740cc..dc13ce174 100644
--- a/packages/clients/peerbit/src/libp2p.ts
+++ b/packages/clients/peerbit/src/libp2p.ts
@@ -45,13 +45,16 @@ export const createLibp2pExtended = (
 ): Promise<Libp2pExtended> => {
 	let extraServices: any = {};

-	if (!opts.services?.["relay"]) {
+	if (opts.services?.["relay"] === null) {
+		delete opts.services?.["relay"];
+	} else if (!opts.services?.["relay"]) {
 		const relayComponent = relay();
 		if (relayComponent) {
 			// will be null in browser
 			extraServices["relay"] = relayComponent;
 		}
 	}
+
 	if (!opts.services?.["identify"]) {
 		extraServices["identify"] = identify();
 	}
diff --git a/packages/clients/peerbit/src/peer.ts b/packages/clients/peerbit/src/peer.ts
index b8c92ab92..a4341f622 100644
--- a/packages/clients/peerbit/src/peer.ts
+++ b/packages/clients/peerbit/src/peer.ts
@@ -1,4 +1,5 @@
 import { privateKeyFromRaw } from "@libp2p/crypto/keys";
+import type { PeerId } from "@libp2p/interface";
 import "@libp2p/peer-id";
 import {
 	type Multiaddr,
@@ -10,6 +11,7 @@ import { DirectBlock } from "@peerbit/blocks";
 import {
 	Ed25519Keypair,
 	Ed25519PublicKey,
+	PublicSignKey,
 	Secp256k1Keypair,
 	getKeypairFromPrivateKey,
 } from "@peerbit/crypto";
@@ -119,7 +121,7 @@ export class Peerbit implements ProgramClient {
 		let libp2pExtended: Libp2pExtended | undefined = (options as Libp2pOptions)
 			.libp2p as Libp2pExtended;

-		const asRelay = (options as SimpleLibp2pOptions).relay;
+		const asRelay = (options as SimpleLibp2pOptions).relay ?? true;

 		const directory = options.directory;
 		const hasDir = directory != null;
@@ -176,19 +178,25 @@
 					: undefined;
 			}

+			const services: any = {
+				keychain: (c: any) => keychain,
+				blocks: (c: any) =>
+					new DirectBlock(c, {
+						canRelayMessage: asRelay,
+						directory: blocksDirectory,
+					}),
+				pubsub: (c: any) => new DirectSub(c, { canRelayMessage: asRelay }),
+				...extendedOptions?.services,
+			};
+
+			if (!asRelay) {
+				services.relay = null;
+			}
+
 			libp2pExtended = await createLibp2pExtended({
 				...extendedOptions,
 				privateKey,
-				services: {
-					keychain: (c: any) => keychain,
-					blocks: (c: any) =>
-						new DirectBlock(c, {
-							canRelayMessage: asRelay,
-							directory: blocksDirectory,
-						}),
-					pubsub: (c: any) => new DirectSub(c, { canRelayMessage: asRelay }),
-					...extendedOptions?.services,
-				} as any, // TODO types are funky
+				services,
 				datastore,
 			});
 		}
@@ -280,6 +288,7 @@
 			? address
 			: address.getMultiaddrs();
 		const connection = await this.libp2p.dial(maddress);
+
 		const publicKey = Ed25519PublicKey.fromPeerId(connection.remotePeer);

 		// TODO, do this as a promise instead using the onPeerConnected events in pubsub and blocks
@@ -292,6 +301,17 @@
 		);
 	}

+	async hangUp(address: PeerId | PublicSignKey | string | Multiaddr) {
+		await this.libp2p.hangUp(
+			address instanceof PublicSignKey
+				? address.toPeerId()
+				: typeof address === "string"
+					? multiaddr(address)
+					: address,
+		);
+		// TODO wait for pubsub and blocks to disconnect?
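+		// A possible implementation (sketch, an assumption rather than part of
+		// this patch): wait until this.services.pubsub.peers no longer holds the
+		// remote peer, which is the condition the hangup test in connect.spec.ts
+		// asserts via waitForResolved() after calling hangUp().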
+ } + async start() { await this._storage.open(); await this.indexer.start(); diff --git a/packages/clients/peerbit/test/dial.spec.ts b/packages/clients/peerbit/test/connect.spec.ts similarity index 63% rename from packages/clients/peerbit/test/dial.spec.ts rename to packages/clients/peerbit/test/connect.spec.ts index 13578929d..45ac5329c 100644 --- a/packages/clients/peerbit/test/dial.spec.ts +++ b/packages/clients/peerbit/test/connect.spec.ts @@ -1,5 +1,5 @@ import { SeekDelivery } from "@peerbit/stream-interface"; -import { waitFor } from "@peerbit/time"; +import { waitFor, waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import { Peerbit } from "../src/index.js"; @@ -52,3 +52,36 @@ describe(`dial`, function () { ); }); }); + +describe(`hangup`, function () { + let clients: [Peerbit, Peerbit]; + + beforeEach(async () => { + clients = [ + await Peerbit.create({ + relay: false, // https://github.com/libp2p/js-libp2p/issues/2794 + }), + await Peerbit.create({ + relay: false, // https://github.com/libp2p/js-libp2p/issues/2794 + }), + ]; + }); + + afterEach(async () => { + await Promise.all(clients.map((c) => c.stop())); + }); + + it("pubsub subscribers clears up", async () => { + let topic = "topic"; + await clients[0].services.pubsub.subscribe(topic); + await clients[1].services.pubsub.subscribe(topic); + await clients[0].dial(clients[1].getMultiaddrs()[0]); + await waitForResolved(() => + expect(clients[0].services.pubsub.peers.size).to.eq(1), + ); + await clients[0].hangUp(clients[1].peerId); + await waitForResolved(() => + expect(clients[0].services.pubsub.peers.size).to.eq(0), + ); + }); +}); diff --git a/packages/programs/data/shared-log/benchmark/get-samples.ts b/packages/programs/data/shared-log/benchmark/get-samples.ts index 913938fdb..dffb0f38d 100644 --- a/packages/programs/data/shared-log/benchmark/get-samples.ts +++ b/packages/programs/data/shared-log/benchmark/get-samples.ts @@ -2,80 +2,82 @@ import { Ed25519Keypair } from "@peerbit/crypto"; import type { Index } from "@peerbit/indexer-interface"; import { create as createIndex } from "@peerbit/indexer-sqlite3"; import B from "benchmark"; -import { - ReplicationRangeIndexable, - getEvenlySpacedU32, - getSamples, -} from "../src/ranges.js"; +import { createNumbers } from "../src/integers.js"; +import { ReplicationRangeIndexableU32, getSamples } from "../src/ranges.js"; // Run with "node --loader ts-node/esm ./benchmark/get-samples.ts" -let create = async ( - ...rects: ReplicationRangeIndexable[] -): Promise<[Index, any]> => { - const indices = await createIndex(); - const index = await indices.init({ schema: ReplicationRangeIndexable }); - await indices.start(); - for (const rect of rects) { - await index.put(rect); +const suite = new B.Suite(); +const resolutions: ["u32", "u64"] = ["u32", "u64"]; +for (const resolution of resolutions) { + let create = async ( + ...rects: ReplicationRangeIndexableU32[] + ): Promise<[Index, any]> => { + const indices = await createIndex(); + const index = await indices.init({ schema: ReplicationRangeIndexableU32 }); + await indices.start(); + for (const rect of rects) { + await index.put(rect); + } + return [index, indices]; + }; + + let a = (await Ed25519Keypair.create()).publicKey; + let b = (await Ed25519Keypair.create()).publicKey; + let c = (await Ed25519Keypair.create()).publicKey; + + let ranges: ReplicationRangeIndexableU32[] = []; + let rangeCount = 1000; + for (let i = 0; i < rangeCount; i++) { + ranges.push( + ...[ + new ReplicationRangeIndexableU32({ + 
publicKey: a, + length: 0.2 / rangeCount, + offset: (0 + rangeCount / i) % 1, + timestamp: 0n, + }), + new ReplicationRangeIndexableU32({ + publicKey: b, + length: 0.4 / rangeCount, + offset: (0.333 + rangeCount / i) % 1, + timestamp: 0n, + }), + new ReplicationRangeIndexableU32({ + publicKey: c, + length: 0.6 / rangeCount, + offset: (0.666 + rangeCount / i) % 1, + timestamp: 0n, + }), + new ReplicationRangeIndexableU32({ + publicKey: c, + length: 0.6 / rangeCount, + offset: (0.666 + rangeCount / i) % 1, + timestamp: 0n, + }), + ], + ); } - return [index, indices]; -}; -let a = (await Ed25519Keypair.create()).publicKey; -let b = (await Ed25519Keypair.create()).publicKey; -let c = (await Ed25519Keypair.create()).publicKey; + const [index, indices] = await create(...ranges); -let ranges: ReplicationRangeIndexable[] = []; -let rangeCount = 1000; -for (let i = 0; i < rangeCount; i++) { - ranges.push( - ...[ - new ReplicationRangeIndexable({ - publicKey: a, - length: 0.2 / rangeCount, - offset: (0 + rangeCount / i) % 1, - timestamp: 0n, - }), - new ReplicationRangeIndexable({ - publicKey: b, - length: 0.4 / rangeCount, - offset: (0.333 + rangeCount / i) % 1, - timestamp: 0n, - }), - new ReplicationRangeIndexable({ - publicKey: c, - length: 0.6 / rangeCount, - offset: (0.666 + rangeCount / i) % 1, - timestamp: 0n, - }), - new ReplicationRangeIndexable({ - publicKey: c, - length: 0.6 / rangeCount, - offset: (0.666 + rangeCount / i) % 1, - timestamp: 0n, - }), - ], - ); + const numbers = createNumbers(resolution); + suite + .add("getSamples", { + fn: async (deferred: any) => { + await getSamples(numbers.getGrid(Math.random(), 2), index, 0, numbers); + deferred.resolve(); + }, + defer: true, + }) + .on("cycle", (event: any) => { + console.log(String(event.target)); + }) + .on("error", (err: any) => { + throw err; + }) + .on("complete", async function (this: any) { + await indices.drop(); + }) + .run(); } - -const [index, indices] = await create(...ranges); -const suite = new B.Suite(); -suite - .add("getSamples", { - fn: async (deferred: any) => { - await getSamples(getEvenlySpacedU32(Math.random(), 2), index, 0); - deferred.resolve(); - }, - defer: true, - }) - .on("cycle", (event: any) => { - console.log(String(event.target)); - }) - .on("error", (err: any) => { - throw err; - }) - .on("complete", async function (this: any) { - await indices.drop(); - }) - .run(); diff --git a/packages/programs/data/shared-log/benchmark/index.ts b/packages/programs/data/shared-log/benchmark/index.ts index fb90beaae..a60067be4 100644 --- a/packages/programs/data/shared-log/benchmark/index.ts +++ b/packages/programs/data/shared-log/benchmark/index.ts @@ -33,16 +33,16 @@ class Document { } @variant("test_shared_log") -class TestStore extends Program> { +class TestStore extends Program> { @field({ type: SharedLog }) - logs: SharedLog; + logs: SharedLog; - constructor(properties?: { logs: SharedLog }) { + constructor(properties?: { logs: SharedLog }) { super(); this.logs = properties?.logs || new SharedLog(); } - async open(options?: Args): Promise { + async open(options?: Args): Promise { await this.logs.open({ ...options, encoding: { @@ -57,7 +57,7 @@ const peersCount = 1; const session = await TestSession.connected(peersCount); const store = new TestStore({ - logs: new SharedLog({ + logs: new SharedLog({ id: new Uint8Array(32), }), }); diff --git a/packages/programs/data/shared-log/benchmark/replication-prune.ts b/packages/programs/data/shared-log/benchmark/replication-prune.ts index 2a8798d49..1fa1b58b9 100644 --- 
a/packages/programs/data/shared-log/benchmark/replication-prune.ts +++ b/packages/programs/data/shared-log/benchmark/replication-prune.ts @@ -50,10 +50,12 @@ let session: TestSession = await TestSession.connected(3, [ }, }, ]); -let db1: EventStore, db2: EventStore, db3: EventStore; +let db1: EventStore, + db2: EventStore, + db3: EventStore; const init = async (min: number, max?: number) => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min, @@ -62,7 +64,7 @@ const init = async (min: number, max?: number) => { replicate: false, }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -75,7 +77,7 @@ const init = async (min: number, max?: number) => { }, ))!; - db3 = (await EventStore.open>( + db3 = (await EventStore.open>( db1.address!, session.peers[2], { diff --git a/packages/programs/data/shared-log/benchmark/replication.ts b/packages/programs/data/shared-log/benchmark/replication.ts index ab0716c92..fb174d754 100644 --- a/packages/programs/data/shared-log/benchmark/replication.ts +++ b/packages/programs/data/shared-log/benchmark/replication.ts @@ -39,13 +39,13 @@ let session: TestSession = await TestSession.connected(2, [ }, ]); -let db1: EventStore, db2: EventStore; +let db1: EventStore, db2: EventStore; let abortController = new AbortController(); let resolvers: Map void }> = new Map(); -db1 = await session.peers[0].open(new EventStore(), { +db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -53,7 +53,7 @@ db1 = await session.peers[0].open(new EventStore(), { }, }); -db2 = (await EventStore.open>( +db2 = (await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/package.json b/packages/programs/data/shared-log/package.json index 4fc856905..e732698ae 100644 --- a/packages/programs/data/shared-log/package.json +++ b/packages/programs/data/shared-log/package.json @@ -63,7 +63,8 @@ "@peerbit/program": "5.0.7", "@peerbit/log": "4.0.18", "@peerbit/rpc": "5.0.16", - "@peerbit/time": "2.0.7" + "@peerbit/time": "2.0.7", + "@peerbit/riblt": "0.0.1" }, "devDependencies": { "@peerbit/test-utils": "^2.1.6" diff --git a/packages/programs/data/shared-log/src/index.ts b/packages/programs/data/shared-log/src/index.ts index 61e2dc826..d108710f3 100644 --- a/packages/programs/data/shared-log/src/index.ts +++ b/packages/programs/data/shared-log/src/index.ts @@ -21,6 +21,7 @@ import { Log, type LogEvents, type LogProperties, + Meta, ShallowEntry, type ShallowOrFullEntry, } from "@peerbit/log"; @@ -62,18 +63,30 @@ import { ResponseMaybeSync, createExchangeHeadsMessages, } from "./exchange-heads.js"; +import { + MAX_U32, + type NumberFromType, + bytesToNumber, + denormalizer, +} from "./integers.js"; import { TransportMessage } from "./message.js"; import { PIDReplicationController } from "./pid.js"; import { - EntryReplicated, + type EntryReplicated, + EntryReplicatedU32, + EntryReplicatedU64, ReplicationIntent, - ReplicationRange, - ReplicationRangeIndexable, + type ReplicationRangeIndexable, + ReplicationRangeIndexableU32, + ReplicationRangeIndexableU64, + ReplicationRangeMessage, + SyncStatus, getCoverSet, - getEvenlySpacedU32, getSamples, - hasCoveringRange, + iHaveCoveringRange, isMatured, + isReplicationRangeMessage, + mergeRanges, minimumWidthToCover, shouldAssigneToRangeBoundary, toRebalance, @@ -81,7 +94,6 @@ import { import { type ReplicationDomainHash, 
createReplicationDomainHash, - hashToU32, } from "./replication-domain-hash.js"; import { type ReplicationDomainTime, @@ -94,7 +106,6 @@ import { type ReplicationDomain, debounceAggregationChanges, mergeReplicationChanges, - type u32, } from "./replication-domain.js"; import { AbsoluteReplicas, @@ -109,7 +120,7 @@ import { encodeReplicas, maxReplicas, } from "./replication.js"; -import { MAX_U32, Observer, Replicator, scaleToU32 } from "./role.js"; +import { Observer, Replicator } from "./role.js"; import { groupByGid } from "./utils.js"; export { @@ -121,7 +132,7 @@ export { }; export { type CPUUsage, CPUUsageIntervalLag }; export * from "./replication.js"; -export { EntryReplicated, ReplicationRangeIndexable }; +export { type ReplicationRangeIndexable }; export const logger = loggerFn({ module: "shared-log" }); const getLatestEntry = ( @@ -155,9 +166,10 @@ export type DynamicReplicationOptions = { export type FixedReplicationOptions = { id?: Uint8Array; normalized?: boolean; - factor: number | "all" | "right"; + factor: number | bigint | "all" | "right"; strict?: boolean; // if true, only this range will be replicated - offset?: number; + offset?: number | bigint; + syncStatus?: SyncStatus; }; export type ReplicationOptions = @@ -211,12 +223,61 @@ const isReplicationOptionsDependentOnPreviousState = ( return false; }; -export type SharedLogOptions> = { +interface IndexableDomain { + denormalize: (value: number) => NumberFromType; + bytesToNumber: (bytes: Uint8Array) => NumberFromType; + constructorEntry: new (properties: { + coordinate: NumberFromType; + hash: string; + meta: Meta; + assignedToRangeBoundary: boolean; + }) => EntryReplicated; + constructorRange: new ( + properties: { + id?: Uint8Array; + offset: NumberFromType; + length: NumberFromType; + mode?: ReplicationIntent; + timestamp?: bigint; + } & ({ publicKeyHash: string } | { publicKey: PublicSignKey }), + ) => ReplicationRangeIndexable; +} + +const createIndexableDomainFromResolution = ( + resolution: R, +): IndexableDomain => { + const denormalizerFn = denormalizer(resolution); + const byteToNumberFn = bytesToNumber(resolution); + if (resolution === "u32") { + return { + constructorEntry: EntryReplicatedU32, + constructorRange: ReplicationRangeIndexableU32, + denormalize: denormalizerFn, + bytesToNumber: byteToNumberFn, + } as any as IndexableDomain; + } else if (resolution === "u64") { + return { + constructorEntry: EntryReplicatedU64, + constructorRange: ReplicationRangeIndexableU64, + denormalize: denormalizerFn, + bytesToNumber: byteToNumberFn, + } as any as IndexableDomain; + } + throw new Error("Unsupported resolution"); +}; + +export type SharedLogOptions< + T, + D extends ReplicationDomain, + R extends "u32" | "u64" = D extends ReplicationDomain + ? I + : "u32", +> = { replicate?: ReplicationOptions; replicas?: ReplicationLimitsOptions; respondToIHaveTimeout?: number; canReplicate?: (publicKey: PublicSignKey) => Promise | boolean; - sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; + sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; timeUntilRoleMaturity?: number; waitForReplicatorTimeout?: number; distributionDebounceTime?: number; @@ -250,8 +311,11 @@ const checkMinReplicasLimit = (minReplicas: number) => { export type Args< T, - D extends ReplicationDomain = ReplicationDomainHash, -> = LogProperties & LogEvents & SharedLogOptions; + D extends ReplicationDomain, + R extends "u32" | "u64" = D extends ReplicationDomain + ? 
I + : "u32", +> = LogProperties & LogEvents & SharedLogOptions; export type SharedAppendOptions = AppendOptions & { replicas?: AbsoluteReplicas | number; @@ -273,9 +337,12 @@ export interface SharedLogEvents extends ProgramEvents { @variant("shared_log") export class SharedLog< - T = Uint8Array, - D extends ReplicationDomain = ReplicationDomainHash, -> extends Program, SharedLogEvents> { + T, + D extends ReplicationDomain, + R extends "u32" | "u64" = D extends ReplicationDomain + ? I + : "u32", +> extends Program, SharedLogEvents> { @field({ type: Log }) log: Log; @@ -286,8 +353,8 @@ export class SharedLog< private _isReplicating!: boolean; private _isAdaptiveReplicating!: boolean; - private _replicationRangeIndex!: Index; - private _entryCoordinatesIndex!: Index; + private _replicationRangeIndex!: Index>; + private _entryCoordinatesIndex!: Index>; /* private _totalParticipation!: number; */ private _gidPeersHistory!: Map>; @@ -301,7 +368,7 @@ export class SharedLog< private _logProperties?: LogProperties & LogEvents & - SharedLogOptions; + SharedLogOptions; private _closeController!: AbortController; private _respondToIHaveTimeout!: any; private _pendingDeletes!: Map< @@ -340,7 +407,7 @@ export class SharedLog< private openTime!: number; private oldestOpenTime!: number; - private sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; + private sync?: (entry: ShallowOrFullEntry | EntryReplicated) => boolean; // A fn that we can call many times that recalculates the participation role private rebalanceParticipationDebounced: @@ -349,7 +416,7 @@ export class SharedLog< // A fn for debouncing the calls for pruning pruneDebouncedFn!: DebouncedAccumulatorMap< - Entry | ShallowEntry | EntryReplicated + Entry | ShallowEntry | EntryReplicated >; private responseToPruneDebouncedFn!: ReturnType< typeof debounceAcculmulator< @@ -389,6 +456,7 @@ export class SharedLog< replicationController!: PIDReplicationController; history!: { usedMemory: number; factor: number }[]; domain!: D; + indexableDomain!: IndexableDomain; interval: any; constructor(properties?: { id?: Uint8Array }) { @@ -417,8 +485,9 @@ export class SharedLog< if (segments.length > 0) { const segment = segments[0].toReplicationRange(); return new Replicator({ - factor: segment.factor / MAX_U32, - offset: segment.offset / MAX_U32, + // TODO types + factor: (segment.factor as number) / MAX_U32, + offset: (segment.offset as number) / MAX_U32, }); } @@ -430,37 +499,10 @@ export class SharedLog< if (!this._isReplicating) { return false; } - - /* - if (isAdaptiveReplicatorOption(this._replicationSettings)) { - return true; - } - - if ((this.replicationSettings as FixedReplicationOptions).factor !== 0) { - return true; - } */ - return (await this.countReplicationSegments()) > 0; } - /* get totalParticipation(): number { - return this._totalParticipation; - } */ - async calculateTotalParticipation() { - const sum = await this.replicationIndex.sum({ key: "width" }); - return Number(sum) / MAX_U32; - } - - async countReplicationSegments() { - const count = await this.replicationIndex.count({ - query: new StringMatch({ - key: "hash", - value: this.node.identity.publicKey.hashcode(), - }), - }); - return count; - } private setupRebalanceDebounceFunction( interval = RECALCULATE_PARTICIPATION_DEBOUNCE_INTERVAL, @@ -494,10 +536,14 @@ export class SharedLog< { reset, checkDuplicates, + syncStatus, announce, + mergeSegments, }: { reset?: boolean; checkDuplicates?: boolean; + syncStatus?: SyncStatus; + mergeSegments?: boolean; announce?: ( msg: 
AddedReplicationSegmentMessage | AllReplicatingSegmentsMessage, ) => void; @@ -507,7 +553,7 @@ export class SharedLog< if (isUnreplicationOptions(options)) { await this.unreplicate(); } else { - let ranges: ReplicationRangeIndexable[] = []; + let ranges: ReplicationRangeIndexable[] = []; if (options == null) { options = {}; @@ -531,7 +577,7 @@ export class SharedLog< ranges = [maybeRange]; offsetWasProvided = true; - } else if (options instanceof ReplicationRange) { + } else if (isReplicationRangeMessage(options)) { ranges = [ options.toReplicationRangeIndexable(this.node.identity.publicKey), ]; @@ -560,12 +606,42 @@ export class SharedLog< const normalized = rangeArg.normalized ?? true; offsetWasProvided = rangeArg.offset != null; const offset = - rangeArg.offset ?? - (normalized ? Math.random() : scaleToU32(Math.random())); + rangeArg.offset != null + ? normalized + ? this.indexableDomain.denormalize(rangeArg.offset as number) + : rangeArg.offset + : this.domain.numbers.random(); let factor = rangeArg.factor; - let width = normalized ? 1 : scaleToU32(1); + let fullWidth = this.domain.numbers.maxValue; + + let factorDenormalized = !normalized + ? factor + : this.indexableDomain.denormalize(factor as number); ranges.push( - new ReplicationRangeIndexable({ + new this.indexableDomain.constructorRange({ + id: rangeArg.id, + // @ts-ignore + offset: offset, + // @ts-ignore + length: (factor === "all" + ? fullWidth + : factor === "right" + // @ts-ignore + ? fullWidth - offset + : factorDenormalized) as NumberFromType, + /* typeof factor === "number" + ? factor + : factor === "all" + ? width + // @ts-ignore + : width - offset, */ + publicKeyHash: this.node.identity.publicKey.hashcode(), + mode: rangeArg.strict + ? ReplicationIntent.Strict + : ReplicationIntent.NonStrict, // automatic means that this range might be reused later for dynamic replication behaviour + timestamp: BigInt(+new Date()), + }), + /* new ReplicationRangeIndexable({ id: rangeArg.id, normalized, offset: offset, @@ -574,15 +650,21 @@ export class SharedLog< ? factor : factor === "all" ? width + // @ts-ignore : width - offset, publicKeyHash: this.node.identity.publicKey.hashcode(), mode: rangeArg.strict ? ReplicationIntent.Strict : ReplicationIntent.NonStrict, // automatic means that this range might be reused later for dynamic replication behaviour timestamp: BigInt(+new Date()), - }), + }), */ ); } + + if (mergeSegments && ranges.length > 1) { + const mergedSegment = mergeRanges(ranges, this.domain.numbers); + ranges = [mergedSegment]; + } } for (const range of ranges) { @@ -603,6 +685,7 @@ export class SharedLog< reset: resetRanges ?? false, checkDuplicates, announce, + syncStatus, }); return ranges; @@ -622,11 +705,11 @@ export class SharedLog< cpu: options?.limits?.cpu != null ? { - max: - typeof options?.limits?.cpu === "object" - ? options.limits.cpu.max - : options?.limits?.cpu, - } + max: + typeof options?.limits?.cpu === "object" + ? 
options.limits.cpu.max + : options?.limits?.cpu, + } : undefined, }, ); @@ -644,14 +727,17 @@ export class SharedLog< options?: { reset?: boolean; checkDuplicates?: boolean; + mergeSegments?: boolean; announce?: ( msg: AllReplicatingSegmentsMessage | AddedReplicationSegmentMessage, ) => void; }, ) { - let range: ReplicationRange[] | ReplicationOptions | undefined = undefined; + let range: ReplicationRangeMessage[] | ReplicationOptions | undefined = + undefined; + let syncStatus = SyncStatus.Unsynced; - if (rangeOrEntry instanceof ReplicationRange) { + if (rangeOrEntry instanceof ReplicationRangeMessage) { range = rangeOrEntry; } else if (rangeOrEntry instanceof Entry) { range = { @@ -659,8 +745,10 @@ export class SharedLog< offset: await this.domain.fromEntry(rangeOrEntry), normalized: false, }; + syncStatus = SyncStatus.Synced; /// we already have the entries } else if (Array.isArray(rangeOrEntry)) { - let ranges: (ReplicationRange | FixedReplicationOptions)[] = []; + let ranges: (ReplicationRangeMessage | FixedReplicationOptions)[] = + []; for (const entry of rangeOrEntry) { if (entry instanceof Entry) { ranges.push({ @@ -668,6 +756,8 @@ export class SharedLog< offset: await this.domain.fromEntry(entry), normalized: false, }); + + syncStatus = SyncStatus.Synced; /// we already have the entries } else { ranges.push(entry); } @@ -677,17 +767,17 @@ export class SharedLog< range = rangeOrEntry ?? true; } - return this._replicate(range, options); + return this._replicate(range, { ...options, syncStatus }); } - async unreplicate(rangeOrEntry?: Entry | ReplicationRange) { + async unreplicate(rangeOrEntry?: Entry | ReplicationRangeMessage) { let range: FixedReplicationOptions; if (rangeOrEntry instanceof Entry) { range = { factor: 1, offset: await this.domain.fromEntry(rangeOrEntry), }; - } else if (rangeOrEntry instanceof ReplicationRange) { + } else if (rangeOrEntry instanceof ReplicationRangeMessage) { range = rangeOrEntry; } else { this._isReplicating = false; @@ -729,7 +819,6 @@ export class SharedLog< .all(); await this.replicationIndex.del({ query: { hash: keyHash } }); - await this.updateOldestTimestampFromIndex(); const isMe = this.node.identity.publicKey.hashcode() === keyHash; @@ -835,7 +924,7 @@ export class SharedLog< } private async addReplicationRange( - ranges: ReplicationRangeIndexable[], + ranges: ReplicationRangeIndexable[], from: PublicSignKey, { reset, @@ -853,7 +942,7 @@ export class SharedLog< let isNewReplicator = false; let diffs: ReplicationChanges; - let deleted: ReplicationRangeIndexable[] | undefined = undefined; + let deleted: ReplicationRangeIndexable[] | undefined = undefined; if (reset) { deleted = ( await this.replicationIndex @@ -898,17 +987,17 @@ export class SharedLog< } if (checkDuplicates) { - let deduplicated: ReplicationRangeIndexable[] = []; + let deduplicated: ReplicationRangeIndexable[] = []; // TODO also deduplicate/de-overlap among the ranges that ought to be inserted? 
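+			// keep an incoming range only if none of the ranges we already store
+			// fully covers it (iHaveCoveringRange below); overlap among the
+			// incoming ranges themselves is what the TODO above leaves open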
for (const range of ranges) { - if (!(await hasCoveringRange(this.replicationIndex, range))) { + if (!(await iHaveCoveringRange(this.replicationIndex, range))) { deduplicated.push(range); } } ranges = deduplicated; } - let existingMap = new Map(); + let existingMap = new Map>(); for (const result of existing) { existingMap.set(result.value.idString, result.value); } @@ -936,6 +1025,7 @@ export class SharedLog< for (const diff of diffs) { if (diff.type === "added" || diff.type === "updated") { await this.replicationIndex.put(diff.range); + if (!reset) { this.oldestOpenTime = Math.min( Number(diff.range.timestamp), @@ -1050,8 +1140,9 @@ export class SharedLog< } async startAnnounceReplicating( - range: ReplicationRangeIndexable[], + range: ReplicationRangeIndexable[], options: { + syncStatus?: SyncStatus; reset?: boolean; checkDuplicates?: boolean; announce?: ( @@ -1212,7 +1303,7 @@ export class SharedLog< return result; } - async open(options?: Args): Promise { + async open(options?: Args): Promise { this.replicas = { min: options?.replicas?.min ? typeof options?.replicas?.min === "number" @@ -1225,7 +1316,13 @@ export class SharedLog< : options.replicas.max : undefined, }; - this.domain = options?.domain ?? (createReplicationDomainHash() as D); + // TODO types + this.domain = options?.domain + ? (options.domain as any as D) + : (createReplicationDomainHash("u32") as D); + this.indexableDomain = createIndexableDomainFromResolution( + this.domain.resolution, + ); this._respondToIHaveTimeout = options?.respondToIHaveTimeout ?? 2e4; this._pendingDeletes = new Map(); this._pendingIHave = new Map(); @@ -1270,11 +1367,11 @@ export class SharedLog< const logScope = await this.node.indexer.scope(id); const replicationIndex = await logScope.scope("replication"); this._replicationRangeIndex = await replicationIndex.init({ - schema: ReplicationRangeIndexable, + schema: this.indexableDomain.constructorRange, }); this._entryCoordinatesIndex = await replicationIndex.init({ - schema: EntryReplicated, + schema: this.indexableDomain.constructorEntry, }); const logIndex = await logScope.scope("log"); @@ -1544,7 +1641,7 @@ export class SharedLog< if (!key) { throw new Error( "Failed to resolve public key from hash: " + - segment.value.hash, + segment.value.hash, ); } this.events.dispatchEvent( @@ -1621,25 +1718,28 @@ export class SharedLog< options?: { roleAge?: number; eager?: - | { - unmaturedFetchCoverSize?: number; - } - | boolean; + | { + unmaturedFetchCoverSize?: number; + } + | boolean; }, ) { let roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); let eager = options?.eager ?? false; const range = await this.domain.fromArgs(args, this); - const set = await getCoverSet({ + const set = await getCoverSet({ peers: this.replicationIndex, start: range.offset, widthToCoverScaled: range.length ?? - (await minimumWidthToCover(this.replicas.min.getValue(this))), + (await minimumWidthToCover( + this.replicas.min.getValue(this), + this.domain.numbers, + )), roleAge, eager, - intervalWidth: MAX_U32, + numbers: this.domain.numbers, }); // add all in flight @@ -1745,8 +1845,7 @@ export class SharedLog< const { heads } = msg; logger.debug( - `${this.node.identity.publicKey.hashcode()}: Recieved heads: ${ - heads.length === 1 ? heads[0].entry.hash : "#" + heads.length + `${this.node.identity.publicKey.hashcode()}: Recieved heads: ${heads.length === 1 ? 
heads[0].entry.hash : "#" + heads.length }, logId: ${this.log.idString}`, ); @@ -1801,11 +1900,11 @@ export class SharedLog< let isLeader: | Map< - string, - { - intersecting: boolean; - } - > + string, + { + intersecting: boolean; + } + > | false; if (isReplicating) { @@ -1871,8 +1970,7 @@ export class SharedLog< } logger.debug( - `${this.node.identity.publicKey.hashcode()}: Dropping heads with gid: ${ - entry.entry.meta.gid + `${this.node.identity.publicKey.hashcode()}: Dropping heads with gid: ${entry.entry.meta.gid }. Because not leader`, ); } @@ -2153,7 +2251,7 @@ export class SharedLog< } logger.error( "Failed to find peer who updated replication settings: " + - e?.message, + e?.message, ); }); } else if (msg instanceof StoppedReplicating) { @@ -2191,6 +2289,27 @@ export class SharedLog< } } + async calculateTotalParticipation() { + const sum = await this.replicationIndex.sum({ key: "width" }); + return Number(sum) / MAX_U32; + } + + async countReplicationSegments() { + const count = await this.replicationIndex.count({ + query: new StringMatch({ + key: "hash", + value: this.node.identity.publicKey.hashcode(), + }), + }); + return count; + } + + async getAllReplicationSegments() { + const ranges = await this.replicationIndex + .iterate().all(); + return ranges.map((x) => x.value); + } + async getMyReplicationSegments() { const ranges = await this.replicationIndex .iterate({ @@ -2203,7 +2322,9 @@ export class SharedLog< return ranges.map((x) => x.value); } - async getMyTotalParticipation() { + + + async calculateMyTotalParticipation() { // sum all of my replicator rects return (await this.getMyReplicationSegments()).reduce( (acc, { widthNormalized }) => acc + widthNormalized, @@ -2211,14 +2332,14 @@ export class SharedLog< ); } - get replicationIndex(): Index { + get replicationIndex(): Index> { if (!this._replicationRangeIndex) { throw new ClosedError(); } return this._replicationRangeIndex; } - get entryCoordinatesIndex(): Index { + get entryCoordinatesIndex(): Index> { if (!this._entryCoordinatesIndex) { throw new ClosedError(); } @@ -2275,78 +2396,118 @@ export class SharedLog< options?: { verifySignatures?: boolean; timeout?: number; - replicate?: boolean; + replicate?: + | boolean + | { + mergeSegments?: boolean; + }; }, ): Promise { let messageToSend: AddedReplicationSegmentMessage | undefined = undefined; + let entriesToReplicate: Entry[] = []; if (options?.replicate) { // TODO this block should perhaps be called from a callback on the this.log.join method on all the ignored element because already joined, like "onAlreadyJoined" // check which entrise we already have but not are replicating, and replicate them - let alreadyJoined: Entry[] = []; + // we can not just do the 'join' call because it will ignore the already joined entries for (const element of entries) { if (typeof element === "string") { const entry = await this.log.get(element); if (entry) { - alreadyJoined.push(entry); + entriesToReplicate.push(entry); } } else if (element instanceof Entry) { if (await this.log.has(element.hash)) { - alreadyJoined.push(element); + entriesToReplicate.push(element); } } else { const entry = await this.log.get(element.hash); if (entry) { - alreadyJoined.push(entry); + entriesToReplicate.push(entry); } } } // assume is heads - await this.replicate(alreadyJoined, { + /* await this.replicate(alreadyJoined, { checkDuplicates: true, + mergeSegments: + typeof options.replicate !== "boolean" + ? 
options.replicate.mergeSegments + : false, announce: (msg) => { messageToSend = msg; }, - }); + }); */ } let joinOptions = options?.replicate ? { - ...options, - onChange: async (change: Change) => { - if (change.added) { - for (const entry of change.added) { - if (entry.head) { - await this.replicate(entry.entry, { - checkDuplicates: true, - - // we override the announce step here to make sure we announce all new replication info - // in one large message instead - announce: (msg) => { - if (msg instanceof AllReplicatingSegmentsMessage) { - throw new Error("Unexpected"); - } - - if (messageToSend) { - // merge segments to make it into one messages - for (const segment of msg.segments) { - messageToSend.segments.push(segment); - } - } else { - messageToSend = msg; - } - }, - }); - } + ...options, + onChange: async (change: Change) => { + if (change.added) { + for (const entry of change.added) { + if (entry.head) { + entriesToReplicate.push(entry.entry); } } - }, - } + /* let heads = change.added.filter((x) => x.head).map(x => x.entry); + await this.replicate(heads, { + checkDuplicates: true, + mergeSegments: + typeof options.replicate !== "boolean" && options.replicate + ? options.replicate.mergeSegments + : false, + + // we override the announce step here to make sure we announce all new replication info + // in one large message instead + announce: (msg) => { + if (msg instanceof AllReplicatingSegmentsMessage) { + throw new Error("Unexpected"); + } + + if (messageToSend) { + // merge segments to make it into one messages + for (const segment of msg.segments) { + messageToSend.segments.push(segment); + } + } else { + messageToSend = msg; + } + }, + }); */ + } + }, + } : options; await this.log.join(entries, joinOptions); + options?.replicate && + (await this.replicate(entriesToReplicate, { + checkDuplicates: true, + mergeSegments: + typeof options.replicate !== "boolean" && options.replicate + ? 
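+					// assumption about intent: merging collapses the per-entry point
+					// ranges into one contiguous segment via mergeRanges() (see
+					// _replicate), so joining many entries at once does not flood the
+					// replication index with tiny ranges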
options.replicate.mergeSegments + : false, + + // we override the announce step here to make sure we announce all new replication info + // in one large message instead + announce: (msg) => { + if (msg instanceof AllReplicatingSegmentsMessage) { + throw new Error("Unexpected"); + } + + if (messageToSend) { + // merge segments to make it into one messages + for (const segment of msg.segments) { + messageToSend.segments.push(segment); + } + } else { + messageToSend = msg; + } + }, + })); if (messageToSend) { await this.rpc.send(messageToSend, { @@ -2357,17 +2518,17 @@ export class SharedLog< private async findLeadersPersist( cursor: - | number[] + | NumberFromType[] | { - entry: ShallowOrFullEntry | EntryReplicated; - minReplicas: number; - }, - entry: ShallowOrFullEntry | EntryReplicated, + entry: ShallowOrFullEntry | EntryReplicated; + minReplicas: number; + }, + entry: ShallowOrFullEntry | EntryReplicated, options?: { roleAge?: number; // persist even if not leader persist?: { - prev?: EntryReplicated[]; + prev?: EntryReplicated[]; }; }, ): Promise<{ @@ -2424,11 +2585,11 @@ export class SharedLog< async isLeader( cursor: - | number[] + | NumberFromType[] | { - entry: ShallowOrFullEntry | EntryReplicated; - replicas: number; - }, + entry: ShallowOrFullEntry | EntryReplicated; + replicas: number; + }, options?: { roleAge?: number; }, @@ -2438,7 +2599,7 @@ export class SharedLog< } private async waitForIsLeader( - cursor: number[], + cursor: NumberFromType[], hash: string, options: { timeout: number; @@ -2487,11 +2648,11 @@ export class SharedLog< async findLeaders( cursor: - | number[] + | NumberFromType[] | { - entry: ShallowOrFullEntry | EntryReplicated; - replicas: number; - }, + entry: ShallowOrFullEntry | EntryReplicated; + replicas: number; + }, options?: { roleAge?: number; }, @@ -2505,18 +2666,18 @@ export class SharedLog< const coordinates = Array.isArray(cursor) ? 
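+		// a cursor is either a precomputed coordinate list, or an entry plus a
+		// replica count from which evenly spaced coordinates are derived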
cursor : await this.createCoordinates(cursor.entry, cursor.replicas); - const leaders = await this.findLeadersFromU32(coordinates, options); + const leaders = await this.findLeadersFromN(coordinates, options); return leaders; } private async groupByLeaders( cursors: ( - | number[] + | NumberFromType[] | { - entry: ShallowOrFullEntry | EntryReplicated; - replicas: number; - } + entry: ShallowOrFullEntry | EntryReplicated; + replicas: number; + } )[], options?: { roleAge?: number; @@ -2538,11 +2699,11 @@ export class SharedLog< } private async createCoordinates( - entry: ShallowOrFullEntry | EntryReplicated, + entry: ShallowOrFullEntry | EntryReplicated, minReplicas: number, ) { const cursor = await this.domain.fromEntry(entry); - const out = getEvenlySpacedU32(cursor, minReplicas); + const out = this.domain.numbers.getGrid(cursor, minReplicas); return out; } @@ -2555,16 +2716,16 @@ export class SharedLog< private async persistCoordinate( properties: { - coordinates: number[]; - entry: ShallowOrFullEntry | EntryReplicated; + coordinates: NumberFromType[]; + entry: ShallowOrFullEntry | EntryReplicated; leaders: - | Map< - string, - { - intersecting: boolean; - } - > - | false; + | Map< + string, + { + intersecting: boolean; + } + > + | false; }, options?: { assignToRangeBoundary?: boolean; @@ -2579,7 +2740,7 @@ export class SharedLog< for (const coordinate of properties.coordinates) { await this.entryCoordinatesIndex.put( - new EntryReplicated({ + new this.indexableDomain.constructorEntry({ assignedToRangeBoundary, coordinate, meta: properties.entry.meta, @@ -2624,14 +2785,19 @@ export class SharedLog< ); // / 3 so that if 2 replicators and timeUntilRoleMaturity = 1e4 the result will be 1 } - private async findLeadersFromU32( - cursor: u32[], + private async findLeadersFromN( + cursor: NumberFromType[], options?: { roleAge?: number; }, ): Promise> { const roleAge = options?.roleAge ?? (await this.getDefaultMinRoleAge()); // TODO -500 as is added so that i f someone else is just as new as us, then we treat them as mature as us. without -500 we might be slower syncing if two nodes starts almost at the same time - return getSamples(cursor, this.replicationIndex, roleAge); + return getSamples( + cursor, + this.replicationIndex, + roleAge, + this.domain.numbers, + ); } async isReplicator( @@ -2702,8 +2868,8 @@ export class SharedLog< prune( entries: - | (EntryReplicated | ShallowOrFullEntry)[] - | Map>, + | (EntryReplicated | ShallowOrFullEntry)[] + | Map | ShallowOrFullEntry>, options?: { timeout?: number; unchecked?: boolean }, ): Promise[] { if (options?.unchecked) { @@ -2725,7 +2891,8 @@ export class SharedLog< // - Peers join and leave, which means we might not be a replicator anymore const promises: Promise[] = []; - const filteredEntries: (EntryReplicated | ShallowOrFullEntry)[] = []; + const filteredEntries: (EntryReplicated | ShallowOrFullEntry)[] = + []; const deleted = new Set(); for (const entry of entries.values()) { @@ -2758,7 +2925,7 @@ export class SharedLog< deferredPromise.reject(e); }; - let cursor: number[] | undefined = undefined; + let cursor: NumberFromType[] | undefined = undefined; const timeout = setTimeout(async () => { reject( @@ -2780,10 +2947,10 @@ export class SharedLog< const leaders = await this.waitForIsLeader( cursor ?? 
- (cursor = await this.createCoordinates( - entry, - minMinReplicasValue, - )), + (cursor = await this.createCoordinates( + entry, + minMinReplicasValue, + )), publicKeyHash, ); if (leaders) { @@ -2920,7 +3087,7 @@ export class SharedLog< } this.onReplicationChange( - (await this.getMyReplicationSegments()).map((x) => { + (await this.getAllReplicationSegments()).map((x) => { return { range: x, type: "added" }; }), ); @@ -2950,9 +3117,9 @@ export class SharedLog< const uncheckedDeliver: Map> = new Map(); - const allEntriesToDelete: EntryReplicated[] = []; + const allEntriesToDelete: EntryReplicated[] = []; - for await (const { gid, entries: coordinates } of toRebalance( + for await (const { gid, entries: coordinates } of toRebalance( change, this.entryCoordinatesIndex, )) { @@ -2966,7 +3133,7 @@ export class SharedLog< } let { isLeader, leaders: currentPeers } = await this.findLeadersPersist( - coordinates.map((x) => x.coordinate), + coordinates.map((x) => x.coordinate) as NumberFromType[], coordinates[0], { roleAge: 0, @@ -3071,7 +3238,7 @@ export class SharedLog< } async _onUnsubscription(evt: CustomEvent) { - logger.debug( + logger.trace( `Peer disconnected '${evt.detail.from.hashcode()}' from '${JSON.stringify( evt.detail.unsubscriptions.map((x) => x), )} '`, @@ -3086,7 +3253,7 @@ export class SharedLog< } async _onSubscription(evt: CustomEvent) { - logger.debug( + logger.trace( `New peer '${evt.detail.from.hashcode()}' connected to '${JSON.stringify( evt.detail.subscriptions.map((x) => x), )}'`, @@ -3171,9 +3338,11 @@ export class SharedLog< if (relativeDifference > 0.0001) { // TODO can not reuse old range, since it will (potentially) affect the index because of sideeffects - dynamicRange = new ReplicationRangeIndexable({ - offset: hashToU32(this.node.identity.publicKey.bytes), - length: scaleToU32(newFactor), + dynamicRange = new this.indexableDomain.constructorRange({ + offset: this.indexableDomain.bytesToNumber( + this.node.identity.publicKey.bytes, + ), + length: this.indexableDomain.denormalize(newFactor), publicKeyHash: dynamicRange.hash, id: dynamicRange.id, mode: dynamicRange.mode, @@ -3223,10 +3392,9 @@ export class SharedLog< .all() )?.[0]?.value; if (!range) { - range = new ReplicationRangeIndexable({ - normalized: true, - offset: Math.random(), - length: 0, + range = new this.indexableDomain.constructorRange({ + offset: this.domain.numbers.random(), + length: this.domain.numbers.zero, publicKeyHash: this.node.identity.publicKey.hashcode(), mode: ReplicationIntent.NonStrict, timestamp: BigInt(+new Date()), diff --git a/packages/programs/data/shared-log/src/integers.ts b/packages/programs/data/shared-log/src/integers.ts new file mode 100644 index 000000000..9f275456d --- /dev/null +++ b/packages/programs/data/shared-log/src/integers.ts @@ -0,0 +1,97 @@ +import { BinaryReader } from "@dao-xyz/borsh"; + +export type u32 = number; +export type u64 = bigint; +export type NumberFromType = U extends "u32" + ? number + : bigint; +export const MAX_U32 = 4294967295; +export const MAX_U64 = 18446744073709551615n; +export const MAX_U64_NUMBER = 18446744073709551615; +export const HALF_MAX_U32 = 2147483647; // rounded down +export const HALF_MAX_U64 = 9223372036854775807n; // rounded down + +export const denormalizer = ( + resolution: R, +): ((number: number) => NumberFromType) => { + if (resolution === "u32") { + return ((value: number) => { + const result = Math.round(value * MAX_U32); + return result > MAX_U32 ? 
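+			// clamp: guards against an input slightly above 1 overshooting MAX_U32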
MAX_U32 : result;
+		}) as (number: number) => NumberFromType<R>;
+	}
+	return ((value: number) => {
+		// Number cannot represent u64 exactly, hence the clamp below
+		let result = BigInt(Math.round(value * MAX_U64_NUMBER));
+		return result > MAX_U64 ? MAX_U64 : result;
+	}) as (number: number) => NumberFromType<R>;
+};
+
+export const bytesToNumber = <R extends "u32" | "u64">(
+	resolution: R,
+): ((arr: Uint8Array) => NumberFromType<R>) => {
+	if (resolution === "u32") {
+		return ((arr: Uint8Array): number => {
+			const seedNumber = new BinaryReader(arr).u32();
+			return seedNumber;
+		}) as (arr: Uint8Array) => NumberFromType<R>;
+	}
+	return ((arr: Uint8Array): bigint => {
+		const seedNumber = new BinaryReader(arr).u64();
+		return seedNumber;
+	}) as (arr: Uint8Array) => NumberFromType<R>;
+};
+
+export interface Numbers<R extends "u32" | "u64"> {
+	zero: NumberFromType<R>;
+	maxValue: NumberFromType<R>;
+	random: () => NumberFromType<R>;
+	getGrid: (from: NumberFromType<R>, count: number) => NumberFromType<R>[];
+	divRound: (a: NumberFromType<R>, b: number | bigint) => NumberFromType<R>;
+	abs: (a: NumberFromType<R>) => NumberFromType<R>;
+	min: (a: NumberFromType<R>, b: NumberFromType<R>) => NumberFromType<R>;
+}
+
+const getEvenlySpacedU32 = (from: number, count: number): number[] => {
+	let ret: number[] = new Array(count);
+	for (let i = 0; i < count; i++) {
+		ret[i] = Math.round(from + (i * MAX_U32) / count) % MAX_U32;
+	}
+	return ret;
+};
+
+const getEvenlySpacedU64 = (from: bigint, count: number): bigint[] => {
+	let ret: bigint[] = new Array(count);
+	for (let i = 0; i < count; i++) {
+		ret[i] = (from + (BigInt(i) * MAX_U64) / BigInt(count)) % MAX_U64;
+	}
+	return ret;
+};
+
+export const createNumbers = <N extends "u32" | "u64">(
+	resolution: N,
+): Numbers<N> => {
+	const denormalizerFn = denormalizer(resolution);
+	if (resolution === "u32") {
+		return {
+			random: () => denormalizerFn(Math.random()),
+			zero: 0,
+			maxValue: MAX_U32,
+			getGrid: getEvenlySpacedU32 as any, // TODO fix this
+			divRound: (a, b) => Math.round((a as number) / Number(b)) as any,
+			abs: (a) => Math.abs(a as number),
+			min: (a, b) => Math.min(a as number, b as number),
+		} as Numbers<N>;
+	} else if (resolution === "u64") {
+		return {
+			random: () => denormalizerFn(Math.random()),
+			zero: 0n,
+			maxValue: MAX_U64,
+			getGrid: getEvenlySpacedU64 as any, // TODO fix this
+			divRound: (a, b) => {
+				// round to nearest, matching the u32 variant (plain bigint division
+				// would truncate)
+				const divisor = BigInt(b);
+				return (((a as bigint) + divisor / 2n) / divisor) as any;
+			},
+			abs: (a) => (a < 0n ? -a : a),
+			min: (a, b) => (a < b ?
a : b), + } as Numbers; + } else { + throw new Error("Unsupported resolution"); + } +}; diff --git a/packages/programs/data/shared-log/src/ranges.ts b/packages/programs/data/shared-log/src/ranges.ts index abe2fc9dd..d0596a094 100644 --- a/packages/programs/data/shared-log/src/ranges.ts +++ b/packages/programs/data/shared-log/src/ranges.ts @@ -1,5 +1,11 @@ import { deserialize, field, serialize, variant } from "@dao-xyz/borsh"; -import { PublicSignKey, equals, randomBytes, toBase64 } from "@peerbit/crypto"; +import { + PublicSignKey, + equals, + randomBytes, + sha256Base64Sync, + toBase64, +} from "@peerbit/crypto"; import { And, BoolQuery, @@ -22,8 +28,14 @@ import { } from "@peerbit/indexer-interface"; import { id } from "@peerbit/indexer-interface"; import { Meta, ShallowMeta } from "@peerbit/log"; -import { type ReplicationChanges, type u32 } from "./replication-domain.js"; -import { MAX_U32, scaleToU32 } from "./role.js"; +import { + MAX_U32, + MAX_U64, + MAX_U64_NUMBER, + type NumberFromType, + type Numbers, +} from "./integers.js"; +import { type ReplicationChanges } from "./replication-domain.js"; import { groupByGidSync } from "./utils.js"; export enum ReplicationIntent { @@ -31,17 +43,29 @@ export enum ReplicationIntent { Strict = 1, // only replicate data in the segment to the specified replicator, not any other data } -export const getSegmentsFromOffsetAndRange = ( - offset: number, - factor: number, -): [[number, number], [number, number]] => { +export enum SyncStatus { + Unsynced = 0, + Synced = 1, +} + +const min = (a: number | bigint, b: number | bigint) => (a < b ? a : b); + +const getSegmentsFromOffsetAndRange = ( + offset: T, + factor: T, + zero: T, + max: T, +): [[T, T], [T, T]] => { let start1 = offset; + // @ts-ignore let end1Unscaled = offset + factor; // only add factor if it is not 1 to prevent numerical issues (like (0.9 + 1) % 1 => 0.8999999) - let end1 = Math.min(end1Unscaled, MAX_U32); + let end1: T = min(end1Unscaled, max) as T; return [ [start1, end1], - end1Unscaled > MAX_U32 - ? [0, (factor !== MAX_U32 ? offset + factor : offset) % MAX_U32] + // @ts-ignore + end1Unscaled > max + ? // @ts-ignore + [zero, (factor !== max ? 
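+				// wrapped case: the tail overflowing past `max` becomes a second
+				// segment that starts back at `zero`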
offset + factor : offset) % max] : [start1, end1], ]; }; @@ -49,11 +73,11 @@ export const getSegmentsFromOffsetAndRange = ( export const shouldAssigneToRangeBoundary = ( leaders: | Map< - string, - { - intersecting: boolean; - } - > + string, + { + intersecting: boolean; + } + > | false, minReplicas: number, ) => { @@ -68,7 +92,21 @@ export const shouldAssigneToRangeBoundary = ( } return assignedToRangeBoundary; }; -export class EntryReplicated { +export interface EntryReplicated { + id: string; // hash + coordinate + hash: string; + gid: string; + coordinate: NumberFromType; + wallTime: bigint; + assignedToRangeBoundary: boolean; + get meta(): ShallowMeta; +} + +export const isEntryReplicated = (x: any): x is EntryReplicated => { + return x instanceof EntryReplicatedU32 || x instanceof EntryReplicatedU64; +}; + +export class EntryReplicatedU32 implements EntryReplicated<"u32"> { @id({ type: "string" }) id: string; // hash + coordinate @@ -121,8 +159,78 @@ export class EntryReplicated { } } +export class EntryReplicatedU64 implements EntryReplicated<"u64"> { + @id({ type: "string" }) + id: string; // hash + coordinate + + @field({ type: "string" }) + hash: string; + + @field({ type: "string" }) + gid: string; + + @field({ type: "u64" }) + coordinate: bigint; + + @field({ type: "u64" }) + wallTime: bigint; + + @field({ type: "bool" }) + assignedToRangeBoundary: boolean; + + @field({ type: Uint8Array }) + private _meta: Uint8Array; + + private _metaResolved: ShallowMeta; + + constructor(properties: { + coordinate: bigint; + hash: string; + meta: Meta; + assignedToRangeBoundary: boolean; + }) { + this.coordinate = properties.coordinate; + this.hash = properties.hash; + this.gid = properties.meta.gid; + this.id = this.hash + "-" + this.coordinate; + this.wallTime = properties.meta.clock.timestamp.wallTime; + const shallow = + properties.meta instanceof Meta + ? 
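+			// only a compact ShallowMeta copy is serialized for the index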
new ShallowMeta(properties.meta) + : properties.meta; + this._meta = serialize(shallow); + this._metaResolved = deserialize(this._meta, ShallowMeta); + this._metaResolved = properties.meta; + this.assignedToRangeBoundary = properties.assignedToRangeBoundary; + } + + get meta(): ShallowMeta { + if (!this._metaResolved) { + this._metaResolved = deserialize(this._meta, ShallowMeta); + } + return this._metaResolved; + } +} + +export interface ReplicationRangeMessage { + id: Uint8Array; + timestamp: bigint; + get offset(): NumberFromType; + get factor(): NumberFromType; + mode: ReplicationIntent; + toReplicationRangeIndexable(key: PublicSignKey): ReplicationRangeIndexable; +} + +export const isReplicationRangeMessage = ( + x: any, +): x is ReplicationRangeMessage => { + return x instanceof ReplicationRangeMessage; +}; + +export abstract class ReplicationRangeMessage { } + @variant(0) -export class ReplicationRange { +export class ReplicationRangeMessageU32 extends ReplicationRangeMessage<"u32"> { @field({ type: Uint8Array }) id: Uint8Array; @@ -145,6 +253,7 @@ export class ReplicationRange { timestamp: bigint; mode: ReplicationIntent; }) { + super(); const { id, offset, factor, timestamp, mode } = properties; this.id = id; this._offset = offset; @@ -161,8 +270,65 @@ export class ReplicationRange { return this._offset; } - toReplicationRangeIndexable(key: PublicSignKey): ReplicationRangeIndexable { - return new ReplicationRangeIndexable({ + toReplicationRangeIndexable( + key: PublicSignKey, + ): ReplicationRangeIndexableU32 { + return new ReplicationRangeIndexableU32({ + id: this.id, + publicKeyHash: key.hashcode(), + offset: this.offset, + length: this.factor, + timestamp: this.timestamp, + mode: this.mode, + }); + } +} + +@variant(1) +export class ReplicationRangeMessageU64 extends ReplicationRangeMessage<"u64"> { + @field({ type: Uint8Array }) + id: Uint8Array; + + @field({ type: "u64" }) + timestamp: bigint; + + @field({ type: "u64" }) + private _offset: bigint; + + @field({ type: "u64" }) + private _factor: bigint; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor(properties: { + id: Uint8Array; + offset: bigint; + factor: bigint; + timestamp: bigint; + mode: ReplicationIntent; + }) { + super(); + const { id, offset, factor, timestamp, mode } = properties; + this.id = id; + this._offset = offset; + this._factor = factor; + this.timestamp = timestamp; + this.mode = mode; + } + + get factor(): bigint { + return this._factor; + } + + get offset(): bigint { + return this._offset; + } + + toReplicationRangeIndexable( + key: PublicSignKey, + ): ReplicationRangeIndexableU64 { + return new ReplicationRangeIndexableU64({ id: this.id, publicKeyHash: key.hashcode(), offset: this.offset, @@ -173,7 +339,89 @@ export class ReplicationRange { } } -export class ReplicationRangeIndexable { +class HashableSegmentU32 { + @field({ type: "u32" }) + start1!: number; + + @field({ type: "u32" }) + end1!: number; + + @field({ type: "u32" }) + start2!: number; + + @field({ type: "u32" }) + end2!: number; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor(properties: { + start1: number; + start2: number; + end1: number; + end2: number; + mode: ReplicationIntent; + }) { + this.start1 = properties.start1; + this.end1 = properties.end1; + this.start2 = properties.start2; + this.end2 = properties.end2; + this.mode = properties.mode; + } +} + +class HashableSegmentU64 { + @field({ type: "u64" }) + start1!: bigint; + + @field({ type: "u64" }) + end1!: bigint; + + @field({ type: "u64" 
}) + start2!: bigint; + + @field({ type: "u64" }) + end2!: bigint; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor(properties: { + start1: bigint; + start2: bigint; + end1: bigint; + end2: bigint; + mode: ReplicationIntent; + }) { + this.start1 = properties.start1; + this.end1 = properties.end1; + this.start2 = properties.start2; + this.end2 = properties.end2; + this.mode = properties.mode; + } +} + +export interface ReplicationRangeIndexable { + id: Uint8Array; + idString: string; + hash: string; + timestamp: bigint; + start1: NumberFromType; + end1: NumberFromType; + start2: NumberFromType; + end2: NumberFromType; + width: NumberFromType; + widthNormalized: number; + mode: ReplicationIntent; + wrapped: boolean; + toUniqueSegmentId(): string; + toReplicationRange(): ReplicationRangeMessage; + equalRange(other: ReplicationRangeIndexable): boolean; + overlaps(other: ReplicationRangeIndexable): boolean; +} + +export class ReplicationRangeIndexableU32 + implements ReplicationRangeIndexable<"u32"> { @id({ type: Uint8Array }) id: Uint8Array; @@ -204,7 +452,6 @@ export class ReplicationRangeIndexable { constructor( properties: { id?: Uint8Array; - normalized?: boolean; offset: number; length: number; mode?: ReplicationIntent; @@ -215,14 +462,14 @@ export class ReplicationRangeIndexable { this.hash = (properties as { publicKeyHash: string }).publicKeyHash || (properties as { publicKey: PublicSignKey }).publicKey.hashcode(); - if (!properties.normalized) { + /* if (!properties.normalized) */ { this.transform({ length: properties.length, offset: properties.offset }); - } else { + } /* else { this.transform({ length: scaleToU32(properties.length), offset: scaleToU32(properties.offset), }); - } + } */ this.mode = properties.mode ?? ReplicationIntent.NonStrict; this.timestamp = properties.timestamp || BigInt(0); @@ -232,6 +479,8 @@ export class ReplicationRangeIndexable { const ranges = getSegmentsFromOffsetAndRange( properties.offset, properties.length, + 0, + MAX_U32, ); this.start1 = Math.round(ranges[0][0]); this.end1 = Math.round(ranges[0][1]); @@ -244,11 +493,11 @@ export class ReplicationRangeIndexable { (this.end2 < this.end1 ? 
this.end2 - this.start2 : 0); if ( - this.start1 > 0xffffffff || - this.end1 > 0xffffffff || - this.start2 > 0xffffffff || - this.end2 > 0xffffffff || - this.width > 0xffffffff || + this.start1 > MAX_U32 || + this.end1 > MAX_U32 || + this.start2 > MAX_U32 || + this.end2 > MAX_U32 || + this.width > MAX_U32 || this.width < 0 ) { throw new Error("Segment coordinate out of bounds"); @@ -266,7 +515,7 @@ export class ReplicationRangeIndexable { ); } - overlaps(other: ReplicationRangeIndexable, checkOther = true): boolean { + overlaps(other: ReplicationRangeIndexableU32, checkOther = true): boolean { if ( this.contains(other.start1) || this.contains(other.start2) || @@ -282,7 +531,7 @@ export class ReplicationRangeIndexable { return false; } toReplicationRange() { - return new ReplicationRange({ + return new ReplicationRangeMessageU32({ id: this.id, offset: this.start1, factor: this.width, @@ -291,15 +540,6 @@ export class ReplicationRangeIndexable { }); } - distanceTo(point: number) { - let wrappedPoint = MAX_U32 - point; - return Math.min( - Math.abs(this.start1 - point), - Math.abs(this.end2 - point), - Math.abs(this.start1 - wrappedPoint), - Math.abs(this.end2 - wrappedPoint), - ); - } get wrapped() { return this.end2 < this.end1; } @@ -308,7 +548,7 @@ export class ReplicationRangeIndexable { return this.width / MAX_U32; } - equals(other: ReplicationRangeIndexable) { + equals(other: ReplicationRangeIndexableU32) { if ( equals(this.id, other.id) && this.hash === other.hash && @@ -326,7 +566,7 @@ export class ReplicationRangeIndexable { return false; } - equalRange(other: ReplicationRangeIndexable) { + equalRange(other: ReplicationRangeIndexableU32) { return ( this.start1 === other.start1 && this.end1 === other.end1 && @@ -348,74 +588,262 @@ export class ReplicationRangeIndexable { return `(hash ${this.hash} range: ${this.toString()})`; } - /* removeRange(other: ReplicationRangeIndexable): ReplicationRangeIndexable | ReplicationRangeIndexable[] { - if (!this.overlaps(other)) { - return this - } + toUniqueSegmentId() { + // return a unique id as a function of the segments location and the replication intent + const hashable = new HashableSegmentU32(this); + return sha256Base64Sync(serialize(hashable)); + } +} + +export class ReplicationRangeIndexableU64 + implements ReplicationRangeIndexable<"u64"> { + @id({ type: Uint8Array }) + id: Uint8Array; + + @field({ type: "string" }) + hash: string; + + @field({ type: "u64" }) + timestamp: bigint; + + @field({ type: "u64" }) + start1!: bigint; + + @field({ type: "u64" }) + end1!: bigint; + + @field({ type: "u64" }) + start2!: bigint; + + @field({ type: "u64" }) + end2!: bigint; + + @field({ type: "u64" }) + width!: bigint; + + @field({ type: "u8" }) + mode: ReplicationIntent; + + constructor( + properties: { + id?: Uint8Array; + offset: bigint; + length: bigint; + mode?: ReplicationIntent; + timestamp?: bigint; + } & ({ publicKeyHash: string } | { publicKey: PublicSignKey }), + ) { + this.id = properties.id ?? randomBytes(32); + this.hash = + (properties as { publicKeyHash: string }).publicKeyHash || + (properties as { publicKey: PublicSignKey }).publicKey.hashcode(); + /* if (!properties.normalized) */ { + this.transform({ length: properties.length, offset: properties.offset }); + } /* else { + this.transform({ + length: scaleToU32(properties.length), + offset: scaleToU32(properties.offset), + }); + } */ + + this.mode = properties.mode ?? 
ReplicationIntent.NonStrict;
+		this.timestamp = properties.timestamp || BigInt(0);
+	}
+
+	private transform(properties: { offset: bigint; length: bigint }) {
+		const ranges = getSegmentsFromOffsetAndRange(
+			properties.offset,
+			properties.length,
+			0n,
+			MAX_U64,
+		);
+		this.start1 = ranges[0][0];
+		this.end1 = ranges[0][1];
+		this.start2 = ranges[1][0];
+		this.end2 = ranges[1][1];
+
+		this.width =
+			this.end1 -
+			this.start1 +
+			(this.end2 < this.end1 ? this.end2 - this.start2 : 0n);
+
+		if (
+			this.start1 > MAX_U64 ||
+			this.end1 > MAX_U64 ||
+			this.start2 > MAX_U64 ||
+			this.end2 > MAX_U64 ||
+			this.width > MAX_U64 ||
+			this.width < 0n
+		) {
+			throw new Error("Segment coordinate out of bounds");
+		}
+	}
+
+	get idString() {
+		return toBase64(this.id);
+	}
+
+	contains(point: bigint) {
+		return (
+			(point >= this.start1 && point < this.end1) ||
+			(point >= this.start2 && point < this.end2)
+		);
+	}
 
-	let diff: ReplicationRangeIndexable[] = [];
-	let start1 = this.start1;
-	if (other.start1 > start1) {
-		diff.push(new ReplicationRangeIndexable({
-			id: this.id,
-			offset: this.start1,
-			length: other.start1 - this.start1,
-			mode: this.mode,
-			publicKeyHash: this.hash,
-			timestamp: this.timestamp,
-			normalized: false
-		}));
-
-		start1 = other.end2
+	overlaps(other: ReplicationRangeIndexableU64, checkOther = true): boolean {
+		if (
+			this.contains(other.start1) ||
+			this.contains(other.start2) ||
+			this.contains(other.end1 - 1n) ||
+			this.contains(other.end2 - 1n)
+		) {
+			return true;
 		}
-	if (other.end1 < this.end1) {
-		diff.push(new ReplicationRangeIndexable({
-			id: this.id,
-			offset: other.end1,
-			length: this.end1 - other.end1,
-			mode: this.mode,
-			publicKeyHash: this.hash,
-			timestamp: this.timestamp,
-			normalized: false
-		}));
+		if (checkOther) {
+			return other.overlaps(this, false);
 		}
+		return false;
+	}
+	toReplicationRange() {
+		return new ReplicationRangeMessageU64({
+			id: this.id,
+			offset: this.start1,
+			factor: this.width,
+			timestamp: this.timestamp,
+			mode: this.mode,
+		});
+	}
+
+	get wrapped() {
+		return this.end2 < this.end1;
+	}
+
+	get widthNormalized() {
+		return Number(this.width) / MAX_U64_NUMBER;
+	}
 
-	if (other.start2 > this.start2) {
-		diff.push(new ReplicationRangeIndexable({
-			id: this.id,
-			offset: this.start2,
-			length: other.start2 - this.start2,
-			mode: this.mode,
-			publicKeyHash: this.hash,
-			timestamp: this.timestamp,
-			normalized: false
-		}));
+	equals(other: ReplicationRangeIndexableU64) {
+		if (
+			equals(this.id, other.id) &&
+			this.hash === other.hash &&
+			this.timestamp === other.timestamp &&
+			this.mode === other.mode &&
+			this.start1 === other.start1 &&
+			this.end1 === other.end1 &&
+			this.start2 === other.start2 &&
+			this.end2 === other.end2 &&
+			this.width === other.width
+		) {
+			return true;
 		}
-	if (other.end2 < this.end2) {
-		diff.push(new ReplicationRangeIndexable({
-			id: this.id,
-			offset: other.end2,
-			length: this.end2 - other.end2,
-			mode: this.mode,
-			publicKeyHash: this.hash,
-			timestamp: this.timestamp,
-			normalized: false
-		}));
+		return false;
+	}
+
+	equalRange(other: ReplicationRangeIndexableU64) {
+		return (
+			this.start1 === other.start1 &&
+			this.end1 === other.end1 &&
+			this.start2 === other.start2 &&
+			this.end2 === other.end2
+		);
+	}
+
+	toString() {
+		let roundToTwoDecimals = (num: number) => Math.round(num * 100) / 100;
+
+		if (Math.abs(Number(this.start1 - this.start2)) < 0.0001) {
+			return `([${roundToTwoDecimals(Number(this.start1) / MAX_U64_NUMBER)}, ${roundToTwoDecimals(Number(this.start1) / MAX_U64_NUMBER)}])`;
 		}
+		return `([${roundToTwoDecimals(Number(this.start1) / MAX_U64_NUMBER)}, ${roundToTwoDecimals(Number(this.end1) / MAX_U64_NUMBER)}] [${roundToTwoDecimals(Number(this.start2) / MAX_U64_NUMBER)}, ${roundToTwoDecimals(Number(this.end2) / MAX_U64_NUMBER)}])`;
+	}
+
+	toStringDetailed() {
+		return `(hash ${this.hash} range: ${this.toString()})`;
+	}
 
-	return diff;
-	} */
+	toUniqueSegmentId() {
+		// return a unique id as a function of the segment's location and the replication intent
+		const hashable = new HashableSegmentU64(this);
+		return sha256Base64Sync(serialize(hashable));
+	}
 }
 
-const containingPoint = (
-	rects: Index,
-	point: number,
+export const mergeRanges = <R extends "u32" | "u64">(
+	segments: ReplicationRangeIndexable<R>[],
+	numbers: { zero: NumberFromType<R>; maxValue: NumberFromType<R> },
+) => {
+	if (segments.length === 0) {
+		throw new Error("No segments to merge");
+	}
+	if (segments.length === 1) {
+		return segments[0];
+	}
+
+	// only allow merging from the same publicKeyHash
+	const sameHash = segments.every((x) => x.hash === segments[0].hash);
+	if (!sameHash) {
+		throw new Error("Segments have different publicKeyHash");
+	}
+
+	// only allow merging segments of width 1 (trivial)
+	const sameLength = segments.every((x) => x.width === 1 || x.width === 1n);
+	if (!sameLength) {
+		throw new Error(
+			"Segments have different length, only merging of segments of length 1 is supported",
+		);
+	}
+
+	const sorted = segments.sort((a, b) => Number(a.start1 - b.start1));
+
+	let calculateLargeGap = (): [NumberFromType<R>, number] => {
+		let last = sorted[sorted.length - 1];
+		let largestArc = numbers.zero;
+		let largestArcIndex = -1;
+		for (let i = 0; i < sorted.length; i++) {
+			const current = sorted[i];
+			if (current.start1 !== last.start1) {
+				let arc = numbers.zero;
+				if (current.start1 < last.end2) {
+					arc += ((numbers.maxValue as any) - last.end2) as any;
+
+					arc += (current.start1 - numbers.zero) as any;
+				} else {
+					arc += (current.start1 - last.end2) as any;
+				}
+
+				if (arc > largestArc) {
+					largestArc = arc;
+					largestArcIndex = i;
+				}
+			}
+			last = current;
+		}
+
+		return [largestArc, largestArcIndex];
+	};
+	const [largestArc, largestArcIndex] = calculateLargeGap();
+
+	let totalLengthFinal = ((numbers.maxValue as any) -
+		largestArc) as NumberFromType<R>;
+
+	if (largestArcIndex === -1) {
+		return segments[0]; // all ranges are the same
+	}
+	// use segments[0] constructor to create a new object
+
+	const proto = segments[0].constructor;
+	return new (proto as any)({
+		length: totalLengthFinal,
+		offset: segments[largestArcIndex].start1,
+		publicKeyHash: segments[0].hash,
+	});
+};
+
+const containingPoint = <S extends Shape | undefined, R extends "u32" | "u64">(
+	rects: Index<ReplicationRangeIndexable<R>>,
+	point: NumberFromType<R>,
 	roleAgeLimit: number,
 	matured: boolean,
 	now: number,
@@ -423,7 +851,7 @@
 		shape?: S;
 		sort?: Sort[];
 	},
-): IndexIterator => {
+): IndexIterator<ReplicationRangeIndexable<R>, S> => {
 	// point is between 0 and 1, and the range can start at any offset between 0 and 1 and have length between 0 and 1
 
 	let queries = [
@@ -468,17 +896,18 @@
 	);
 };
 
-const getClosest = (
+const getClosest = <S extends Shape | undefined, R extends "u32" | "u64">(
 	direction: "above" | "below",
-	rects: Index,
-	point: number,
+	rects: Index<ReplicationRangeIndexable<R>>,
+	point: NumberFromType<R>,
 	roleAgeLimit: number,
 	matured: boolean,
 	now: number,
 	includeStrict: boolean,
+	numbers: Numbers<R>,
 	options?: { shape?: S },
-): IndexIterator => {
+): IndexIterator<ReplicationRangeIndexable<R>, S> => {
-	const createQueries = (p: number, equality: boolean) => {
+	const createQueries = (p: NumberFromType<R>, equality: boolean) => {
		let queries: 
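/* Worked example for mergeRanges (illustrative): merging three width-1 pins
   owned by the same key at offsets 100, 200 and 4_294_967_000 on the u32
   ring. The largest arc not covered by any pin runs from just past 200 up to
   4_294_967_000, so that gap is dropped: the merged range gets
   offset = 4_294_967_000 and length = MAX_U32 - largestGap, wrapping past
   zero so that it still covers the pins at 100 and 200. */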
Query[]; if (direction === "below") { queries = [ @@ -542,7 +971,10 @@ const getClosest = ( const iteratorWrapped = rects.iterate( { - query: createQueries(direction === "below" ? MAX_U32 : 0, true), + query: createQueries( + direction === "below" ? numbers.maxValue : numbers.zero, + true, + ), sort: [ direction === "below" ? new Sort({ key: ["end2"], direction: "desc" }) @@ -554,68 +986,80 @@ const getClosest = ( options, ); - return joinIterator([iterator, iteratorWrapped], point, direction); + return joinIterator( + [iterator, iteratorWrapped], + point, + direction, + numbers, + ); }; -export const hasCoveringRange = async ( - rects: Index, - range: ReplicationRangeIndexable, +export const getCoveringRangeQuery = ( + range: ReplicationRangeIndexable, +) => { + return [ + new Or([ + new And([ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: range.start1, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.GreaterOrEqual, + value: range.end1, + }), + ]), + new And([ + new IntegerCompare({ + key: "start2", + compare: Compare.LessOrEqual, + value: range.start1, + }), + new IntegerCompare({ + key: "end2", + compare: Compare.GreaterOrEqual, + value: range.end1, + }), + ]), + ]), + new Or([ + new And([ + new IntegerCompare({ + key: "start1", + compare: Compare.LessOrEqual, + value: range.start2, + }), + new IntegerCompare({ + key: "end1", + compare: Compare.GreaterOrEqual, + value: range.end2, + }), + ]), + new And([ + new IntegerCompare({ + key: "start2", + compare: Compare.LessOrEqual, + value: range.start2, + }), + new IntegerCompare({ + key: "end2", + compare: Compare.GreaterOrEqual, + value: range.end2, + }), + ]), + ]), + ]; +}; +export const iHaveCoveringRange = async ( + rects: Index>, + range: ReplicationRangeIndexable, ) => { return ( (await rects.count({ query: [ - new Or([ - new And([ - new IntegerCompare({ - key: "start1", - compare: Compare.LessOrEqual, - value: range.start1, - }), - new IntegerCompare({ - key: "end1", - compare: Compare.GreaterOrEqual, - value: range.end1, - }), - ]), - new And([ - new IntegerCompare({ - key: "start2", - compare: Compare.LessOrEqual, - value: range.start1, - }), - new IntegerCompare({ - key: "end2", - compare: Compare.GreaterOrEqual, - value: range.end1, - }), - ]), - ]), - new Or([ - new And([ - new IntegerCompare({ - key: "start1", - compare: Compare.LessOrEqual, - value: range.start2, - }), - new IntegerCompare({ - key: "end1", - compare: Compare.GreaterOrEqual, - value: range.end2, - }), - ]), - new And([ - new IntegerCompare({ - key: "start2", - compare: Compare.LessOrEqual, - value: range.start2, - }), - new IntegerCompare({ - key: "end2", - compare: Compare.GreaterOrEqual, - value: range.end2, - }), - ]), - ]), + ...getCoveringRangeQuery(range), new StringMatch({ key: "hash", value: range.hash, @@ -632,54 +1076,55 @@ export const hasCoveringRange = async ( ); }; -export const getDistance = ( - from: number, - to: number, +// TODO +export function getDistance( + from: any, + to: any, direction: "above" | "below" | "closest", - end = MAX_U32, -) => { - // if direction is 'above' only measure distance from 'from to 'to' from above. 
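/* Usage sketch for the rewritten getDistance (illustrative), on a ring of
   size end = 10; the values below follow directly from the branches above:
   getDistance(1, 9, "closest", 10) === 2  // |1 - 9| = 8, but the wrapped path is 10 - 8 = 2
   getDistance(1, 9, "above", 10) === 2
   getDistance(9, 1, "below", 10) === 2
*/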
- // i.e if from < to, then from needs to wrap around 0 to 1 and then to to - // if direction is 'below' and from > to, then from needs to wrap around 1 to 0 and then to to - // if direction is 'closest' then the shortest distance is the distance - - // also from is 0.1 and to is 0.9, then distance should be 0.2 not 0.8 - // same as for if from is 0.9 and to is 0.1, then distance should be 0.2 not 0.8 + end: any, +): any { + const abs = (value: number | bigint): number | bigint => + value < 0 ? -value : value; + const diff = (a: T, b: T): T => abs(a - b) as T; if (direction === "closest") { if (from === to) { - return 0; + return typeof from === "number" ? 0 : 0n; // returns 0 of the correct type } - - return Math.min(Math.abs(from - to), Math.abs(end - Math.abs(from - to))); + return diff(from, to) < diff(end, diff(from, to)) + ? diff(from, to) + : diff(end, diff(from, to)); } if (direction === "above") { if (from <= to) { - return Math.abs(end - to) + from; + return end - to + from; } return from - to; } if (direction === "below") { if (from >= to) { - return Math.abs(end - from) + to; + return end - from + to; } return to - from; } throw new Error("Invalid direction"); -}; +} -const joinIterator = ( - iterators: IndexIterator[], - point: number, +const joinIterator = ( + iterators: IndexIterator, S>[], + point: NumberFromType, direction: "above" | "below" | "closest", -): IndexIterator => { + numbers: Numbers, +): IndexIterator, S> => { let queues: { elements: { - result: IndexedResult>; - dist: number; + result: IndexedResult< + ReturnTypeFromShape, S> + >; + dist: NumberFromType; }[]; }[] = []; @@ -687,10 +1132,10 @@ const joinIterator = ( next: async ( count: number, ): Promise< - IndexedResults> + IndexedResults, S>> > => { let results: IndexedResults< - ReturnTypeFromShape + ReturnTypeFromShape, S> > = []; for (let i = 0; i < iterators.length; i++) { let queue = queues[i]; @@ -705,16 +1150,36 @@ const joinIterator = ( for (const el of res) { const closest = el.value; - let dist: number; + let dist: NumberFromType; if (direction === "closest") { - dist = Math.min( - getDistance(closest.start1, point, direction), - getDistance(closest.end2, point, direction), + dist = numbers.min( + getDistance( + closest.start1, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType, + getDistance( + closest.end2, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType, ); } else if (direction === "above") { - dist = getDistance(closest.start1, point, direction); + dist = getDistance( + closest.start1, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType; } else if (direction === "below") { - dist = getDistance(closest.end2, point, direction); + dist = getDistance( + closest.end2, + point as any, + direction, + numbers.maxValue as any, + ) as NumberFromType; } else { throw new Error("Invalid direction"); } @@ -728,7 +1193,7 @@ const joinIterator = ( for (let i = 0; i < count; i++) { let closestQueue = -1; - let closestDist = Number.MAX_SAFE_INTEGER; + let closestDist: bigint | number = Number.MAX_VALUE; for (let j = 0; j < queues.length; j++) { let queue = queues[j]; if (queue && queue.elements.length > 0) { @@ -763,7 +1228,7 @@ const joinIterator = ( }, all: async () => { let results: IndexedResult< - ReturnTypeFromShape + ReturnTypeFromShape, S> >[] = []; for (const iterator of iterators) { let res = await iterator.all(); @@ -775,17 +1240,19 @@ const joinIterator = ( }; const getClosestAround = < - S extends (Shape & { 
timestamp: true }) | undefined = undefined, + S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( - peers: Index, - point: number, + peers: Index>, + point: NumberFromType, roleAge: number, now: number, includeStrictBelow: boolean, includeStrictAbove: boolean, + numbers: Numbers, options?: { shape?: S }, ) => { - const closestBelow = getClosest( + const closestBelow = getClosest( "below", peers, point, @@ -793,9 +1260,10 @@ const getClosestAround = < true, now, includeStrictBelow, + numbers, options, ); - const closestAbove = getClosest( + const closestAbove = getClosest( "above", peers, point, @@ -803,9 +1271,10 @@ const getClosestAround = < true, now, includeStrictAbove, + numbers, options, ); - const containing = containingPoint( + const containing = containingPoint( peers, point, roleAge, @@ -816,24 +1285,25 @@ const getClosestAround = < return iteratorInSeries( containing, - joinIterator([closestBelow, closestAbove], point, "closest"), + joinIterator([closestBelow, closestAbove], point, "closest", numbers), ); }; -const collectNodesAroundPoint = async ( +const collectNodesAroundPoint = async ( roleAge: number, - peers: Index, + peers: Index>, collector: ( rect: { hash: string }, matured: boolean, - interescting: boolean, + intersecting: boolean, ) => void, - point: u32, + point: NumberFromType, now: number, + numbers: Numbers, done: () => boolean = () => true, ) => { /* let shape = { timestamp: true, hash: true } as const */ - const containing = containingPoint( + const containing = containingPoint( peers, point, 0, @@ -849,28 +1319,33 @@ const collectNodesAroundPoint = async ( return; } - const closestBelow = getClosest( + const closestBelow = getClosest( "below", peers, point, 0, true, now, - false /* , { shape } */, + false, + numbers, + /* , { shape } */ ); - const closestAbove = getClosest( + const closestAbove = getClosest( "above", peers, point, 0, true, now, - false /* , { shape } */, + false, + numbers, + /* , { shape } */ ); - const aroundIterator = joinIterator( + const aroundIterator = joinIterator( [closestBelow, closestAbove], point, "closest", + numbers, ); while (aroundIterator.done() !== true && done() !== true) { const res = await aroundIterator.next(1); @@ -883,14 +1358,6 @@ const collectNodesAroundPoint = async ( } }; -export const getEvenlySpacedU32 = (from: number, count: number) => { - let ret: number[] = new Array(count); - for (let i = 0; i < count; i++) { - ret[i] = Math.round(from + (i * MAX_U32) / count) % MAX_U32; - } - return ret; -}; - export const isMatured = ( segment: { timestamp: bigint }, now: number, @@ -902,10 +1369,11 @@ export const isMatured = ( // will return a list of peers that want to replicate the data, // but also if necessary a list of peers that are responsible for the data // but have not explicitly replicating a range that cover the cursor point -export const getSamples = async ( - cursor: u32[], - peers: Index, +export const getSamples = async ( + cursor: NumberFromType[], + peers: Index>, roleAge: number, + numbers: Numbers, ): Promise> => { const leaders: Map = new Map(); if (!peers) { @@ -935,6 +1403,7 @@ export const getSamples = async ( }, cursor[i], now, + numbers, () => { if (maturedLeaders.size > i) { return true; @@ -947,16 +1416,17 @@ export const getSamples = async ( return leaders; }; -const fetchOne = async ( - iterator: IndexIterator, +const fetchOne = async ( + iterator: IndexIterator, S>, ) => { const value = await iterator.next(1); await iterator.close(); return value[0]?.value; }; 
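A note on usage (illustrative, not part of the patch): getSamples expects the caller to pre-compute one cursor per requested replica. A resolution-generic stand-in for the removed getEvenlySpacedU32 could look like this, assuming only the Numbers<R> helpers already used in this file (maxValue, divRound); the name evenlySpacedCursors is hypothetical:

const evenlySpacedCursors = <R extends "u32" | "u64">(
	from: NumberFromType<R>,
	count: number,
	numbers: Numbers<R>,
): NumberFromType<R>[] => {
	const step = numbers.divRound(numbers.maxValue, count); // ring circumference / count
	const ret: NumberFromType<R>[] = new Array(count);
	let cursor: any = from;
	for (let i = 0; i < count; i++) {
		ret[i] = (cursor % (numbers.maxValue as any)) as NumberFromType<R>;
		cursor = cursor + (step as any);
	}
	return ret;
};

// e.g. await getSamples(evenlySpacedCursors(point, minReplicas, numbers), peers, roleAge, numbers)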
-export const minimumWidthToCover = async ( +export const minimumWidthToCover = async ( minReplicas: number /* , replicatorCount: number */, + numbers: Numbers, ) => { /* minReplicas = Math.min(minReplicas, replicatorCount); */ // TODO do we need this? @@ -965,34 +1435,29 @@ export const minimumWidthToCover = async ( // to make sure we reach sufficient amount of nodes such that at least one one has // the entry we are looking for - let widthToCoverScaled = Math.round(MAX_U32 / minReplicas); + let widthToCoverScaled = numbers.divRound(numbers.maxValue, minReplicas); return widthToCoverScaled; }; -export const getCoverSet = async (properties: { - peers: Index; - start: number | PublicSignKey | undefined; - widthToCoverScaled: number; +export const getCoverSet = async (properties: { + peers: Index>; + start: NumberFromType | PublicSignKey | undefined; + widthToCoverScaled: NumberFromType; roleAge: number; - intervalWidth?: number; + numbers: Numbers; eager?: - | { - unmaturedFetchCoverSize?: number; - } - | boolean; + | { + unmaturedFetchCoverSize?: number; + } + | boolean; }): Promise> => { - let intervalWidth: number = properties.intervalWidth ?? MAX_U32; const { peers, start, widthToCoverScaled, roleAge } = properties; const now = Date.now(); - const { startNode, startLocation, endLocation } = await getStartAndEnd( - peers, - start, - widthToCoverScaled, - roleAge, - now, - intervalWidth, - ); + const { startNode, startLocation, endLocation } = await getStartAndEnd< + undefined, + R + >(peers, start, widthToCoverScaled, roleAge, now, properties.numbers); let ret = new Set(); @@ -1034,29 +1499,41 @@ export const getCoverSet = async (properties: { ret.add(current.hash); const resolveNextContaining = async ( - nextLocation: number, + nextLocation: NumberFromType, roleAge: number, ) => { let next = await fetchOne( - containingPoint(peers, nextLocation, roleAge, true, now, { + containingPoint(peers, nextLocation, roleAge, true, now, { sort: [new Sort({ key: "end2", direction: SortDirection.DESC })], }), ); // get entersecting sort by largest end2 return next; }; - const resolveNextAbove = async (nextLocation: number, roleAge: number) => { + const resolveNextAbove = async ( + nextLocation: NumberFromType, + roleAge: number, + ) => { // if not get closest from above - let next = await fetchOne( - getClosest("above", peers, nextLocation, roleAge, true, now, true), + let next = await fetchOne( + getClosest( + "above", + peers, + nextLocation, + roleAge, + true, + now, + true, + properties.numbers, + ), ); return next; }; const resolveNext = async ( - nextLocation: number, + nextLocation: NumberFromType, roleAge: number, - ): Promise<[ReplicationRangeIndexable, boolean]> => { + ): Promise<[ReplicationRangeIndexable, boolean]> => { const containing = await resolveNextContaining(nextLocation, roleAge); if (containing) { return [containing, true]; @@ -1067,13 +1544,16 @@ export const getCoverSet = async (properties: { // fill the middle let wrappedOnce = current.end2 < current.end1; - let coveredLength = 0; - const addLength = (from: number) => { + let coveredLength = properties.numbers.zero; + const addLength = (from: NumberFromType) => { if (current.end2 < from || current.wrapped) { wrappedOnce = true; - coveredLength += MAX_U32 - from; + // @ts-ignore + coveredLength += properties.numbers.maxValue - from; + // @ts-ignore coveredLength += current.end2; } else { + // @ts-ignore coveredLength += current.end1 - from; } }; @@ -1085,7 +1565,7 @@ export const getCoverSet = async (properties: { while ( 
maturedCoveredLength < widthToCoverScaled && // eslint-disable-line no-unmodified-loop-condition - coveredLength <= MAX_U32 // eslint-disable-line no-unmodified-loop-condition + coveredLength <= properties.numbers.maxValue // eslint-disable-line no-unmodified-loop-condition ) { let nextCandidate = await resolveNext(nextLocation, roleAge); /* let fromAbove = false; */ @@ -1119,14 +1599,34 @@ export const getCoverSet = async (properties: { if ( !isLast || nextCandidate[1] || - Math.min( - getDistance(last.start1, endLocation, "closest"), - getDistance(last.end2, endLocation, "closest"), + properties.numbers.min( + getDistance( + last.start1, + endLocation, + "closest", + properties.numbers.maxValue, + ), + getDistance( + last.end2, + endLocation, + "closest", + properties.numbers.maxValue, + ), ) > - Math.min( - getDistance(current.start1, endLocation, "closest"), - getDistance(current.end2, endLocation, "closest"), - ) + properties.numbers.min( + getDistance( + current.start1, + endLocation, + "closest", + properties.numbers.maxValue, + ), + getDistance( + current.end2, + endLocation, + "closest", + properties.numbers.maxValue, + ), + ) ) { ret.add(current.hash); } @@ -1141,9 +1641,9 @@ export const getCoverSet = async (properties: { nextLocation = endIsWrapped ? wrappedOnce - ? Math.min(current.end2, endLocation) + ? properties.numbers.min(current.end2, endLocation) : current.end2 - : Math.min(current.end2, endLocation); + : properties.numbers.min(current.end2, endLocation); } start instanceof PublicSignKey && ret.add(start.hashcode()); @@ -1153,19 +1653,21 @@ export const getCoverSet = async (properties: { // reduce the change set to only regions that are changed for each peer // i.e. subtract removed regions from added regions, and vice versa const result = new Map(); - + for (const addedChange of changes.added ?? []) { let prev = result.get(addedChange.hash) ?? 
[]; for (const [_hash, ranges] of result.entries()) { for (const r of ranges) { - + } } } } */ -const matchRangeQuery = (range: ReplicationRangeIndexable) => { +export const matchEntriesInRangeQuery = ( + range: ReplicationRangeIndexable, +) => { let ors = []; ors.push( new And([ @@ -1199,17 +1701,17 @@ const matchRangeQuery = (range: ReplicationRangeIndexable) => { return new Or(ors); }; -export const toRebalance = ( +export const toRebalance = ( changes: ReplicationChanges, - index: Index, -): AsyncIterable<{ gid: string; entries: EntryReplicated[] }> => { + index: Index>, +): AsyncIterable<{ gid: string; entries: EntryReplicated[] }> => { const assignedRangesQuery = (changes: ReplicationChanges) => { let ors: Query[] = []; for (const change of changes) { - const matchRange = matchRangeQuery(change.range); + const matchRange = matchEntriesInRangeQuery(change.range); if (change.type === "updated") { // assuming a range is to be removed, is this entry still enoughly replicated - const prevMatchRange = matchRangeQuery(change.prev); + const prevMatchRange = matchEntriesInRangeQuery(change.prev); ors.push(prevMatchRange); ors.push(matchRange); } else { @@ -1249,12 +1751,14 @@ export const toRebalance = ( }; export const fetchOneFromPublicKey = async < - S extends (Shape & { timestamp: true }) | undefined = undefined, + S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( publicKey: PublicSignKey, - index: Index, + index: Index>, roleAge: number, now: number, + numbers: Numbers, options?: { shape: S; }, @@ -1271,13 +1775,14 @@ export const fetchOneFromPublicKey = async < if (node) { if (!isMatured(node, now, roleAge)) { const matured = await fetchOne( - getClosestAround( + getClosestAround( index, node.start1, roleAge, now, false, false, + numbers, options, ), ); @@ -1291,33 +1796,36 @@ export const fetchOneFromPublicKey = async < export const getStartAndEnd = async < S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( - peers: Index, - start: number | PublicSignKey | undefined | undefined, - widthToCoverScaled: number, + peers: Index>, + start: NumberFromType | PublicSignKey | undefined | undefined, + widthToCoverScaled: NumberFromType, roleAge: number, now: number, - intervalWidth: number, + numbers: Numbers, options?: { shape: S }, ): Promise<{ - startNode: ReturnTypeFromShape | undefined; - startLocation: number; - endLocation: number; + startNode: ReturnTypeFromShape, S> | undefined; + startLocation: NumberFromType; + endLocation: NumberFromType; }> => { // find a good starting point - let startNode: ReturnTypeFromShape | undefined = - undefined; - let startLocation: number | undefined = undefined; + let startNode: + | ReturnTypeFromShape, S> + | undefined = undefined; + let startLocation: NumberFromType | undefined = undefined; - const nodeFromPoint = async (point = scaleToU32(Math.random())) => { + const nodeFromPoint = async (point = numbers.random()) => { startLocation = point; - startNode = await fetchOneClosest( + startNode = await fetchOneClosest( peers, startLocation, roleAge, now, false, true, + numbers, options, ); }; @@ -1329,6 +1837,7 @@ export const getStartAndEnd = async < peers, roleAge, now, + numbers, options, ); if (!startNode) { @@ -1337,62 +1846,73 @@ export const getStartAndEnd = async < } else { startLocation = startNode.start1; } - } else if (typeof start === "number") { + } else if (typeof start === "number" || typeof start === "bigint") { await nodeFromPoint(start); } else { await nodeFromPoint(); } if 
(!startNode || startLocation == null) { - return { startNode: undefined, startLocation: 0, endLocation: 0 }; + return { + startNode: undefined, + startLocation: numbers.zero, + endLocation: numbers.zero, + }; } - let endLocation = startLocation + widthToCoverScaled; - if (intervalWidth != null) { - endLocation = endLocation % intervalWidth; - } + // @ts-ignore + let endLocation: T = (startLocation + widthToCoverScaled) % numbers.maxValue; // if start location is after endLocation and startNode is strict then return undefined because this is not a node we want to choose - let coveredDistanceToStart = 0; + let coveredDistanceToStart = numbers.zero; if (startNode.start1 < startLocation) { - coveredDistanceToStart += intervalWidth - startLocation + startNode.start1; + coveredDistanceToStart += + numbers.maxValue - startLocation + startNode.start1; } else { - coveredDistanceToStart += startNode.start1 - startLocation; + coveredDistanceToStart += ((startNode.start1 as any) - + startLocation) as any; } if ( startNode.mode === ReplicationIntent.Strict && coveredDistanceToStart > widthToCoverScaled ) { - return { startNode: undefined, startLocation: 0, endLocation: 0 }; + return { + startNode: undefined, + startLocation: numbers.zero, + endLocation: numbers.zero, + }; } return { startNode, - startLocation: Math.round(startLocation), - endLocation: Math.round(endLocation), + startLocation, + endLocation, }; }; export const fetchOneClosest = < - S extends (Shape & { timestamp: true }) | undefined = undefined, + S extends (Shape & { timestamp: true }) | undefined, + R extends "u32" | "u64", >( - peers: Index, - point: number, + peers: Index>, + point: NumberFromType, roleAge: number, now: number, includeStrictBelow: boolean, includeStrictAbove: boolean, + numbers: Numbers, options?: { shape?: S }, ) => { - return fetchOne( - getClosestAround( + return fetchOne( + getClosestAround( peers, point, roleAge, now, includeStrictBelow, includeStrictAbove, + numbers, options, ), ); diff --git a/packages/programs/data/shared-log/src/replication-domain-hash.ts b/packages/programs/data/shared-log/src/replication-domain-hash.ts index a11975ce2..aad127ab2 100644 --- a/packages/programs/data/shared-log/src/replication-domain-hash.ts +++ b/packages/programs/data/shared-log/src/replication-domain-hash.ts @@ -1,41 +1,67 @@ -import { BinaryReader, BinaryWriter } from "@dao-xyz/borsh"; +import { BinaryWriter } from "@dao-xyz/borsh"; import { sha256 } from "@peerbit/crypto"; import type { ShallowOrFullEntry } from "@peerbit/log"; -import type { EntryReplicated } from "./ranges.js"; +import { bytesToNumber, createNumbers } from "./integers.js"; +import { type EntryReplicated } from "./ranges.js"; import { type Log, type ReplicationDomain, type ReplicationDomainMapper, } from "./replication-domain.js"; -export const hashToU32 = (hash: Uint8Array) => { +/* const hashToU32 = (hash: Uint8Array) => { const seedNumber = new BinaryReader( hash.subarray(hash.length - 4, hash.length), ).u32(); return seedNumber; }; -const hashTransformer: ReplicationDomainMapper = async ( - entry: ShallowOrFullEntry | EntryReplicated, -) => { - // For a fixed set or members, the choosen leaders will always be the same (address invariant) - // This allows for that same content is always chosen to be distributed to same peers, to remove unecessary copies - // Convert this thing we wan't to distribute to 8 bytes so we get can convert it into a u64 - // modulus into an index - const utf8writer = new BinaryWriter(); - utf8writer.string(entry.meta.gid); 
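/* For intuition (illustrative): the new mapper keeps the same recipe as the
   code removed here, but the trailing-bytes-to-integer step is delegated to
   bytesToNumber(resolution) from ./integers.js, assumed to mirror the old
   hashToU32 (last 4 bytes as a u32, or last 8 as a u64):

   const toPoint = bytesToNumber("u64");
   const writer = new BinaryWriter();
   writer.string(entry.meta.gid);
   const point = toPoint(await sha256(writer.finalize())); // 0n .. MAX_U64
*/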
- const seed = await sha256(utf8writer.finalize()); +const hashToU64 = (hash: Uint8Array): bigint => { + const seedNumber = new BinaryReader( + hash.subarray(hash.length - 4, hash.length), // + ).u64(); + return seedNumber; +}; + */ - // convert hash of slot to a number - return hashToU32(seed); +const hashTransformer = ( + resolution: R, +): ReplicationDomainMapper => { + const numberConverter = bytesToNumber(resolution); + if (resolution === "u32") { + return (async (entry: ShallowOrFullEntry | EntryReplicated) => { + const utf8writer = new BinaryWriter(); + utf8writer.string(entry.meta.gid); + const seed = await sha256(utf8writer.finalize()); + return numberConverter(seed); + }) as ReplicationDomainMapper; + } else if (resolution === "u64") { + return (async (entry: ShallowOrFullEntry | EntryReplicated) => { + const utf8writer = new BinaryWriter(); + utf8writer.string(entry.meta.gid); + const seed = await sha256(utf8writer.finalize()); + return numberConverter(seed); + }) as ReplicationDomainMapper; + } else { + throw new Error("Unsupported resolution"); + } }; -export type ReplicationDomainHash = ReplicationDomain; -export const createReplicationDomainHash: () => ReplicationDomainHash = () => { +export type ReplicationDomainHash = ReplicationDomain< + undefined, + any, + R +>; + +export const createReplicationDomainHash = ( + resolution: R, +): ReplicationDomainHash => { return { + resolution, type: "hash", - fromEntry: hashTransformer, + numbers: createNumbers(resolution), + fromEntry: hashTransformer(resolution), fromArgs: async (args: undefined, log: Log) => { return { offset: log.node.identity.publicKey, diff --git a/packages/programs/data/shared-log/src/replication-domain-time.ts b/packages/programs/data/shared-log/src/replication-domain-time.ts index 96254839d..1a924404d 100644 --- a/packages/programs/data/shared-log/src/replication-domain-time.ts +++ b/packages/programs/data/shared-log/src/replication-domain-time.ts @@ -1,9 +1,9 @@ import type { ShallowOrFullEntry } from "@peerbit/log"; -import type { EntryReplicated } from "./ranges.js"; +import { createNumbers } from "./integers.js"; +import { type EntryReplicated } from "./ranges.js"; import { type ReplicationDomain, type ReplicationDomainMapper, - type u32, } from "./replication-domain.js"; type TimeUnit = "seconds" | "milliseconds" | "microseconds" | "nanoseconds"; @@ -24,11 +24,11 @@ const scalarMilliToUnit = { export const fromEntry = ( origin: Date, unit: TimeUnit = "milliseconds", -): ReplicationDomainMapper => { +): ReplicationDomainMapper => { const scalar = scalarNanoToUnit[unit]; const originTime = +origin / scalarMilliToUnit[unit]; - const fn = (entry: ShallowOrFullEntry | EntryReplicated) => { + const fn = (entry: ShallowOrFullEntry | EntryReplicated<"u32">) => { const cursor = entry.meta.clock.timestamp.wallTime / scalar; return Math.round(Number(cursor) - originTime); }; @@ -37,9 +37,9 @@ export const fromEntry = ( type TimeRange = { from: number; to: number }; -export type ReplicationDomainTime = ReplicationDomain & { - fromTime: (time: number | Date) => u32; - fromDuration: (duration: number) => u32; +export type ReplicationDomainTime = ReplicationDomain & { + fromTime: (time: number | Date) => number; + fromDuration: (duration: number) => number; }; export const createReplicationDomainTime = ( @@ -48,17 +48,19 @@ export const createReplicationDomainTime = ( ): ReplicationDomainTime => { const originScaled = +origin * scalarMilliToUnit[unit]; const fromMilliToUnit = scalarMilliToUnit[unit]; - const fromTime = 
(time: number | Date): u32 => { + const fromTime = (time: number | Date): number => { return ( (typeof time === "number" ? time : +time * fromMilliToUnit) - originScaled ); }; - const fromDuration = (duration: number): u32 => { + const fromDuration = (duration: number): number => { return duration; }; return { + resolution: "u32", type: "time", + numbers: createNumbers("u32"), fromTime, fromDuration, fromEntry: fromEntry(origin, unit), diff --git a/packages/programs/data/shared-log/src/replication-domain.ts b/packages/programs/data/shared-log/src/replication-domain.ts index c22c6abfb..f9a7c43f6 100644 --- a/packages/programs/data/shared-log/src/replication-domain.ts +++ b/packages/programs/data/shared-log/src/replication-domain.ts @@ -2,14 +2,14 @@ import type { PublicSignKey } from "@peerbit/crypto"; import { type Index } from "@peerbit/indexer-interface"; import type { Entry, ShallowEntry } from "@peerbit/log"; import { debounceAcculmulator } from "./debounce.js"; -import type { EntryReplicated, ReplicationRangeIndexable } from "./ranges.js"; +import type { ReplicationRangeIndexable } from "./index.js"; +import type { NumberFromType, Numbers } from "./integers.js"; +import type { EntryReplicated } from "./ranges.js"; import type { ReplicationLimits } from "./replication.js"; -import { MAX_U32 } from "./role.js"; -export type u32 = number; -export type ReplicationDomainMapper = ( - entry: Entry | ShallowEntry | EntryReplicated, -) => Promise | u32; +export type ReplicationDomainMapper = ( + entry: Entry | ShallowEntry | EntryReplicated, +) => Promise> | NumberFromType; export type Log = { replicas: ReplicationLimits; @@ -19,7 +19,7 @@ export type Log = { }; }; syncInFlight: Map>; - replicationIndex: Index; + replicationIndex: Index>; getDefaultMinRoleAge: () => Promise; }; export type ReplicationDomainCoverSet = ( @@ -28,24 +28,24 @@ export type ReplicationDomainCoverSet = ( args: Args, ) => Promise | string[]; // minimum set of peers that covers all the data -type CoverRange = { - offset: number | PublicSignKey; - length?: number; +type CoverRange = { + offset: T | PublicSignKey; + length?: T; }; export type ReplicationChanges = ReplicationChange[]; export type ReplicationChange = | { type: "added"; - range: ReplicationRangeIndexable; + range: ReplicationRangeIndexable; } | { type: "removed"; - range: ReplicationRangeIndexable; + range: ReplicationRangeIndexable; } | { type: "updated"; - range: ReplicationRangeIndexable; - prev: ReplicationRangeIndexable; + range: ReplicationRangeIndexable; + prev: ReplicationRangeIndexable; }; export const mergeReplicationChanges = ( @@ -90,24 +90,16 @@ export const debounceAggregationChanges = ( ); }; -export type ReplicationDomain = { +export type ReplicationDomain = { + resolution: R; type: string; - fromEntry: ReplicationDomainMapper; + numbers: Numbers; + fromEntry: ReplicationDomainMapper; fromArgs: ( args: Args | undefined, log: Log, - ) => Promise | CoverRange; - - // to rebalance will return an async iterator of objects that will be added to the log - /* toRebalance( - change: ReplicationChange, - index: Index - ): AsyncIterable<{ gid: string, entries: { coordinate: number, hash: string }[] }> | Promise>; */ -}; - -export const uniformToU32 = (cursor: number) => { - return cursor * MAX_U32; + ) => Promise>> | CoverRange>; }; export type ExtractDomainArgs = - T extends ReplicationDomain ? Args : never; + T extends ReplicationDomain ? 
Args : never; diff --git a/packages/programs/data/shared-log/src/replication.ts b/packages/programs/data/shared-log/src/replication.ts index e0c539912..a35b398c9 100644 --- a/packages/programs/data/shared-log/src/replication.ts +++ b/packages/programs/data/shared-log/src/replication.ts @@ -11,8 +11,9 @@ import { type Index } from "@peerbit/indexer-interface"; import { TransportMessage } from "./message.js"; import { ReplicationIntent, - ReplicationRange, type ReplicationRangeIndexable, + ReplicationRangeMessage, + ReplicationRangeMessageU32, } from "./ranges.js"; import { Observer, Replicator, Role } from "./role.js"; @@ -20,7 +21,7 @@ export type ReplicationLimits = { min: MinReplicas; max?: MinReplicas }; interface SharedLog { replicas: Partial; - replicationIndex: Index | undefined; + replicationIndex: Index> | undefined; } export class MinReplicas { @@ -67,7 +68,7 @@ export class ResponseRoleMessage extends TransportMessage { segments: this.role instanceof Replicator ? this.role.segments.map((x) => { - return new ReplicationRange({ + return new ReplicationRangeMessageU32({ id: randomBytes(32), offset: x.offset, factor: x.factor, @@ -82,10 +83,10 @@ export class ResponseRoleMessage extends TransportMessage { @variant([1, 2]) export class AllReplicatingSegmentsMessage extends TransportMessage { - @field({ type: vec(ReplicationRange) }) - segments: ReplicationRange[]; + @field({ type: vec(ReplicationRangeMessage) }) + segments: ReplicationRangeMessage[]; - constructor(properties: { segments: ReplicationRange[] }) { + constructor(properties: { segments: ReplicationRangeMessage[] }) { super(); this.segments = properties.segments; } @@ -93,10 +94,10 @@ export class AllReplicatingSegmentsMessage extends TransportMessage { @variant([1, 3]) export class AddedReplicationSegmentMessage extends TransportMessage { - @field({ type: vec(ReplicationRange) }) - segments: ReplicationRange[]; + @field({ type: vec(ReplicationRangeMessage) }) + segments: ReplicationRangeMessage[]; - constructor(properties: { segments: ReplicationRange[] }) { + constructor(properties: { segments: ReplicationRangeMessage[] }) { super(); this.segments = properties.segments; } diff --git a/packages/programs/data/shared-log/src/role.ts b/packages/programs/data/shared-log/src/role.ts index 085af6be2..9d871be99 100644 --- a/packages/programs/data/shared-log/src/role.ts +++ b/packages/programs/data/shared-log/src/role.ts @@ -4,10 +4,7 @@ * Roles have been replaces with just replication segments. */ import { field, variant, vec } from "@dao-xyz/borsh"; - -export const MAX_U32 = 4294967295; -export const HALF_MAX_U32 = 2147483647; // rounded down -export const scaleToU32 = (value: number) => Math.round(MAX_U32 * value); +import { MAX_U32, denormalizer } from "./integers"; export const overlaps = (x1: number, x2: number, y1: number, y2: number) => { if (x1 <= y2 && y1 <= x2) { @@ -40,6 +37,7 @@ export class Observer extends Role { export const REPLICATOR_TYPE_VARIANT = new Uint8Array([2]); +const denormalizeru32 = denormalizer("u32"); export class RoleReplicationSegment { @field({ type: "u64" }) timestamp: bigint; @@ -61,12 +59,12 @@ export class RoleReplicationSegment { } this.timestamp = timestamp ?? 
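/* Aside (illustrative): denormalizer("u32") is assumed to be the fixed-point
   replacement for the removed scaleToU32, mapping a normalized [0, 1] value
   onto the u32 ring:

   const scale = denormalizer("u32");
   scale(0.5); // === Math.round(0.5 * 4294967295) === 2147483648
   scale(1);   // === 4294967295 === MAX_U32
*/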
BigInt(+new Date());
-		this.factorNominator = Math.round(MAX_U32 * factor);
+		this.factorNominator = denormalizeru32(factor);
 
 		if (offset > 1 || offset < 0) {
 			throw new Error("Expecting offset to be between 0 and 1, got: " + offset);
 		}
-		this.offsetNominator = Math.round(MAX_U32 * offset);
+		this.offsetNominator = denormalizeru32(offset);
 	}
 
 	get factor(): number {
diff --git a/packages/programs/data/shared-log/src/sync.ts b/packages/programs/data/shared-log/src/sync.ts
new file mode 100644
index 000000000..7a555218f
--- /dev/null
+++ b/packages/programs/data/shared-log/src/sync.ts
@@ -0,0 +1,115 @@
+import type { PublicSignKey } from "@peerbit/crypto";
+import type { Index } from "@peerbit/indexer-interface";
+import init, { DecoderWrapper, EncoderWrapper } from "@peerbit/riblt";
+import {
+	type EntryReplicated,
+	type ReplicationRangeIndexable,
+	type ReplicationRangeIndexableU32,
+	ReplicationRangeMessage,
+	getCoveringRangeQuery,
+	matchEntriesInRangeQuery,
+} from "./ranges.js";
+
+const wasmFetch = async (input: any) =>
+	(await (await import("node:fs/promises")).readFile(input)) as any; // TODO fix types.
+globalThis.fetch = wasmFetch; // wasm-pack build --target web generates a loader that uses 'fetch', but node fetch can not load wasm yet, so we need to do this
+await init();
+
+export type Symbol = {
+	count: bigint;
+	hash: bigint;
+	symbol: bigint;
+};
+class RangeToEncoders {
+	encoders: Map<string, EncoderWrapper>;
+
+	constructor(
+		readonly me: PublicSignKey,
+		readonly rangeIndex: Index<ReplicationRangeIndexableU32>,
+		readonly entryIndex: Index<EntryReplicated<"u32">>,
+	) {
+		this.encoders = new Map();
+	}
+
+	async build() {
+		// for all ranges in rangeIndex that belong to me,
+		// fetch all cursors from entryIndex and build an encoder keyed by the range's segment id
+		for (const range of await this.rangeIndex
+			.iterate({ query: { hash: this.me.hashcode() } })
+			.all()) {
+			const entries = await this.entryIndex
+				.iterate({ query: matchEntriesInRangeQuery(range.value) })
+				.all();
+			const encoder = new EncoderWrapper();
+			for (const entry of entries) {
+				encoder.add_symbol(BigInt(entry.value.coordinate));
+			}
+			this.encoders.set(range.value.toUniqueSegmentId(), encoder);
+		}
+	}
+
+	createSymbolGenerator(range: ReplicationRangeIndexable<"u32">): {
+		next: () => Symbol;
+		free: () => void;
+	} {
+		let encoder = this.encoders.get(range.toUniqueSegmentId());
+		if (!encoder) {
+			throw new Error("No encoder found for range");
+		}
+		const cloned = encoder.clone();
+		return {
+			next: (): Symbol => {
+				return cloned.produce_next_coded_symbol();
+			},
+			free: () => {
+				cloned.free();
+			},
+		};
+	}
+}
+
+const getMissingValuesInRemote = async (properties: {
+	myEncoder: RangeToEncoders;
+	from: PublicSignKey;
+	remoteRange: ReplicationRangeMessage<"u32">;
+}) => {
+	const findOverlappingRangesIOwn = await properties.myEncoder.rangeIndex
+		.iterate({
+			query: getCoveringRangeQuery(
+				properties.remoteRange.toReplicationRangeIndexable(properties.from),
+			),
+		})
+		.all();
+
+	const decoders: Map<string, DecoderWrapper> = new Map();
+	for (const range of findOverlappingRangesIOwn) {
+		const segmentId = range.value.toUniqueSegmentId();
+		const encoder: EncoderWrapper | undefined =
+			properties.myEncoder.encoders.get(segmentId);
+		if (encoder) {
+			decoders.set(segmentId, encoder.to_decoder());
+		}
+	}
+
+	return {
+		process: (encodedSymbol: any) => {
+			let allMissingSymbols: any[] = [];
+			for (const [k, decoder] of decoders) {
+				decoder.add_coded_symbol(encodedSymbol);
+				decoder.try_decode();
+				if (decoder.decoded()) {
+					for (const missingSymbol of decoder.get_local_symbols()) {
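						// RIBLT peeling (context, illustrative): once a decoder reports
						// decoded(), get_local_symbols() yields the coordinates that exist
						// locally but were absent from the remote's coded stream, i.e. the
						// entries the remote appears to be missing for this overlapping segment.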
allMissingSymbols.push(missingSymbol); + } + decoders.delete(k); + } + } + return { + missing: allMissingSymbols, + done: decoders.size === 0, + }; + }, + }; +}; + +export { RangeToEncoders, getMissingValuesInRemote }; diff --git a/packages/programs/data/shared-log/src/utils.ts b/packages/programs/data/shared-log/src/utils.ts index f6cf74961..e0f4c7016 100644 --- a/packages/programs/data/shared-log/src/utils.ts +++ b/packages/programs/data/shared-log/src/utils.ts @@ -1,9 +1,13 @@ import { Entry, ShallowEntry } from "@peerbit/log"; import type { EntryWithRefs } from "./exchange-heads.js"; -import { EntryReplicated } from "./ranges.js"; +import { type EntryReplicated, isEntryReplicated } from "./ranges.js"; export const groupByGid = async < - T extends ShallowEntry | Entry | EntryWithRefs | EntryReplicated, + T extends + | ShallowEntry + | Entry + | EntryWithRefs + | EntryReplicated, >( entries: T[], ): Promise> => { @@ -14,7 +18,7 @@ export const groupByGid = async < ? (await head.getMeta()).gid : head instanceof ShallowEntry ? head.meta.gid - : head instanceof EntryReplicated + : isEntryReplicated(head) ? head.gid : (await head.entry.getMeta()).gid; let value = groupByGid.get(gid); @@ -27,7 +31,9 @@ export const groupByGid = async < return groupByGid; }; -export const groupByGidSync = async ( +export const groupByGidSync = async < + T extends ShallowEntry | EntryReplicated, +>( entries: T[], ): Promise> => { const groupByGid: Map = new Map(); diff --git a/packages/programs/data/shared-log/test/append.spec.ts b/packages/programs/data/shared-log/test/append.spec.ts index 22a72a2e8..1d769368a 100644 --- a/packages/programs/data/shared-log/test/append.spec.ts +++ b/packages/programs/data/shared-log/test/append.spec.ts @@ -15,7 +15,7 @@ describe("append", () => { it("canAppend checked once", async () => { session = await TestSession.disconnected(1); - const store = await session.peers[0].open(new EventStore()); + const store = await session.peers[0].open(new EventStore()); const canAppend = sinon.spy(store.log.canAppend); store.log.canAppend = canAppend; await store.add("a"); @@ -26,7 +26,7 @@ describe("append", () => { it("override option canAppend checked once", async () => { session = await TestSession.disconnected(1); - const store = await session.peers[0].open(new EventStore()); + const store = await session.peers[0].open(new EventStore()); const canAppend = sinon.spy(store.log.canAppend); store.log.canAppend = canAppend; diff --git a/packages/programs/data/shared-log/test/domain-time.spec.ts b/packages/programs/data/shared-log/test/domain-time.spec.ts index d01296a42..3733c4bc3 100644 --- a/packages/programs/data/shared-log/test/domain-time.spec.ts +++ b/packages/programs/data/shared-log/test/domain-time.spec.ts @@ -3,11 +3,11 @@ import type { Entry } from "@peerbit/log"; import { TestSession } from "@peerbit/test-utils"; import { waitForResolved } from "@peerbit/time"; import { expect } from "chai"; +import { denormalizer } from "../src/integers.js"; import { type ReplicationDomainTime, createReplicationDomainTime, } from "../src/replication-domain-time.js"; -import { scaleToU32 } from "../src/role.js"; import { EventStore } from "./utils/stores/event-store.js"; /** @@ -130,7 +130,7 @@ describe("ReplicationDomainTime", function () { }); await waitForResolved(async () => expect( - scaleToU32(await db2.log.calculateTotalParticipation()), + denormalizer("u32")(await db2.log.calculateTotalParticipation()), ).to.be.closeTo(factor, 1), ); diff --git 
a/packages/programs/data/shared-log/test/encryption.spec.ts b/packages/programs/data/shared-log/test/encryption.spec.ts index c95f04bfc..c8907b6c7 100644 --- a/packages/programs/data/shared-log/test/encryption.spec.ts +++ b/packages/programs/data/shared-log/test/encryption.spec.ts @@ -9,7 +9,7 @@ import { SharedLog } from "../src/index.js"; @variant("encrypt_store") class SimpleStore extends Program { @field({ type: SharedLog }) - log: SharedLog; // Documents provide document store functionality around your Posts + log: SharedLog; // Documents provide document store functionality around your Posts constructor() { super(); diff --git a/packages/programs/data/shared-log/test/join.spec.ts b/packages/programs/data/shared-log/test/join.spec.ts index dac7a1c43..a39a74b88 100644 --- a/packages/programs/data/shared-log/test/join.spec.ts +++ b/packages/programs/data/shared-log/test/join.spec.ts @@ -10,7 +10,7 @@ import { EventStore } from "./utils/stores/event-store.js"; describe("join", () => { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; before(async () => { session = await TestSession.disconnected(3, [ @@ -78,9 +78,9 @@ describe("join", () => { }); it("can join replicate", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -92,12 +92,41 @@ describe("join", () => { const e1 = await db1.add("hello"); expect(await db2.log.getMyReplicationSegments()).to.have.length(0); await db2.log.join([e1.entry], { replicate: true }); - expect(await db2.log.getMyReplicationSegments()).to.have.length(1); + expect( + (await db2.log.getMyReplicationSegments()).map((x) => x.width), + ).to.deep.eq([1]); // a single pin expect(db2.log.log.length).to.equal(1); }); + it("can join replicate and merge segments", async () => { + db1 = await session.peers[0].open(new EventStore()); + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { replicate: false }, + }, + ))!; + + await db1.waitFor(session.peers[1].peerId); + + const e1 = await db1.add("hello", { meta: { next: [] } }); + const e2 = await db1.add("hello again", { meta: { next: [] } }); + + expect(await db2.log.getMyReplicationSegments()).to.have.length(0); + await db2.log.join([e1.entry, e2.entry], { + replicate: { mergeSegments: true }, + }); + expect(await db2.log.getMyReplicationSegments()).to.have.length(1); + expect( + (await db2.log.getMyReplicationSegments())[0].width, + ).to.be.greaterThan(1); // a segment covering more than one entry + expect(db2.log.log.length).to.equal(2); + }); + it("will emit one message when replicating multiple entries", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: false }, }); db2 = db1.clone(); @@ -143,7 +172,7 @@ describe("join", () => { }); it("will emit one message when replicating new and already joined entries", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: false }, }); db2 = db1.clone(); @@ -194,9 +223,9 @@ describe("join", () => { describe("already but not replicated", () => { it("entry", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await 
EventStore.open>( db1.address!, session.peers[1], { @@ -216,9 +245,9 @@ describe("join", () => { }); it("hash", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -237,9 +266,9 @@ describe("join", () => { expect(db2.log.log.length).to.equal(1); }); it("shallow entry", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/test/leader.spec.ts b/packages/programs/data/shared-log/test/leader.spec.ts index 171a0fd20..3674d63e2 100644 --- a/packages/programs/data/shared-log/test/leader.spec.ts +++ b/packages/programs/data/shared-log/test/leader.spec.ts @@ -18,7 +18,9 @@ const toEntry = (gid: string | number) => { describe(`isLeader`, function () { let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; + let db1: EventStore, + db2: EventStore, + db3: EventStore; const options = { args: { @@ -78,7 +80,7 @@ describe(`isLeader`, function () { await session.stop(); }); - beforeEach(async () => {}); + beforeEach(async () => { }); afterEach(async () => { if (db1 && db1.closed === false) await db1.drop(); @@ -90,7 +92,7 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { ...options.args, replicate: { offset: 0, factor: 0.5 } }, }); const isLeaderAOneLeader = await db1.log.isLeader({ @@ -106,7 +108,7 @@ describe(`isLeader`, function () { db2 = (await EventStore.open(db1.address!, session.peers[1], { args: { ...options.args, replicate: { offset: 0.5, factor: 0.5 } }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(2), @@ -155,7 +157,7 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - const store = await new EventStore(); + const store = await new EventStore(); db1 = await session.peers[0].open(store, { args: { ...options.args }, }); @@ -163,7 +165,7 @@ describe(`isLeader`, function () { db1.address!, session.peers[1], options, - )) as EventStore; + )) as EventStore; await delay(5000); // some delay so that if peers are to replicate, they would have had time to notify each other @@ -188,18 +190,18 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - const store = await new EventStore(); + const store = await new EventStore(); db1 = await session.peers[0].open(store, { args: { ...options.args, replicate: false }, }); db2 = (await EventStore.open(db1.address!, session.peers[1], { args: { ...options.args, replicate: { factor: 0.5 } }, - })) as EventStore; + })) as EventStore; db3 = (await EventStore.open(db1.address!, session.peers[2], { args: { ...options.args, replicate: { factor: 0.5 } }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await 
db2.log.getReplicators()).size).to.equal(2), @@ -235,7 +237,7 @@ describe(`isLeader`, function () { // TODO fix test timeout, isLeader is too slow as we need to wait for peers // perhaps do an event based get peers using the pubsub peers api - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -250,7 +252,7 @@ describe(`isLeader`, function () { factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; db3 = (await EventStore.open(db1.address!, session.peers[2], { args: { replicate: { @@ -258,7 +260,7 @@ describe(`isLeader`, function () { factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(3), @@ -349,7 +351,7 @@ describe(`isLeader`, function () { isLeaderCThreeLeaders, ]).include.members([true, true, true]); resolved += 1; - } catch (error) {} + } catch (error) { } } // since the distribution only in best scenarios distributes perfectly // we might have duplication, i.e. more than expected amount of leaders for a particular @@ -358,33 +360,33 @@ describe(`isLeader`, function () { }); it("evenly distributed", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); db2 = (await EventStore.open( db1.address!, session.peers[1], options, - )) as EventStore; + )) as EventStore; db3 = (await EventStore.open( db1.address!, session.peers[2], options, - )) as EventStore; + )) as EventStore; let allowedError = 0.03; await waitForResolved(async () => expect( - Math.abs((await db1.log.getMyTotalParticipation()) - 0.33), + Math.abs((await db1.log.calculateMyTotalParticipation()) - 0.33), ).lessThan(allowedError), ); await waitForResolved(async () => expect( - Math.abs((await db2.log.getMyTotalParticipation()) - 0.33), + Math.abs((await db2.log.calculateMyTotalParticipation()) - 0.33), ).lessThan(allowedError), ); await waitForResolved(async () => expect( - Math.abs((await db3.log.getMyTotalParticipation()) - 0.33), + Math.abs((await db3.log.calculateMyTotalParticipation()) - 0.33), ).lessThan(allowedError), ); @@ -438,7 +440,7 @@ describe(`isLeader`, function () { describe("union", () => { it("local first", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { replicate: { @@ -451,7 +453,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -487,7 +489,7 @@ describe(`isLeader`, function () { }); it("will consider in flight", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store.clone(), { args: { @@ -555,7 +557,7 @@ describe(`isLeader`, function () { }); it("sets replicators groups correctly", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -568,7 +570,7 @@ describe(`isLeader`, function () { }, }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -584,7 +586,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -626,7 +628,7 @@ describe(`isLeader`, function () { describe("eager", () => { it("eager, me not-mature, all included", async () => { - const store = new 
EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -639,7 +641,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -654,7 +656,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -696,7 +698,7 @@ describe(`isLeader`, function () { }); it("all non-mature, only me included", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -709,7 +711,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -724,7 +726,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -763,7 +765,7 @@ describe(`isLeader`, function () { describe("maturity", () => { it("one mature, all included", async () => { - const store = new EventStore(); + const store = new EventStore(); const MATURE_TIME = 2000; db1 = await session.peers[0].open(store, { @@ -780,7 +782,7 @@ describe(`isLeader`, function () { await delay(MATURE_TIME); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -796,7 +798,7 @@ describe(`isLeader`, function () { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -856,7 +858,7 @@ describe(`isLeader`, function () { describe("balance", () => { it("small fractions means little replication", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -865,7 +867,7 @@ describe(`isLeader`, function () { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -932,7 +934,7 @@ describe(`isLeader`, function () { }); it("leader always defined", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { ...options.args, @@ -948,7 +950,7 @@ describe(`isLeader`, function () { factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; db3 = (await EventStore.open(db1.address!, session.peers[2], { args: { ...options.args, @@ -956,7 +958,7 @@ describe(`isLeader`, function () { factor: 0.3333, }, }, - })) as EventStore; + })) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(3), @@ -986,12 +988,12 @@ describe(`isLeader`, function () { describe("get replicators sorted", () => { it("can handle peers leaving and joining", async () => { - db1 = await session.peers[0].open(new EventStore(), options); + db1 = await session.peers[0].open(new EventStore(), options); db2 = (await EventStore.open( db1.address!, session.peers[1], options, - )) as EventStore; + )) as EventStore; await waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(2), @@ -1005,7 +1007,7 @@ describe(`isLeader`, function () { db1.address!, session.peers[2], options, - )) as EventStore; + )) as EventStore; await waitForResolved(async () => expect((await db3.log.getReplicators()).size).to.equal(3), @@ -1037,7 +1039,7 @@ describe(`isLeader`, function () { db1.address!, session.peers[1], options, - )) as EventStore; + )) as EventStore; await 
waitForResolved(async () => expect((await db1.log.getReplicators()).size).to.equal(3), diff --git a/packages/programs/data/shared-log/test/load.spec.ts b/packages/programs/data/shared-log/test/load.spec.ts index 1db3dbb60..a8d7cdfa0 100644 --- a/packages/programs/data/shared-log/test/load.spec.ts +++ b/packages/programs/data/shared-log/test/load.spec.ts @@ -9,7 +9,7 @@ import { waitForConverged } from "./utils.js"; import { EventStore } from "./utils/stores/event-store.js"; describe("load", function () { - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; let session: TestSession; @@ -28,8 +28,8 @@ describe("load", function () { it("load after replicate", async () => { session = await TestSession.connected(2); - db1 = await session.peers[0].open(new EventStore()); - db2 = await EventStore.open>( + db1 = await session.peers[0].open(new EventStore()); + db2 = await EventStore.open>( db1.address!, session.peers[1], ); @@ -83,7 +83,7 @@ describe("load", function () { }, ]); - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 0.5 }, replicas: { @@ -99,7 +99,7 @@ describe("load", function () { await db1.add("hello" + i, { meta: { next: [] } }); } - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -120,7 +120,7 @@ describe("load", function () { await waitForConverged(() => db2.log.log.length); await session.peers[1].stop(); await db1.close(); - db1 = await EventStore.open>( + db1 = await EventStore.open>( db1.address!, session.peers[0], { @@ -142,7 +142,7 @@ describe("load", function () { { directory: "./tmp/shared-log/load-events/" + uuid() }, ]); - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1 }, replicas: { diff --git a/packages/programs/data/shared-log/test/migration-8-9.spec.ts b/packages/programs/data/shared-log/test/migration-8-9.spec.ts index 9e63ed9b6..5a0ba5d6c 100644 --- a/packages/programs/data/shared-log/test/migration-8-9.spec.ts +++ b/packages/programs/data/shared-log/test/migration-8-9.spec.ts @@ -14,7 +14,7 @@ import { EventStore } from "./utils/stores/event-store.js"; describe(`migration-8-9`, function () { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; const setup = async (compatibility?: number, order: boolean = false) => { session = await TestSession.connected(2, [ @@ -46,7 +46,7 @@ describe(`migration-8-9`, function () { }, ]); - const db = new EventStore(); + const db = new EventStore(); const createV8 = () => { const db1 = db.clone(); diff --git a/packages/programs/data/shared-log/test/network.spec.ts b/packages/programs/data/shared-log/test/network.spec.ts index 211d5ec26..076858cd0 100644 --- a/packages/programs/data/shared-log/test/network.spec.ts +++ b/packages/programs/data/shared-log/test/network.spec.ts @@ -10,7 +10,7 @@ import { EventStore } from "./utils/stores/event-store.js"; describe(`network`, () => { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; after(async () => {}); @@ -32,7 +32,7 @@ describe(`network`, () => { await session.peers[0].services.blocks.waitFor(session.peers[2].peerId); await session.peers[1].services.blocks.waitFor(session.peers[2].peerId); - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: 
{ factor: 1, @@ -40,7 +40,7 @@ describe(`network`, () => { }, }); - db2 = await await EventStore.open>( + db2 = await await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/test/observer.spec.ts b/packages/programs/data/shared-log/test/observer.spec.ts index 8e67beca5..f24f50520 100644 --- a/packages/programs/data/shared-log/test/observer.spec.ts +++ b/packages/programs/data/shared-log/test/observer.spec.ts @@ -20,8 +20,8 @@ describe("observer", () => { [session.peers[1], session.peers[2]], ]); - let stores: EventStore[] = []; - const s = new EventStore(); + let stores: EventStore[] = []; + const s = new EventStore(); const createStore = () => deserialize(serialize(s), EventStore); let replicatorEndIndex = 1; @@ -70,7 +70,7 @@ describe("observer", () => { session = await TestSession.connected(2); await session.connect([[session.peers[0], session.peers[1]]]); - const s = new EventStore(); + const s = new EventStore(); const createStore = () => deserialize(serialize(s), EventStore); const replicator = await session.peers[0].open(createStore(), { @@ -108,7 +108,7 @@ describe("observer", () => { [session.peers[1], session.peers[2]], ]); - const s = new EventStore(); + const s = new EventStore(); const createStore = () => deserialize(serialize(s), EventStore); const replicator = await session.peers[0].open(createStore(), { args: { diff --git a/packages/programs/data/shared-log/test/open-close.spec.ts b/packages/programs/data/shared-log/test/open-close.spec.ts index 3ca72d369..4c933034c 100644 --- a/packages/programs/data/shared-log/test/open-close.spec.ts +++ b/packages/programs/data/shared-log/test/open-close.spec.ts @@ -48,7 +48,7 @@ describe("replicators", () => { }); it("clears in flight info when leaving", async () => { - const store = new EventStore(); + const store = new EventStore(); session = await TestSession.connected(3); diff --git a/packages/programs/data/shared-log/test/ranges.spec.ts b/packages/programs/data/shared-log/test/ranges.spec.ts index a17d5c70a..d2b7bcecc 100644 --- a/packages/programs/data/shared-log/test/ranges.spec.ts +++ b/packages/programs/data/shared-log/test/ranges.spec.ts @@ -1,930 +1,1834 @@ import { - Ed25519Keypair, - type Ed25519PublicKey, - randomBytes, + Ed25519Keypair, + type Ed25519PublicKey, + PublicSignKey, + randomBytes, } from "@peerbit/crypto"; import type { Index } from "@peerbit/indexer-interface"; import { create as createIndices } from "@peerbit/indexer-sqlite3"; import { LamportClock, Meta } from "@peerbit/log"; import { expect } from "chai"; import { - EntryReplicated, - ReplicationIntent, - ReplicationRangeIndexable, - getCoverSet, - getDistance, - getEvenlySpacedU32, - getSamples as getSamplesMap, - hasCoveringRange, - toRebalance, + type NumberFromType, + createNumbers, + denormalizer, +} from "../src/integers.js"; +import { + type EntryReplicated, + EntryReplicatedU32, + EntryReplicatedU64, + ReplicationIntent, + type ReplicationRangeIndexable, + ReplicationRangeIndexableU32, + ReplicationRangeIndexableU64, + getCoverSet as getCoverSetGeneric, + getDistance, + getSamples as getSamplesMap, + iHaveCoveringRange, + mergeRanges, + toRebalance, } from "../src/ranges.js"; -import { HALF_MAX_U32, MAX_U32, scaleToU32 } from "../src/role.js"; - -const getSamples = async ( - offset: number, - peers: Index, - count: number, - roleAge: number, -) => { - const map = await getSamplesMap( - getEvenlySpacedU32(offset, count), - peers, - roleAge, - ); - return [...map.keys()]; -}; // prettier-ignore 
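/*
  Editor's note: from here on, ranges.spec.ts is parameterized over the coordinate
  resolution. `createNumbers(resolution)` (from ../src/integers.js) supplies the
  arithmetic used throughout (zero, maxValue, divRound, abs, getGrid) over plain
  numbers for u32 and bigints for u64, and `denormalizer(resolution)` maps a
  normalized [0, 1] offset into that coordinate space. A rough sketch of the
  mapping, not the actual implementation:

    const MAX_U32 = 0xffffffff;
    const MAX_U64 = (1n << 64n) - 1n;

    const denormalizeU32 = (n: number): number => Math.round(n * MAX_U32);
    // scale via a 32-bit fixed-point factor to stay exact in bigint arithmetic
    const denormalizeU64 = (n: number): bigint =>
      (BigInt(Math.round(n * 2 ** 32)) * MAX_U64) >> 32n;

    denormalizeU32(0.5); // 2147483648, i.e. ~MAX_U32 / 2
    denormalizeU64(0.5); // 9223372036854775807n, i.e. ~MAX_U64 / 2n
*/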
-describe("ranges", () => { - let peers: Index - let a: Ed25519PublicKey, b: Ed25519PublicKey, c: Ed25519PublicKey; - - let create = async (...rects: ReplicationRangeIndexable[]) => { - const indices = (await createIndices()) - await indices.start() - const index = await indices.init({ schema: ReplicationRangeIndexable }) - for (const rect of rects) { - await index.put(rect) - } - peers = index - } - before(async () => { - a = (await Ed25519Keypair.create()).publicKey; - b = (await Ed25519Keypair.create()).publicKey; - c = (await Ed25519Keypair.create()).publicKey; - - // sort keys by hash to make test assertions easier - if (a.hashcode() > b.hashcode()) { - const tmp = a; - a = b; - b = tmp; - } - if (b.hashcode() > c.hashcode()) { - const tmp = b; - b = c; - c = tmp; - } - if (a.hashcode() > b.hashcode()) { - const tmp = a; - a = b; - b = tmp; - } - - }) - beforeEach(() => { - peers = undefined!; - }) - - describe('getCover', () => { - - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] - rotations.forEach((rotation) => { - describe('rotation: ' + String(rotation), () => { - - describe('underflow', () => { - it('includes all', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.1, offset: (0.333 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.666 + rotation) % 1, timestamp: 0n }) - ); +type R = 'u32' | 'u64' +const resolutions: [R, R] = ["u32", "u64"]; + +resolutions.forEach((resolution) => { + describe("ranges: " + resolution, () => { + const rangeClass = + resolution === "u32" + ? ReplicationRangeIndexableU32 + : ReplicationRangeIndexableU64; + const coerceNumber = (number: number | bigint): NumberFromType => + resolution === "u32" ? 
number : BigInt(number); + const numbers = createNumbers(resolution); + const denormalizeFn = denormalizer(resolution); + const getCoverSet = async (properties: { + peers: Index>; + start: NumberFromType | PublicSignKey | undefined; + widthToCoverScaled: NumberFromType; + roleAge: number; + eager?: + | { + unmaturedFetchCoverSize?: number; + } + | boolean; + }): Promise> => { + return getCoverSetGeneric({ ...properties, numbers }); + }; + const getSamples = async ( + offset: NumberFromType, + peers: Index>, + count: number, + roleAge: number, + ) => { + const map = await getSamplesMap( + numbers.getGrid(offset, count), + peers, + roleAge, + numbers, + ); + return [...map.keys()]; + }; + + const createReplicationRangeFromNormalized = (properties: { + id?: Uint8Array; + publicKey: PublicSignKey; + length: number; + offset: number; + timestamp?: bigint; + mode?: ReplicationIntent; + }) => { + return new rangeClass({ + id: properties.id, + publicKey: properties.publicKey, + mode: properties.mode, + // @ts-ignore + length: denormalizeFn(properties.length), + // @ts-ignore + offset: denormalizeFn(properties.offset), + timestamp: properties.timestamp, + }); + }; + + const createReplicationRange = (properties: { + id?: Uint8Array; + publicKey: PublicSignKey; + length: number | bigint; + offset: number | bigint; + timestamp?: bigint; + mode?: ReplicationIntent; + }) => { + // @ts-ignore + return new rangeClass({ + id: properties.id, + publicKey: properties.publicKey, + mode: properties.mode, + // @ts-ignore + length: coerceNumber(properties.length), + // @ts-ignore + offset: coerceNumber(properties.offset), + timestamp: properties.timestamp, + }); + }; + + describe("ReplicationRangeIndexable", () => { + let peers: Index>; + let a: Ed25519PublicKey, b: Ed25519PublicKey, c: Ed25519PublicKey; + + let create = async (...rects: ReplicationRangeIndexable[]) => { + const indices = await createIndices(); + await indices.start(); + const index = await indices.init({ schema: rangeClass as any }); + for (const rect of rects) { + try { + await index.put(rect); + } catch (error) { + throw error; + } + } + peers = index as Index>; + }; + + before(async () => { + a = (await Ed25519Keypair.create()).publicKey; + b = (await Ed25519Keypair.create()).publicKey; + c = (await Ed25519Keypair.create()).publicKey; + + // sort keys by hash to make test assertions easier + if (a.hashcode() > b.hashcode()) { + const tmp = a; + a = b; + b = tmp; + } + if (b.hashcode() > c.hashcode()) { + const tmp = b; + b = c; + c = tmp; + } + if (a.hashcode() > b.hashcode()) { + const tmp = a; + a = b; + b = tmp; + } + }); + beforeEach(() => { + peers = undefined!; + }); + + describe("getCover", () => { + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + rotations.forEach((rotation) => { + describe("rotation: " + String(rotation), () => { + describe("underflow", () => { + it("includes all", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.1, + offset: (0.333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.666 + rotation) % 1, + timestamp: 0n, + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 
1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + }); + + describe("overflow", () => { + it("local first", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: 0n, + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode()]); + }); + }); + + describe("unmature", () => { + it("all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + + it("full width all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // special case, assume we only look into selef + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode()]); + }); + + it("two unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // should not be included. TODO is this always expected behaviour? 
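/*
  Editor's note: the getCover cases in this block pin down how getCoverSet treats
  maturity. Ranges younger than `roleAge` (timestamp too recent) are skipped when
  mature ranges can cover the requested width; if no candidate is mature, the call
  falls back to returning all of them ("all unmature"); and a single mature range
  already spanning the full width short-circuits to just that peer ("local
  first"). Shape of a call as wrapped in this file:

    const covering = await getCoverSet({
      peers,                                // Index of replication ranges
      start: a,                             // a public key, or a scaled coordinate
      widthToCoverScaled: numbers.maxValue, // here: cover the whole ring
      roleAge: 1e5,                         // ms before a range counts as mature
    });
    // covering is a Set of peer hashcodes, e.g. [a.hashcode(), b.hashcode()]
*/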
+ expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + })), + ]).to.have.members([a.hashcode()]); + }); + }); + + describe("eager", () => { + it("all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + it("full width all mature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: 0n, + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode()]); + }); + + it("full width all unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // special case, assume we only look into selef + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + + it("two unmature", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.333 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.34, + offset: (0.666 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // should not be included. TODO is this always expected behaviour? 
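/*
  Editor's note: `eager` relaxes the maturity filter. With `eager: true`,
  unmatured ranges are still pulled into the cover set (useful right after a peer
  announces a range, before it has aged past roleAge); the one exception asserted
  above is a mature full-width local range, which still short-circuits to the
  caller alone. Same call as before, plus the flag:

    await getCoverSet({
      peers,
      start: a,
      widthToCoverScaled: numbers.maxValue,
      roleAge: 1e5,
      eager: true,
    });
*/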
+ expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: b, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: c, + widthToCoverScaled: numbers.maxValue, + eager: true, + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + }); + + describe("skip", () => { + it("next", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.41, + offset: (0.1 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.5, + offset: (0.3 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.divRound(numbers.maxValue, 2), + })), + ]).to.have.members([a.hashcode(), b.hashcode()]); + }); + it("between", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.34, + offset: (0 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.5, + offset: (0.2 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.34, + offset: (0.3 + rotation) % 1, + timestamp: 0n, + }), + ); + + // we try to cover 0.5 starting from a + // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: numbers.divRound(numbers.maxValue, 2), + })), + ]).to.have.members([a.hashcode(), b.hashcode()]); + }); + }); + + describe("boundary", () => { + it("exact", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.5, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + }), + ); + + // because of rounding errors, a cover width of 0.5 might yield unecessary results + expect([ + ...(await getCoverSet({ + peers, + roleAge: 0, + start: a, + widthToCoverScaled: numbers.divRound(numbers.maxValue, 2), + })), + ]).to.have.members([a.hashcode()]); + }); + + it("after", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.21 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 0, + start: b, + widthToCoverScaled: denormalizeFn(0.6), + })), + ]).to.have.members([b.hashcode()]); + }); + + it("skip unmature", async () => { + await create( + 
createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + }), + ); + // starting from b, we need both a and c since b is not mature to cover the width + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: a, + widthToCoverScaled: denormalizeFn(0.5), + })), + ]).to.have.members([a.hashcode(), c.hashcode()]); + }); + + it("include start node identity", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + }), + ); + // starting from b, we need both a and c since b is not mature to cover the width + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: b, + widthToCoverScaled: denormalizeFn(0.5), + })), + ]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]); + }); + + describe("strict", () => { + it("no boundary", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.5, + offset: (0.5 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + // starting from b, we need both a and c since b is not mature to cover the width + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: b, + widthToCoverScaled: denormalizeFn(0.51), + })), + ]).to.have.members([b.hashcode()]); + }); + + it("empty set boundary", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.1, + offset: (0.81 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: denormalizeFn((0.5 + rotation) % 1), + widthToCoverScaled: denormalizeFn(0.3), + })), + ]).to.have.members([]); + }); + + it("overlapping", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.1, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + + expect([ + ...(await getCoverSet({ + peers, + roleAge: 1e5, + start: denormalizeFn((0 + rotation) % 1), + widthToCoverScaled: denormalizeFn(0.6), + })), + ]).to.have.members([a.hashcode()]); + }); + }); + }); + }); + }); + }); + + describe("getSamples", () => { + const rotations = [0, 0.333, 0.5, 0.8]; + rotations.forEach((rotation) => { + describe("samples correctly: " + rotation, () => { + it("1 and less than 1", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 
0.2625, + offset: (0.367 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.847 + rotation) % 1, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.78), peers, 2, 0), + ).to.have.length(2); + }); - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) + it("1 sample but overlapping yield two matches", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: (0.367 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: (0.847 + rotation) % 1, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.78), peers, 1, 0), + ).to.have.length(2); + }); - }) - }) + it("closest to", async () => { + await create( + createReplicationRange({ + publicKey: a, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.847 + rotation) % 1), + timestamp: 0n, + }), + ); + expect( + await getSamples( + denormalizeFn((0.78 + rotation) % 1), + peers, + 1, + 0, + ), + ).to.deep.eq([b.hashcode()]); + }); + + it("closest to oldest", async () => { + // two exactly the same, but one is older + await create( + createReplicationRange({ + publicKey: a, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 1n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + ); - describe("overflow", () => { - it("local first", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: 0n })) + expect( + await getSamples( + denormalizeFn((0.78 + rotation) % 1), + peers, + 1, + 0, + ), + ).to.deep.eq([b.hashcode()]); + }); + + it("closest to hash", async () => { + // two exactly the same, but one is older + await create( + createReplicationRange({ + publicKey: a, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.367 + rotation) % 1), + timestamp: 0n, + }), + ); - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode()]) - }) - }) + expect(a.hashcode() < b.hashcode()).to.be.true; + expect( + await getSamples( + denormalizeFn((0.78 + rotation) % 1), + peers, + 1, + 0, + ), + ).to.deep.eq([a.hashcode()]); + }); + + it("interescting", async () => { + // two exactly the same, but one is older + await create( + createReplicationRange({ + publicKey: a, + length: numbers.divRound(numbers.maxValue, 2), + offset: denormalizeFn((0 + rotation) % 1), + timestamp: 0n, + }), + 
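/*
  Editor's note: the three "closest to" cases above fix the tie-breaking order
  getSamples uses when several ranges could serve a sample point: nearest range
  first, then (for identical offsets) the older timestamp, then the lower
  hashcode. The map form also reports whether each hit actually intersects the
  sampled coordinate. Sketch, with `point` standing in for any scaled coordinate:

    const samples = await getSamplesMap(
      numbers.getGrid(point, 2), // evenly spaced sample points (cf. the removed getEvenlySpacedU32)
      peers,
      roleAge,
      numbers,
    );
    for (const [hash, info] of samples) {
      // info.intersecting is true when the peer's range contains the sample point
    }
*/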
createReplicationRange({ + publicKey: b, + length: 1, + offset: denormalizeFn((0.5 + rotation) % 1), + timestamp: 0n, + }), + ); - describe("unmature", () => { + const samples1 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.25 + rotation) % 1), 1), + peers, + 0, + numbers, + ); + expect( + [...samples1.values()].filter((x) => x.intersecting).length, + ).to.eq(1); + expect(samples1.size).to.eq(1); + + const samples2 = await getSamplesMap( + numbers.getGrid(denormalizeFn((0.75 + rotation) % 1), 2), + peers, + 0, + numbers, + ); + expect( + [...samples2.values()].filter((x) => x.intersecting).length, + ).to.eq(1); + expect(samples2.size).to.eq(2); + }); - it('all unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) - ); + // TODO add breakeven test to make sure it is sorted by hash + }); + }); - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) + it("factor 0 ", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0, + offset: 0.367 % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 1, + offset: 0.567 % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: 0.847 % 1, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.3701), peers, 2, 0), + ).to.have.members([b, c].map((x) => x.hashcode())); + }); - }) + it("factor 0 with 3 peers factor 1", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 1, + offset: 0.145, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0, + offset: 0.367, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 1, + offset: 0.8473, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.937), peers, 2, 0), + ).to.have.members([a, c].map((x) => x.hashcode())); + }); + it("factor 0 with 3 peers short", async () => { + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2, + offset: 0.145, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0, + offset: 0.367, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.2, + offset: 0.8473, + timestamp: 0n, + }), + ); + expect( + await getSamples(denormalizeFn(0.937), peers, 2, 0), + ).to.have.members([a, c].map((x) => x.hashcode())); + }); - it('full width all unmature', async () => { + rotations.forEach((rotation) => { + it("evenly distributed: " + rotation, async () => { await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: 
BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2, + offset: (0.2333 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.2, + offset: (0.56666 + rotation) % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.2, + offset: (0.9 + rotation) % 1, + timestamp: 0n, + }), ); - // special case, assume we only look into selef - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode()]) - - }) + let ac = 0, + bc = 0, + cc = 0; + let count = 1000; + for (let i = 0; i < count; i++) { + const leaders = await getSamplesMap( + [denormalizeFn(i / count)], + peers, + 0, + numbers, + ); + if (leaders.has(a.hashcode())) { + ac++; + } + if (leaders.has(b.hashcode())) { + bc++; + } + if (leaders.has(c.hashcode())) { + cc++; + } + } + + // check ac, bc and cc are all close to 1/3 + expect(ac / count).to.be.closeTo(1 / 3, 0.1); + expect(bc / count).to.be.closeTo(1 / 3, 0.1); + expect(cc / count).to.be.closeTo(1 / 3, 0.1); + }); + }); - it('two unmature', async () => { + describe("maturity", () => { + it("starting at unmatured", async () => { await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.333, + offset: 0.333 % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.333, + offset: 0.666 % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.3333, + offset: 0.999 % 1, + timestamp: 0n, + }), ); + expect( + await getSamples(denormalizeFn(0.7), peers, 2, 1e5), + ).to.have.members([a, b, c].map((x) => x.hashcode())); + }); - - // should not be included. TODO is this always expected behaviour? 
- expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 })]).to.have.members([a.hashcode()]) - - }) - - - }) - - describe('eager', () => { - it('all unmature', async () => { + it("starting at matured", async () => { await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.333, + offset: 0.333 % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.333, + offset: 0.666 % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.3333, + offset: 0.999 % 1, + timestamp: 0n, + }), ); + // the offset jump will be 0.5 (a) and 0.5 + 0.5 = 1 which will intersect (c) + expect( + await getSamples(denormalizeFn(0.5), peers, 2, 1e5), + ).to.have.members([a, c].map((x) => x.hashcode())); + }); - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - }) - it('full width all mature', async () => { - + it("starting at matured-2", async () => { await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: 0n }) + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.333, + offset: 0.333 % 1, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.333, + offset: 0.666 % 1, + timestamp: BigInt(+new Date()), + }), + createReplicationRangeFromNormalized({ + publicKey: c, + length: 0.3333, + offset: 0.999 % 1, + timestamp: 0n, + }), ); - - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode()]) - }) - - - it('full width all unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: (0 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) + // the offset jump will be 0.2 (a) and 0.2 + 0.5 = 0.7 which will intersect (b) (unmatured) + expect( + await getSamples(numbers.zero, peers, 2, 1e5), + ).to.have.members([a, c].map((x) => x.hashcode())); + }); + }); + + describe("strict", async () => { + rotations.forEach((rotation) 
=> { + it( + "only includes strict segments when intersecting: " + rotation, + async () => { + const offsetNonStrict = (0 + rotation) % 1; + await create( + createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.2, + offset: offsetNonStrict, + timestamp: 0n, + }), + createReplicationRangeFromNormalized({ + publicKey: b, + length: 0.2, + offset: (0.3 + rotation) % 1, + timestamp: 0n, + mode: ReplicationIntent.Strict, + }), + ); + + const leaders = await getSamples( + denormalizeFn(offsetNonStrict + 0.001), + peers, + 2, + 0, + ); + expect(leaders).to.have.members([a].map((x) => x.hashcode())); + }, ); - - // special case, assume we only look into selef - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - - }) - - it('two unmature', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.333 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.34, offset: (0.666 + rotation) % 1, timestamp: BigInt(+new Date) }) + }); + }); + }); + + describe("getDistance", () => { + describe("above", () => { + it("immediate", () => { + expect(getDistance(0.5, 0.4, "above", 1)).to.be.closeTo( + 0.1, + 0.0001, ); + }); - - // should not be included. TODO is this always expected behaviour? - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - expect([...await getCoverSet({ peers, roleAge: 1e5, start: b, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - expect([...await getCoverSet({ peers, roleAge: 1e5, start: c, widthToCoverScaled: MAX_U32, eager: true })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - - }) - }) - - - describe("skip", () => { - it('next', async () => { - - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.41, offset: (0.1 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.5, offset: (0.3 + rotation) % 1, timestamp: BigInt(+new Date) }) + it("wrap", () => { + expect(getDistance(0.1, 0.9, "above", 1)).to.be.closeTo( + 0.2, + 0.0001, ); + }); + }); - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 / 2 })]).to.have.members([a.hashcode(), b.hashcode()]) - }) - it('between', async () => { - - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.34, offset: (0 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.5, offset: (0.2 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.34, offset: (0.3 + rotation) % 1, timestamp: 0n }) + describe("below", () => { + it("immediate", () => { + expect(getDistance(0.5, 0.6, "below", 
1)).to.be.closeTo( + 0.1, + 0.0001, ); + }); - - // we try to cover 0.5 starting from a - // this should mean that we would want a and b, because c is not mature enough, even though it would cover a wider set - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: MAX_U32 / 2 })]).to.have.members([a.hashcode(), b.hashcode()]) - - }) - }) - - describe("boundary", () => { - - it('exact', async () => { - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.5, offset: (0.2 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: 0n }) + it("wrap", () => { + expect(getDistance(0.9, 0.1, "below", 1)).to.be.closeTo( + 0.2, + 0.0001, ); + }); + }); - // because of rounding errors, a cover width of 0.5 might yield unecessary results - expect([...await getCoverSet({ peers, roleAge: 0, start: a, widthToCoverScaled: 0.499 * MAX_U32 })]).to.have.members([a.hashcode()]) - }) - - it('after', async () => { - - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.21 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n }) + describe("closest", () => { + it("immediate", () => { + expect(getDistance(0.5, 0.6, "closest", 1)).to.be.closeTo( + 0.1, + 0.0001, ); + }); - expect([...await getCoverSet({ peers, roleAge: 0, start: b, widthToCoverScaled: scaleToU32(0.6) })]).to.have.members([b.hashcode()]) - }) - - it('skip matured', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n }) + it("wrap", () => { + expect(getDistance(0.9, 0.1, "closest", 1)).to.be.closeTo( + 0.2, + 0.0001, ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: a, widthToCoverScaled: scaleToU32(0.5) })]).to.have.members([a.hashcode(), c.hashcode()]) - }) + }); - it('include start node identity', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n }) + it("wrap 2", () => { + expect(getDistance(0.1, 0.9, "closest", 1)).to.be.closeTo( + 0.2, + 0.0001, ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: b, widthToCoverScaled: scaleToU32(0.5) })]).to.have.members([a.hashcode(), b.hashcode(), c.hashcode()]) - }) - - describe('strict', () => { - it('no boundary', async () => { + }); + }); + }); + + describe("hasOneOverlapping", () => { + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 
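/*
  Editor's note: the getDistance assertions just above reduce to modular ring
  distances with wrap-around at the last argument (1 in these tests):

    above:   (from - to + wrap) % wrap   // getDistance(0.1, 0.9, "above", 1) === 0.2
    below:   (to - from + wrap) % wrap   // getDistance(0.9, 0.1, "below", 1) === 0.2
    closest: min(above, below)           // getDistance(0.9, 0.1, "closest", 1) === 0.2

  (formulas inferred from the expected values, not from the implementation)
*/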
0.8, 0.9, 1]; + rotations.forEach((rotation) => { + describe("rotation: " + String(rotation), () => { + it("includes all", async () => { + const cmp = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.5, + offset: (0 + rotation) % 1, + timestamp: 0n, + }); + await create(cmp); + + const inside = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.4, + offset: (0.05 + rotation) % 1, + timestamp: 0n, + }); + expect(await iHaveCoveringRange(peers, inside)).to.be.true; + + const outside1 = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.4, + offset: (0.2 + rotation) % 1, + timestamp: 0n, + }); + expect(await iHaveCoveringRange(peers, outside1)).to.be.false; + + const outside2 = createReplicationRangeFromNormalized({ + publicKey: a, + length: 0.51, + offset: (0.1 + rotation) % 1, + timestamp: 0n, + }); + expect(await iHaveCoveringRange(peers, outside2)).to.be.false; + }); + }); + }); + }); + + describe("merge", () => { + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + rotations.forEach((rotation) => { + describe("rotation: " + String(rotation), () => { + describe("2 ranges", () => { + it("gap", async () => { + const offset1 = denormalizeFn(0.2 + rotation); + const offset2 = denormalizeFn(0.3 + rotation); + + //@ts-ignore + const diff = numbers.abs(offset1 - offset2); + + //@ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset1 % numbers.maxValue, + timestamp: 0n, + }); + + //@ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset2 % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2], numbers); + + expect(merged.width).to.eq( + diff + ((typeof diff === "number" ? 1 : 1n) as any), + ); // + 1 for the length of the last range + expect(merged.start1).to.equal(range1.start1); + }); + + it("adjecent", async () => { + const offset = denormalizeFn(0.2 + rotation); + + //@ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset % numbers.maxValue, + timestamp: 0n, + }); + + //@ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: (offset + (typeof offset === 'bigint' ? 
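/*
  Editor's note: the merge tests in this block treat each input as an
  [offset, offset + length) span on the ring, so mergeRanges over one peer's
  segments yields: width = gap + 1 for two unit ranges separated by `diff` (the
  + 1 accounting for the last range's own length, per the comments above), width 2
  for adjacent unit ranges, and width 1 for exact duplicates. Sketch in the u32
  case (the u64 case adds 1n instead of 1):

    const r1 = createReplicationRange({ publicKey: a, length: 1, offset, timestamp: 0n });
    const r2 = createReplicationRange({ publicKey: a, length: 1, offset: offset + 1, timestamp: 0n });

    const merged = mergeRanges([r1, r2], numbers);
    // merged.width === 2 and merged.start1 === r1.start1
*/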
1n : 1)) % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2], numbers); + expect(Number(merged.width)).to.eq(2); + expect(merged.start1).to.equal(range1.start1) + }); + + it("duplicates", async () => { + const offset = denormalizeFn(0.2 + rotation); + + //@ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset % numbers.maxValue, + timestamp: 0n, + }); + //@ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2], numbers); + expect(Number(merged.width)).to.eq(1); + // expect(merged.start1).to.equal(range1.start1) + }); + }); + + describe("3 ranges", () => { + it("gap", async () => { + const offset1 = denormalizeFn(0.2 + rotation); + const offset2 = denormalizeFn(0.3 + rotation); + const offset3 = denormalizeFn(0.4 + rotation); + + // @ts-ignore + const diff = numbers.abs(offset1 - offset3); + + // @ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset1 % numbers.maxValue, + timestamp: 0n, + }); + // @ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset2 % numbers.maxValue, + timestamp: 0n, + }); + const range3 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset3 % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2, range3], numbers); + // @ts-ignore + expect(merged.width).to.eq( + // @ts-ignore + diff + (typeof diff === "number" ? 1 : 1n), + ); // + 1 for the length of the last range + }); + + it("adjecent", async () => { + const offset1 = denormalizeFn(0.2 + rotation); + const offset2 = + // @ts-ignore + offset1 + (typeof offset1 === "number" ? 1 : 1n); + // @ts-ignore + const offset3 = + offset2 + (typeof offset2 === "number" ? 
1 : 1n); + + // @ts-ignore + const range1 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset1 % numbers.maxValue, + timestamp: 0n, + }); + // @ts-ignore + const range2 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset2 % numbers.maxValue, + timestamp: 0n, + }); + + // @ts-ignore + const range3 = createReplicationRange({ + publicKey: a, + length: 1, + // @ts-ignore + offset: offset3 % numbers.maxValue, + timestamp: 0n, + }); + + const merged = mergeRanges([range1, range2, range3], numbers); + expect(Number(merged.width)).to.eq(3); + }); + }); + }); + }); + }); + + /* describe("removeRange", () => { + + + it('remove outside', () => { + const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) + const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 0, length: 1, timestamp: 0n }) + const result = from.removeRange(toRemove) + expect(result).to.equal(from) + + }) + + it('remove all', () => { + const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) + const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) + const result = from.removeRange(toRemove) + expect(result).to.have.length(0) + }) + + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] + rotations.forEach((rotation) => { + describe('rotation: ' + String(rotation), () => { + + it('removes end', () => { + const from = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation, length: 0.3, timestamp: 0n }) + const toRemove = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation + 0.2, length: 0.2, timestamp: 0n }) + const result = from.removeRange(toRemove) + expect(result).to.have.length(2) + const arr = result as ReplicationRangeIndexable[] + expect(arr[0].start1).to.equal(from.start1) + expect(arr[0].end1).to.equal(toRemove.start1) + expect(arr[1].start2).to.equal(toRemove.start2) + expect(arr[1].end2).to.equal(toRemove.end2) + }) + }) + }) + + }) */ + }); + + describe("entry replicated", () => { + let index: Index>; + const entryClass = + resolution === "u32" ? 
EntryReplicatedU32 : EntryReplicatedU64; + + let create = async (...rects: EntryReplicated[]) => { + const indices = await createIndices(); + await indices.start(); + index = await indices.init({ schema: entryClass as any }); + for (const rect of rects) { + await index.put(rect); + } + }; + let a: Ed25519PublicKey; + + beforeEach(async () => { + a = (await Ed25519Keypair.create()).publicKey; + index = undefined!; + }); + + describe("toRebalance", () => { + const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; + + const consumeAllFromAsyncIterator = async ( + iter: AsyncIterable<{ gid: string; entries: EntryReplicated[] }>, + ) => { + const result = []; + for await (const entry of iter) { + result.push(entry); + } + return result; + }; + + const creatEntryReplicated = (properties: { + coordinate: NumberFromType; + hash: string; + meta: Meta; + assignedToRangeBoundary: boolean; + }) => { + return new entryClass({ + coordinate: properties.coordinate, + assignedToRangeBoundary: properties.assignedToRangeBoundary, + hash: properties.hash, + meta: properties.meta, + } as any); + }; + + rotations.forEach((rotation) => { + const rotate = (from: number) => (from + rotation) % 1; + describe("rotation: " + String(rotation), () => { + it("empty change set", async () => { await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.5, offset: (0.5 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }) + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: b, widthToCoverScaled: scaleToU32(0.51) })]).to.have.members([b.hashcode()]) - }) - it('empty set boundary', async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.1, offset: (0.81 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }) + const result = await consumeAllFromAsyncIterator( + toRebalance([], index), ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: scaleToU32((0.5 + rotation) % 1), widthToCoverScaled: scaleToU32(0.3) })]).to.have.members([]) - }) - - it('overlapping', async () => { + expect(result).to.have.length(0); + }); + + describe("update", () => { + it("matches prev", async () => { + await create( + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id:
randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + + const prev = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + const updated = createReplicationRangeFromNormalized({ + id: prev.id, + publicKey: a, + offset: rotate(0.5), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + prev, + range: updated, + type: "updated", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.equal(["b"]); + }); + + it("matches next", async () => { + await create( + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + + const prev = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.5), + length: 0.2, + }); + const updated = createReplicationRangeFromNormalized({ + id: prev.id, + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + prev, + range: updated, + type: "updated", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.equal(["b"]); + }); + }); + + it("not enoughly replicated after change", async () => { await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.1, offset: (0.2 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), ); - // starting from b, we need both a and c since b is not mature to cover the width - expect([...await getCoverSet({ peers, roleAge: 1e5, start: scaleToU32((0 + rotation) % 1), widthToCoverScaled: scaleToU32(0.6) })]).to.have.members([a.hashcode()]) - }) - }) - }) - - - }) - - }) - }) - - describe("getSamples", () => { - const rotations = [0, 0.333, 0.5, 0.8] - rotations.forEach((rotation) => { - describe('samples correctly: ' + rotation, () => { - it("1 and less than 1", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2625, offset: (0.367 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.847 + rotation) % 1, timestamp: 0n })) - expect(await getSamples(scaleToU32(0.78), peers, 2, 0)).to.have.length(2) - }) - - it("1 sample but overlapping yield two matches", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, 
publicKey: a, length: 1, offset: (0.367 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.847 + rotation) % 1, timestamp: 0n })) - expect(await getSamples(scaleToU32(0.78), peers, 1, 0)).to.have.length(2) - }) - - it("closest to", async () => { - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.847 + rotation) % 1), timestamp: 0n })) - expect(await getSamples(scaleToU32((0.78 + rotation) % 1), peers, 1, 0)).to.deep.eq([b.hashcode()]) - }) - - it("closest to oldest", async () => { - - // two exactly the same, but one is older - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 1n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n })) - - expect(await getSamples(scaleToU32((0.78 + rotation) % 1), peers, 1, 0)).to.deep.eq([b.hashcode()]) - }) - - it("closest to hash", async () => { - - // two exactly the same, but one is older - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.367 + rotation) % 1), timestamp: 0n })) - - expect(a.hashcode() < b.hashcode()).to.be.true - expect(await getSamples(scaleToU32((0.78 + rotation) % 1), peers, 1, 0)).to.deep.eq([a.hashcode()]) - }) - - it("interescting", async () => { - - // two exactly the same, but one is older - await create( - new ReplicationRangeIndexable({ normalized: false, publicKey: a, length: HALF_MAX_U32, offset: scaleToU32((0 + rotation) % 1), timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: false, publicKey: b, length: 1, offset: scaleToU32((0.5 + rotation) % 1), timestamp: 0n })) - - const samples1 = await getSamplesMap(getEvenlySpacedU32(scaleToU32((0.25 + rotation) % 1), 1), peers, 0) - expect([...samples1.values()].filter(x => x.intersecting).length).to.eq(1) - expect(samples1.size).to.eq(1) - - const samples2 = await getSamplesMap(getEvenlySpacedU32(scaleToU32((0.75 + rotation) % 1), 2), peers, 0) - expect([...samples2.values()].filter(x => x.intersecting).length).to.eq(1) - expect(samples2.size).to.eq(2) - - }) + const prev = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + const updated = createReplicationRangeFromNormalized({ + id: prev.id, + publicKey: a, + offset: rotate(0.4), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( + toRebalance( + [ + { + prev, + range: updated, + type: "updated", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + }); - // TODO add breakeven test to make sure it is sorted by hash - - }) - - }) - - - - it("factor 0 ", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0, offset: (0.367) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 1, offset: (0.567) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: (0.847) % 1, timestamp: 0n }) - ); - expect(await 
getSamples(scaleToU32(0.3701), peers, 2, 0)).to.have.members([b, c].map(x => x.hashcode())) - }) - - - it("factor 0 with 3 peers factor 1", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 1, offset: 0.145, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0, offset: 0.367, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 1, offset: 0.8473, timestamp: 0n }) - ); - expect(await getSamples(scaleToU32(0.937), peers, 2, 0)).to.have.members([a, c].map(x => x.hashcode())) - }) - - it("factor 0 with 3 peers short", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2, offset: 0.145, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0, offset: 0.367, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.2, offset: 0.8473, timestamp: 0n }) - ); - expect(await getSamples(scaleToU32(0.937), peers, 2, 0)).to.have.members([a, c].map(x => x.hashcode())) - }) - - rotations.forEach((rotation) => { - - it("evenly distributed: " + rotation, async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2, offset: (0.2333 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.2, offset: (0.56666 + rotation) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.2, offset: (0.9 + rotation) % 1, timestamp: 0n }) - ); - - - let ac = 0, bc = 0, cc = 0; - let count = 1000; - for (let i = 0; i < count; i++) { - const leaders = await getSamplesMap([scaleToU32(i / count)], peers, 0) - if (leaders.has(a.hashcode())) { ac++; } - if (leaders.has(b.hashcode())) { bc++; } - if (leaders.has(c.hashcode())) { cc++; } - } - - // check ac, bc and cc are all close to 1/3 - expect(ac / count).to.be.closeTo(1 / 3, 0.1) - expect(bc / count).to.be.closeTo(1 / 3, 0.1) - expect(cc / count).to.be.closeTo(1 / 3, 0.1) - }) - }) - - describe('maturity', () => { - it("starting at unmatured", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.333, offset: (0.333) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.333, offset: (0.666) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.3333, offset: (0.999) % 1, timestamp: 0n }), - ); - expect(await getSamples(scaleToU32(0.7), peers, 2, 1e5)).to.have.members([a, b, c].map(x => x.hashcode())) - }) - - it("starting at matured", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.333, offset: (0.333) % 1, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.333, offset: (0.666) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.3333, offset: (0.999) % 1, timestamp: 0n }) - ); - // the offset jump will be 0.5 (a) and 0.5 + 0.5 = 1 which will intersect (c) - expect(await getSamples(scaleToU32(0.5), peers, 2, 1e5)).to.have.members([a, c].map(x => x.hashcode())) - }) - - it("starting at matured-2", async () => { - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.333, offset: (0.333) % 1, timestamp: 0n }), - new 
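/*
	The evenly-distributed check above is a sampling pattern worth noting:
	probe leader selection at N evenly spaced points and assert that each
	replicator's share of wins is close to its share of the total replicated
	width. A generic sketch (assumption: `sampleLeaders` is a hypothetical
	stand-in for `getSamplesMap([point], peers, 0)`):

	const shareOf = async (
		hash: string,
		sampleLeaders: (point: number) => Promise<Set<string>>,
		count = 1000,
	) => {
		let hits = 0;
		for (let i = 0; i < count; i++) {
			if ((await sampleLeaders(i / count)).has(hash)) hits++;
		}
		return hits / count; // expected ≈ own factor / summed factors
	};
*/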
ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.333, offset: (0.666) % 1, timestamp: BigInt(+new Date) }), - new ReplicationRangeIndexable({ normalized: true, publicKey: c, length: 0.3333, offset: (0.999) % 1, timestamp: 0n }) - ); - // the offset jump will be 0.2 (a) and 0.2 + 0.5 = 0.7 which will intersect (b) (unmatured) - expect(await getSamples(0, peers, 2, 1e5)).to.have.members([a, c].map(x => x.hashcode())) - }) - }) - - - describe('strict', async () => { - - rotations.forEach((rotation) => { - - it("only includes strict segments when intersecting: " + rotation, async () => { - - const offsetNonStrict = (0 + rotation) % 1 - await create( - new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.2, offset: offsetNonStrict, timestamp: 0n }), - new ReplicationRangeIndexable({ normalized: true, publicKey: b, length: 0.2, offset: (0.3 + rotation) % 1, timestamp: 0n, mode: ReplicationIntent.Strict }), - ); - - const leaders = await getSamples(scaleToU32(offsetNonStrict + 0.001), peers, 2, 0) - expect(leaders).to.have.members([a].map(x => x.hashcode())) - }) - }) - - - }) - }) - - describe("getDistance", () => { - - describe('above', () => { - it("immediate", () => { - expect(getDistance(0.5, 0.4, 'above', 1)).to.be.closeTo(0.1, 0.0001) - }) - - it('wrap', () => { - expect(getDistance(0.1, 0.9, 'above', 1)).to.be.closeTo(0.2, 0.0001) - }) - }) - - describe('below', () => { - - it("immediate", () => { - expect(getDistance(0.5, 0.6, 'below', 1)).to.be.closeTo(0.1, 0.0001) - }) - - it('wrap', () => { - expect(getDistance(0.9, 0.1, 'below', 1)).to.be.closeTo(0.2, 0.0001) - }) - - }) - - describe('closest', () => { - it('immediate', () => { - expect(getDistance(0.5, 0.6, 'closest', 1)).to.be.closeTo(0.1, 0.0001) - }) - - it('wrap', () => { - expect(getDistance(0.9, 0.1, 'closest', 1)).to.be.closeTo(0.2, 0.0001) - }) - - it('wrap 2', () => { - expect(getDistance(0.1, 0.9, 'closest', 1)).to.be.closeTo(0.2, 0.0001) - }) - }) - }) - - describe("hasOneOverlapping", () => { - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] - rotations.forEach((rotation) => { - describe('rotation: ' + String(rotation), () => { - - it('includes all', async () => { - const cmp = new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.5, offset: (0 + rotation) % 1, timestamp: 0n }) - await create(cmp); - - const inside = new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.4, offset: (0.05 + rotation) % 1, timestamp: 0n }); - expect(await hasCoveringRange(peers, inside)).to.be.true + it("not enoughly replicated after removed", async () => { + await create( + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0.3)), + assignedToRangeBoundary: false, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); - const outside1 = new ReplicationRangeIndexable({ normalized: true, publicKey: a, length: 0.4, offset: (0.2 + rotation) % 1, timestamp: 0n }); - expect(await hasCoveringRange(peers, outside1)).to.be.false + const updated = createReplicationRangeFromNormalized({ + publicKey: a, + offset: rotate(0.2), + length: 0.2, + }); + + const result = await consumeAllFromAsyncIterator( 
+ toRebalance( + [ + { + range: updated, + type: "removed", + }, + ], + index, + ), + ); + expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + }); - const outside2 = new ReplicationRangeIndexable({ - normalized: true, publicKey: a, length: 0.51, offset: (0.1 + rotation) % 1, timestamp: 0n + it("boundary assigned are always included", async () => { + await create( + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: false, + hash: "a", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "a", + next: [], + type: 0, + data: undefined, + }), + }), + creatEntryReplicated({ + coordinate: denormalizeFn(rotate(0)), + assignedToRangeBoundary: true, + hash: "b", + meta: new Meta({ + clock: new LamportClock({ id: randomBytes(32) }), + gid: "b", + next: [], + type: 0, + data: undefined, + }), + }), + ); + const result = await consumeAllFromAsyncIterator( + toRebalance([], index), + ); + expect(result.map((x) => x.gid)).to.deep.eq(["b"]); + }); }); - expect(await hasCoveringRange(peers, outside2)).to.be.false - - }) - }) - }) - - }) - - - /* describe("removeRange", () => { - - - it('remove outside', () => { - const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) - const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 0, length: 1, timestamp: 0n }) - const result = from.removeRange(toRemove) - expect(result).to.equal(from) - - }) - - it('remove all', () => { - const from = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) - const toRemove = new ReplicationRangeIndexable({ normalized: false, publicKey: a, offset: 1, length: 1, timestamp: 0n }) - const result = from.removeRange(toRemove) - expect(result).to.have.length(0) - }) - - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1] - rotations.forEach((rotation) => { - describe('rotation: ' + String(rotation), () => { - - it('removes end', () => { - const from = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation, length: 0.3, timestamp: 0n }) - const toRemove = new ReplicationRangeIndexable({ normalized: true, publicKey: a, offset: rotation + 0.2, length: 0.2, timestamp: 0n }) - const result = from.removeRange(toRemove) - expect(result).to.have.length(2) - const arr = result as ReplicationRangeIndexable[] - expect(arr[0].start1).to.equal(from.start1) - expect(arr[0].end1).to.equal(toRemove.start1) - expect(arr[1].start2).to.equal(toRemove.start2) - expect(arr[1].end2).to.equal(toRemove.end2) - }) - }) - }) - - }) */ -}) -describe("entry replicated", () => { - let index: Index; - - let create = async (...rects: EntryReplicated[]) => { - const indices = await createIndices(); - await indices.start(); - index = await indices.init({ schema: EntryReplicated }); - for (const rect of rects) { - await index.put(rect); - } - }; - let a: Ed25519PublicKey; - - beforeEach(async () => { - a = (await Ed25519Keypair.create()).publicKey; - index = undefined!; - }); - - describe("toRebalance", () => { - const rotations = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]; - - const consumeAllFromAsyncIterator = async ( - iter: AsyncIterable<{ gid: string; entries: EntryReplicated[] }>, - ) => { - const result = []; - for await (const entry of iter) { - result.push(entry); - } - return result; - }; - - rotations.forEach((rotation) => { - const rotate = (from: number) => (from + rotation) % 1; - 
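/*
	Taken together, the `toRebalance` tests pin down which entries must be
	revisited after a change set: entries under the previous extent of an
	updated range, entries under its new extent, entries under a removed
	range, and, regardless of the change set, entries flagged
	`assignedToRangeBoundary`. A hypothetical, synchronous restatement of
	that contract (the real `toRebalance` is async and index-backed):

	type Span = { contains(coordinate: number): boolean };
	type Change =
		| { type: "updated"; prev: Span; range: Span }
		| { type: "removed"; range: Span };

	const needsRebalance = (
		entry: { coordinate: number; assignedToRangeBoundary: boolean },
		changes: Change[],
	) =>
		entry.assignedToRangeBoundary ||
		changes.some(
			(change) =>
				change.range.contains(entry.coordinate) ||
				(change.type === "updated" && change.prev.contains(entry.coordinate)),
		);
*/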
describe("rotation: " + String(rotation), () => { - it("empty change set", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); - - const result = await consumeAllFromAsyncIterator( - toRebalance([], index), - ); - expect(result).to.have.length(0); - }); - - describe("update", () => { - it("matches prev", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); - - const prev = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, - }); - const updated = new ReplicationRangeIndexable({ - id: prev.id, - normalized: true, - publicKey: a, - offset: rotate(0.5), - length: 0.2, - }); - - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - prev, - range: updated, - type: "updated", - }, - ], - index, - ), - ); - expect(result.map((x) => x.gid)).to.deep.equal(["b"]); - }); - - it("matches next", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); - - const prev = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.5), - length: 0.2, - }); - const updated = new ReplicationRangeIndexable({ - id: prev.id, - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, - }); - - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - prev, - range: updated, - type: "updated", - }, - ], - index, - ), - ); - expect(result.map((x) => x.gid)).to.deep.equal(["b"]); - }); - }); - - it("not enoughly replicated after change", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); - - const prev = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, - }); - const 
updated = new ReplicationRangeIndexable({ - id: prev.id, - normalized: true, - publicKey: a, - offset: rotate(0.4), - length: 0.2, - }); - - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - prev, - range: updated, - type: "updated", - }, - ], - index, - ), - ); - expect(result.map((x) => x.gid)).to.deep.eq(["b"]); - }); - - it("not enoughly replicated after removed", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0.3)), - assignedToRangeBoundary: false, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); - - const updated = new ReplicationRangeIndexable({ - normalized: true, - publicKey: a, - offset: rotate(0.2), - length: 0.2, - }); - - const result = await consumeAllFromAsyncIterator( - toRebalance( - [ - { - range: updated, - type: "removed", - }, - ], - index, - ), - ); - expect(result.map((x) => x.gid)).to.deep.eq(["b"]); - }); - - it("boundary assigned are always included", async () => { - await create( - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: false, - hash: "a", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "a", - next: [], - type: 0, - data: undefined, - }), - }), - new EntryReplicated({ - coordinate: scaleToU32(rotate(0)), - assignedToRangeBoundary: true, - hash: "b", - meta: new Meta({ - clock: new LamportClock({ id: randomBytes(32) }), - gid: "b", - next: [], - type: 0, - data: undefined, - }), - }), - ); - const result = await consumeAllFromAsyncIterator( - toRebalance([], index), - ); - expect(result.map((x) => x.gid)).to.deep.eq(["b"]); - }); - }); - }); - }); + }); + }); + }); + }); }); diff --git a/packages/programs/data/shared-log/test/replicate.spec.ts b/packages/programs/data/shared-log/test/replicate.spec.ts index daab8a1cb..3aa5d2bd6 100644 --- a/packages/programs/data/shared-log/test/replicate.spec.ts +++ b/packages/programs/data/shared-log/test/replicate.spec.ts @@ -6,17 +6,17 @@ import { delay, waitFor, waitForResolved } from "@peerbit/time"; import { expect } from "chai"; import path from "path"; import { v4 as uuid } from "uuid"; -import type { SharedLog } from "../src/index.js"; -import { - ReplicationIntent, - type ReplicationRangeIndexable, - isMatured, -} from "../src/ranges.js"; +import type { + ReplicationDomainHash, + ReplicationRangeIndexable, + SharedLog, +} from "../src/index.js"; +import { denormalizer } from "../src/integers.js"; +import { ReplicationIntent, isMatured } from "../src/ranges.js"; import { createReplicationDomainHash } from "../src/replication-domain-hash.js"; -import { scaleToU32 } from "../src/role.js"; import { EventStore } from "./utils/stores/event-store.js"; -const checkRoleIsDynamic = async (log: SharedLog) => { +const checkRoleIsDynamic = async (log: SharedLog) => { const roles: any[] = []; log.events.addEventListener("replication:change", (change) => { if (change.detail.publicKey.equals(log.node.identity.publicKey)) { @@ -28,9 +28,12 @@ const checkRoleIsDynamic = async (log: SharedLog) => { await waitForResolved(() => expect(roles.length).greaterThan(3)); }; +const scaleToU32 = denormalizer("u32"); + describe(`replicate`, () => { let session: 
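/*
	`scaleToU32` is now derived from the shared `denormalizer` factory in
	src/integers.ts instead of being imported from role.ts. From its usage in
	this suite, a denormalizer maps a normalized fraction in [0, 1] onto the
	integer coordinate space; a plausible shape (an assumption — the actual
	implementation lives in ../src/integers.js):

	const MAX_U32 = 0xffffffff;
	const scaleToU32 = (fraction: number): number =>
		Math.round(fraction * MAX_U32);

	scaleToU32(0); // 0
	scaleToU32(0.5); // ≈ 2147483648
*/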
TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore>, + db2: EventStore>; before(async () => { session = await TestSession.disconnected(3, [ @@ -86,7 +89,7 @@ describe(`replicate`, () => { await session.stop(); }); - beforeEach(async () => {}); + beforeEach(async () => { }); afterEach(async () => { if (db1?.closed === false) { @@ -98,11 +101,11 @@ describe(`replicate`, () => { }); it("none", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1 } }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -124,7 +127,7 @@ describe(`replicate`, () => { describe("observer", () => { it("can update", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); expect( (db1.log.node.services.pubsub as any)["subscriptions"].get( @@ -145,9 +148,9 @@ describe(`replicate`, () => { }); it("observer", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -170,7 +173,7 @@ describe(`replicate`, () => { describe("replictor", () => { it("fixed-object", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.7, @@ -192,7 +195,7 @@ describe(`replicate`, () => { }); it("fixed-simple", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: 1, }, @@ -204,7 +207,7 @@ describe(`replicate`, () => { }); it("can unreplicate", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: 1, }, @@ -233,7 +236,7 @@ describe(`replicate`, () => { }); it("adding segments", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -277,19 +280,19 @@ describe(`replicate`, () => { }); it("dynamic by default", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); await checkRoleIsDynamic(db1.log); }); it("update to dynamic role", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); await db1.log.replicate(false); await db1.log.replicate({ limits: {} }); await checkRoleIsDynamic(db1.log); }); it("waitForReplicator waits until maturity", async () => { - const store = new EventStore(); + const store = new EventStore(); const db1 = await session.peers[0].open(store.clone(), { args: { @@ -313,7 +316,7 @@ describe(`replicate`, () => { }); describe("getDefaultMinRoleAge", () => { it("if not replicating, min role age is 0", async () => { - const store = new EventStore(); + const store = new EventStore(); await session.peers[0].open(store.clone(), { args: { @@ -334,7 +337,7 @@ describe(`replicate`, () => { }); it("oldest is always mature", async () => { - const store = new EventStore(); + const store = new EventStore(); const db1 = await session.peers[0].open(store.clone(), { args: { @@ -407,7 +410,7 @@ describe(`replicate`, () => { describe("mode", () => { it("strict", async () => { - db1 = await 
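/*
	Several tests here ("waitForReplicator waits until maturity", "oldest is
	always mature", and the `roleAge` arguments elsewhere) revolve around
	`isMatured`, imported from ../src/ranges.js above. The predicate they
	imply is a plain age check; a sketch, assuming segment timestamps are
	epoch milliseconds as the `BigInt(+new Date)` usage in these files
	suggests (signature assumed, not the source implementation):

	const isMaturedSketch = (
		segment: { timestamp: bigint },
		now: number,
		minRoleAge: number,
	): boolean => now - Number(segment.timestamp) >= minRoleAge;
*/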
session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { normalized: false, @@ -443,9 +446,9 @@ describe(`replicate`, () => { describe("entry", () => { it("entry", async () => { - const store = new EventStore(); + const store = new EventStore(); - let domain = createReplicationDomainHash(); + let domain = createReplicationDomainHash("u32"); const db1 = await session.peers[0].open(store.clone(), { args: { @@ -462,7 +465,7 @@ describe(`replicate`, () => { }); const checkReplication = async ( - db: EventStore, + db: EventStore, entry: Entry, ) => { const offset = await domain.fromEntry(added.entry); @@ -475,7 +478,7 @@ describe(`replicate`, () => { expect(range.factor).to.equal(1); // mininum unit of length }; - const checkUnreplication = async (db: EventStore) => { + const checkUnreplication = async (db: EventStore) => { const ranges = await db.log.replicationIndex.iterate().all(); expect(ranges).to.have.length(0); }; @@ -492,9 +495,9 @@ describe(`replicate`, () => { }); it("entry with range", async () => { - const store = new EventStore(); + const store = new EventStore(); - let domain = createReplicationDomainHash(); + let domain = createReplicationDomainHash("u32"); let startFactor = 500000; let startOffset = 0; @@ -517,7 +520,7 @@ describe(`replicate`, () => { }); const checkReplication = async ( - db: EventStore, + db: EventStore, entry: Entry, ) => { const offset = await domain.fromEntry(added.entry); @@ -536,7 +539,7 @@ describe(`replicate`, () => { expect(rangeEntry.factor).to.equal(1); // mininum unit of length }; - const checkUnreplication = async (db: EventStore) => { + const checkUnreplication = async (db: EventStore) => { const ranges = await db.log.replicationIndex .iterate({ sort: new Sort({ key: ["start1"] }) }) .all(); @@ -623,7 +626,7 @@ describe(`replicate`, () => { }); it("restart after adding", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.3, @@ -634,7 +637,7 @@ describe(`replicate`, () => { await db1.log.replicate({ factor: 0.2, offset: 0.6 }); - const checkSegments = async (db: EventStore) => { + const checkSegments = async (db: EventStore) => { const segments = await db.log.replicationIndex .iterate({ sort: [new Sort({ key: "start1" })] }) .all(); @@ -683,7 +686,7 @@ describe(`replicate`, () => { }); it("restart another settings", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.3, @@ -708,7 +711,7 @@ describe(`replicate`, () => { it("will re-check replication segments on restart", async () => { // make sure non-reachable peers are not included in the replication segments - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.3, @@ -773,7 +776,7 @@ describe(`replicate`, () => { it("segments updated while offline", async () => { // make sure non-reachable peers are not included in the replication segments - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0.1, @@ -837,7 +840,7 @@ describe(`replicate`, () => { await waitForResolved(async () => { const checkSegments = ( - segments: IndexedResults, + segments: IndexedResults>, ) => { expect(segments).to.have.length(2); @@ -870,8 +873,8 @@ describe(`replicate`, () => { id: 
"encryption key", group: topic, }); - db2 = await client2.open>( - await EventStore.load>( + db2 = await client2.open>( + await EventStore.load>( client2.libp2p.services.blocks, db1.address! ), diff --git a/packages/programs/data/shared-log/test/replication.spec.ts b/packages/programs/data/shared-log/test/replication.spec.ts index eba86418a..d3f6cfd8a 100644 --- a/packages/programs/data/shared-log/test/replication.spec.ts +++ b/packages/programs/data/shared-log/test/replication.spec.ts @@ -25,7 +25,9 @@ import sinon from "sinon"; import { BlocksMessage } from "../src/blocks.js"; import { ExchangeHeadsMessage, RequestIPrune } from "../src/exchange-heads.js"; import type { ReplicationOptions } from "../src/index.js"; -import type { ReplicationRangeIndexable } from "../src/ranges.js"; +import type { + ReplicationRangeIndexable +} from "../src/ranges.js"; import { AbsoluteReplicas, AddedReplicationSegmentMessage, @@ -44,7 +46,7 @@ import { EventStore, type Operation } from "./utils/stores/event-store.js"; describe(`replication`, function () { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; let fetchEvents: number; let fetchHashes: Set; let fromMultihash: any; @@ -76,6 +78,9 @@ describe(`replication`, function () { 148, 82, 66, 138, 199, 185, ]), ), + services: { + relay: null, // https://github.com/libp2p/js-libp2p/issues/2794 + } as any, }, }, { @@ -89,11 +94,14 @@ describe(`replication`, function () { 174, 212, 159, 187, 2, 137, 47, 192, ]), ), + services: { + relay: null, // https://github.com/libp2p/js-libp2p/issues/2794 + } as any, }, }, ]); - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -116,7 +124,7 @@ describe(`replication`, function () { it("verifies remote signatures by default", async () => { const entry = await db1.add("a", { meta: { next: [] } }); await (session.peers[0] as any)["libp2p"].hangUp(session.peers[1].peerId); - db2 = await session.peers[1].open(new EventStore()); + db2 = await session.peers[1].open(new EventStore()); const clonedEntry = deserialize(serialize(entry.entry), Entry); @@ -134,7 +142,7 @@ describe(`replication`, function () { it("does not verify owned signatures by default", async () => { const entry = await db1.add("a", { meta: { next: [] } }); await (session.peers[0] as any)["libp2p"].hangUp(session.peers[1].peerId); - db2 = await session.peers[1].open(new EventStore()); + db2 = await session.peers[1].open(new EventStore()); const clonedEntry = deserialize(serialize(entry.entry), Entry); @@ -153,8 +161,8 @@ describe(`replication`, function () { const entryCount = 33; const entryArr: number[] = []; - const db1 = await session.peers[0].open(new EventStore()); - const db3 = await session.peers[0].open(new EventStore()); + const db1 = await session.peers[0].open(new EventStore()); + const db3 = await session.peers[0].open(new EventStore()); // Create the entries in the first database for (let i = 0; i < entryCount; i++) { @@ -164,12 +172,12 @@ describe(`replication`, function () { await mapSeries(entryArr, (i) => db1.add("hello" + i)); // Open the second database - const db2 = (await EventStore.open>( + const db2 = (await EventStore.open>( db1.address!, session.peers[1], ))!; - const db4 = (await EventStore.open>( + const db4 = (await EventStore.open>( db3.address!, session.peers[1], ))!; @@ -194,7 +202,7 @@ describe(`replication`, function () { describe("references", () => { it("joins by references", 
async () => { db1.log.replicas = { min: new AbsoluteReplicas(1) }; - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -209,7 +217,9 @@ describe(`replication`, function () { await db1.log.replicate({ factor: 0.5 }); await db2.log.replicate({ factor: 0.5 }); - const getParticipationPerPer = (ranges: ReplicationRangeIndexable[]) => { + const getParticipationPerPer = ( + ranges: ReplicationRangeIndexable[], + ) => { let map = new Map(); for (const range of ranges) { map.set( @@ -303,7 +313,7 @@ describe(`replication`, function () { return result; }; - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -328,7 +338,7 @@ describe(`replication`, function () { */ it("fetches next blocks once", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min: 0, @@ -338,7 +348,7 @@ describe(`replication`, function () { }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -397,185 +407,224 @@ describe(`replication`, function () { }); describe("replication", () => { - it("replicates database of 1 entry", async () => { - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; - - await db1.waitFor(session.peers[1].peerId); - await db2.waitFor(session.peers[0].peerId); - - const value = "hello"; - - await db1.add(value); - await waitForResolved(() => expect(db2.log.log.length).equal(1)); - - expect((await db2.iterator({ limit: -1 })).collect().length).equal(1); - - const db1Entries: Entry>[] = ( - await db1.iterator({ limit: -1 }) - ).collect(); - expect(db1Entries.length).equal(1); + describe("one way", () => { + it("replicates database of 1 entry", async () => { + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + ))!; + + await db1.waitFor(session.peers[1].peerId); + await db2.waitFor(session.peers[0].peerId); + + const value = "hello"; + + await db1.add(value); + await waitForResolved(() => expect(db2.log.log.length).equal(1)); + + expect((await db2.iterator({ limit: -1 })).collect().length).equal(1); + + const db1Entries: Entry>[] = ( + await db1.iterator({ limit: -1 }) + ).collect(); + expect(db1Entries.length).equal(1); + + await waitForResolved(async () => + expect([ + ...( + await db1.log.findLeaders( + { + entry: db1Entries[0], + replicas: maxReplicas(db1.log, db1Entries), + }, + // 0 + ) + ).keys(), + ]).to.have.members( + [session.peers[0].peerId, session.peers[1].peerId].map((p) => + getPublicKeyFromPeerId(p).hashcode(), + ), + ), + ); - await waitForResolved(async () => + expect(db1Entries[0].payload.getValue().value).equal(value); + const db2Entries: Entry>[] = ( + await db2.iterator({ limit: -1 }) + ).collect(); + expect(db2Entries.length).equal(1); expect([ ...( - await db1.log.findLeaders( + await db2.log.findLeaders( { - entry: db1Entries[0], - replicas: maxReplicas(db1.log, db1Entries), + entry: db2Entries[0], + replicas: maxReplicas(db2.log, db2Entries), }, // 0 ) ).keys(), - ]).to.have.members( + ]).include.members( [session.peers[0].peerId, session.peers[1].peerId].map((p) => getPublicKeyFromPeerId(p).hashcode(), ), - ), - ); - - expect(db1Entries[0].payload.getValue().value).equal(value); - const db2Entries: Entry>[] = ( - await db2.iterator({ limit: -1 }) - ).collect(); - expect(db2Entries.length).equal(1); - expect([ - ...( - await db2.log.findLeaders( - { - entry: db2Entries[0], - replicas: 
maxReplicas(db2.log, db2Entries), - }, - // 0 - ) - ).keys(), - ]).include.members( - [session.peers[0].peerId, session.peers[1].peerId].map((p) => - getPublicKeyFromPeerId(p).hashcode(), - ), - ); - expect(db2Entries[0].payload.getValue().value).equal(value); - }); + ); + expect(db2Entries[0].payload.getValue().value).equal(value); + }); - it("replicates database of 1000 entries", async () => { - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, + it("replicates database of 1000 entries", async () => { + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, }, }, - }, - ))!; + ))!; - await db1.waitFor(session.peers[1].peerId); - await db2.waitFor(session.peers[0].peerId); + await db1.waitFor(session.peers[1].peerId); + await db2.waitFor(session.peers[0].peerId); - const entryCount = 1e3; - for (let i = 0; i < entryCount; i++) { - // entryArr.push(i); - await db1.add("hello" + i); - } + const entryCount = 1e3; + for (let i = 0; i < entryCount; i++) { + // entryArr.push(i); + await db1.add("hello" + i); + } - await waitForResolved(() => expect(db2.log.log.length).equal(entryCount)); + await waitForResolved(() => + expect(db2.log.log.length).equal(entryCount), + ); - const entries = (await db2.iterator({ limit: -1 })).collect(); - expect(entries.length).equal(entryCount); - for (let i = 0; i < entryCount; i++) { - try { - expect(entries[i].payload.getValue().value).equal("hello" + i); - } catch (error) { - console.error( - "Entries out of order: " + + const entries = (await db2.iterator({ limit: -1 })).collect(); + expect(entries.length).equal(entryCount); + for (let i = 0; i < entryCount; i++) { + try { + expect(entries[i].payload.getValue().value).equal("hello" + i); + } catch (error) { + console.error( + "Entries out of order: " + entries.map((x) => x.payload.getValue().value).join(", "), - ); - throw error; + ); + throw error; + } } - } - }); + }); - it("replicates database of large entries", async () => { - let count = 10; - for (let i = 0; i < count; i++) { - const value = toBase64(randomBytes(4e6)); - await db1.add(value, { meta: { next: [] } }); // force unique heads - } - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - { - args: { - replicate: { - factor: 1, + it("replicates database of large entries", async () => { + let count = 10; + for (let i = 0; i < count; i++) { + const value = toBase64(randomBytes(4e6)); + await db1.add(value, { meta: { next: [] } }); // force unique heads + } + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 1, + }, }, }, - }, - ))!; + ))!; - await waitForResolved(() => expect(db2.log.log.length).equal(count)); - }); - - it("replicates 1 entry with cut next", async () => { - const first = await db1.add("old"); - const second = await db1.add("new", { - meta: { type: EntryType.CUT, next: [first.entry] }, + await waitForResolved(() => expect(db2.log.log.length).equal(count)); }); - expect( - (await db1.iterator({ limit: -1 })).collect().map((x) => x.hash), - ).to.deep.equal([second.entry.hash]); - expect(db1.log.log.length).equal(1); - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; - - await waitForResolved(async () => { + it("replicates 1 entry with cut next", async () => { + const first = await db1.add("old"); + const second = await db1.add("new", { + meta: { type: EntryType.CUT, next: [first.entry] }, + }); expect( - (await 
db2.iterator({ limit: -1 })).collect().map((x) => x.hash), + (await db1.iterator({ limit: -1 })).collect().map((x) => x.hash), ).to.deep.equal([second.entry.hash]); + expect(db1.log.log.length).equal(1); + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + ))!; + + await waitForResolved(async () => { + expect( + (await db2.iterator({ limit: -1 })).collect().map((x) => x.hash), + ).to.deep.equal([second.entry.hash]); + }); + }); + + it("it does not fetch missing entries from remotes when exchanging heads to remote", async () => { + const first = await db1.add("a", { meta: { next: [] } }); + const second = await db1.add("b", { meta: { next: [] } }); + await db1.log.log.entryIndex.delete(second.entry.hash); + + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + ))!; + + let remoteFetchOptions: any[] = []; + const db1LogGet = db1.log.log.get.bind(db1.log.log); + + db1.log.log.get = async (hash, options) => { + if (hash === second.entry.hash) { + remoteFetchOptions.push(options?.remote); + return undefined; + } + return db1LogGet(hash, options); + }; + + await waitForResolved(async () => { + expect( + (await db2.iterator({ limit: -1 })).collect().map((x) => x.hash), + ).to.deep.equal([first.entry.hash]); + }); + await waitForResolved(() => + expect(remoteFetchOptions).to.have.length(1), + ); + expect(remoteFetchOptions[0]).to.be.undefined; }); }); + describe("two way", () => { + it("partially synced", async () => { + await db1.add("a", { meta: { next: [] } }); - it("it does not fetch missing entries from remotes when exchanging heads to remote", async () => { - const first = await db1.add("a", { meta: { next: [] } }); - const second = await db1.add("b", { meta: { next: [] } }); - await db1.log.log.entryIndex.delete(second.entry.hash); + db2 = (await EventStore.open>( + db1.address!, + session.peers[1], + ))!; - db2 = (await EventStore.open>( - db1.address!, - session.peers[1], - ))!; + await db2.add("b", { meta: { next: [] } }); - let remoteFetchOptions: any[] = []; - const db1LogGet = db1.log.log.get.bind(db1.log.log); + await waitForResolved(() => expect(db1.log.log.length).equal(2)); + await waitForResolved(() => expect(db2.log.log.length).equal(2)); - db1.log.log.get = async (hash, options) => { - if (hash === second.entry.hash) { - remoteFetchOptions.push(options?.remote); - return undefined; - } - return db1LogGet(hash, options); - }; + await db1.node.hangUp(db2.node.identity.publicKey); + await waitForResolved(async () => + expect((await db1.log.getReplicators()).size).equal(1), + ); + await waitForResolved(async () => + expect((await db2.log.getReplicators()).size).equal(1), + ); - await waitForResolved(async () => { - expect( - (await db2.iterator({ limit: -1 })).collect().map((x) => x.hash), - ).to.deep.equal([first.entry.hash]); + await db1.add("c", { meta: { next: [] } }); + await db2.add("d", { meta: { next: [] } }); + + await db1.node.dial(db2.node.getMultiaddrs()); + + await waitForResolved(() => expect(db1.log.log.length).equal(4)); + await waitForResolved(() => expect(db2.log.log.length).equal(4)); }); - await waitForResolved(() => expect(remoteFetchOptions).to.have.length(1)); - expect(remoteFetchOptions[0]).to.be.undefined; }); }); }); describe("redundancy", () => { let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; + let db1: EventStore, + db2: EventStore, + db3: EventStore; let fetchEvents: number; let fetchHashes: Set; @@ -606,7 +655,7 @@ describe("redundancy", () => { }); it("only sends entries once, 2 
peers dynamic", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); await db1.log.replicate(); let count = 100; for (let i = 0; i < count; i++) { @@ -647,7 +696,7 @@ describe("redundancy", () => { }); it("only sends entries once, 2 peers fixed", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); db1.log.replicate({ factor: 1 }); let count = 1000; for (let i = 0; i < count; i++) { @@ -655,7 +704,7 @@ describe("redundancy", () => { } const message1 = collectMessages(db1.log); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -678,7 +727,7 @@ describe("redundancy", () => { }); it("only sends entries once, 2 peers fixed, write after open", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1 }, }, @@ -686,7 +735,7 @@ describe("redundancy", () => { let count = 1; const message1 = collectMessages(db1.log); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -720,7 +769,7 @@ describe("redundancy", () => { }); it("only sends entries once, 3 peers", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -729,7 +778,7 @@ describe("redundancy", () => { }); const message1 = collectMessages(db1.log); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -749,7 +798,7 @@ describe("redundancy", () => { } await waitForResolved(() => expect(db2.log.log.length).equal(count)); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -780,9 +829,9 @@ describe("redundancy", () => { }); it("no fetches needed when replicating live ", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], ))!; @@ -814,7 +863,7 @@ describe("redundancy", () => { expect(fetchEvents).equal(0); // becausel all entries were sent }); it("fetches only once after open", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); const entryCount = 15; @@ -830,7 +879,7 @@ describe("redundancy", () => { await mapSeries(adds, add); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], ))!; @@ -860,7 +909,7 @@ describe(`start/stop`, function () { it("replicate on connect", async () => { const entryCount = 1000; const entryArr: number[] = []; - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -876,7 +925,7 @@ describe(`start/stop`, function () { await mapSeries(entryArr, (i) => db1.add("hello" + i)); // Open the second database - const db2 = (await EventStore.open>( + const db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -900,7 +949,7 @@ describe(`start/stop`, function () { }); it("can restart replicate", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -910,7 +959,7 @@ 
describe(`start/stop`, function () { await db1.add("hello"); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -926,7 +975,7 @@ describe(`start/stop`, function () { await db2.close(); await db1.add("world"); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -943,7 +992,9 @@ describe(`start/stop`, function () { describe("canReplicate", () => { let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; + let db1: EventStore, + db2: EventStore, + db3: EventStore; const init = async ( canReplicate: (publicKey: PublicSignKey) => Promise | boolean, @@ -951,7 +1002,7 @@ describe("canReplicate", () => { ) => { let min = 100; let max = undefined; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min, @@ -961,7 +1012,7 @@ describe("canReplicate", () => { canReplicate, }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -976,7 +1027,7 @@ describe("canReplicate", () => { }, ))!; - db3 = (await EventStore.open>( + db3 = (await EventStore.open>( db1.address!, session.peers[2], { @@ -1084,14 +1135,16 @@ describe("canReplicate", () => { describe("replication degree", () => { let session: TestSession; - let db1: EventStore, db2: EventStore, db3: EventStore; + let db1: EventStore, + db2: EventStore, + db3: EventStore; const init = async (props: { min: number; max?: number; beforeOther?: () => Promise | void; }) => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: props, replicate: false, @@ -1100,7 +1153,7 @@ describe("replication degree", () => { }); await props.beforeOther?.(); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -1116,7 +1169,7 @@ describe("replication degree", () => { }, ))!; - db3 = (await EventStore.open>( + db3 = (await EventStore.open>( db1.address!, session.peers[2], { @@ -1197,7 +1250,7 @@ describe("replication degree", () => { it("will not prune below replication degree", async () => { let replicas = 2; - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: false, replicas: { @@ -1206,7 +1259,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -1286,7 +1339,7 @@ describe("replication degree", () => { }); it("will prune when join with partial coverage", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: false, replicas: { @@ -1296,7 +1349,7 @@ describe("replication degree", () => { }); await db1.add("hello"); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -1319,7 +1372,7 @@ describe("replication degree", () => { }); it("will prune when join with complete coverage", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: false, replicas: { @@ -1329,7 +1382,7 @@ describe("replication degree", () => { }); await db1.add("hello"); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, 
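/*
	The replication-degree suite reads an entry's effective degree back via
	`decodeReplicas(entry).getValue(log)` and opens logs with
	`replicas: { min, max }` bounds. The clamping rule the assertions appear
	to imply, sketched (an inference from the tests, not the source
	implementation):

	const effectiveReplicas = (
		requestedMin: number,
		logMin: number,
		logMax = Number.MAX_SAFE_INTEGER,
	) => Math.min(logMax, Math.max(requestedMin, logMin));
*/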
session.peers[1], { @@ -1352,7 +1405,7 @@ describe("replication degree", () => { }); it("will prune on insert after join 2 peers", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -1365,7 +1418,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -1437,7 +1490,7 @@ describe("replication degree", () => { let minReplicas = 2; let maxReplicas = 2; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min: minReplicas, @@ -1549,7 +1602,7 @@ describe("replication degree", () => { }, }); - const check = async (log: EventStore) => { + const check = async (log: EventStore) => { let replicated3Times = 0; for (const entry of await log.log.log.toArray()) { if (decodeReplicas(entry).getValue(db2.log) === 3) { @@ -1578,7 +1631,7 @@ describe("replication degree", () => { }); } - const check = async (log: EventStore) => { + const check = async (log: EventStore) => { let replicated3Times = 0; for (const entry of await log.log.log.toArray()) { if (decodeReplicas(entry).getValue(db2.log) === 3) { @@ -1611,7 +1664,7 @@ describe("replication degree", () => { // expect e1 to be replicated at db1 and/or 1 other peer (when you write you always store locally) // expect e2 to be replicated everywhere - const check = async (log: EventStore) => { + const check = async (log: EventStore) => { let replicated3Times = 0; let other = 0; for (const entry of await log.log.log.toArray()) { @@ -1629,7 +1682,7 @@ describe("replication degree", () => { }); it("will index replication underflow degree", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min: 4, @@ -1639,7 +1692,7 @@ describe("replication degree", () => { }, }); - db2 = await session.peers[1].open>(db1.address, { + db2 = await session.peers[1].open>(db1.address, { args: { replicas: { min: 4, @@ -1696,7 +1749,7 @@ describe("replication degree", () => { }); it("observer will not delete unless replicated", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min: 10, @@ -1704,7 +1757,7 @@ describe("replication degree", () => { replicate: false, }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -1730,7 +1783,7 @@ describe("replication degree", () => { }); it("replicator will not delete unless replicated", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min: 10, @@ -1740,7 +1793,7 @@ describe("replication degree", () => { }, }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -1771,7 +1824,7 @@ describe("replication degree", () => { // peer 1 observer // peer 2 observer - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min, @@ -1781,7 +1834,7 @@ describe("replication degree", () => { }, }); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -1887,7 +1940,7 @@ describe("replication degree", () => { // peer 1 observer // peer 2 observer - 
db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min, @@ -1898,7 +1951,7 @@ describe("replication degree", () => { }); let respondToIHaveTimeout = 3000; - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -1938,7 +1991,7 @@ describe("replication degree", () => { }); it("does not get blocked by slow sends", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -1946,7 +1999,7 @@ describe("replication degree", () => { }, }); - db2 = await session.peers[1].open>(db1.address, { + db2 = await session.peers[1].open>(db1.address, { args: { replicate: { factor: 1, @@ -1984,7 +2037,7 @@ describe("replication degree", () => { let t0 = +new Date(); db1Delay = 0; - db3 = await session.peers[2].open>(db1.address, { + db3 = await session.peers[2].open>(db1.address, { args: { replicate: { factor: 1, @@ -1998,7 +2051,7 @@ describe("replication degree", () => { }); it("restarting node will receive entries", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -2006,7 +2059,7 @@ describe("replication degree", () => { }, }); - db2 = await session.peers[1].open>(db1.address, { + db2 = await session.peers[1].open>(db1.address, { args: { replicate: { factor: 1, @@ -2018,7 +2071,7 @@ describe("replication degree", () => { await db2.drop(); await session.peers[1].stop(); await session.peers[1].start(); - db2 = await session.peers[1].open>(db1.address, { + db2 = await session.peers[1].open>(db1.address, { args: { replicate: { factor: 1, @@ -2029,7 +2082,7 @@ describe("replication degree", () => { }); it("can handle many large messages", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -2042,7 +2095,7 @@ describe("replication degree", () => { for (let i = 0; i < count; i++) { await db1.add(toBase64(randomBytes(6e6)), { meta: { next: [] } }); } - db2 = await session.peers[1].open>(db1.address, { + db2 = await session.peers[1].open>(db1.address, { args: { replicate: { factor: 1, @@ -2055,7 +2108,7 @@ describe("replication degree", () => { describe("update", () => { it("shift", async () => { const u32Div2 = 2147483647; - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2068,7 +2121,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2110,7 +2163,7 @@ describe("replication degree", () => { }); it("to same range", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2123,7 +2176,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2158,7 +2211,7 @@ describe("replication degree", () => { }); it("to smaller but already replicated", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2171,7 +2224,7 @@ 
describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2260,7 +2313,7 @@ describe("replication degree", () => { }); it("to smaller will need transfer", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2273,7 +2326,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2337,7 +2390,7 @@ describe("replication degree", () => { }); it("to smaller then to larger", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2350,7 +2403,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2397,7 +2450,7 @@ describe("replication degree", () => { }); it("replace range with another node write before join", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2415,7 +2468,7 @@ describe("replication degree", () => { await db1.add("hello" + i, { meta: { next: [] } }); } - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2432,7 +2485,7 @@ describe("replication degree", () => { }, ))!; - let db3 = (await EventStore.open>( + let db3 = (await EventStore.open>( db1.address!, session.peers[2], { @@ -2492,7 +2545,7 @@ describe("replication degree", () => { }); it("replace range with another node write after join", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2505,7 +2558,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2522,7 +2575,7 @@ describe("replication degree", () => { }, ))!; - let db3 = (await EventStore.open>( + let db3 = (await EventStore.open>( db1.address!, session.peers[2], { @@ -2591,7 +2644,7 @@ describe("replication degree", () => { }); it("distribute", async () => { const u32Div3 = Math.round(0xffffffff / 3); - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, @@ -2604,7 +2657,7 @@ describe("replication degree", () => { }, }); - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2621,7 +2674,7 @@ describe("replication degree", () => { }, ))!; - let db3 = (await EventStore.open>( + let db3 = (await EventStore.open>( db1.address!, session.peers[2], { @@ -2681,7 +2734,7 @@ describe("replication degree", () => { }); it("close", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 0.333, @@ -2690,7 +2743,7 @@ describe("replication degree", () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -2702,7 +2755,7 @@ describe("replication degree", () => { }, }, ); - db3 = await EventStore.open>( + db3 = await 
EventStore.open>( db1.address!, session.peers[2], { @@ -2748,7 +2801,7 @@ describe("replication degree", () => { }); it("a smaller replicator join leave joins", async () => { - const db1 = await session.peers[0].open(new EventStore(), { + const db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, // this replicator will get all entries @@ -2764,7 +2817,7 @@ describe("replication degree", () => { await db1.add("hello" + i, { meta: { next: [] } }); } - let db2 = (await EventStore.open>( + let db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2789,7 +2842,7 @@ describe("replication degree", () => { ); await db2.close(); - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { @@ -2842,7 +2895,7 @@ describe("replication degree", () => { describe("sync", () => { let session: TestSession; - let db1: EventStore, db2: EventStore; + let db1: EventStore, db2: EventStore; before(async () => { session = await TestSession.connected(2); @@ -2857,19 +2910,22 @@ describe("sync", () => { }); it("manually synced entries will not get pruned", async () => { - db1 = await session.peers[0].open>(new EventStore(), { - args: { - /* sync: () => true, */ - replicas: { - min: 1, - }, - replicate: { - factor: 1, + db1 = await session.peers[0].open>( + new EventStore(), + { + args: { + /* sync: () => true, */ + replicas: { + min: 1, + }, + replicate: { + factor: 1, + }, }, }, - })!; + )!; - db2 = (await EventStore.open>( + db2 = (await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/test/sharding.spec.ts b/packages/programs/data/shared-log/test/sharding.spec.ts index a660f7857..0e19e1588 100644 --- a/packages/programs/data/shared-log/test/sharding.spec.ts +++ b/packages/programs/data/shared-log/test/sharding.spec.ts @@ -11,10 +11,10 @@ import { EventStore } from "./utils/stores/event-store.js"; describe(`sharding`, () => { let session: TestSession; - let db1: EventStore, - db2: EventStore, - db3: EventStore, - db4: EventStore; + let db1: EventStore, + db2: EventStore, + db3: EventStore, + db4: EventStore; before(async () => { session = await TestSession.connected(4, [ @@ -82,7 +82,7 @@ describe(`sharding`, () => { db3?.drop(), db4?.drop(), ]); - } catch (error) {} + } catch (error) { } db1 = undefined as any; db2 = undefined as any; db3 = undefined as any; @@ -96,7 +96,7 @@ describe(`sharding`, () => { const sampleSize = 200; // must be < 255 it("will not have any prunable after balance", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -108,7 +108,7 @@ describe(`sharding`, () => { }); const entryCount = 200; - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -151,7 +151,7 @@ describe(`sharding`, () => { }); it("2 peers", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -161,7 +161,7 @@ describe(`sharding`, () => { /* timeUntilRoleMaturity: 0 */ }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -190,7 +190,7 @@ describe(`sharding`, () => { }); it("2 peers write while joining", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -199,7 +199,7 @@ describe(`sharding`, () => { }, }, }); - db2 = await EventStore.open>( + 
db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -232,7 +232,7 @@ describe(`sharding`, () => { }); it("3 peers", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store); @@ -248,11 +248,11 @@ describe(`sharding`, () => { await Promise.all(promises); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], ); @@ -271,7 +271,7 @@ describe(`sharding`, () => { }); it("3 peers prune all", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store, { args: { @@ -292,7 +292,7 @@ describe(`sharding`, () => { await Promise.all(promises); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -305,7 +305,7 @@ describe(`sharding`, () => { ); await delay(3e3); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -323,10 +323,10 @@ describe(`sharding`, () => { }); it("write while joining peers", async () => { - const store = new EventStore(); + const store = new EventStore(); db1 = await session.peers[0].open(store); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], ); @@ -342,7 +342,7 @@ describe(`sharding`, () => { ); } - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], ); @@ -352,9 +352,9 @@ describe(`sharding`, () => { // TODO add tests for late joining and leaving peers it("distributes to joining peers", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], ); @@ -375,22 +375,99 @@ describe(`sharding`, () => { await waitFor(() => db1.log.log.length === entryCount); await waitFor(() => db2.log.log.length === entryCount); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], ); - await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + try { + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + } catch (error) { + const [a, b] = await Promise.all([db1, db2].map(x => x.log.getPrunable())) + console.log(a.length, b.length) + console.log(await Promise.all([db1, db2, db3].map(x => x.log.calculateMyTotalParticipation()))) + console.log(db1.log.log.length, db2.log.log.length, db3.log.log.length) + throw error; + } + console.log("???", db1.log.log.length, db2.log.log.length, db3.log.log.length) + + }); + + + it("distributes to joining peers with fixed offsets", async () => { + db1 = await session.peers[0].open(new EventStore(), { + args: { + replicate: { + factor: 0.333, + offset: 0 + } + } + }); + + db2 = await EventStore.open>( + db1.address!, + session.peers[1], + { + args: { + replicate: { + factor: 0.333, + offset: 0.333 + } + } + } + ); + + await waitForResolved(async () => + expect(await db2.log.replicationIndex?.getSize()).equal(2), + ); + + const entryCount = sampleSize; + const promises: Promise[] = []; + for (let i = 0; i < entryCount; i++) { + promises.push( + db1.add(toBase64(new Uint8Array([i])), { + meta: { next: [] }, + }), + ); + } + await waitFor(() => db1.log.log.length === entryCount); + await waitFor(() => db2.log.log.length === entryCount); + + db3 = await EventStore.open>( + db1.address!, + session.peers[2], + { + args: {
+ replicate: { + factor: 0.333, + offset: 0.6666 + } + } + } + ); + + await delay(1e4) + try { + await checkBounded(entryCount, 0.5, 0.9, db1, db2, db3); + } catch (error) { + const [a, b] = await Promise.all([db1, db2].map(x => x.log.getPrunable())) + console.log(a.length, b.length) + console.log(await Promise.all([db1, db2, db3].map(x => x.log.calculateMyTotalParticipation()))) + console.log(db1.log.log.length, db2.log.log.length, db3.log.log.length) + throw error; + } + console.log("???", db1.log.log.length, db2.log.log.length, db3.log.log.length) + }); it("distributes to leaving peers", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], ); @@ -428,13 +505,13 @@ describe(`sharding`, () => { }); it("handles peer joining and leaving multiple times", async () => { - db1 = await session.peers[0].open(new EventStore()); + db1 = await session.peers[0].open(new EventStore()); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], ); @@ -493,7 +570,7 @@ describe(`sharding`, () => { it("drops when no longer replicating as observer", async () => { let COUNT = 10; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { factor: 1, @@ -501,7 +578,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -519,7 +596,7 @@ describe(`sharding`, () => { await waitForResolved(() => expect(db2.log.log.length).equal(COUNT)); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -540,7 +617,7 @@ describe(`sharding`, () => { it("drops when no longer replicating with factor 0", async () => { let COUNT = 100; - const evtStore = new EventStore(); + const evtStore = new EventStore(); const db1p = await session.peers[0].open(evtStore, { args: { replicate: { @@ -566,7 +643,7 @@ describe(`sharding`, () => { await waitForResolved(() => expect(db2.log.log.length).equal(COUNT)); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -586,7 +663,7 @@ describe(`sharding`, () => { describe("objectives", () => { describe("cpu", () => { it("no cpu usage allowed", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: true, replicas: { @@ -596,7 +673,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -622,12 +699,12 @@ describe(`sharding`, () => { await delay(3e3); await waitForResolved(async () => - expect(await db2.log.getMyTotalParticipation()).equal(0), + expect(await db2.log.calculateMyTotalParticipation()).equal(0), ); // because the CPU error from fixed usage (0.5) is always greater than max (0) }); it("below limit", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: true, replicas: { @@ -637,7 +714,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await 
EventStore.open>( db1.address!, session.peers[1], { @@ -661,19 +738,19 @@ describe(`sharding`, () => { ); await waitForConverged(async () => { - const diff = await db1.log.getMyTotalParticipation(); + const diff = await db1.log.calculateMyTotalParticipation(); return Math.round(diff * 100); }); await waitForConverged(async () => { - const diff = await db2.log.getMyTotalParticipation(); + const diff = await db2.log.calculateMyTotalParticipation(); return Math.round(diff * 100); }); - expect(await db1.log.getMyTotalParticipation()).to.be.within( + expect(await db1.log.calculateMyTotalParticipation()).to.be.within( 0.45, 0.55, ); // because the CPU error from fixed usage (0.5) is always greater than max (0) - expect(await db2.log.getMyTotalParticipation()).to.be.within( + expect(await db2.log.calculateMyTotalParticipation()).to.be.within( 0.45, 0.55, ); // because the CPU error from fixed usage (0.5) is always greater than max (0) @@ -681,7 +758,7 @@ describe(`sharding`, () => { }); describe("memory", () => { it("inserting half limited", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: true, replicas: { @@ -692,7 +769,7 @@ describe(`sharding`, () => { }); const memoryLimit = 100 * 1e3; - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -721,8 +798,8 @@ describe(`sharding`, () => { await waitForConverged(async () => { const diff = Math.abs( - (await db2.log.getMyTotalParticipation()) - - (await db1.log.getMyTotalParticipation()), + (await db2.log.calculateMyTotalParticipation()) - + (await db1.log.calculateMyTotalParticipation()), ); return Math.round(diff * 50); }); @@ -739,7 +816,7 @@ describe(`sharding`, () => { }); it("joining half limited", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicas: { min: new AbsoluteReplicas(1), @@ -749,7 +826,7 @@ describe(`sharding`, () => { }); const memoryLimit = 100 * 1e3; - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -776,8 +853,8 @@ describe(`sharding`, () => { try { await waitForConverged(async () => { const diff = Math.abs( - (await db2.log.getMyTotalParticipation()) - - (await db1.log.getMyTotalParticipation()), + (await db2.log.calculateMyTotalParticipation()) - + (await db1.log.calculateMyTotalParticipation()), ); return Math.round(diff * 100); @@ -802,7 +879,7 @@ describe(`sharding`, () => { it("underflow limited", async () => { const memoryLimit = 100 * 1e3; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { limits: { @@ -816,7 +893,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -842,11 +919,11 @@ describe(`sharding`, () => { await waitForResolved( async () => { - expect(await db1.log.getMyTotalParticipation()).to.be.within( + expect(await db1.log.calculateMyTotalParticipation()).to.be.within( 0.43, 0.57, ); - expect(await db2.log.getMyTotalParticipation()).to.be.within( + expect(await db2.log.calculateMyTotalParticipation()).to.be.within( 0.43, 0.57, ); @@ -864,7 +941,7 @@ describe(`sharding`, () => { it("overflow limited", async () => { const memoryLimit = 100 * 1e3; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { 
replicate: { limits: { @@ -878,7 +955,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -904,16 +981,16 @@ describe(`sharding`, () => { } await waitForConverged(async () => - Math.round((await db1.log.getMyTotalParticipation()) * 500), + Math.round((await db1.log.calculateMyTotalParticipation()) * 500), ); await waitForConverged(async () => - Math.round((await db2.log.getMyTotalParticipation()) * 500), + Math.round((await db2.log.calculateMyTotalParticipation()) * 500), ); - expect(await db1.log.getMyTotalParticipation()).to.be.within( + expect(await db1.log.calculateMyTotalParticipation()).to.be.within( 0.03, 0.1, ); - expect(await db1.log.getMyTotalParticipation()).to.be.within( + expect(await db1.log.calculateMyTotalParticipation()).to.be.within( 0.03, 0.1, ); @@ -922,7 +999,7 @@ describe(`sharding`, () => { it("evenly if limited when not constrained", async () => { const memoryLimit = 100 * 1e3; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { limits: { @@ -936,7 +1013,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -962,11 +1039,11 @@ describe(`sharding`, () => { } await waitForResolved(async () => { - expect(await db1.log.getMyTotalParticipation()).to.be.within( + expect(await db1.log.calculateMyTotalParticipation()).to.be.within( 0.45, 0.55, ); - expect(await db2.log.getMyTotalParticipation()).to.be.within( + expect(await db2.log.calculateMyTotalParticipation()).to.be.within( 0.45, 0.55, ); @@ -976,7 +1053,7 @@ describe(`sharding`, () => { it("unequally limited", async () => { const memoryLimit = 100 * 1e3; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { limits: { @@ -990,7 +1067,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -1047,7 +1124,7 @@ describe(`sharding`, () => { it("greatly limited", async () => { const memoryLimit = 100 * 1e3; - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { limits: { @@ -1061,7 +1138,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -1102,7 +1179,7 @@ describe(`sharding`, () => { }); it("even if unlimited", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: true, replicas: { @@ -1112,7 +1189,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -1134,11 +1211,11 @@ describe(`sharding`, () => { } await waitForResolved(async () => { - expect(await db1.log.getMyTotalParticipation()).to.be.within( + expect(await db1.log.calculateMyTotalParticipation()).to.be.within( 0.45, 0.55, ); - expect(await db2.log.getMyTotalParticipation()).to.be.within( + expect(await db2.log.calculateMyTotalParticipation()).to.be.within( 0.45, 0.55, ); @@ -1149,13 +1226,13 @@ describe(`sharding`, () => { describe("mixed", () => { it("1 limited, 2 factor", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { 
replicate: true, }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { @@ -1167,7 +1244,7 @@ describe(`sharding`, () => { }, ); - db3 = await EventStore.open>( + db3 = await EventStore.open>( db1.address!, session.peers[2], { @@ -1180,14 +1257,14 @@ describe(`sharding`, () => { ); await waitForResolved(async () => - expect(await db1.log.getMyTotalParticipation()).equal(0), + expect(await db1.log.calculateMyTotalParticipation()).equal(0), ); }); }); describe("fixed", () => { it("can weight by factor", async () => { - db1 = await session.peers[0].open(new EventStore(), { + db1 = await session.peers[0].open(new EventStore(), { args: { replicate: { offset: 0, factor: 0.05 }, replicas: { @@ -1197,7 +1274,7 @@ describe(`sharding`, () => { }, }); - db2 = await EventStore.open>( + db2 = await EventStore.open>( db1.address!, session.peers[1], { diff --git a/packages/programs/data/shared-log/test/sync.spec.ts b/packages/programs/data/shared-log/test/sync.spec.ts new file mode 100644 index 000000000..2454184c2 --- /dev/null +++ b/packages/programs/data/shared-log/test/sync.spec.ts @@ -0,0 +1,283 @@ +import { + Ed25519Keypair, + Ed25519PublicKey, + PublicSignKey, + randomBytes, +} from "@peerbit/crypto"; +import type { Index } from "@peerbit/indexer-interface"; +import { + SQLiteIndices, + create as createIndices, +} from "@peerbit/indexer-sqlite3"; +import { LamportClock, Meta } from "@peerbit/log"; +import { expect } from "chai"; +import type { ReplicationRangeIndexable } from "../src"; +import { type NumberFromType, denormalizer } from "../src/integers"; +import { + type EntryReplicated, + EntryReplicatedU32, + EntryReplicatedU64, + ReplicationRangeIndexableU32, + ReplicationRangeIndexableU64, +} from "../src/ranges"; +import { RangeToEncoders, getMissingValuesInRemote } from "../src/sync"; + +const resolveClasses = (resolution: "u32" | "u64") => { + if (resolution === "u32") { + return { + rangeClass: ReplicationRangeIndexableU32, + entryClass: EntryReplicatedU32, + }; + } + if (resolution === "u64") { + return { + rangeClass: ReplicationRangeIndexableU64, + entryClass: EntryReplicatedU64, + }; + } + + throw new Error("Invalid resolution"); +}; +describe("sync", () => { + let indicesArr: SQLiteIndices[]; + + let createRangeEncoder = async ( + resolution: R, + publicKey: PublicSignKey, + rects: { publicKey: PublicSignKey; length: number; offset: number }[], + entries: number[], + ) => { + const { indices, entry, ranges } = await createFromValues( + resolution, + rects, + entries, + ); + const rangeEncoders = new RangeToEncoders( + publicKey, + indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + return { indices, rangeEncoders, entry, ranges }; + }; + let createFromValues = async ( + resolution: R, + rects: { publicKey: PublicSignKey; length: number; offset: number }[], + entries: number[], + ) => { + const { rangeClass, entryClass } = resolveClasses(resolution); + const denormalizerFN = denormalizer(resolution); + let ranges: ReplicationRangeIndexable[] = rects.map( + (x) => + // @ts-ignore + new rangeClass({ + normalized: true, + publicKey: x.publicKey, + length: x.length, + offset: x.offset, + timestamp: 0n, + }) as unknown as ReplicationRangeIndexable, + ) as ReplicationRangeIndexable[]; + let entry: EntryReplicated[] = entries.map( + (x) => + // @ts-ignore + new entryClass({ + // @ts-ignore + coordinate: denormalizerFN(x) as NumberFromType, + assignedToRangeBoundary: false, + hash: String("a"), + meta: new Meta({ + 
clock: new LamportClock({ id: randomBytes(32) }), + gid: String(x), + next: [], + type: 0, + data: undefined, + }), + }) as EntryReplicated, + ); + + return { + indices: await create(ranges, entry, resolution), + ranges, + entry, + }; + }; + + let create = async ( + rects: ReplicationRangeIndexable[], + entries: EntryReplicated[], + resolution: R, + ) => { + let indices = await createIndices(); + await indices.start(); + + const rangeClass = + resolution === "u32" + ? ReplicationRangeIndexableU32 + : ReplicationRangeIndexableU64; + const indexRects = await indices.init({ schema: rangeClass as any }); + for (const rect of rects) { + await indexRects.put(rect); + } + + const entryClass = + resolution === "u32" ? EntryReplicatedU32 : EntryReplicatedU64; + const indexEntries = await indices.init({ schema: entryClass as any }); + for (const entry of entries) { + await indexEntries.put(entry); + } + + indicesArr.push(indices); + return { + rangeIndex: indexRects, + entryIndex: indexEntries, + } as { + rangeIndex: Index>; + entryIndex: Index>; + }; + }; + let a: Ed25519PublicKey; + let b: Ed25519PublicKey; + + beforeEach(async () => { + indicesArr = []; + a = (await Ed25519Keypair.create()).publicKey; + b = (await Ed25519Keypair.create()).publicKey; + }); + + afterEach(async () => { + await Promise.all(indicesArr.map((x) => x.stop())); + }); + + it("builds", async () => { + const { indices } = await createFromValues( + "u64", + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const rangeEncoders = new RangeToEncoders( + a, + indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + expect(rangeEncoders.encoders.size).to.equal(1); + }); + + it("generates deterministically", async () => { + const { indices, ranges } = await createFromValues( + "u64", + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const rangeEncoders = new RangeToEncoders( + a, + indices.rangeIndex, + indices.entryIndex, + ); + await rangeEncoders.build(); + expect(rangeEncoders.encoders.size).to.equal(1); + + const generator = rangeEncoders.createSymbolGenerator(ranges[0]); + + let symbol1 = generator.next(); + expect(typeof symbol1.count).to.equal("bigint"); + expect(typeof symbol1.hash).to.equal("bigint"); + expect(typeof symbol1.symbol).to.equal("bigint"); + expect(symbol1.hash).to.not.equal(0n); + + const generator2 = rangeEncoders.createSymbolGenerator(ranges[0]); + + let symbol2 = generator2.next(); + expect(symbol1).to.deep.equal(symbol2); + }); + + describe("diff", () => { + it("no difference", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [0.5], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + from: b, + remoteRange: remote.ranges[0].toReplicationRange(), + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.have.length(0); + }); + + it("remote is missing entry", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [0.5], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder:
local.rangeEncoders, + from: b, + remoteRange: remote.ranges[0].toReplicationRange(), + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.deep.eq([BigInt(local.entry[0].coordinate)]); + }); + + it("local is missing entry", async () => { + const local = await createRangeEncoder( + "u64", + a, + [{ publicKey: a, offset: 0, length: 1 }], + [], + ); + const remote = await createRangeEncoder( + "u64", + b, + [{ publicKey: b, offset: 0, length: 1 }], + [0.5], + ); + + const receiver = await getMissingValuesInRemote({ + myEncoder: local.rangeEncoders, + from: b, + remoteRange: remote.ranges[0].toReplicationRange(), + }); + + const bobGenerator = remote.rangeEncoders.createSymbolGenerator( + remote.ranges[0], + ); + const next = bobGenerator.next(); + const out = receiver.process(next); + expect(out.done).to.equal(true); + expect(out.missing).to.deep.eq([]); + }); + }); +}); diff --git a/packages/programs/data/shared-log/test/utils.ts b/packages/programs/data/shared-log/test/utils.ts index 87bb1f042..5d9f85044 100644 --- a/packages/programs/data/shared-log/test/utils.ts +++ b/packages/programs/data/shared-log/test/utils.ts @@ -9,7 +9,7 @@ import { import { type SharedLog, maxReplicas } from "../src/index.js"; import type { TransportMessage } from "../src/message"; -export const collectMessages = (log: SharedLog) => { +export const collectMessages = (log: SharedLog) => { const messages: [TransportMessage, PublicSignKey][] = []; // TODO types @@ -21,7 +21,7 @@ export const collectMessages = (log: SharedLog) => { return messages; }; -export const collectMessagesFn = (log: SharedLog) => { +export const collectMessagesFn = (log: SharedLog) => { const messages: [TransportMessage, PublicSignKey][] = []; const onMessageOrg = log._onMessage.bind(log); const fn = async (msg: any, ctx: any) => { @@ -32,7 +32,7 @@ export const collectMessagesFn = (log: SharedLog) => { }; export const slowDownSend = ( - log: SharedLog, + log: SharedLog, type: Constructor, tms: number, abortSignal?: AbortSignal, @@ -96,7 +96,7 @@ export const waitForConverged = async ( } }; export const getUnionSize = async ( - dbs: { log: SharedLog }[], + dbs: { log: SharedLog }[], expectedUnionSize: number, ) => { const union = new Set(); @@ -111,7 +111,7 @@ export const checkBounded = async ( entryCount: number, lower: number, higher: number, - ...dbs: { log: SharedLog }[] + ...dbs: { log: SharedLog }[] ) => { for (const [_i, db] of dbs.entries()) { try { @@ -131,7 +131,7 @@ export const checkBounded = async ( } } - const checkConverged = async (db: { log: SharedLog }) => { + const checkConverged = async (db: { log: SharedLog }) => { const a = db.log.log.length; await delay(100); // arb delay return a === db.log.log.length; @@ -161,7 +161,7 @@ export const checkBounded = async ( }; export const checkReplicas = ( - dbs: { log: SharedLog }[], + dbs: { log: SharedLog }[], minReplicas: number, entryCount: number, ) => { diff --git a/packages/programs/data/shared-log/test/utils/access.ts b/packages/programs/data/shared-log/test/utils/access.ts index 887c26e39..90de14878 100644 --- a/packages/programs/data/shared-log/test/utils/access.ts +++ b/packages/programs/data/shared-log/test/utils/access.ts @@ -5,9 +5,9 @@ import { EventStore } from "./stores"; @variant("test_simple") export class SimpleStoreContract extends Program { @field({ type: EventStore }) - store!: 
EventStore; + store!: EventStore; - constructor(properties?: { store: EventStore }) { + constructor(properties?: { store: EventStore }) { super(); if (properties) { this.store = properties.store; diff --git a/packages/programs/data/shared-log/test/utils/stores/event-store.ts b/packages/programs/data/shared-log/test/utils/stores/event-store.ts index 351220ff3..b553a7fa0 100644 --- a/packages/programs/data/shared-log/test/utils/stores/event-store.ts +++ b/packages/programs/data/shared-log/test/utils/stores/event-store.ts @@ -20,7 +20,6 @@ import { } from "../../../src/index.js"; import type { TransportMessage } from "../../../src/message.js"; import type { EntryReplicated } from "../../../src/ranges.js"; -import type { ReplicationDomainHash } from "../../../src/replication-domain-hash.js"; import type { ReplicationDomain } from "../../../src/replication-domain.js"; import { JSON_ENCODING } from "./encoding.js"; @@ -32,8 +31,8 @@ export interface Operation { } export class EventIndex { - _log: SharedLog>; - constructor(log: SharedLog>) { + _log: SharedLog, any>; + constructor(log: SharedLog, any>) { this._log = log; } @@ -42,7 +41,13 @@ export class EventIndex { } } -export type Args>> = { +export type Args< + T, + D extends ReplicationDomain, R>, + R extends "u32" | "u64" = D extends ReplicationDomain + ? I + : "u32", +> = { onChange?: (change: Change>) => void; replicate?: ReplicationOptions; trim?: TrimOptions; @@ -52,7 +57,7 @@ export type Args>> = { timeUntilRoleMaturity?: number; waitForReplicatorTimeout?: number; sync?: ( - entry: Entry> | ShallowEntry | EntryReplicated, + entry: Entry> | ShallowEntry | EntryReplicated, ) => boolean; canAppend?: CanAppend>; canReplicate?: (publicKey: PublicSignKey) => Promise | boolean; @@ -63,10 +68,13 @@ export type Args>> = { @variant("event_store") export class EventStore< T, - D extends ReplicationDomain> = ReplicationDomainHash, -> extends Program> { + D extends ReplicationDomain, R>, + R extends "u32" | "u64" = D extends ReplicationDomain + ? 
I + : "u32", +> extends Program> { @field({ type: SharedLog }) - log: SharedLog, D>; + log: SharedLog, D, R>; @field({ type: Uint8Array }) id: Uint8Array; @@ -84,7 +92,7 @@ export class EventStore< this._canAppend = canAppend; } - async open(properties?: Args) { + async open(properties?: Args) { this._index = new EventIndex(this.log); if (properties?.onMessage) { diff --git a/packages/programs/program/src/client.ts b/packages/programs/program/src/client.ts index befd782d4..c7e8f56c7 100644 --- a/packages/programs/program/src/client.ts +++ b/packages/programs/program/src/client.ts @@ -1,8 +1,12 @@ -import type { PeerId as Libp2pPeerId } from "@libp2p/interface"; +import type { PeerId as Libp2pPeerId, PeerId } from "@libp2p/interface"; import type { Multiaddr } from "@multiformats/multiaddr"; import type { AnyStore } from "@peerbit/any-store-interface"; import { type Blocks } from "@peerbit/blocks-interface"; -import { type Ed25519PublicKey, type Identity } from "@peerbit/crypto"; +import { + type Ed25519PublicKey, + type Identity, + PublicSignKey, +} from "@peerbit/crypto"; import type { Indices } from "@peerbit/indexer-interface"; import { type Keychain } from "@peerbit/keychain"; import { type PubSub } from "@peerbit/pubsub-interface"; @@ -19,6 +23,7 @@ export interface Client>> { identity: Identity; getMultiaddrs: () => Multiaddr[]; dial(address: string | Multiaddr | Multiaddr[]): Promise; + hangUp(address: PeerId | PublicSignKey | string | Multiaddr): Promise; services: { pubsub: PubSub; blocks: Blocks; diff --git a/packages/programs/program/src/handler.ts b/packages/programs/program/src/handler.ts index ce06d2120..40e03888b 100644 --- a/packages/programs/program/src/handler.ts +++ b/packages/programs/program/src/handler.ts @@ -60,6 +60,10 @@ export type ProgramInitializationOptions> = { EventOptions; export const addParent = (child: Manageable, parent?: Manageable) => { + if (child.parents && child.parents.includes(parent) && parent == null) { + return; // prevent root parents to exist multiple times. 
This will allow use to close a program onces even if it is reused multiple times + } + (child.parents || (child.parents = [])).push(parent); if (parent) { (parent.children || (parent.children = [])).push(child); diff --git a/packages/programs/program/test/handler.spec.ts b/packages/programs/program/test/handler.spec.ts index 6a29eb82b..732d5b852 100644 --- a/packages/programs/program/test/handler.spec.ts +++ b/packages/programs/program/test/handler.spec.ts @@ -19,7 +19,7 @@ describe(`shared`, () => { it("open same store twice will share instance", async () => { const db1 = await client.open(new TestProgram()); - await expect(await client.open(db1)).equal(db1); + expect(await client.open(db1)).equal(db1); }); it("can open different dbs concurrently", async () => { @@ -126,6 +126,30 @@ describe(`shared`, () => { expect(p2.nested.openInvoked).to.not.be.true; }); + it("reuse clone multiple times and close", async () => { + const p1 = new TestProgram(); + const db1Promise = client.open(p1); + await db1Promise; + const p2 = await client.open(p1.clone(), { existing: "reuse" }); + const p3 = await client.open(p1.clone(), { existing: "reuse" }); + expect(p2 === p1).to.be.true; + expect(p3 === p1).to.be.true; + await p2.close(); + expect(p1.closed).to.be.true; + }); + + it("reuse multiple times and close", async () => { + const p1 = new TestProgram(); + const db1Promise = client.open(p1); + await db1Promise; + const p2 = await client.open(p1, { existing: "reuse" }); + const p3 = await client.open(p1, { existing: "reuse" }); + expect(p2 === p1).to.be.true; + expect(p3 === p1).to.be.true; + await p2.close(); + expect(p1.closed).to.be.true; + }); + it("rejects", async () => { const someParent = new TestProgram(); await expect(client.open(someParent, { parent: someParent })).rejectedWith( diff --git a/packages/programs/program/test/utils.ts b/packages/programs/program/test/utils.ts index 03c2da938..98155de5a 100644 --- a/packages/programs/program/test/utils.ts +++ b/packages/programs/program/test/utils.ts @@ -53,6 +53,7 @@ export const createPeer = async ( identity: keypair, getMultiaddrs: () => [], dial: () => Promise.resolve(false), + hangUp: () => Promise.resolve(), services: { blocks: { get: (c) => blocks.get(c), diff --git a/packages/transport/libp2p-test-utils/src/session.ts b/packages/transport/libp2p-test-utils/src/session.ts index 63e3edd34..7cde71ade 100644 --- a/packages/transport/libp2p-test-utils/src/session.ts +++ b/packages/transport/libp2p-test-utils/src/session.ts @@ -88,11 +88,22 @@ export class TestSession { const result = async () => { const definedOptions: Libp2pOptions | undefined = (options as any)?.[i] || options; + + const services: any = { + identify: identify(), + ...definedOptions?.services, + }; + if (definedOptions?.services?.relay !== null) { + services.relay = relay(); + } else { + delete services.relay; + } + const node = await createLibp2p({ addresses: { listen: listen(), }, - connectionManager: definedOptions?.connectionManager ?? {}, + connectionManager: definedOptions?.connectionManager, privateKey: definedOptions?.privateKey, datastore: definedOptions?.datastore, transports: definedOptions?.transports ?? 
transports(), @@ -100,11 +111,7 @@ export class TestSession { enabled: false, }, - services: { - relay: relay(), - identify: identify(), - ...definedOptions?.services, - } as any, + services, connectionEncrypters: [noise()], streamMuxers: definedOptions?.streamMuxers || [yamux()], start: definedOptions?.start, diff --git a/packages/transport/stream/test/stream.spec.ts b/packages/transport/stream/test/stream.spec.ts index 46cb6632d..ae40b525a 100644 --- a/packages/transport/stream/test/stream.spec.ts +++ b/packages/transport/stream/test/stream.spec.ts @@ -3239,6 +3239,25 @@ describe("start/stop", () => { await session.peers[0].stop(); await session.peers[0].start(); }); + + it("streams are pruned on disconnect", async () => { + // https://github.com/libp2p/js-libp2p/issues/2794 + session = await disconnected(2, { + services: { + relay: null, + directstream: (c: any) => new TestDirectStream(c), + }, + } as any); + await session.connect([[session.peers[0], session.peers[1]]]); + await waitForResolved(() => + expect(session.peers[0].services.directstream.peers.size).to.equal(1), + ); + + await session.peers[0].hangUp(session.peers[1].peerId); + await waitForResolved(() => + expect(session.peers[0].services.directstream.peers.size).to.equal(0), + ); + }); }); describe("multistream", () => { diff --git a/packages/utils/indexer/interface/src/id.ts b/packages/utils/indexer/interface/src/id.ts index cb1cf0d7a..4ef87595d 100644 --- a/packages/utils/indexer/interface/src/id.ts +++ b/packages/utils/indexer/interface/src/id.ts @@ -52,8 +52,8 @@ export class BigUnsignedIntegerValue extends IntegerValue { constructor(number: bigint) { super(); - if (number > 18446744073709551615n || number < 0) { - throw new Error("Number is not u32"); + if (number > 18446744073709551615n || number < 0n) { + throw new Error("Number is not u64"); } this.number = number; } @@ -129,10 +129,26 @@ export class IntegerKey extends IdKey { } } +@variant(3) +export class LargeIntegerKey extends IdKey { + @field({ type: "u64" }) // max value is 2^64 - 1 (18446744073709551615) + key: bigint; + + constructor(key: bigint) { + super(); + this.key = key; + } + + get primitive() { + return this.key; + } +} + export type Ideable = string | number | bigint | Uint8Array; const idKeyTypes = new Set(["string", "number", "bigint"]); +const u64Max = 18446744073709551615n; export const toId = (obj: Ideable): IdKey => { if (typeof obj === "string") { return new StringKey(obj); } if (typeof obj === "number") { return new IntegerKey(obj); } if (typeof obj === "bigint") { - if (obj <= Number.MAX_SAFE_INTEGER && obj >= 0) { - return new IntegerKey(Number(obj)); + if (obj <= u64Max && obj >= 0n) { + return new LargeIntegerKey(obj); } throw new Error( - "BigInt is not less than 2^53. Max value is 9007199254740991", + "BigInt is out of u64 range. Max value is " + + u64Max + + ". 
Provided value: " + + obj, ); } if (obj instanceof Uint8Array) { diff --git a/packages/utils/indexer/simple/test/index.spec.ts b/packages/utils/indexer/simple/test/index.spec.ts index 71727d13b..77e18ce9f 100644 --- a/packages/utils/indexer/simple/test/index.spec.ts +++ b/packages/utils/indexer/simple/test/index.spec.ts @@ -2,5 +2,8 @@ import { tests } from "@peerbit/indexer-tests"; import { create } from "../src"; describe("all", () => { - tests(create, "transient", false); + tests(create, "transient", { + shapingSupported: false, + u64SumSupported: true, + }); }); diff --git a/packages/utils/indexer/sqlite3/src/engine.ts b/packages/utils/indexer/sqlite3/src/engine.ts index ea4a76939..7bfde975b 100644 --- a/packages/utils/indexer/sqlite3/src/engine.ts +++ b/packages/utils/indexer/sqlite3/src/engine.ts @@ -13,9 +13,11 @@ import { buildJoin, convertCountRequestToQuery, convertDeleteRequestToQuery, + convertFromSQLType, convertSearchRequestToQuery, /* getTableName, */ convertSumRequestToQuery, + convertToSQLType, escapeColumnName, generateSelectQuery, getInlineTableFieldName, @@ -253,7 +255,11 @@ export class SQLLiteIndex> ); const sql = `${generateSelectQuery(table, selects)} ${buildJoin(joinMap, true)} where ${this.primaryKeyString} = ? `; const stmt = await this.properties.db.prepare(sql, sql); - const rows = await stmt.get([id.key]); + const rows = await stmt.get([ + table.primaryField?.from?.type + ? convertToSQLType(id.key, table.primaryField.from.type) + : id.key, + ]); if (!rows) { continue; } @@ -389,9 +395,12 @@ export class SQLLiteIndex> return { value, id: types.toId( - row[ - getTablePrefixedField(selectedTable, this.primaryKeyString) - ], + convertFromSQLType( + row[ + getTablePrefixedField(selectedTable, this.primaryKeyString) + ], + selectedTable.primaryField!.from!.type, + ), ), }; }), @@ -548,10 +557,12 @@ export class SQLLiteIndex> const stmt = await this.properties.db.prepare(sql, sql); const result = await stmt.get(bindable); if (result != null) { + const value = result.sum as number; + if (ret == null) { - (ret as any) = result.sum as number; + ret = value; } else { - (ret as any) += result.sum as number; + ret += value; } once = true; } diff --git a/packages/utils/indexer/sqlite3/src/schema.ts b/packages/utils/indexer/sqlite3/src/schema.ts index c72ede9a4..59497889d 100644 --- a/packages/utils/indexer/sqlite3/src/schema.ts +++ b/packages/utils/indexer/sqlite3/src/schema.ts @@ -54,17 +54,31 @@ export type BindableValue = | ArrayBuffer | null; +export const u64ToI64 = (u64: bigint | number) => { + try { + return (typeof u64 === "number" ? BigInt(u64) : u64) - 9223372036854775808n; + } catch (error) { + throw error; + } +}; +export const i64ToU64 = (i64: number | bigint) => + (typeof i64 === "number" ? BigInt(i64) : i64) + 9223372036854775808n; + export const convertToSQLType = ( value: boolean | bigint | string | number | Uint8Array, type?: FieldType, ): BindableValue => { // add bigint when https://github.com/TryGhost/node-sqlite3/pull/1501 fixed - if (type === "bool") { - if (value != null) { + if (value != null) { + if (type === "bool") { return value ? 1 : 0; } - return null; + if (type === "u64") { + // shift to fit in i64 + + return u64ToI64(value as number | bigint); + } } return value as BindableValue; }; @@ -101,9 +115,15 @@ export const convertFromSQLType = ( : nullAsUndefined(value); } if (type === "u64") { - return typeof value === "number" || typeof value === "string" - ? 
BigInt(value) - : nullAsUndefined(value); + if (typeof value === "number" || typeof value === "bigint") { + return i64ToU64(value as number | bigint); // TODO is not always value type bigint? + } + if (value == null) { + return nullAsUndefined(value); + } + throw new Error( + `Unexpected value type for value ${value} expected number or bigint for u64 field`, + ); } return nullAsUndefined(value); }; @@ -145,7 +165,8 @@ export interface Table { name: string; ctor: Constructor; primary: string | false; - primaryIndex: number; + primaryIndex: number; // can be -1 for nested tables TODO make it more clear + primaryField?: SQLField; // can be undefined for nested tables TODO make it required path: string[]; parentPath: string[] | undefined; // field path of the parent where this table originates from fields: SQLField[]; @@ -195,6 +216,7 @@ export const getSQLTable = ( ctor, parentPath: path, path: newPath, + primaryField: fields.find((x) => x.isPrimary)!, primary, primaryIndex: fields.findIndex((x) => x.isPrimary), children: dependencies, @@ -1254,8 +1276,16 @@ export const convertSumRequestToQuery = ( tables, table, ); + + const inlineName = getInlineTableFieldName(request.key); + const field = table.fields.find((x) => x.name === inlineName); + if (unwrapNestedType(field!.from!.type) === "u64") { + throw new Error("Summing is not supported for u64 fields"); + } + const column = `${table.name}.${getInlineTableFieldName(request.key)}`; + return { - sql: `SELECT SUM(${table.name}.${getInlineTableFieldName(request.key)}) as sum FROM ${table.name} ${query}`, + sql: `SELECT SUM(${column}) as sum FROM ${table.name} ${query}`, bindable, }; }; @@ -1793,7 +1823,13 @@ const convertStateFieldQuery = ( } else { throw new Error(`Unsupported compare type: ${query.compare}`); } - bindable.push(query.value.value); + + if (unwrapNestedType(tableField.from!.type) === "u64") { + // shift left because that is how we insert the value + bindable.push(u64ToI64(query.value.value)); + } else { + bindable.push(query.value.value); + } } } else if (query instanceof types.IsNull) { where = `${keyWithTable} IS NULL`; diff --git a/packages/utils/indexer/sqlite3/test/index.spec.ts b/packages/utils/indexer/sqlite3/test/index.spec.ts index b28cbbc20..e35fbce75 100644 --- a/packages/utils/indexer/sqlite3/test/index.spec.ts +++ b/packages/utils/indexer/sqlite3/test/index.spec.ts @@ -2,6 +2,9 @@ import { tests } from "@peerbit/indexer-tests"; import { create } from "../src/index.js"; describe("all", () => { - tests(create, "persist", true); - tests(create, "transient", true); + tests(create, "persist", { shapingSupported: true, u64SumSupported: false }); + tests(create, "transient", { + shapingSupported: true, + u64SumSupported: false, + }); }); diff --git a/packages/utils/indexer/sqlite3/test/statement.spec.ts b/packages/utils/indexer/sqlite3/test/statement.spec.ts index e0c7c8a27..168c8d685 100644 --- a/packages/utils/indexer/sqlite3/test/statement.spec.ts +++ b/packages/utils/indexer/sqlite3/test/statement.spec.ts @@ -1,34 +1,14 @@ import { field } from "@dao-xyz/borsh"; import { - type Index, - type IndexEngineInitProperties, - type Indices, StringMatch, StringMatchMethod, - getIdProperty, id, toId, } from "@peerbit/indexer-interface"; import { expect } from "chai"; import { SQLLiteIndex } from "../src/engine.js"; import { create } from "../src/index.js"; - -const setup = async >( - properties: Partial> & { schema: any }, - createIndicies: (directory?: string) => Indices | Promise, -): Promise<{ indices: Indices; store: Index; 
directory?: string }> => { - const indices = await createIndicies(); - await indices.start(); - const indexProps: IndexEngineInitProperties = { - ...{ - indexBy: getIdProperty(properties.schema) || ["id"], - iterator: { batch: { maxSize: 5e6, sizeProperty: ["__size"] } }, - }, - ...properties, - }; - const store = await indices.init(indexProps); - return { indices, store }; -}; +import { setup } from "./utils.js"; describe("statement", () => { let index: Awaited>>; diff --git a/packages/utils/indexer/sqlite3/test/table.spec.ts b/packages/utils/indexer/sqlite3/test/table.spec.ts index 3512eb6ae..a799609c7 100644 --- a/packages/utils/indexer/sqlite3/test/table.spec.ts +++ b/packages/utils/indexer/sqlite3/test/table.spec.ts @@ -1,31 +1,9 @@ import { field } from "@dao-xyz/borsh"; -import { - type Index, - type IndexEngineInitProperties, - type Indices, - getIdProperty, - id, -} from "@peerbit/indexer-interface"; +import { id } from "@peerbit/indexer-interface"; import { expect } from "chai"; import { SQLLiteIndex } from "../src/engine.js"; import { create } from "../src/index.js"; - -const setup = async >( - properties: Partial> & { schema: any }, - createIndicies: (directory?: string) => Indices | Promise, -): Promise<{ indices: Indices; store: Index; directory?: string }> => { - const indices = await createIndicies(); - await indices.start(); - const indexProps: IndexEngineInitProperties = { - ...{ - indexBy: getIdProperty(properties.schema) || ["id"], - iterator: { batch: { maxSize: 5e6, sizeProperty: ["__size"] } }, - }, - ...properties, - }; - const store = await indices.init(indexProps); - return { indices, store }; -}; +import { setup } from "./utils.js"; describe("table", () => { let index: Awaited>>; diff --git a/packages/utils/indexer/sqlite3/test/u64.spec.ts b/packages/utils/indexer/sqlite3/test/u64.spec.ts new file mode 100644 index 000000000..8a329c3c2 --- /dev/null +++ b/packages/utils/indexer/sqlite3/test/u64.spec.ts @@ -0,0 +1,65 @@ +import { field } from "@dao-xyz/borsh"; +import { type IndexedResults, id } from "@peerbit/indexer-interface"; +import { expect, use } from "chai"; +import chaiAsPromised from "chai-as-promised"; +import { SQLLiteIndex } from "../src/engine.js"; +import { create } from "../src/index.js"; +import { setup } from "./utils.js"; + +use(chaiAsPromised); + +describe("u64", () => { + // u64 is a special case since we need to shift values to fit into signed 64 bit integers + + let index: Awaited>>; + + afterEach(async () => { + await index.store.stop(); + }); + + class DocumentWithBigint { + @id({ type: "u64" }) + id: bigint; + + @field({ type: "u64" }) + value: bigint; + + constructor(id: bigint, value: bigint) { + this.id = id; + this.value = value; + } + } + + it("fetch bounds ", async () => { + index = await setup({ schema: DocumentWithBigint }, create); + const store = index.store as SQLLiteIndex; + expect(store.tables.size).to.equal(1); + await index.store.put(new DocumentWithBigint(0n, 0n)); + await index.store.put( + new DocumentWithBigint(18446744073709551615n, 18446744073709551615n), + ); + await index.store.put(new DocumentWithBigint(123n, 123n)); + + const checkValue = async (value: bigint) => { + const max: IndexedResults = await index.store + .iterate({ query: { value: value } }) + .all(); + expect(max.length).to.equal(1); + expect(max[0].id.primitive).to.equal(value); + expect(max[0].value.id).to.equal(value); + expect(max[0].value.value).to.equal(value); + }; + + await checkValue(0n); + await checkValue(18446744073709551615n); + await 
+		await checkValue(123n);
+	});
+
+	it("summing not supported", async () => {
+		index = await setup({ schema: DocumentWithBigint }, create);
+		const store = index.store as SQLLiteIndex<any>;
+		await expect(store.sum({ key: "value" })).eventually.rejectedWith(
+			"Summing is not supported for u64 fields",
+		);
+	});
+});
diff --git a/packages/utils/indexer/sqlite3/test/utils.ts b/packages/utils/indexer/sqlite3/test/utils.ts
new file mode 100644
index 000000000..0008d02da
--- /dev/null
+++ b/packages/utils/indexer/sqlite3/test/utils.ts
@@ -0,0 +1,22 @@
+import {
+	type Index,
+	type IndexEngineInitProperties,
+	type Indices,
+	getIdProperty,
+} from "@peerbit/indexer-interface";
+
+export const setup = async <T extends Record<string, any>>(
+	properties: Partial<IndexEngineInitProperties<T, any>> & { schema: any },
+	createIndicies: (directory?: string) => Indices | Promise<Indices>,
+): Promise<{ indices: Indices; store: Index<T>; directory?: string }> => {
+	const indices = await createIndicies();
+	await indices.start();
+	const indexProps: IndexEngineInitProperties<T, any> = {
+		...{
+			indexBy: getIdProperty(properties.schema) || ["id"],
+		},
+		...properties,
+	};
+	const store = await indices.init(indexProps);
+	return { indices, store };
+};
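[Editor's note] The "fetch bounds" test in u64.spec.ts above deliberately stores both extremes of the u64 range plus a midrange value, since those are exactly the points where an offset encoding can go wrong. A hypothetical standalone snippet (assuming the helpers sketched earlier) showing how those values round-trip:

// Not part of the patch — illustrates the expected round-trip.
const samples: bigint[] = [0n, 123n, 18446744073709551615n]; // includes 2^64 - 1
for (const v of samples) {
	const stored = u64ToI64(v); // what lands in the SQLite INTEGER column
	if (i64ToU64(stored) !== v) {
		throw new Error(`round-trip failed for ${v}`);
	}
}
// 0n                    -> -9223372036854775808n (i64 min)
// 123n                  -> -9223372036854775685n
// 18446744073709551615n ->  9223372036854775807n (i64 max)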
diff --git a/packages/utils/indexer/tests/src/tests.ts b/packages/utils/indexer/tests/src/tests.ts
index 3b00dc8cb..6e6d40402 100644
--- a/packages/utils/indexer/tests/src/tests.ts
+++ b/packages/utils/indexer/tests/src/tests.ts
@@ -142,7 +142,10 @@ const assertIteratorIsDone = async (iterator: IndexIterator<any, any>) => {
 export const tests = (
 	createIndicies: (directory?: string) => Indices | Promise<Indices>,
 	type: "transient" | "persist" = "transient",
-	shapingSupported: boolean,
+	properties: {
+		shapingSupported: boolean;
+		u64SumSupported: boolean;
+	},
 ) => {
 	return describe("index", () => {
 		let store: Index<any>;
@@ -494,10 +497,10 @@ @field({ type: "u64" })
 			id: bigint;
 
-			@field({ type: "string" })
-			value: string;
+			@field({ type: "u64" })
+			value: bigint;
 
-			constructor(properties: { id: bigint; value: string }) {
+			constructor(properties: { id: bigint; value: bigint }) {
 				this.id = properties.id;
 				this.value = properties.value;
 			}
 		}
@@ -507,10 +510,10 @@
 			const { store } = await setup({ schema: DocumentBigintId });
 
-			// make the id less than 2^53, but greater than u32 max
-			const id = BigInt(2 ** 53 - 1);
+			// make the id greater than 2^53 to exercise the full u64 range
+			const id = 2n ** 63n - 1n;
 			const doc = new DocumentBigintId({
 				id,
-				value: "Hello world",
+				value: id,
 			});
 			await testIndex(store, doc);
 		});
@@ -2212,7 +2215,7 @@
 				);
 				expect(results).to.have.length(4);
 				for (const result of results) {
-					if (shapingSupported) {
+					if (properties.shapingSupported) {
 						expect(Object.keys(result.value)).to.have.length(1);
 						expect(result.value["id"]).to.exist;
 					} else {
@@ -2238,7 +2241,7 @@
 					if (arr.length > 0) {
 						for (const element of arr) {
 							expect(element.number).to.exist;
-							if (shapingSupported) {
+							if (properties.shapingSupported) {
 								expect(Object.keys(element)).to.have.length(1);
 							}
 						}
@@ -2314,7 +2317,7 @@
 				expect(shapedResults).to.have.length(1);
 				expect(shapedResults[0].value.id).to.equal("2");
 
-				if (shapingSupported) {
+				if (properties.shapingSupported) {
 					expect(shapedResults[0].value["nested"]).to.be.undefined;
 				} else {
 					expect(shapedResults[0].value["nested"]).to.exist;
@@ -2366,7 +2369,7 @@
 				expect(shapedResults).to.have.length(1);
 				expect(shapedResults[0].value.id).to.equal(d2.id);
 
-				if (shapingSupported) {
+				if (properties.shapingSupported) {
 					expect({ ...shapedResults[0].value.nested }).to.deep.equal({
 						bool: false,
 					});
@@ -2466,7 +2469,7 @@
 				expect(shapedResults).to.have.length(1);
 				expect(shapedResults[0].value.id).to.equal("2");
 
-				if (shapingSupported) {
+				if (properties.shapingSupported) {
 					expect(shapedResults[0].value["nested"]).to.be.undefined;
 				} else {
 					expect(shapedResults[0].value["nested"]).to.exist;
@@ -2519,7 +2522,7 @@
 				expect(shapedResults).to.have.length(1);
 				expect(shapedResults[0].value.id).to.equal(d2.id);
 
-				if (shapingSupported) {
+				if (properties.shapingSupported) {
 					expect({ ...shapedResults[0].value.nested }).to.deep.equal({
 						bool: false,
 					});
@@ -2596,7 +2599,7 @@
 				expect(shapedResults).to.have.length(1);
 				expect(shapedResults[0].value.id).to.equal("2");
 
-				if (shapingSupported) {
+				if (properties.shapingSupported) {
 					expect(shapedResults[0].value["nested"]).to.be.undefined;
 				} else {
 					expect(shapedResults[0].value["nested"]).to.exist;
@@ -2638,7 +2641,7 @@
 				expect(shapedResults).to.have.length(1);
 				expect(shapedResults[0].value.id).to.equal(d2.id);
 
-				if (shapingSupported) {
+				if (properties.shapingSupported) {
 					expect({ ...shapedResults[0].value.nested[0] }).to.deep.equal({
 						bool: false,
 					});
@@ -3107,24 +3110,70 @@
 		});
 
 		describe("sum", () => {
+			class SummableDocument {
+				@field({ type: "string" })
+				id: string;
+
+				@field({ type: option("u32") })
+				value?: number;
+
+				constructor(opts: SummableDocument) {
+					this.id = opts.id;
+					this.value = opts.value;
+				}
+			}
 			it("it returns sum", async () => {
-				await setupDefault();
-				const sum = await store.sum({ key: "number" });
+				await setup({ schema: SummableDocument });
+				await store.put(
+					new SummableDocument({
+						id: "1",
+						value: 1,
+					}),
+				);
+				await store.put(
+					new SummableDocument({
+						id: "2",
+						value: 2,
+					}),
+				);
+				const sum = await store.sum({ key: "value" });
 				typeof sum === "bigint"
-					? expect(sum).to.equal(6n)
-					: expect(sum).to.equal(6);
+					? expect(sum).to.equal(3n)
+					: expect(sum).to.equal(3);
 			});
 
+			if (properties.u64SumSupported) {
+				it("u64", async () => {
+					await setupDefault();
+					const sum = await store.sum({ key: "number" });
+					typeof sum === "bigint"
+						? expect(sum).to.equal(6n)
+						: expect(sum).to.equal(6);
+				});
+			}
+
 			it("it returns sum with query", async () => {
-				await setupDefault();
+				await setup({ schema: SummableDocument });
+				await store.put(
+					new SummableDocument({
+						id: "1",
+						value: 1,
+					}),
+				);
+				await store.put(
+					new SummableDocument({
+						id: "2",
+						value: 2,
+					}),
+				);
+
 				const sum = await store.sum({
-					key: "number",
+					key: "value",
 					query: [
-						new StringMatch({
-							key: "tags",
-							value: "world",
-							method: StringMatchMethod.contains,
-							caseInsensitive: true,
+						new IntegerCompare({
+							key: "value",
+							compare: Compare.Greater,
+							value: 1,
 						}),
 					],
 				});
diff --git a/packages/utils/riblt b/packages/utils/riblt
new file mode 160000
index 000000000..fc67eae61
--- /dev/null
+++ b/packages/utils/riblt
@@ -0,0 +1 @@
+Subproject commit fc67eae619b6caf9c7a37cf9f5c6e43893e7f260
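[Editor's note] The tests.ts changes above gate the u64 sum test behind properties.u64SumSupported, and index.spec.ts registers the sqlite3 backend with u64SumSupported: false. A short sketch (again assuming the offset encoding) of why SUM cannot simply run over the stored column:

// Not part of the patch — each stored row carries a -2^63 offset, so the
// raw SQL SUM differs from the true sum by rowCount * 2^63 and can also
// overflow SQLite's signed 64-bit accumulator.
const values = [1n, 2n, 3n];
const trueSum = values.reduce((a, b) => a + b, 0n); // 6n
const storedSum = values.map(u64ToI64).reduce((a, b) => a + b, 0n);
// storedSum === trueSum - BigInt(values.length) * (1n << 63n)

Recovering trueSum would require the row count of the filtered set, which is presumably why the patch opts to throw instead.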