diff --git a/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js b/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js index 4d80b0892ca..1479ed50551 100644 --- a/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js +++ b/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js @@ -29,6 +29,10 @@ var RxStorageLoki = exports.RxStorageLoki = /*#__PURE__*/function () { }; return RxStorageLoki; }(); +/** + * @deprecated The lokijs RxStorage is deprecated, more info at: + * @link https://rxdb.info/rx-storage-lokijs.html + */ function getRxStorageLoki(databaseSettings = {}) { var storage = new RxStorageLoki(databaseSettings); return storage; diff --git a/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js.map b/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js.map index 06dd12e7d2a..17b46bedc91 100644 --- a/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js.map +++ b/dist/cjs/plugins/storage-lokijs/rx-storage-lokijs.js.map @@ -1 +1 @@ -{"version":3,"file":"rx-storage-lokijs.js","names":["_rxStorageInstanceLoki","require","_lokijsHelper","_rxStorageHelper","_utilsRxdbVersion","RxStorageLoki","exports","databaseSettings","name","RX_STORAGE_NAME_LOKIJS","rxdbVersion","RXDB_VERSION","leaderElectorByLokiDbName","Map","_proto","prototype","createStorageInstance","params","ensureRxStorageInstanceParamsAreCorrect","createLokiStorageInstance","getRxStorageLoki","storage"],"sources":["../../../../src/plugins/storage-lokijs/rx-storage-lokijs.ts"],"sourcesContent":["import type {\n LokiDatabaseSettings,\n LokiSettings,\n LokiStorageInternals,\n RxStorage,\n RxStorageInstanceCreationParams\n} from '../../types/index.d.ts';\nimport {\n createLokiStorageInstance,\n RxStorageInstanceLoki\n} from './rx-storage-instance-loki.ts';\nimport { RX_STORAGE_NAME_LOKIJS } from './lokijs-helper.ts';\nimport type { LeaderElector } from 'broadcast-channel';\n\nimport { ensureRxStorageInstanceParamsAreCorrect } from '../../rx-storage-helper.ts';\nimport { RXDB_VERSION } from '../utils/utils-rxdb-version.ts';\n\nexport class RxStorageLoki implements RxStorage {\n public name = RX_STORAGE_NAME_LOKIJS;\n public readonly rxdbVersion = RXDB_VERSION;\n\n /**\n * Create one leader elector by db name.\n * This is done inside of the storage, not globally\n * to make it easier to test multi-tab behavior.\n */\n public leaderElectorByLokiDbName: Map = new Map();\n\n constructor(\n public databaseSettings: LokiDatabaseSettings\n ) { }\n\n public createStorageInstance(\n params: RxStorageInstanceCreationParams\n ): Promise> {\n ensureRxStorageInstanceParamsAreCorrect(params);\n return createLokiStorageInstance(this, params, this.databaseSettings);\n }\n}\n\nexport function getRxStorageLoki(\n databaseSettings: LokiDatabaseSettings = {}\n): RxStorageLoki {\n const storage = new RxStorageLoki(databaseSettings);\n return 
storage;\n}\n"],"mappings":";;;;;;;AAOA,IAAAA,sBAAA,GAAAC,OAAA;AAIA,IAAAC,aAAA,GAAAD,OAAA;AAGA,IAAAE,gBAAA,GAAAF,OAAA;AACA,IAAAG,iBAAA,GAAAH,OAAA;AAA8D,IAEjDI,aAAa,GAAAC,OAAA,CAAAD,aAAA;EAItB;AACJ;AACA;AACA;AACA;;EAUI,SAAAA,cACWE,gBAAsC,EAC/C;IAAA,KAnBKC,IAAI,GAAGC,oCAAsB;IAAA,KACpBC,WAAW,GAAGC,8BAAY;IAAA,KAOnCC,yBAAyB,GAO3B,IAAIC,GAAG,CAAC,CAAC;IAAA,KAGHN,gBAAsC,GAAtCA,gBAAsC;EAC7C;EAAC,IAAAO,MAAA,GAAAT,aAAA,CAAAU,SAAA;EAAAD,MAAA,CAEEE,qBAAqB,GAA5B,SAAAA,sBACIC,MAAgE,EACvB;IACzC,IAAAC,wDAAuC,EAACD,MAAM,CAAC;IAC/C,OAAO,IAAAE,gDAAyB,EAAC,IAAI,EAAEF,MAAM,EAAE,IAAI,CAACV,gBAAgB,CAAC;EACzE,CAAC;EAAA,OAAAF,aAAA;AAAA;AAGE,SAASe,gBAAgBA,CAC5Bb,gBAAsC,GAAG,CAAC,CAAC,EAC9B;EACb,IAAMc,OAAO,GAAG,IAAIhB,aAAa,CAACE,gBAAgB,CAAC;EACnD,OAAOc,OAAO;AAClB","ignoreList":[]} \ No newline at end of file +{"version":3,"file":"rx-storage-lokijs.js","names":["_rxStorageInstanceLoki","require","_lokijsHelper","_rxStorageHelper","_utilsRxdbVersion","RxStorageLoki","exports","databaseSettings","name","RX_STORAGE_NAME_LOKIJS","rxdbVersion","RXDB_VERSION","leaderElectorByLokiDbName","Map","_proto","prototype","createStorageInstance","params","ensureRxStorageInstanceParamsAreCorrect","createLokiStorageInstance","getRxStorageLoki","storage"],"sources":["../../../../src/plugins/storage-lokijs/rx-storage-lokijs.ts"],"sourcesContent":["import type {\n LokiDatabaseSettings,\n LokiSettings,\n LokiStorageInternals,\n RxStorage,\n RxStorageInstanceCreationParams\n} from '../../types/index.d.ts';\nimport {\n createLokiStorageInstance,\n RxStorageInstanceLoki\n} from './rx-storage-instance-loki.ts';\nimport { RX_STORAGE_NAME_LOKIJS } from './lokijs-helper.ts';\nimport type { LeaderElector } from 'broadcast-channel';\n\nimport { ensureRxStorageInstanceParamsAreCorrect } from '../../rx-storage-helper.ts';\nimport { RXDB_VERSION } from '../utils/utils-rxdb-version.ts';\n\nexport class RxStorageLoki implements RxStorage {\n public name = RX_STORAGE_NAME_LOKIJS;\n public readonly rxdbVersion = RXDB_VERSION;\n\n /**\n * Create one leader elector by db name.\n * This is done inside of the storage, not globally\n * to make it easier to test multi-tab behavior.\n */\n public leaderElectorByLokiDbName: Map = new Map();\n\n constructor(\n public databaseSettings: LokiDatabaseSettings\n ) { }\n\n public createStorageInstance(\n params: RxStorageInstanceCreationParams\n ): Promise> {\n ensureRxStorageInstanceParamsAreCorrect(params);\n return createLokiStorageInstance(this, params, this.databaseSettings);\n }\n}\n\n/**\n * @deprecated The lokijs RxStorage is deprecated, more info at:\n * @link https://rxdb.info/rx-storage-lokijs.html\n */\nexport function getRxStorageLoki(\n databaseSettings: LokiDatabaseSettings = {}\n): RxStorageLoki {\n const storage = new RxStorageLoki(databaseSettings);\n return 
storage;\n}\n"],"mappings":";;;;;;;AAOA,IAAAA,sBAAA,GAAAC,OAAA;AAIA,IAAAC,aAAA,GAAAD,OAAA;AAGA,IAAAE,gBAAA,GAAAF,OAAA;AACA,IAAAG,iBAAA,GAAAH,OAAA;AAA8D,IAEjDI,aAAa,GAAAC,OAAA,CAAAD,aAAA;EAItB;AACJ;AACA;AACA;AACA;;EAUI,SAAAA,cACWE,gBAAsC,EAC/C;IAAA,KAnBKC,IAAI,GAAGC,oCAAsB;IAAA,KACpBC,WAAW,GAAGC,8BAAY;IAAA,KAOnCC,yBAAyB,GAO3B,IAAIC,GAAG,CAAC,CAAC;IAAA,KAGHN,gBAAsC,GAAtCA,gBAAsC;EAC7C;EAAC,IAAAO,MAAA,GAAAT,aAAA,CAAAU,SAAA;EAAAD,MAAA,CAEEE,qBAAqB,GAA5B,SAAAA,sBACIC,MAAgE,EACvB;IACzC,IAAAC,wDAAuC,EAACD,MAAM,CAAC;IAC/C,OAAO,IAAAE,gDAAyB,EAAC,IAAI,EAAEF,MAAM,EAAE,IAAI,CAACV,gBAAgB,CAAC;EACzE,CAAC;EAAA,OAAAF,aAAA;AAAA;AAGL;AACA;AACA;AACA;AACO,SAASe,gBAAgBA,CAC5Bb,gBAAsC,GAAG,CAAC,CAAC,EAC9B;EACb,IAAMc,OAAO,GAAG,IAAIhB,aAAa,CAACE,gBAAgB,CAAC;EACnD,OAAOc,OAAO;AAClB","ignoreList":[]} \ No newline at end of file diff --git a/dist/cjs/plugins/utils/utils-rxdb-version.js b/dist/cjs/plugins/utils/utils-rxdb-version.js index 540c34967ce..29bb2334266 100644 --- a/dist/cjs/plugins/utils/utils-rxdb-version.js +++ b/dist/cjs/plugins/utils/utils-rxdb-version.js @@ -7,5 +7,5 @@ exports.RXDB_VERSION = void 0; /** * This file is replaced in the 'npm run build:version' script. */ -var RXDB_VERSION = exports.RXDB_VERSION = '15.15.1'; +var RXDB_VERSION = exports.RXDB_VERSION = '15.16.0'; //# sourceMappingURL=utils-rxdb-version.js.map \ No newline at end of file diff --git a/dist/cjs/plugins/utils/utils-rxdb-version.js.map b/dist/cjs/plugins/utils/utils-rxdb-version.js.map index 513f5af98b5..a66ebf110e8 100644 --- a/dist/cjs/plugins/utils/utils-rxdb-version.js.map +++ b/dist/cjs/plugins/utils/utils-rxdb-version.js.map @@ -1 +1 @@ -{"version":3,"file":"utils-rxdb-version.js","names":["RXDB_VERSION","exports"],"sources":["../../../../src/plugins/utils/utils-rxdb-version.ts"],"sourcesContent":["/**\n * This file is replaced in the 'npm run build:version' script.\n */\nexport const RXDB_VERSION = '15.15.1';\n"],"mappings":";;;;;;AAAA;AACA;AACA;AACO,IAAMA,YAAY,GAAAC,OAAA,CAAAD,YAAA,GAAG,SAAS","ignoreList":[]} \ No newline at end of file +{"version":3,"file":"utils-rxdb-version.js","names":["RXDB_VERSION","exports"],"sources":["../../../../src/plugins/utils/utils-rxdb-version.ts"],"sourcesContent":["/**\n * This file is replaced in the 'npm run build:version' script.\n */\nexport const RXDB_VERSION = '15.16.0';\n"],"mappings":";;;;;;AAAA;AACA;AACA;AACO,IAAMA,YAAY,GAAAC,OAAA,CAAAD,YAAA,GAAG,SAAS","ignoreList":[]} \ No newline at end of file diff --git a/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js b/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js index 114741d4b61..1f4ae6210a5 100644 --- a/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js +++ b/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js @@ -22,6 +22,11 @@ export var RxStorageLoki = /*#__PURE__*/function () { }; return RxStorageLoki; }(); + +/** + * @deprecated The lokijs RxStorage is deprecated, more info at: + * @link https://rxdb.info/rx-storage-lokijs.html + */ export function getRxStorageLoki(databaseSettings = {}) { var storage = new RxStorageLoki(databaseSettings); return storage; diff --git a/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js.map b/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js.map index d4d29c5396b..917eec19787 100644 --- a/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js.map +++ b/dist/esm/plugins/storage-lokijs/rx-storage-lokijs.js.map @@ -1 +1 @@ 
-{"version":3,"file":"rx-storage-lokijs.js","names":["createLokiStorageInstance","RX_STORAGE_NAME_LOKIJS","ensureRxStorageInstanceParamsAreCorrect","RXDB_VERSION","RxStorageLoki","databaseSettings","name","rxdbVersion","leaderElectorByLokiDbName","Map","_proto","prototype","createStorageInstance","params","getRxStorageLoki","storage"],"sources":["../../../../src/plugins/storage-lokijs/rx-storage-lokijs.ts"],"sourcesContent":["import type {\n LokiDatabaseSettings,\n LokiSettings,\n LokiStorageInternals,\n RxStorage,\n RxStorageInstanceCreationParams\n} from '../../types/index.d.ts';\nimport {\n createLokiStorageInstance,\n RxStorageInstanceLoki\n} from './rx-storage-instance-loki.ts';\nimport { RX_STORAGE_NAME_LOKIJS } from './lokijs-helper.ts';\nimport type { LeaderElector } from 'broadcast-channel';\n\nimport { ensureRxStorageInstanceParamsAreCorrect } from '../../rx-storage-helper.ts';\nimport { RXDB_VERSION } from '../utils/utils-rxdb-version.ts';\n\nexport class RxStorageLoki implements RxStorage {\n public name = RX_STORAGE_NAME_LOKIJS;\n public readonly rxdbVersion = RXDB_VERSION;\n\n /**\n * Create one leader elector by db name.\n * This is done inside of the storage, not globally\n * to make it easier to test multi-tab behavior.\n */\n public leaderElectorByLokiDbName: Map = new Map();\n\n constructor(\n public databaseSettings: LokiDatabaseSettings\n ) { }\n\n public createStorageInstance(\n params: RxStorageInstanceCreationParams\n ): Promise> {\n ensureRxStorageInstanceParamsAreCorrect(params);\n return createLokiStorageInstance(this, params, this.databaseSettings);\n }\n}\n\nexport function getRxStorageLoki(\n databaseSettings: LokiDatabaseSettings = {}\n): RxStorageLoki {\n const storage = new RxStorageLoki(databaseSettings);\n return storage;\n}\n"],"mappings":"AAOA,SACIA,yBAAyB,QAEtB,+BAA+B;AACtC,SAASC,sBAAsB,QAAQ,oBAAoB;AAG3D,SAASC,uCAAuC,QAAQ,4BAA4B;AACpF,SAASC,YAAY,QAAQ,gCAAgC;AAE7D,WAAaC,aAAa;EAItB;AACJ;AACA;AACA;AACA;;EAUI,SAAAA,cACWC,gBAAsC,EAC/C;IAAA,KAnBKC,IAAI,GAAGL,sBAAsB;IAAA,KACpBM,WAAW,GAAGJ,YAAY;IAAA,KAOnCK,yBAAyB,GAO3B,IAAIC,GAAG,CAAC,CAAC;IAAA,KAGHJ,gBAAsC,GAAtCA,gBAAsC;EAC7C;EAAC,IAAAK,MAAA,GAAAN,aAAA,CAAAO,SAAA;EAAAD,MAAA,CAEEE,qBAAqB,GAA5B,SAAAA,sBACIC,MAAgE,EACvB;IACzCX,uCAAuC,CAACW,MAAM,CAAC;IAC/C,OAAOb,yBAAyB,CAAC,IAAI,EAAEa,MAAM,EAAE,IAAI,CAACR,gBAAgB,CAAC;EACzE,CAAC;EAAA,OAAAD,aAAA;AAAA;AAGL,OAAO,SAASU,gBAAgBA,CAC5BT,gBAAsC,GAAG,CAAC,CAAC,EAC9B;EACb,IAAMU,OAAO,GAAG,IAAIX,aAAa,CAACC,gBAAgB,CAAC;EACnD,OAAOU,OAAO;AAClB","ignoreList":[]} \ No newline at end of file +{"version":3,"file":"rx-storage-lokijs.js","names":["createLokiStorageInstance","RX_STORAGE_NAME_LOKIJS","ensureRxStorageInstanceParamsAreCorrect","RXDB_VERSION","RxStorageLoki","databaseSettings","name","rxdbVersion","leaderElectorByLokiDbName","Map","_proto","prototype","createStorageInstance","params","getRxStorageLoki","storage"],"sources":["../../../../src/plugins/storage-lokijs/rx-storage-lokijs.ts"],"sourcesContent":["import type {\n LokiDatabaseSettings,\n LokiSettings,\n LokiStorageInternals,\n RxStorage,\n RxStorageInstanceCreationParams\n} from '../../types/index.d.ts';\nimport {\n createLokiStorageInstance,\n RxStorageInstanceLoki\n} from './rx-storage-instance-loki.ts';\nimport { RX_STORAGE_NAME_LOKIJS } from './lokijs-helper.ts';\nimport type { LeaderElector } from 'broadcast-channel';\n\nimport { ensureRxStorageInstanceParamsAreCorrect } from '../../rx-storage-helper.ts';\nimport { RXDB_VERSION } from '../utils/utils-rxdb-version.ts';\n\nexport class RxStorageLoki implements 
RxStorage {\n public name = RX_STORAGE_NAME_LOKIJS;\n public readonly rxdbVersion = RXDB_VERSION;\n\n /**\n * Create one leader elector by db name.\n * This is done inside of the storage, not globally\n * to make it easier to test multi-tab behavior.\n */\n public leaderElectorByLokiDbName: Map = new Map();\n\n constructor(\n public databaseSettings: LokiDatabaseSettings\n ) { }\n\n public createStorageInstance(\n params: RxStorageInstanceCreationParams\n ): Promise> {\n ensureRxStorageInstanceParamsAreCorrect(params);\n return createLokiStorageInstance(this, params, this.databaseSettings);\n }\n}\n\n/**\n * @deprecated The lokijs RxStorage is deprecated, more info at:\n * @link https://rxdb.info/rx-storage-lokijs.html\n */\nexport function getRxStorageLoki(\n databaseSettings: LokiDatabaseSettings = {}\n): RxStorageLoki {\n const storage = new RxStorageLoki(databaseSettings);\n return storage;\n}\n"],"mappings":"AAOA,SACIA,yBAAyB,QAEtB,+BAA+B;AACtC,SAASC,sBAAsB,QAAQ,oBAAoB;AAG3D,SAASC,uCAAuC,QAAQ,4BAA4B;AACpF,SAASC,YAAY,QAAQ,gCAAgC;AAE7D,WAAaC,aAAa;EAItB;AACJ;AACA;AACA;AACA;;EAUI,SAAAA,cACWC,gBAAsC,EAC/C;IAAA,KAnBKC,IAAI,GAAGL,sBAAsB;IAAA,KACpBM,WAAW,GAAGJ,YAAY;IAAA,KAOnCK,yBAAyB,GAO3B,IAAIC,GAAG,CAAC,CAAC;IAAA,KAGHJ,gBAAsC,GAAtCA,gBAAsC;EAC7C;EAAC,IAAAK,MAAA,GAAAN,aAAA,CAAAO,SAAA;EAAAD,MAAA,CAEEE,qBAAqB,GAA5B,SAAAA,sBACIC,MAAgE,EACvB;IACzCX,uCAAuC,CAACW,MAAM,CAAC;IAC/C,OAAOb,yBAAyB,CAAC,IAAI,EAAEa,MAAM,EAAE,IAAI,CAACR,gBAAgB,CAAC;EACzE,CAAC;EAAA,OAAAD,aAAA;AAAA;;AAGL;AACA;AACA;AACA;AACA,OAAO,SAASU,gBAAgBA,CAC5BT,gBAAsC,GAAG,CAAC,CAAC,EAC9B;EACb,IAAMU,OAAO,GAAG,IAAIX,aAAa,CAACC,gBAAgB,CAAC;EACnD,OAAOU,OAAO;AAClB","ignoreList":[]} \ No newline at end of file diff --git a/dist/esm/plugins/utils/utils-rxdb-version.js b/dist/esm/plugins/utils/utils-rxdb-version.js index 8dd88e3b130..eebbf4b9f6b 100644 --- a/dist/esm/plugins/utils/utils-rxdb-version.js +++ b/dist/esm/plugins/utils/utils-rxdb-version.js @@ -1,5 +1,5 @@ /** * This file is replaced in the 'npm run build:version' script. 
*/ -export var RXDB_VERSION = '15.15.1'; +export var RXDB_VERSION = '15.16.0'; //# sourceMappingURL=utils-rxdb-version.js.map \ No newline at end of file diff --git a/dist/esm/plugins/utils/utils-rxdb-version.js.map b/dist/esm/plugins/utils/utils-rxdb-version.js.map index d95e71f1311..59e1b98f92b 100644 --- a/dist/esm/plugins/utils/utils-rxdb-version.js.map +++ b/dist/esm/plugins/utils/utils-rxdb-version.js.map @@ -1 +1 @@ -{"version":3,"file":"utils-rxdb-version.js","names":["RXDB_VERSION"],"sources":["../../../../src/plugins/utils/utils-rxdb-version.ts"],"sourcesContent":["/**\n * This file is replaced in the 'npm run build:version' script.\n */\nexport const RXDB_VERSION = '15.15.1';\n"],"mappings":"AAAA;AACA;AACA;AACA,OAAO,IAAMA,YAAY,GAAG,SAAS","ignoreList":[]} \ No newline at end of file +{"version":3,"file":"utils-rxdb-version.js","names":["RXDB_VERSION"],"sources":["../../../../src/plugins/utils/utils-rxdb-version.ts"],"sourcesContent":["/**\n * This file is replaced in the 'npm run build:version' script.\n */\nexport const RXDB_VERSION = '15.16.0';\n"],"mappings":"AAAA;AACA;AACA;AACA,OAAO,IAAMA,YAAY,GAAG,SAAS","ignoreList":[]} \ No newline at end of file diff --git a/dist/types/plugins/storage-denokv/index.d.ts b/dist/types/plugins/storage-denokv/index.d.ts index c41064d8102..56fbece0303 100644 --- a/dist/types/plugins/storage-denokv/index.d.ts +++ b/dist/types/plugins/storage-denokv/index.d.ts @@ -4,7 +4,7 @@ import { RxStorageInstanceDenoKV } from "./rx-storage-instance-denokv.ts"; export declare class RxStorageDenoKV implements RxStorage, DenoKVSettings> { settings: DenoKVSettings; name: string; - readonly rxdbVersion = "15.15.1"; + readonly rxdbVersion = "15.16.0"; constructor(settings: DenoKVSettings); createStorageInstance(params: RxStorageInstanceCreationParams): Promise>; } diff --git a/dist/types/plugins/storage-dexie/rx-storage-dexie.d.ts b/dist/types/plugins/storage-dexie/rx-storage-dexie.d.ts index ac9453badec..03ec8319277 100644 --- a/dist/types/plugins/storage-dexie/rx-storage-dexie.d.ts +++ b/dist/types/plugins/storage-dexie/rx-storage-dexie.d.ts @@ -4,7 +4,7 @@ import { RxStorageInstanceDexie } from './rx-storage-instance-dexie.ts'; export declare class RxStorageDexie implements RxStorage { settings: DexieSettings; name: string; - readonly rxdbVersion = "15.15.1"; + readonly rxdbVersion = "15.16.0"; constructor(settings: DexieSettings); createStorageInstance(params: RxStorageInstanceCreationParams): Promise>; } diff --git a/dist/types/plugins/storage-lokijs/rx-storage-lokijs.d.ts b/dist/types/plugins/storage-lokijs/rx-storage-lokijs.d.ts index 21f4bb6f676..5b2ac88372d 100644 --- a/dist/types/plugins/storage-lokijs/rx-storage-lokijs.d.ts +++ b/dist/types/plugins/storage-lokijs/rx-storage-lokijs.d.ts @@ -4,7 +4,7 @@ import type { LeaderElector } from 'broadcast-channel'; export declare class RxStorageLoki implements RxStorage { databaseSettings: LokiDatabaseSettings; name: string; - readonly rxdbVersion = "15.15.1"; + readonly rxdbVersion = "15.16.0"; /** * Create one leader elector by db name. 
* This is done inside of the storage, not globally @@ -21,4 +21,8 @@ export declare class RxStorageLoki implements RxStorage(params: RxStorageInstanceCreationParams): Promise>; } +/** + * @deprecated The lokijs RxStorage is deprecated, more info at: + * @link https://rxdb.info/rx-storage-lokijs.html + */ export declare function getRxStorageLoki(databaseSettings?: LokiDatabaseSettings): RxStorageLoki; diff --git a/dist/types/plugins/storage-mongodb/rx-storage-mongodb.d.ts b/dist/types/plugins/storage-mongodb/rx-storage-mongodb.d.ts index 593c32a5c80..097b7268117 100644 --- a/dist/types/plugins/storage-mongodb/rx-storage-mongodb.d.ts +++ b/dist/types/plugins/storage-mongodb/rx-storage-mongodb.d.ts @@ -4,7 +4,7 @@ import { RxStorageInstanceMongoDB } from './rx-storage-instance-mongodb.ts'; export declare class RxStorageMongoDB implements RxStorage { databaseSettings: MongoDBDatabaseSettings; name: string; - readonly rxdbVersion = "15.15.1"; + readonly rxdbVersion = "15.16.0"; constructor(databaseSettings: MongoDBDatabaseSettings); createStorageInstance(params: RxStorageInstanceCreationParams): Promise>; } diff --git a/dist/types/plugins/storage-remote/rx-storage-remote.d.ts b/dist/types/plugins/storage-remote/rx-storage-remote.d.ts index 648fe6be386..a8d2b447604 100644 --- a/dist/types/plugins/storage-remote/rx-storage-remote.d.ts +++ b/dist/types/plugins/storage-remote/rx-storage-remote.d.ts @@ -4,7 +4,7 @@ import type { MessageFromRemote, RemoteMessageChannel, RxStorageRemoteInternals, export declare class RxStorageRemote implements RxStorage { readonly settings: RxStorageRemoteSettings; readonly name: string; - readonly rxdbVersion = "15.15.1"; + readonly rxdbVersion = "15.16.0"; private seed; private lastRequestId; messageChannelIfOneMode?: Promise; diff --git a/dist/types/plugins/utils/utils-rxdb-version.d.ts b/dist/types/plugins/utils/utils-rxdb-version.d.ts index 444a024ebe8..c23966a5958 100644 --- a/dist/types/plugins/utils/utils-rxdb-version.d.ts +++ b/dist/types/plugins/utils/utils-rxdb-version.d.ts @@ -1,4 +1,4 @@ /** * This file is replaced in the 'npm run build:version' script. */ -export declare const RXDB_VERSION = "15.15.1"; +export declare const RXDB_VERSION = "15.16.0"; diff --git a/dist/types/rx-database.d.ts b/dist/types/rx-database.d.ts index df7821263bd..79d10a7437c 100644 --- a/dist/types/rx-database.d.ts +++ b/dist/types/rx-database.d.ts @@ -29,7 +29,7 @@ export declare class RxDatabaseBase | undefined; readonly idleQueue: IdleQueue; - readonly rxdbVersion = "15.15.1"; + readonly rxdbVersion = "15.16.0"; /** * Contains all known non-closed storage instances * that belong to this database. 
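A consumer-side sketch of the effect of the @deprecated tag added to getRxStorageLoki above; the import path is the standard v15 plugin path, and the empty settings object is only illustrative:

import { getRxStorageLoki } from 'rxdb/plugins/storage-lokijs';

// Runtime behavior is unchanged; editors and TypeScript tooling now surface the
// deprecation notice pointing at https://rxdb.info/rx-storage-lokijs.html
const storage = getRxStorageLoki({});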
diff --git a/package.json b/package.json index 5ecd9d29df2..c0911045be7 100644 --- a/package.json +++ b/package.json @@ -434,7 +434,8 @@ "preversion": "npm run lint && npm run test", "dev": "watch 'npm run test:node:memory' src/ test/", "dev:example": "watch 'npm run transpile:src && echo \"done\"' src/ test/", - "cloud-signaling-server": "node ./scripts/start-cloud-signaling-server.mjs --max-old-space-size=2048" + "cloud-signaling-server": "node ./scripts/start-cloud-signaling-server.mjs --max-old-space-size=2048", + "watch:transpile": "nodemon --watch src/ --ext ts --ignore 'src/plugins/*' --exec npm run transpile" }, "pre-commit": [ "lint" @@ -557,6 +558,7 @@ "nconf": "0.12.1", "node-datachannel": "0.5.5", "node-pre-gyp": "0.17.0", + "nodemon": "3.0.1", "pre-commit": "1.2.2", "process": "0.11.10", "querystring-es3": "0.2.1", @@ -581,4 +583,4 @@ "webpack-cli": "5.1.4", "webpack-dev-server": "5.0.4" } -} \ No newline at end of file +} diff --git a/src/event-reduce.ts b/src/event-reduce.ts index 34e7ec5e4fa..e9105487676 100644 --- a/src/event-reduce.ts +++ b/src/event-reduce.ts @@ -6,7 +6,19 @@ import { QueryMatcher, DeterministicSortComparator, StateResolveFunctionInput, - ChangeEvent + ChangeEvent, + hasLimit, + isUpdate, + isDelete, + isFindOne, + isInsert, + hasSkip, + wasResultsEmpty, + wasInResult, + wasSortedAfterLast, + wasLimitReached, + wasMatching, + doesMatchNow, } from 'event-reduce-js'; import type { RxQuery, @@ -31,6 +43,7 @@ export type EventReduceResultPos = { runFullQueryAgain: false; changed: boolean; newResults: RxDocumentType[]; + limitResultsRemoved: boolean; }; export type EventReduceResult = EventReduceResultNeg | EventReduceResultPos; @@ -112,6 +125,48 @@ export function getQueryParams( ); } +// This catches a specific case where we have a limit query (of say LIMIT items), and then +// a document is removed from the result set by the current change. In this case, +// the event-reduce library (rightly) tells us we need to recompute the query to get a +// full result set of LIMIT items. +// However, if we have a "limit buffer", we can instead fill in the missing result from there. +// For more info, see the rx-query.test tests under "Limit Buffer". +// This function checks if we are actually in the specific case where the limit buffer can be used. 
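+// Hypothetical example: a query with .limit(10) and enableLimitBuffer(5) initially fetches 15 docs;
+// if an update makes one of the 10 visible results stop matching the selector, the first buffered
+// doc can be promoted into the results instead of re-querying storage.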
+function canFillResultSetFromLimitBuffer(s: StateResolveFunctionInput) {
+    // We figure out if this event is our special case using the same "state resolve" functions that event-reduce uses:
+    // https://github.com/pubkey/event-reduce/blob/fcb46947b29eac97c97dcb05e08af337f362fe5c/javascript/src/states/index.ts#L87
+    // (we also keep the state resolve functions in the same order they're defined in event-reduce.js)
+    return (
+        !isInsert(s) && // inserts can never cause a doc to be removed from the results
+        (isUpdate(s) || isDelete(s)) && // both updates and deletes can remove a doc from our results
+        hasLimit(s) && // only limit queries
+        !isFindOne(s) && // if it's a findOne, we have no buffer and have to re-compute
+        !hasSkip(s) && // we could potentially make skip queries work later, but for now ignore them -- too hard
+        !wasResultsEmpty(s) && // this should never happen
+        wasLimitReached(s) && // if not, the event reducer shouldn't have a problem
+        // any value of wasFirst(s), position is not relevant for this case, as wasInResult is what matters
+        // any value of wasLast(s), position is not relevant for this case, as wasInResult is what matters
+        // any value of sortParamsChanged(s), e.g. a doc could be archived but also have last_status_update changed
+        wasInResult(s) && // we only care about docs already in the result set being removed
+        // any value of wasSortedBeforeFirst(s) -- this is true when the doc is first in the result set
+        !wasSortedAfterLast(s) && // I don't think this could be true anyways, but whatever
+        // any value of isSortedBeforeFirst(s) -- this is true when the doc is first in order (but it could still be filtered out)
+        // any value of isSortedAfterLast(s)
+        wasMatching(s) && // it couldn't have been wasInResult unless it was also matching
+        !doesMatchNow(s) // the limit buffer only applies when the changed doc was indeed removed (so it no longer matches)
+    );
+}
+
+
+function actionRemovesItemFromResults(action: ActionName): boolean {
+    return [
+        'removeFirstItem',
+        'removeLastItem',
+        'removeExisting',
+        'runFullQueryAgain',
+    ].includes(action);
+}
+
 export function calculateNewResults(
     rxQuery: RxQuery,
@@ -126,6 +181,7 @@ export function calculateNewResults(
     const previousResults: RxDocumentType[] = ensureNotFalsy(rxQuery._result).docsData.slice(0);
     const previousResultsMap: Map = ensureNotFalsy(rxQuery._result).docsDataMap;
     let changed: boolean = false;
+    let limitResultsRemoved: boolean = false;
     const eventReduceEvents: ChangeEvent[] = rxChangeEvents
         .map(cE => rxChangeEventToEventReduceChangeEvent(cE))
@@ -140,7 +196,31 @@
         };
         const actionName: ActionName = calculateActionName(stateResolveFunctionInput);
+        if (actionName === 'runFullQueryAgain') {
+            if (canFillResultSetFromLimitBuffer(stateResolveFunctionInput) && rxQuery._limitBufferResults !== null && rxQuery._limitBufferResults.length > 0) {
+                // replace the missing item with an item from our limit buffer!
+                const replacementItem = rxQuery._limitBufferResults.shift();
+                if (replacementItem === undefined) {
+                    return true;
+                }
+
+                changed = true;
+                runAction(
+                    'removeExisting',
+                    queryParams,
+                    eventReduceEvent,
+                    previousResults,
+                    previousResultsMap,
+                );
+                previousResults.push(replacementItem);
+                if (previousResultsMap) {
+                    // We have to assume the primaryKey value is a string.
According to the rxdb docs, this is always the case: + // https://github.com/pubkey/rxdb/blob/c8162c25c7b033fa9f70191512ee84d44d0dd913/docs/rx-schema.html#L2523 + previousResultsMap.set(replacementItem[rxQuery.collection.schema.primaryPath] as string, replacementItem); + } + return false; + } return true; } else if (actionName !== 'doNothing') { changed = true; @@ -151,6 +231,9 @@ export function calculateNewResults( previousResults, previousResultsMap ); + if (actionRemovesItemFromResults(actionName)) { + limitResultsRemoved = true; + } return false; } }); @@ -162,7 +245,8 @@ export function calculateNewResults( return { runFullQueryAgain: false, changed, - newResults: previousResults + newResults: previousResults, + limitResultsRemoved, }; } } diff --git a/src/rx-query-single-result.ts b/src/rx-query-single-result.ts index 1e4f91f01e7..13dc0dee114 100644 --- a/src/rx-query-single-result.ts +++ b/src/rx-query-single-result.ts @@ -73,4 +73,13 @@ export class RxQuerySingleResult{ map ); } + + get docsKeys(): string[] { + const keys = Array.from(this.docsMap.keys()); + return overwriteGetterForCaching( + this, + 'docsKeys', + keys + ); + } } diff --git a/src/rx-query.ts b/src/rx-query.ts index 20673aa4f41..ccdc166b8fa 100644 --- a/src/rx-query.ts +++ b/src/rx-query.ts @@ -13,15 +13,14 @@ import { shareReplay } from 'rxjs/operators'; import { - sortObject, - pluginMissing, - overwriteGetterForCaching, + appendToArray, + areRxDocumentArraysEqual, now, - PROMISE_RESOLVE_FALSE, + overwriteGetterForCaching, + pluginMissing, + PROMISE_RESOLVE_FALSE, RX_META_LWT_MINIMUM, RXJS_SHARE_REPLAY_DEFAULTS, - ensureNotFalsy, - areRxDocumentArraysEqual, - appendToArray + sortObject } from './plugins/utils/index.ts'; import { newRxError @@ -30,32 +29,47 @@ import { runPluginHooks } from './hooks.ts'; import type { - RxCollection, - RxDocument, - RxQueryOP, - RxQuery, MangoQuery, - MangoQuerySortPart, - MangoQuerySelector, PreparedQuery, + QueryMatcher, RxChangeEvent, - RxDocumentWriteData, + RxCollection, + RxDocument, RxDocumentData, - QueryMatcher, RxJsonSchema, - FilledMangoQuery + FilledMangoQuery, + RxDocumentWriteData, + RxQuery, + RxQueryOP, MangoQuerySelector, MangoQuerySortPart } from './types/index.d.ts'; -import { calculateNewResults } from './event-reduce.ts'; -import { triggerCacheReplacement } from './query-cache.ts'; -import { getQueryMatcher, normalizeMangoQuery } from './rx-query-helper.ts'; +import { getQueryMatcher, getSortComparator, normalizeMangoQuery } from './rx-query-helper.ts'; import { RxQuerySingleResult } from './rx-query-single-result.ts'; import { getQueryPlan } from './query-planner.ts'; +import { calculateNewResults } from './event-reduce.ts'; +import { triggerCacheReplacement } from './query-cache.ts'; +import { ensureNotFalsy } from 'event-reduce-js'; +import { getChangedDocumentsSince } from './rx-storage-helper.ts'; + + +export interface QueryCacheBackend { + getItem(key: string): Promise; + setItem(key: string, value: T): Promise; +} let _queryCount = 0; const newQueryID = function (): number { return ++_queryCount; }; +// allow changes to be 100ms older than the actual lwt value +const RESTORE_QUERY_UPDATE_DRIFT = 100; + +// 5000 seems like a sane number where re-executing the query will be easier than trying to restore +const RESTORE_QUERY_MAX_DOCS_CHANGED = 5000; + +// If a query was persisted more than a week ago, just re-execute it +export const RESTORE_QUERY_MAX_TIME_AGO = 7 * 24 * 60 * 60 * 1000; + export class RxQueryBase< RxDocType, RxQueryResult, @@ -186,6 
+200,16 @@ export class RxQueryBase< public _lastExecStart: number = 0; public _lastExecEnd: number = 0; + // Fields used for the Limit Buffer when enabled: + public _limitBufferSize: number | null = null; + public _limitBufferResults: RxDocumentData[] | null = null; + + // Fields used for the persistent query cache when enabled: + public _persistentQueryCacheResult?: string[] | string = undefined; + public _persistentQueryCacheResultLwt?: string = undefined; // lwt = latest write time + public _persistentQueryCacheLoaded?: Promise; + public _persistentQueryCacheBackend?: QueryCacheBackend; + /** * ensures that the exec-runs * are not run in parallel @@ -217,12 +241,24 @@ export class RxQueryBase< newResultData = Array.from((newResultData as Map>).values()); } - const newQueryResult = new RxQuerySingleResult( - this.collection, - newResultData, - newResultData.length - ); - this._result = newQueryResult; + const docsDataMap = new Map(); + const docsMap = new Map(); + + + const docs = newResultData.map(docData => this.collection._docCache.getCachedRxDocument(docData)); + + /** + * Instead of using the newResultData in the result cache, + * we directly use the objects that are stored in the RxDocument + * to ensure we do not store the same data twice and fill up the memory. + */ + const docsData = docs.map(doc => { + docsDataMap.set(doc.primary, doc._data); + docsMap.set(doc.primary, doc); + return doc._data; + }); + + this._result = new RxQuerySingleResult(this.collection, docsData, docsData.length); } /** @@ -356,6 +392,10 @@ export class RxQueryBase< return value; } + persistentQueryId() { + return String(this.collection.database.hashFunction(this.toString())); + } + /** * returns the prepared query * which can be send to the storage instance to query for documents. 
@@ -370,10 +410,16 @@ export class RxQueryBase< this.mangoQuery ) }; + (hookInput.mangoQuery.selector as any)._deleted = { $eq: false }; if (hookInput.mangoQuery.index) { hookInput.mangoQuery.index.unshift('_deleted'); } + + if (this._limitBufferSize !== null && hookInput.mangoQuery.limit) { + hookInput.mangoQuery.limit = hookInput.mangoQuery.limit + this._limitBufferSize; + } + runPluginHooks('prePrepareQuery', hookInput); const value = prepareQuery( @@ -446,6 +492,162 @@ export class RxQueryBase< limit(_amount: number | null): RxQuery { throw pluginMissing('query-builder'); } + + enableLimitBuffer(bufferSize: number) { + if (this._limitBufferSize !== null) { + // Limit buffer has already been enabled, do nothing: + return this; + } + if (this._lastExecStart !== 0) { + console.error('Can\'t use limit buffer if query has already executed'); + return this; + } + if (this.mangoQuery.skip || !this.mangoQuery.limit) { + console.error('Right now, limit buffer only works on non-skip, limit queries.'); + return this; + } + this._limitBufferSize = bufferSize; + return this; + } + + enablePersistentQueryCache(backend: QueryCacheBackend) { + if (this._persistentQueryCacheBackend) { + // We've already tried to enable the query cache + return this; + } + this._persistentQueryCacheBackend = backend; + this._persistentQueryCacheLoaded = this._restoreQueryCacheFromPersistedState(); + return this; + } + + private async _restoreQueryCacheFromPersistedState() { + if (!this._persistentQueryCacheBackend) { + // no cache backend provided, do nothing + return; + } + if (this._persistentQueryCacheResult) { + // we already restored the cache once, no need to run twice + return; + } + if (this.mangoQuery.skip || this.op === 'count') { + console.error('The persistent query cache only works on non-skip, non-count queries.'); + return; + } + + // First, check if there are any query results persisted: + const persistentQueryId = this.persistentQueryId(); + const value = await this._persistentQueryCacheBackend.getItem(`qc:${persistentQueryId}`); + if (!value || !Array.isArray(value) || value.length === 0) { + // eslint-disable-next-line no-console + console.log(`no persistent query cache found in the backend, returning early ${this.toString()}`); + return; + } + + // If there are persisted ids, create our two Sets of ids from the cache: + const persistedQueryCacheIds = new Set(); + const limitBufferIds = new Set(); + + for (const id of value) { + if (id.startsWith('lb-')) { + limitBufferIds.add(id.replace('lb-', '')); + } else { + persistedQueryCacheIds.add(id); + } + } + + // eslint-disable-next-line no-console + console.time(`Restoring persistent querycache ${this.toString()}`); + + // Next, pull the lwt from the cache: + // TODO: if lwt is too old, should we just give up here? What if there are too many changedDocs? + const lwt = (await this._persistentQueryCacheBackend.getItem(`qc:${persistentQueryId}:lwt`)) as string | null; + if (!lwt) { + return; + } + + // If the query was persisted too long ago, just re-execute it. 
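+        // (RESTORE_QUERY_MAX_TIME_AGO is 7 * 24 * 60 * 60 * 1000 = 604800000 ms, i.e. one week.)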
+        if (now() - Number(lwt) > RESTORE_QUERY_MAX_TIME_AGO) {
+            return;
+        }
+
+        const primaryPath = this.collection.schema.primaryPath;
+
+        const {documents: changedDocs} = await getChangedDocumentsSince(this.collection.storageInstance,
+            RESTORE_QUERY_MAX_DOCS_CHANGED,
+            // make sure we remove the monotonic clock (xxx.01, xxx.02) from the lwt timestamp to avoid issues with
+            // lookups in indices (dexie)
+            {id: '', lwt: Math.floor(Number(lwt)) - RESTORE_QUERY_UPDATE_DRIFT}
+        );
+
+        // If too many docs have changed, just give up and re-execute the query
+        if (changedDocs.length === RESTORE_QUERY_MAX_DOCS_CHANGED) {
+            return;
+        }
+
+        const changedDocIds = new Set(changedDocs.map((d) => d[primaryPath] as string));
+
+        const docIdsWeNeedToFetch = [...persistedQueryCacheIds, ...limitBufferIds].filter((id) => !changedDocIds.has(id));
+
+        // We use _queryCollectionByIds to fetch the remaining docs we need efficiently, pulling
+        // from the doc cache if we can (and the storageInstance by ids if we can't):
+        const otherPotentialMatchingDocs: RxDocumentData[] = [];
+        await _queryCollectionByIds(this as any, otherPotentialMatchingDocs, docIdsWeNeedToFetch);
+
+        // Now that we have all potential documents, we just filter (in-memory) the ones that still match our query:
+        let docsData: RxDocumentData[] = [];
+        for (const doc of changedDocs.concat(otherPotentialMatchingDocs)) {
+            if (this.doesDocumentDataMatch(doc)) {
+                docsData.push(doc);
+            }
+        }
+
+        // Sort the documents by the query's sort field:
+        const normalizedMangoQuery = normalizeMangoQuery(
+            this.collection.schema.jsonSchema,
+            this.mangoQuery
+        );
+        const sortComparator = getSortComparator(this.collection.schema.jsonSchema, normalizedMangoQuery);
+        const limit = normalizedMangoQuery.limit ? normalizedMangoQuery.limit : Infinity;
+        docsData = docsData.sort(sortComparator);
+
+        // We know for sure that all persisted and limit buffer ids (and changed docs before them) are in the correct
+        // result set. And we can't be sure about any past that point. So cut it off there:
+        const lastValidIndex = docsData.findLastIndex((d) => limitBufferIds.has(d[primaryPath] as string) || persistedQueryCacheIds.has(d[primaryPath] as string));
+        docsData = docsData.slice(0, lastValidIndex + 1);
+
+        // Now this is the trickiest part.
+        // If we somehow have fewer docs than the limit of our query
+        // (and this wasn't the case before persistence)
+        // then there is no way for us to know the correct results, and we re-exec:
+        const unchangedItemsMayNowBeInResults = (
+            this.mangoQuery.limit &&
+            docsData.length < this.mangoQuery.limit &&
+            persistedQueryCacheIds.size >= this.mangoQuery.limit
+        );
+        if (unchangedItemsMayNowBeInResults) {
+            return;
+        }
+
+        // Our finalResults are the actual results of this query, and pastLimitItems are any remaining matching
+        // documents we have left over (past the limit).
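+        // e.g. with limit = 10 and 13 restored matching docs, finalResults gets docsData[0..9]
+        // and pastLimitItems gets docsData[10..12]: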
+        const pastLimitItems = docsData.slice(limit);
+        const finalResults = docsData.slice(0, limit);
+
+        // If there are still items past the first LIMIT items, try to restore the limit buffer with them:
+        if (limitBufferIds.size && pastLimitItems.length > 0) {
+            this._limitBufferResults = pastLimitItems;
+        } else {
+            this._limitBufferResults = [];
+        }
+
+        // Finally, set the query's results to what we've pulled from disk:
+        this._lastEnsureEqual = now();
+        this._latestChangeEvent = this.collection._changeEventBuffer.counter;
+        this._setResultData(finalResults);
+
+        // eslint-disable-next-line no-console
+        console.timeEnd(`Restoring persistent querycache ${this.toString()}`);
+    }
 }
 
 export function _getDefaultQuery(): MangoQuery {
@@ -480,6 +682,7 @@ export function createRxQuery(
 
     // ensure when created with same params, only one is created
     ret = tunnelQueryCache(ret);
+    // TODO: clear persistent query cache as well
     triggerCacheReplacement(collection);
 
     return ret;
@@ -519,11 +722,14 @@ function _ensureEqual(rxQuery: RxQueryBase): Promise {
     return rxQuery._ensureEqualQueue;
 }
 
+
 /**
  * ensures that the results of this query is equal to the results which a query over the database would give
  * @return true if results have changed
 */
-function __ensureEqual(rxQuery: RxQueryBase): Promise {
+async function __ensureEqual(rxQuery: RxQueryBase): Promise {
+    await rxQuery._persistentQueryCacheLoaded;
+
     rxQuery._lastEnsureEqual = now();
 
     /**
@@ -560,6 +766,18 @@ function __ensureEqual(rxQuery: RxQueryBase): Promise
             ._changeEventBuffer
             .reduceByLastOfDoc(missedChangeEvents);
 
+        if (rxQuery._limitBufferResults !== null) {
+            // Check if any item in our limit buffer was modified by a change event
+            for (const cE of runChangeEvents) {
+                if (rxQuery._limitBufferResults.find((doc) => doc[rxQuery.collection.schema.primaryPath] === cE.documentId)) {
+                    // If so, the limit buffer is potentially invalid -- let's just blow it up
+                    // TODO: could we instead update the documents in the limit buffer?
+                    rxQuery._limitBufferResults = null;
+                    break;
+                }
+            }
+        }
+
         if (rxQuery.op === 'count') {
             // 'count' query
             const previousCount = ensureNotFalsy(rxQuery._result).count;
@@ -632,9 +850,73 @@ function __ensureEqual(rxQuery: RxQueryBase): Promise
                     rxQuery._setResultData(newResultData as any);
                 }
                 return ret;
+            })
+            .then(async (returnValue) => {
+                await updatePersistentQueryCache(rxQuery);
+                return returnValue;
             });
     }
-    return Promise.resolve(ret); // true if results have changed
+
+    return ret; // true if results have changed
+}
+
+
+async function updatePersistentQueryCache(rxQuery: RxQueryBase) {
+    if (!rxQuery._persistentQueryCacheBackend) {
+        return;
+    }
+
+    const backend = rxQuery._persistentQueryCacheBackend;
+
+    const key = rxQuery.persistentQueryId();
+
+    // update _persistentQueryCacheResult
+    rxQuery._persistentQueryCacheResult = rxQuery._result?.docsKeys ?? [];
+
+    const idsToPersist = [...rxQuery._persistentQueryCacheResult];
+    if (rxQuery._limitBufferResults) {
+        rxQuery._limitBufferResults.forEach((d) => {
+            idsToPersist.push(`lb-${d[rxQuery.collection.schema.primaryPath]}`);
+        });
+    }
+    // eslint-disable-next-line no-console
+    console.time(`Query persistence: persisting results of ${JSON.stringify(rxQuery.mangoQuery)}`);
+    // persist query cache
+    const lwt = rxQuery._result?.time ??
RX_META_LWT_MINIMUM; + + await Promise.all([ + backend.setItem(`qc:${String(key)}`, idsToPersist), + backend.setItem(`qc:${String(key)}:lwt`, lwt.toString()), + ]); + + // eslint-disable-next-line no-console + console.timeEnd(`Query persistence: persisting results of ${JSON.stringify(rxQuery.mangoQuery)}`); +} + + +// Refactored out of `queryCollection`: modifies the docResults array to fill it with data +async function _queryCollectionByIds(rxQuery: RxQuery | RxQueryBase, docResults: RxDocumentData[], docIds: string[]) { + const collection = rxQuery.collection; + docIds = docIds.filter(docId => { + // first try to fill from docCache + const docData = rxQuery.collection._docCache.getLatestDocumentDataIfExists(docId); + if (docData) { + if (!docData._deleted) { + docResults.push(docData); + } + return false; + } else { + return true; + } + }); + + // otherwise get from storage + if (docIds.length > 0) { + const docsMap = await collection.storageInstance.findDocumentsById(docIds, false); + Object.values(docsMap).forEach(docData => { + docResults.push(docData); + }); + } } /** @@ -675,6 +957,8 @@ export function prepareQuery( export async function queryCollection( rxQuery: RxQuery | RxQueryBase ): Promise[]> { + await rxQuery._persistentQueryCacheLoaded; + let docs: RxDocumentData[] = []; const collection = rxQuery.collection; @@ -704,6 +988,7 @@ export async function queryCollection( const docsFromStorage = await collection.storageInstance.findDocumentsById(docIds, false); appendToArray(docs, docsFromStorage); } + await _queryCollectionByIds(rxQuery, docs, rxQuery.isFindOneByIdQuery); } else { const docId = rxQuery.isFindOneByIdQuery; @@ -723,10 +1008,14 @@ export async function queryCollection( } else { const preparedQuery = rxQuery.getPreparedQuery(); const queryResult = await collection.storageInstance.query(preparedQuery); + if (rxQuery._limitBufferSize !== null && rxQuery.mangoQuery.limit && queryResult.documents.length > rxQuery.mangoQuery.limit) { + // If there are more than query.limit results, we pull out our buffer items from the + // last rxQuery._limitBufferSize items of the results. 
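+            // e.g. with limit = 10 and a buffer size of 5, the prepared query ran with limit 15,
+            // so up to 5 trailing docs are spliced off into the buffer here.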
+ rxQuery._limitBufferResults = queryResult.documents.splice(rxQuery.mangoQuery.limit); + } docs = queryResult.documents; } return docs; - } /** @@ -772,7 +1061,6 @@ export function isFindOneByIdQuery( } - export function isRxQuery(obj: any): boolean { return obj instanceof RxQueryBase; } diff --git a/test/helper/cache.ts b/test/helper/cache.ts new file mode 100644 index 00000000000..2be9accf43e --- /dev/null +++ b/test/helper/cache.ts @@ -0,0 +1,31 @@ +import {QueryCacheBackend, RxCollection} from '../../src'; + +export class Cache implements QueryCacheBackend { + private readonly items; + + constructor() { + this.items = new Map(); + } + + getItem(key: string) { + return this.items.get(key); + } + + async setItem(key: string, value: T) { + this.items.set(key, value); + return await Promise.resolve(value); + } + + get size() { + return this.items.size; + } + + getItems() { + return this.items; + } +} + +export function clearQueryCache(collection: RxCollection) { + const queryCache = collection._queryCache; + queryCache._map = new Map(); +} diff --git a/test/unit/rx-query.test.ts b/test/unit/rx-query.test.ts index bf512667907..47d96694f47 100644 --- a/test/unit/rx-query.test.ts +++ b/test/unit/rx-query.test.ts @@ -7,7 +7,8 @@ import { schemaObjects, schemas, humansCollection, - isNode + isNode, + HumanDocumentType } from '../../plugins/test-utils/index.mjs'; import { @@ -17,11 +18,14 @@ import { promiseWait, randomCouchString, ensureNotFalsy, - deepFreeze + deepFreeze, + now, uncacheRxQuery, RxCollection, } from '../../plugins/core/index.mjs'; import { firstValueFrom } from 'rxjs'; +import {Cache, clearQueryCache} from '../helper/cache.ts'; + describe('rx-query.test.ts', () => { describeParallel('.constructor', () => { it('should throw dev-mode error on wrong query object', async () => { @@ -1436,4 +1440,830 @@ describe('rx-query.test.ts', () => { db.destroy(); }); }); + + async function setUpLimitBufferCollectionAndQuery(enableLimitBufferSize?: number, numRowsTotal=20, skipRows?: number) { + const limitRows = 10; + const collection = await humansCollection.create(numRowsTotal); + + // Setup a query where the limit buffer would be useful. 
+ // This .find initially matches all docs in the collection + let query = collection.find({selector: { + firstName: { + $ne: 'Dollaritas' + } + }}).sort('-lastName').limit(limitRows); + + if (skipRows !== undefined) { + query = query.skip(skipRows); + } + + if (enableLimitBufferSize !== undefined) { + query.enableLimitBuffer(enableLimitBufferSize); + } + + const initialResults = await query.exec(); + + assert.strictEqual(initialResults.length, Math.min(limitRows, numRowsTotal)); + assert.strictEqual(query._execOverDatabaseCount, 1); + + // We already have a change event for each row from humansCollection.create: + assert.strictEqual(query._latestChangeEvent, numRowsTotal); + + return {query, collection, numRowsTotal, limitRows, initialResults}; + } + + async function removeSingleDocFromMatchingQuery(collection: Awaited>['collection'], doc: HumanDocumentType) { + await collection.find({selector: {passportId: doc.passportId}}).update({ + $set: { + firstName: 'Dollaritas' + } + }); + } + + describeParallel('Limit Buffer', () => { + it('By default, limit queries will have to re-exec when item is removed', async () => { + // Set up the query, without using the limit buffer: + const { query, collection, numRowsTotal, limitRows, initialResults } = await setUpLimitBufferCollectionAndQuery(undefined); + + // Now, make a change that removes a single doc from the result set + await removeSingleDocFromMatchingQuery(collection, initialResults[0]); + + // Re-exec the query: + const updatedResults = await query.exec(); + // Confirm the change was processed, and the results are correct: + assert.strictEqual(updatedResults.length, limitRows); + assert.notStrictEqual(updatedResults[0].passportId, initialResults[0].passportId); + assert.strictEqual(query.collection._changeEventBuffer.counter, numRowsTotal + 1); + assert.strictEqual(query._latestChangeEvent, numRowsTotal + 1); + + // Confirm that the query had to run via db again instead of using the query cache: + assert.strictEqual(query._execOverDatabaseCount, 2); + + collection.database.destroy(); + }); + it('Limit buffer works properly in usual cases', async () => { + const limitBufferSize = 5; + const {query, collection, numRowsTotal, limitRows, initialResults} = await setUpLimitBufferCollectionAndQuery(limitBufferSize, 30); + + // Now, make a change that removes a single doc from the result set + await removeSingleDocFromMatchingQuery(collection, initialResults[0]); + + // Re-exec the query: + const updatedResults = await query.exec(); + // Confirm the change was processed, and the results are correct: + assert.strictEqual(updatedResults.length, limitRows); + assert.notStrictEqual(updatedResults[0].passportId, initialResults[0].passportId); + assert.strictEqual(query.collection._changeEventBuffer.counter, numRowsTotal + 1); + assert.strictEqual(query._latestChangeEvent, numRowsTotal + 1); + + // Confirm that the query DID NOT exec over the db again, because it used the query cache via limit buffer: + assert.strictEqual(query._execOverDatabaseCount, 1); + // And that one item was taken from the limit buffer: + assert.strictEqual(query._limitBufferResults?.length, limitBufferSize - 1); + + // Do it all again to make sure this is consistent across multiple updates: + await removeSingleDocFromMatchingQuery(collection, initialResults[8]); + const updatedResultsAgain = await query.exec(); + assert.strictEqual(updatedResultsAgain.length, limitRows); + assert.strictEqual(query._execOverDatabaseCount, 1); + + // However, if we "use up" the whole limit buffer 
(5 documents), + // the query will have to re-exec. Let's remove 3 more items to show that: + for (const doc of initialResults.slice(1, 4)) { + await removeSingleDocFromMatchingQuery(collection, doc); + await query.exec(); + assert.strictEqual(query._execOverDatabaseCount, 1); + } + + // The Limit buffer should now be empty: + assert.strictEqual(query._limitBufferResults?.length, 0); + + // So removing one more item will require a re-exec on the db: + await removeSingleDocFromMatchingQuery(collection, initialResults[4]); + await query.exec(); + assert.strictEqual(query._execOverDatabaseCount, 2); + + // After this re-exec on the db, the limit buffer should be filled again: + assert.strictEqual(query._limitBufferResults?.length, limitBufferSize); + + // And further removals will use the new limit buffer again: + await removeSingleDocFromMatchingQuery(collection, initialResults[5]); + const finalResults = await query.exec(); + assert.strictEqual(finalResults.length, limitRows); + assert.strictEqual(query._execOverDatabaseCount, 2); + assert.strictEqual(query._limitBufferResults?.length, limitBufferSize - 1); + + collection.database.destroy(); + }); + it('Limit buffer doesn\'t do anything when fewer than LIMIT items', async () => { + // Set up with only 8 rows total, but a limit of 10 (and limit buffer 5): + const limitBufferSize = 5; + const {query, collection, numRowsTotal, initialResults} = await setUpLimitBufferCollectionAndQuery(limitBufferSize, 8); + + // Now, make a change that removes a single doc from the result set + await removeSingleDocFromMatchingQuery(collection, initialResults[0]); + + // Re-exec the query after removing one, so the results should be 7 docs now: + const updatedResults = await query.exec(); + // Confirm the change was processed, and the results are correct: + assert.strictEqual(updatedResults.length, numRowsTotal - 1); + assert.notStrictEqual(updatedResults[0].passportId, initialResults[0].passportId); + + // And the limitBuffer wasn't filled at all: + assert.strictEqual(query._limitBufferResults, null); + + // The query wouldn't have to re-exec because of the normal query cache: + assert.strictEqual(query._execOverDatabaseCount, 1); + + collection.database.destroy(); + }); + it('Limit buffer works with skip=0', async () => { + // Set up with a skip=0 (limit buffer should work normally) + const limitBufferSize = 5; + const {query, collection, initialResults} = await setUpLimitBufferCollectionAndQuery(limitBufferSize, 20, 0); + assert.strictEqual(query._limitBufferResults?.length, limitBufferSize); + await removeSingleDocFromMatchingQuery(collection, initialResults[1]); + await query.exec(); + assert.strictEqual(query._execOverDatabaseCount, 1); + collection.database.destroy(); + }); + it('Limit buffer does nothing with a non-zero skip', async () => { + const limitBufferSize = 5; + const {query, collection, initialResults} = await setUpLimitBufferCollectionAndQuery(limitBufferSize, 20, 10); + assert.strictEqual(query._limitBufferResults, null); + await removeSingleDocFromMatchingQuery(collection, initialResults[1]); + await query.exec(); + assert.strictEqual(query._execOverDatabaseCount, 2); + collection.database.destroy(); + }); + it('Limit buffer does nothing if item is removed from results due to sort changing only', async () => { + // Do a normal setup with the limit, and confirm the limit buffer gets filled: + const limitBufferSize = 5; + const {query, collection, initialResults} = await setUpLimitBufferCollectionAndQuery(limitBufferSize, 20); + 
assert.strictEqual(query._limitBufferResults?.length, limitBufferSize);
+            assert.strictEqual(query._execOverDatabaseCount, 1);
+
+            // Instead of removing an item from the results by making it break the query selector
+            // (what removeSingleDocFromMatchingQuery does), just move it to the end of the sort
+            // which will kick it out of the query results due to the LIMIT
+            await collection.find({selector: {passportId: initialResults[0].passportId}}).update({
+                $set: {
+                    lastName: 'AAAAAAAAAAAAAAA'
+                }
+            });
+
+            // Explicitly, the limit buffer does not replace items in this case (although it technically
+            // could with little trouble in the future, we just haven't implemented it)
+            // so the query should re-run on the database to fill in the missing document:
+            const updatedResults = await query.exec();
+            assert.strictEqual(query._execOverDatabaseCount, 2);
+            assert.notStrictEqual(updatedResults[0].passportId, initialResults[0].passportId);
+            collection.database.destroy();
+        });
+        it('Limit buffer omits buffered items that have been modified to no longer match', async () => {
+            const limitBufferSize = 5;
+            const {query, collection, initialResults} = await setUpLimitBufferCollectionAndQuery(limitBufferSize, 20);
+
+            if (query._limitBufferResults === null) {
+                throw new Error('_limitBufferResults not set');
+            }
+            // Get the first item from the limit buffer, and change it so it no longer matches the query selector:
+            const firstBufferItem = query._limitBufferResults[0];
+            await collection.find({selector: {passportId: firstBufferItem.passportId}}).update({
+                $set: {
+                    firstName: 'Dollaritas'
+                }
+            });
+            // Now, remove an item from the initial results, so that the buffer _should_ be used
+            // to fill the last item in the updated results.
+            await removeSingleDocFromMatchingQuery(collection, initialResults[1]);
+
+            // Make sure we DO NOT pull the modified item from the limit buffer, as it no longer matches the query:
+            const updatedResults = await query.exec();
+            assert.notStrictEqual(updatedResults[updatedResults.length - 1].passportId, firstBufferItem.passportId);
+
+            collection.database.destroy();
+        });
+    });
+
+    async function setUpPersistentQueryCacheCollection() {
+        const collection = await humansCollection.create(0);
+        return {collection};
+    }
+
+    describeParallel('Persistent Query Cache', () => {
+        it('query fills cache', async () => {
+            const {collection} = await setUpPersistentQueryCacheCollection();
+
+            const query = collection.find({ limit: 1 });
+            const cache = new Cache();
+            query.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+            const human1 = schemaObjects.humanData();
+            const human2 = schemaObjects.humanData();
+
+            await collection.bulkInsert([human1, human2]);
+            await query.exec();
+
+            assert.strictEqual(cache.size, 2);
+
+            collection.database.destroy();
+        });
+
+        it('does not query from database after restoring from persistent query cache', async () => {
+            const {collection} = await setUpPersistentQueryCacheCollection();
+
+            const human1 = schemaObjects.humanData();
+            const human2 = schemaObjects.humanData();
+
+            await collection.bulkInsert([human1, human2]);
+
+            const query = collection.find({ limit: 2 });
+
+            // fill cache
+            const queryId = query.persistentQueryId();
+            const cache = new Cache();
+            await cache.setItem(`qc:${queryId}`, [human1.passportId, human2.passportId]);
+            await cache.setItem(`qc:${queryId}:lwt`, `${now()}`);
+            query.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+            // execute query
+            const result = await query.exec();
+
+
assert.strictEqual(result.length, 2); + assert.strictEqual(query._execOverDatabaseCount, 0); + + collection.database.destroy(); + }); + + it('does not query from database after modifying a document', async () => { + const {collection} = await setUpPersistentQueryCacheCollection(); + + const human1 = schemaObjects.humanData(); + const human1Age = human1.age; + + await collection.bulkInsert([human1]); + + const query1 = collection.find({ selector: { age: human1Age }}); + + // fill cache + const queryId = query1.persistentQueryId(); + const cache = new Cache(); + await cache.setItem(`qc:${queryId}`, [human1.passportId]); + await cache.setItem(`qc:${queryId}:lwt`, `${now()}`); + query1.enableLimitBuffer(5).enablePersistentQueryCache(cache); + + // execute query + const result1 = await query1.exec(); + assert.strictEqual(result1.length, 1); + + const human1Doc = result1[0]; + await human1Doc.modify(data => { + data.age += 1; + return data; + }); + + clearQueryCache(collection); + + const query2 = collection.find({ selector: { age: human1Age }}); + query2.enableLimitBuffer(5).enablePersistentQueryCache(cache); + + const result2 = await query2.exec(); + + assert.strictEqual(result1.length, 1); + assert.strictEqual(result2.length, 0); + assert.strictEqual(query1._execOverDatabaseCount, 0); + assert.strictEqual(query2._execOverDatabaseCount, 0); + + collection.database.destroy(); + }); + + it('does not query from database after adding an object', async () => { + const {collection} = await setUpPersistentQueryCacheCollection(); + + const human1 = schemaObjects.humanData(); + const human2 = schemaObjects.humanData(); + const human3 = schemaObjects.humanData(); + + await collection.bulkInsert([human1, human2]); + + const query = collection.find({ limit: 3 }); + const queryId = query.persistentQueryId(); + const cache = new Cache(); + await cache.setItem(`qc:${queryId}`, [human1.passportId, human2.passportId]); + await cache.setItem(`qc:${queryId}:lwt`, `${now()}`); + query.enableLimitBuffer(5).enablePersistentQueryCache(cache); + + const result1 = await query.exec(); + + await collection.insert(human3); + + const result2 = await query.exec(); + + assert.strictEqual(result1.length, 2); + assert.strictEqual(result2.length, 3); + assert.strictEqual(query._execOverDatabaseCount, 0); + + collection.database.destroy(); + }); + + it('does return docs from cache in correct order and with limits applied', async () => { + const {collection} = await setUpPersistentQueryCacheCollection(); + + const human1 = schemaObjects.humanData('1', 30); + const human2 = schemaObjects.humanData('2', 40); + const human3 = schemaObjects.humanData('3', 50); + + await collection.bulkInsert([human2, human3]); + + const query1 = collection.find({ limit: 2, sort: [{age: 'asc'}] }); + const queryId = query1.persistentQueryId(); + const lwt = now(); + + const cache = new Cache(); + await cache.setItem(`qc:${queryId}`, [human2.passportId, human3.passportId]); + await cache.setItem(`qc:${queryId}:lwt`, `${lwt}`); + + await collection.insert(human1); + + clearQueryCache(collection); + + const query2 = collection.find({ limit: 2, sort: [{age: 'asc'}] }); + query2.enableLimitBuffer(5).enablePersistentQueryCache(cache); + + const result2 = await query2.exec(); + + assert.strictEqual(query1._execOverDatabaseCount, 0); + assert.strictEqual(query2._execOverDatabaseCount, 0); + assert.deepStrictEqual(result2.map(item => item.passportId), ['1', '2']); + + collection.database.destroy(); + }); + + it('removing an item from the database, but not 
+            const query2 = collection.find({ limit: 2, sort: [{age: 'asc'}] });
+            query2.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+            const result2 = await query2.exec();
+
+            assert.strictEqual(query1._execOverDatabaseCount, 0);
+            assert.strictEqual(query2._execOverDatabaseCount, 0);
+            assert.deepStrictEqual(result2.map(item => item.passportId), ['1', '2']);
+
+            collection.database.destroy();
+        });
+
+        it('removing an item from the database, but not from cache does not lead to wrong results after restoring', async () => {
+            const {collection} = await setUpPersistentQueryCacheCollection();
+
+            const human1 = schemaObjects.humanData('1', 30);
+            const human2 = schemaObjects.humanData('2', 40);
+            const human3 = schemaObjects.humanData('3', 50);
+
+            await collection.bulkInsert([human1, human2, human3]);
+
+            const query1 = collection.find({ limit: 2, sort: [{age: 'asc'}] });
+            const queryId = query1.persistentQueryId();
+            const lwt = now();
+
+            const cache = new Cache();
+            await cache.setItem(`qc:${queryId}`, [human1.passportId, human2.passportId, human3.passportId]);
+            await cache.setItem(`qc:${queryId}:lwt`, `${lwt}`);
+
+            const removeQuery = collection.find({ selector: { passportId: '2' }});
+            await removeQuery.remove();
+
+            clearQueryCache(collection);
+
+            const query2 = collection.find({ limit: 2, sort: [{age: 'asc'}] });
+            query2.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+            assert.strictEqual(cache.getItem(`qc:${queryId}`).length, 3);
+
+            const result2 = await query2.exec();
+
+            assert.strictEqual(query1._execOverDatabaseCount, 0);
+            assert.strictEqual(query2._execOverDatabaseCount, 0);
+            assert.deepStrictEqual(result2.map(item => item.passportId), ['1', '3']);
+
+            collection.database.destroy();
+        });
+
+        it('old cache values are updated when documents are modified', async () => {
+            const {collection} = await setUpPersistentQueryCacheCollection();
+
+            const human1 = schemaObjects.humanData('1', 30);
+
+            await collection.bulkInsert([human1]);
+
+            // fill cache
+            const cache = new Cache();
+            const query1 = collection.find({limit: 1});
+            query1.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+            const queryId = query1.persistentQueryId();
+
+            const result1 = await query1.exec();
+            assert.strictEqual(result1.length, 1);
+            assert.strictEqual(cache.size, 2);
+
+            clearQueryCache(collection);
+
+            // go back in time (2hrs)
+            const lwt = now() - 7200 * 1000;
+            await cache.setItem(`qc:${queryId}:lwt`, `${lwt}`);
+
+            const query2 = collection.find({limit: 1});
+            query2.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+            await query2._persistentQueryCacheLoaded;
+
+            await result1[0].remove();
+
+            await query2.exec();
+
+            const currLwt = Number(await cache.getItem(`qc:${queryId}:lwt`));
+            assert.strictEqual(currLwt > lwt, true);
+
+            collection.database.destroy();
+        });
+
+        it('query from database when cache is empty', async () => {
+            const {collection} = await setUpPersistentQueryCacheCollection();
+
+            const human1 = schemaObjects.humanData();
+            await collection.bulkInsert([human1]);
+
+            const query = collection.find({ limit: 3 });
+
+            const cache = new Cache();
+            query.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+            const result = await query.exec();
+
+            assert.strictEqual(result.length, 1);
+            assert.strictEqual(query._execOverDatabaseCount, 1);
+
+            collection.database.destroy();
+        });
+
+        it('will re-execute queries if they were cached a long time ago', async () => {
+            const {collection} = await setUpPersistentQueryCacheCollection();
+
+            const human1 = schemaObjects.humanData('1', 30);
+            await collection.bulkInsert([human1]);
+
+            // fill cache
+            const cache = new Cache();
+            const query1 = collection.find({limit: 1});
+            query1.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+            const queryId = query1.persistentQueryId();
+
+            await query1.exec();
+            clearQueryCache(collection);
+
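+            // clearQueryCache only evicts the in-memory query; the persisted cache entries survive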
+            // If we restore the same query, it shouldn't need to re-exec:
+            const querySoon = collection.find({limit: 1});
+            querySoon.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+            await querySoon.exec();
+            assert.strictEqual(querySoon._execOverDatabaseCount, 0);
+
+            clearQueryCache(collection);
+
+            // Now, simulate the query having been cached over a week ago.
+            // It should have to re-exec.
+            const lwt = now() - RESTORE_QUERY_MAX_TIME_AGO - 1000;
+            await cache.setItem(`qc:${queryId}:lwt`, `${lwt}`);
+
+            const queryLater = collection.find({limit: 1});
+            queryLater.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+            await queryLater.exec();
+            assert.strictEqual(queryLater._execOverDatabaseCount, 1);
+
+            collection.database.destroy();
+        });
+
+        describe('persisting queries with limit buffers', () => {
+            async function setUpLimitBufferSituation() {
+                const {collection} = await setUpPersistentQueryCacheCollection();
+                await collection.bulkInsert([
+                    schemaObjects.humanData('1', 30),
+                    schemaObjects.humanData('2', 40),
+                    schemaObjects.humanData('3', 50),
+                    schemaObjects.humanData('4', 60),
+                    schemaObjects.humanData('5', 70),
+                ]);
+
+                // wait 500ms so that not all docs are included in lwt
+                await new Promise((resolve) => {
+                    setTimeout(resolve, 500);
+                });
+
+                // Cache a limited query:
+                const query = collection.find({ limit: 2, sort: [{age: 'asc'}], selector: { age: { $gt: 10 } } });
+                const cache = new Cache();
+
+                return { query, cache, collection };
+            }
+
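+            // simulate a fresh session/tab: drop the in-memory query cache and the doc cache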
+            function simulateNewSession(collection: RxCollection) {
+                clearQueryCache(collection);
+                collection._docCache.cacheItemByDocId.clear();
+            }
+
+            // This is how it should operate when we don't persist limit buffers:
+            it('limit buffer not enabled, still gives correct results through re-execution', async () => {
+                const { collection, query, cache} = await setUpLimitBufferSituation();
+
+                // persist with no limit buffer enabled
+                await query.enablePersistentQueryCache(cache);
+                const originalResults = await query.exec();
+                assert.deepStrictEqual(originalResults.map(h => h.passportId), ['1', '2']);
+
+                // Now, get into a state where that query is no longer in memory (eg new tab)
+                // (but, the query should still be persisted on disk)
+                simulateNewSession(collection);
+                assert.strictEqual(cache.size, 2);
+
+                // while the query is not in memory, remove one of the items from the query results
+                await collection.find({selector: { passportId: '1'}}).update({
+                    $set: { age: 1 }
+                });
+
+                // now when we create the query again, it has no way of knowing how to fill the missing item
+                const queryAgain = collection.find(query.mangoQuery);
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 0);
+
+                await queryAgain.enablePersistentQueryCache(cache);
+                const updatedResults = await queryAgain.exec();
+
+                // We must re-exec the query to make it correct.
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 1);
+                assert.deepStrictEqual(updatedResults.map(h => h.passportId), ['2', '3']);
+                collection.database.destroy();
+            });
+
+            it('limit buffer enabled, restores normal changes, results correctly with no re-exec', async () => {
+                const { collection, query, cache} = await setUpLimitBufferSituation();
+
+                // Persist WITH the limit buffer enabled
+                query.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+                const originalResults = await query.exec();
+                assert.deepStrictEqual(originalResults.map(h => h.passportId), ['1', '2']);
+                assert.strictEqual(query._limitBufferResults?.length, 3);
+                assert.strictEqual(cache.size, 2);
+
+                // remove one of the items from the query results
+                await collection.find({ selector: { passportId: '1' } }).update({
+                    $set: { age: 1 }
+                });
+
+                simulateNewSession(collection);
+
+                // now when we create the query again, it should fill in the missing element from the limit buffer
+                const queryAgain = collection.find(query.mangoQuery);
+                queryAgain.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+                const updatedResults = await queryAgain.exec();
+
+                // The query should use the limit buffer to restore the results, and not need to re-exec the query
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 0);
+                assert.deepStrictEqual(updatedResults.map(h => h.passportId), ['2', '3']);
+
+                // There should now only be 2 items left in the limit buffer, it used the first one up to fill the results
+                assert.strictEqual(queryAgain._limitBufferResults?.length, 2);
+
+                collection.database.destroy();
+            });
+
+            it('limit buffer enabled, restores missing changes, results correctly with no re-exec', async () => {
+                const { collection, query, cache} = await setUpLimitBufferSituation();
+
+                // Persist WITH the limit buffer enabled
+                query.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+                const originalResults = await query.exec();
+                assert.deepStrictEqual(originalResults.map(h => h.passportId), ['1', '2']);
+                assert.strictEqual(query._limitBufferResults?.length, 3);
+
+                // uncache the query first, before changes are made
+                simulateNewSession(collection);
+                assert.strictEqual(cache.size, 2);
+
+                // remove one of the items from the query results while query is not listening in memory
+                await collection.find({ selector: { passportId: '1' } }).update({
+                    $set: { age: 1 }
+                });
+
+                // now when we create the query again, it will fill in the missing element from the limit buffer
+                const queryAgain = collection.find(query.mangoQuery);
+                queryAgain.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+                const updatedResults = await queryAgain.exec();
+
+                // The query should use the limit buffer to restore the results, and not need to re-exec the query
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 0);
+                assert.deepStrictEqual(updatedResults.map(h => h.passportId), ['2', '3']);
+
+                // There should now only be 2 items left in the limit buffer, it used the first one up to fill the results
+                assert.strictEqual(queryAgain._limitBufferResults?.length, 2);
+
+                collection.database.destroy();
+            });
+
+            it('limit buffer enabled, but gets exhausted', async () => {
+                const { collection, query, cache} = await setUpLimitBufferSituation();
+
+                // Persist WITH the limit buffer enabled, but only one doc
+                query.enableLimitBuffer(1).enablePersistentQueryCache(cache);
+                await query.exec();
+                simulateNewSession(collection);
+
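+                // with only one buffered doc, removing two results will exhaust the buffer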
+                // remove two of the items from the query results
+                await collection.find({ selector: { passportId: '1' } }).update({
+                    $set: { age: 1 }
+                });
+                await collection.find({ selector: { passportId: '2' } }).update({
+                    $set: { age: 1 }
+                });
+
+                // now when we create the query again, it will fill in the missing element from the limit buffer
+                // but then still need another item to hit the limit=2
+                const queryAgain = collection.find(query.mangoQuery);
+                queryAgain.enableLimitBuffer(1).enablePersistentQueryCache(cache);
+
+                const updatedResults = await queryAgain.exec();
+
+                // The query will have to still re-exec, but give the correct results
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 1);
+                assert.deepStrictEqual(updatedResults.map(h => h.passportId), ['3', '4']);
+
+                // And re-fill the 1 item in limit buffer:
+                assert.strictEqual(queryAgain._limitBufferResults?.length, 1);
+                assert.strictEqual(queryAgain._limitBufferResults?.[0].passportId, '5');
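+                // ('5' is the only matching doc left beyond the restored results, so it refills the buffer)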
+
+                collection.database.destroy();
+            });
+
+            it('limit buffer enabled, with a bunch of deletions', async () => {
+                const { collection, query, cache} = await setUpLimitBufferSituation();
+
+                // Persist WITH the limit buffer enabled
+                query.enableLimitBuffer(3).enablePersistentQueryCache(cache);
+                await query.exec();
+                simulateNewSession(collection);
+
+                // delete one item from the results, and one item from the limit buffer:
+                await collection.find({ selector: { passportId: '1' } }).remove();
+                await collection.find({ selector: { passportId: '3' } }).remove();
+
+                const queryAgain = collection.find(query.mangoQuery);
+                queryAgain.enableLimitBuffer(3).enablePersistentQueryCache(cache);
+
+                const updatedResults = await queryAgain.exec();
+
+                // The query should be able to fill up from the limit buffer
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 0);
+                assert.deepStrictEqual(updatedResults.map(h => h.passportId), ['2', '4']);
+                assert.strictEqual(queryAgain._limitBufferResults?.length, 1);
+
+                // But if we go further, and use the last items from the limit buffer, we'll have to re-exec:
+                uncacheRxQuery(collection._queryCache, queryAgain);
+                await collection.find({ selector: { passportId: '4' } }).remove();
+                await collection.find({ selector: { passportId: '5' } }).remove();
+
+                const queryFinal = collection.find(query.mangoQuery);
+                queryFinal.enableLimitBuffer(3).enablePersistentQueryCache(cache);
+
+                const finalResults = await queryFinal.exec();
+                assert.strictEqual(queryFinal._execOverDatabaseCount, 1);
+                assert.deepStrictEqual(finalResults.map(h => h.passportId), ['2']);
+
+                collection.database.destroy();
+            });
+
+            it('limit buffer enabled, doc added and limit buffer items changed, still restores correctly', async () => {
+                const { collection, query, cache} = await setUpLimitBufferSituation();
+
+                // Persist WITH the limit buffer enabled
+                query.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+
+                await query.exec();
+
+                simulateNewSession(collection);
+
+                // Let's make 3 changes:
+                // 1. remove both of the original results
+                // 2. add in a new doc that should now be in the results
+                // 3. modify one of the items in the limit buffer to change the correct order there
+                await collection.find({ selector: { passportId: '1' } }).update({
+                    $set: { age: 1 }
+                });
+                await collection.find({ selector: { passportId: '2' } }).update({
+                    $set: { age: 1 }
+                });
+                // the new item should now be the first result, since it has the lowest age
+                await collection.bulkUpsert([
+                    schemaObjects.humanData('6', 20),
+                ]);
+                // change what would be the next result (passport id 3) to still match the filter, but now be last (so not in the results)
+                await collection.find({ selector: { passportId: '3' } }).update({
+                    $set: { age: 100 }
+                });
+
+                const queryAgain = collection.find(query.mangoQuery);
+                queryAgain.enableLimitBuffer(5).enablePersistentQueryCache(cache);
+                const updatedResults = await queryAgain.exec();
+
+                // The query should use the limit buffer to restore the results, and not need to re-exec the query
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 0);
+
+                // But it should also correctly fill in the new document into the correct position, and also handle the sort change
+                assert.deepStrictEqual(updatedResults.map(h => h.passportId), ['6', '4']);
+
+                // The two items in limit buffer should be in the correct order:
+                assert.deepStrictEqual(queryAgain._limitBufferResults?.map((d) => d.passportId), ['5', '3']);
+
+                collection.database.destroy();
+            });
+
+            it('limit buffer enabled, all items in buffer used but we have more matching non-buffer items', async () => {
+                const { collection, query, cache} = await setUpLimitBufferSituation();
+
+                // Persist WITH the limit buffer enabled
+                query.enableLimitBuffer(2).enablePersistentQueryCache(cache);
+                await query.exec();
+                simulateNewSession(collection);
+
+                // remove the 2 results, so we use up the 2 items in the limit buffer:
+                await collection.find({ selector: { passportId: '1' } }).remove();
+                await collection.find({ selector: { passportId: '2' } }).update({
+                    $set: { age: 1 }
+                });
+                // But also add in some new docs, that match the filter but are sorted last
+                await collection.bulkUpsert([
+                    schemaObjects.humanData('6', 90),
+                    schemaObjects.humanData('7', 90),
+                ]);
+
+                const queryAgain = collection.find(query.mangoQuery);
+                queryAgain.enableLimitBuffer(2).enablePersistentQueryCache(cache);
+
+                const updatedResults = await queryAgain.exec();
+
+                // In this case we can use the limit buffer without re-execing, and still get correct results:
+                assert.strictEqual(queryAgain._execOverDatabaseCount, 0);
+                assert.deepStrictEqual(updatedResults.map(h => h.passportId), ['3', '4']);
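+                // ('3' and '4' are exactly the two docs that had been sitting in the limit buffer)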
+
+                // But the new limit buffer will be empty -- we can't use the new documents because we don't know
+                // how they would be sorted relative to other documents
+                assert.strictEqual(queryAgain._limitBufferResults?.length, 0);
+
+                simulateNewSession(collection);
+
+                // If one more doc is removed from our results, we will HAVE to re-exec to ensure
+                // correct results, test that:
+                await collection.find({ selector: { passportId: '3' } }).update({
+                    $set: { age: 1 }
+                });
+
+                const queryFinal = collection.find(query.mangoQuery);
+                queryFinal.enableLimitBuffer(2).enablePersistentQueryCache(cache);
+
+                const finalResults = await queryFinal.exec();
+
+                // Query re-execs, and gives correct results:
+                assert.strictEqual(queryFinal._execOverDatabaseCount, 1);
+                assert.deepStrictEqual(finalResults.map(h => h.passportId), ['4', '5']);
+
+                // When we re-exec, the limit buffer will also get filled:
+                assert.deepStrictEqual(queryFinal._limitBufferResults?.map(h => h.passportId), ['6', '7']);
+
+                collection.database.destroy();
+            });
+
+            it('Handles case where we have fewer than LIMIT matches', async () => {
+                const { collection, cache } = await setUpLimitBufferSituation();
+
+                const query = collection.find({ limit: 3, sort: [{age: 'asc'}], selector: { age: { $lt: 45 } } });
+                query.enableLimitBuffer(2).enablePersistentQueryCache(cache);
+                await query.exec();
+                simulateNewSession(collection);
+
+                // Remove something, still correct and no re-exec
+                await collection.find({ selector: { passportId: '1' } }).remove();
+
+                const queryRemoved = collection.find(query.mangoQuery);
+                queryRemoved.enableLimitBuffer(2).enablePersistentQueryCache(cache);
+                const removedResults = await queryRemoved.exec();
+                assert.strictEqual(queryRemoved._execOverDatabaseCount, 0);
+                assert.deepStrictEqual(removedResults.map(h => h.passportId), ['2']);
+
+                simulateNewSession(collection);
+
+                // Now add some matching docs. Since these changes happened after the cached lwt,
+                // they should show up in the results with no re-exec.
+                await collection.find({ selector: { passportId: '5' } }).update({
+                    $set: { age: 1 }
+                });
+                await collection.bulkUpsert([
+                    schemaObjects.humanData('6', 2),
+                    schemaObjects.humanData('7', 3),
+                ]);
+                const queryAdded = collection.find(query.mangoQuery);
+                queryAdded.enableLimitBuffer(2).enablePersistentQueryCache(cache);
+                const addedResults = await queryAdded.exec();
+                assert.strictEqual(queryAdded._execOverDatabaseCount, 0);
+                assert.deepStrictEqual(addedResults.map(h => h.passportId), ['5', '6', '7']);
+
+                collection.database.destroy();
+            });
+        });
+    });
 });
diff --git a/tsconfig.json b/tsconfig.json
index db8f130695a..e5e15a2cf6a 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -2,7 +2,7 @@
     // @link https://iamturns.com/typescript-babel/
     "compilerOptions": {
         // Target latest version of ECMAScript.
-        "target": "es2022",
+        "target": "esnext",
         // Search under node_modules for non-relative imports.
         // https://www.youtube.com/watch?v=H91aqUHn8sE
         "moduleResolution": "node",
@@ -11,7 +11,7 @@
         "allowJs": false,
         // Enable strictest settings like strictNullChecks & noImplicitAny.
         "strict": true,
-        // Disallow features that require 
+        // Disallow features that require
         // cross-file information for emit.
         "isolatedModules": false,
         // Import non-ES modules as default imports.