@harperfast/harper 5.0.0-alpha.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CODE_OF_CONDUCT.md +83 -0
- package/LICENSE +201 -0
- package/README.md +54 -0
- package/SECURITY.md +18 -0
- package/SUPPORT.md +26 -0
- package/dist/bin/BinObjects.d.ts +9 -0
- package/dist/bin/BinObjects.js +15 -0
- package/dist/bin/BinObjects.js.map +1 -0
- package/dist/bin/cliOperations.d.ts +12 -0
- package/dist/bin/cliOperations.js +156 -0
- package/dist/bin/cliOperations.js.map +1 -0
- package/dist/bin/copyDb.d.ts +2 -0
- package/dist/bin/copyDb.js +298 -0
- package/dist/bin/copyDb.js.map +1 -0
- package/dist/bin/harper.d.ts +2 -0
- package/dist/bin/harper.js +148 -0
- package/dist/bin/harper.js.map +1 -0
- package/dist/bin/install.d.ts +2 -0
- package/dist/bin/install.js +16 -0
- package/dist/bin/install.js.map +1 -0
- package/dist/bin/lite.d.ts +1 -0
- package/dist/bin/lite.js +6 -0
- package/dist/bin/lite.js.map +1 -0
- package/dist/bin/restart.d.ts +13 -0
- package/dist/bin/restart.js +194 -0
- package/dist/bin/restart.js.map +1 -0
- package/dist/bin/run.d.ts +21 -0
- package/dist/bin/run.js +365 -0
- package/dist/bin/run.js.map +1 -0
- package/dist/bin/status.d.ts +2 -0
- package/dist/bin/status.js +56 -0
- package/dist/bin/status.js.map +1 -0
- package/dist/bin/stop.d.ts +2 -0
- package/dist/bin/stop.js +17 -0
- package/dist/bin/stop.js.map +1 -0
- package/dist/bin/upgrade.d.ts +7 -0
- package/dist/bin/upgrade.js +111 -0
- package/dist/bin/upgrade.js.map +1 -0
- package/dist/components/Application.d.ts +110 -0
- package/dist/components/Application.js +509 -0
- package/dist/components/Application.js.map +1 -0
- package/dist/components/ApplicationScope.d.ts +28 -0
- package/dist/components/ApplicationScope.js +81 -0
- package/dist/components/ApplicationScope.js.map +1 -0
- package/dist/components/Component.d.ts +21 -0
- package/dist/components/Component.js +43 -0
- package/dist/components/Component.js.map +1 -0
- package/dist/components/ComponentV1.d.ts +69 -0
- package/dist/components/ComponentV1.js +263 -0
- package/dist/components/ComponentV1.js.map +1 -0
- package/dist/components/DEFAULT_CONFIG.d.ts +18 -0
- package/dist/components/DEFAULT_CONFIG.js +22 -0
- package/dist/components/DEFAULT_CONFIG.js.map +1 -0
- package/dist/components/EntryHandler.d.ts +61 -0
- package/dist/components/EntryHandler.js +148 -0
- package/dist/components/EntryHandler.js.map +1 -0
- package/dist/components/OptionsWatcher.d.ts +75 -0
- package/dist/components/OptionsWatcher.js +281 -0
- package/dist/components/OptionsWatcher.js.map +1 -0
- package/dist/components/PluginModule.d.ts +5 -0
- package/dist/components/PluginModule.js +3 -0
- package/dist/components/PluginModule.js.map +1 -0
- package/dist/components/Scope.d.ts +49 -0
- package/dist/components/Scope.js +262 -0
- package/dist/components/Scope.js.map +1 -0
- package/dist/components/componentLoader.js +498 -0
- package/dist/components/componentLoader.js.map +1 -0
- package/dist/components/deriveCommonPatternBase.d.ts +1 -0
- package/dist/components/deriveCommonPatternBase.js +34 -0
- package/dist/components/deriveCommonPatternBase.js.map +1 -0
- package/dist/components/deriveGlobOptions.d.ts +13 -0
- package/dist/components/deriveGlobOptions.js +30 -0
- package/dist/components/deriveGlobOptions.js.map +1 -0
- package/dist/components/deriveURLPath.d.ts +3 -0
- package/dist/components/deriveURLPath.js +55 -0
- package/dist/components/deriveURLPath.js.map +1 -0
- package/dist/components/operations.d.ts +97 -0
- package/dist/components/operations.js +556 -0
- package/dist/components/operations.js.map +1 -0
- package/dist/components/operationsValidation.d.ts +44 -0
- package/dist/components/operationsValidation.js +221 -0
- package/dist/components/operationsValidation.js.map +1 -0
- package/dist/components/packageComponent.d.ts +8 -0
- package/dist/components/packageComponent.js +42 -0
- package/dist/components/packageComponent.js.map +1 -0
- package/dist/components/requestRestart.d.ts +3 -0
- package/dist/components/requestRestart.js +27 -0
- package/dist/components/requestRestart.js.map +1 -0
- package/dist/components/resolveBaseURLPath.d.ts +15 -0
- package/dist/components/resolveBaseURLPath.js +38 -0
- package/dist/components/resolveBaseURLPath.js.map +1 -0
- package/dist/components/status/ComponentStatus.d.ts +61 -0
- package/dist/components/status/ComponentStatus.js +102 -0
- package/dist/components/status/ComponentStatus.js.map +1 -0
- package/dist/components/status/ComponentStatusRegistry.d.ts +89 -0
- package/dist/components/status/ComponentStatusRegistry.js +195 -0
- package/dist/components/status/ComponentStatusRegistry.js.map +1 -0
- package/dist/components/status/api.d.ts +104 -0
- package/dist/components/status/api.js +137 -0
- package/dist/components/status/api.js.map +1 -0
- package/dist/components/status/crossThread.d.ts +62 -0
- package/dist/components/status/crossThread.js +343 -0
- package/dist/components/status/crossThread.js.map +1 -0
- package/dist/components/status/errors.d.ts +68 -0
- package/dist/components/status/errors.js +123 -0
- package/dist/components/status/errors.js.map +1 -0
- package/dist/components/status/index.d.ts +35 -0
- package/dist/components/status/index.js +75 -0
- package/dist/components/status/index.js.map +1 -0
- package/dist/components/status/internal.d.ts +40 -0
- package/dist/components/status/internal.js +76 -0
- package/dist/components/status/internal.js.map +1 -0
- package/dist/components/status/registry.d.ts +10 -0
- package/dist/components/status/registry.js +14 -0
- package/dist/components/status/registry.js.map +1 -0
- package/dist/components/status/types.d.ts +94 -0
- package/dist/components/status/types.js +20 -0
- package/dist/components/status/types.js.map +1 -0
- package/dist/config/RootConfigWatcher.d.ts +10 -0
- package/dist/config/RootConfigWatcher.js +59 -0
- package/dist/config/RootConfigWatcher.js.map +1 -0
- package/dist/config/configHelpers.d.ts +6 -0
- package/dist/config/configHelpers.js +47 -0
- package/dist/config/configHelpers.js.map +1 -0
- package/dist/config/configUtils.d.ts +85 -0
- package/dist/config/configUtils.js +801 -0
- package/dist/config/configUtils.js.map +1 -0
- package/dist/config/harperConfigEnvVars.d.ts +46 -0
- package/dist/config/harperConfigEnvVars.js +527 -0
- package/dist/config/harperConfigEnvVars.js.map +1 -0
- package/dist/dataLayer/CreateAttributeObject.d.ts +19 -0
- package/dist/dataLayer/CreateAttributeObject.js +23 -0
- package/dist/dataLayer/CreateAttributeObject.js.map +1 -0
- package/dist/dataLayer/CreateTableObject.d.ts +7 -0
- package/dist/dataLayer/CreateTableObject.js +10 -0
- package/dist/dataLayer/CreateTableObject.js.map +1 -0
- package/dist/dataLayer/DataLayerObjects.d.ts +22 -0
- package/dist/dataLayer/DataLayerObjects.js +33 -0
- package/dist/dataLayer/DataLayerObjects.js.map +1 -0
- package/dist/dataLayer/DeleteBeforeObject.d.ts +18 -0
- package/dist/dataLayer/DeleteBeforeObject.js +21 -0
- package/dist/dataLayer/DeleteBeforeObject.js.map +1 -0
- package/dist/dataLayer/DeleteObject.d.ts +19 -0
- package/dist/dataLayer/DeleteObject.js +23 -0
- package/dist/dataLayer/DeleteObject.js.map +1 -0
- package/dist/dataLayer/DropAttributeObject.d.ts +7 -0
- package/dist/dataLayer/DropAttributeObject.js +10 -0
- package/dist/dataLayer/DropAttributeObject.js.map +1 -0
- package/dist/dataLayer/GetBackupObject.d.ts +16 -0
- package/dist/dataLayer/GetBackupObject.js +20 -0
- package/dist/dataLayer/GetBackupObject.js.map +1 -0
- package/dist/dataLayer/InsertObject.d.ts +20 -0
- package/dist/dataLayer/InsertObject.js +24 -0
- package/dist/dataLayer/InsertObject.js.map +1 -0
- package/dist/dataLayer/ReadAuditLogObject.d.ts +18 -0
- package/dist/dataLayer/ReadAuditLogObject.js +22 -0
- package/dist/dataLayer/ReadAuditLogObject.js.map +1 -0
- package/dist/dataLayer/SQLSearch.d.ts +171 -0
- package/dist/dataLayer/SQLSearch.js +1168 -0
- package/dist/dataLayer/SQLSearch.js.map +1 -0
- package/dist/dataLayer/SearchByConditionsObject.d.ts +85 -0
- package/dist/dataLayer/SearchByConditionsObject.js +57 -0
- package/dist/dataLayer/SearchByConditionsObject.js.map +1 -0
- package/dist/dataLayer/SearchByHashObject.d.ts +17 -0
- package/dist/dataLayer/SearchByHashObject.js +20 -0
- package/dist/dataLayer/SearchByHashObject.js.map +1 -0
- package/dist/dataLayer/SearchObject.d.ts +30 -0
- package/dist/dataLayer/SearchObject.js +33 -0
- package/dist/dataLayer/SearchObject.js.map +1 -0
- package/dist/dataLayer/SqlSearchObject.d.ts +10 -0
- package/dist/dataLayer/SqlSearchObject.js +13 -0
- package/dist/dataLayer/SqlSearchObject.js.map +1 -0
- package/dist/dataLayer/UpdateObject.d.ts +18 -0
- package/dist/dataLayer/UpdateObject.js +22 -0
- package/dist/dataLayer/UpdateObject.js.map +1 -0
- package/dist/dataLayer/UpsertObject.d.ts +18 -0
- package/dist/dataLayer/UpsertObject.js +22 -0
- package/dist/dataLayer/UpsertObject.js.map +1 -0
- package/dist/dataLayer/bulkLoad.d.ts +28 -0
- package/dist/dataLayer/bulkLoad.js +624 -0
- package/dist/dataLayer/bulkLoad.js.map +1 -0
- package/dist/dataLayer/dataObjects/BulkLoadObjects.d.ts +17 -0
- package/dist/dataLayer/dataObjects/BulkLoadObjects.js +25 -0
- package/dist/dataLayer/dataObjects/BulkLoadObjects.js.map +1 -0
- package/dist/dataLayer/dataObjects/UpsertObject.d.ts +18 -0
- package/dist/dataLayer/dataObjects/UpsertObject.js +22 -0
- package/dist/dataLayer/dataObjects/UpsertObject.js.map +1 -0
- package/dist/dataLayer/delete.d.ts +22 -0
- package/dist/dataLayer/delete.js +111 -0
- package/dist/dataLayer/delete.js.map +1 -0
- package/dist/dataLayer/export.d.ts +15 -0
- package/dist/dataLayer/export.js +302 -0
- package/dist/dataLayer/export.js.map +1 -0
- package/dist/dataLayer/getBackup.d.ts +8 -0
- package/dist/dataLayer/getBackup.js +28 -0
- package/dist/dataLayer/getBackup.js.map +1 -0
- package/dist/dataLayer/harperBridge/BridgeMethods.d.ts +24 -0
- package/dist/dataLayer/harperBridge/BridgeMethods.js +62 -0
- package/dist/dataLayer/harperBridge/BridgeMethods.js.map +1 -0
- package/dist/dataLayer/harperBridge/ResourceBridge.d.ts +104 -0
- package/dist/dataLayer/harperBridge/ResourceBridge.js +630 -0
- package/dist/dataLayer/harperBridge/ResourceBridge.js.map +1 -0
- package/dist/dataLayer/harperBridge/bridgeUtility/insertUpdateReturnObj.d.ts +14 -0
- package/dist/dataLayer/harperBridge/bridgeUtility/insertUpdateReturnObj.js +24 -0
- package/dist/dataLayer/harperBridge/bridgeUtility/insertUpdateReturnObj.js.map +1 -0
- package/dist/dataLayer/harperBridge/bridgeUtility/insertUpdateValidate.d.ts +11 -0
- package/dist/dataLayer/harperBridge/bridgeUtility/insertUpdateValidate.js +68 -0
- package/dist/dataLayer/harperBridge/bridgeUtility/insertUpdateValidate.js.map +1 -0
- package/dist/dataLayer/harperBridge/harperBridge.d.ts +2 -0
- package/dist/dataLayer/harperBridge/harperBridge.js +18 -0
- package/dist/dataLayer/harperBridge/harperBridge.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/LMDBBridge.d.ts +47 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/LMDBBridge.js +99 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/LMDBBridge.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/DeleteAuditLogsBeforeResults.d.ts +15 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/DeleteAuditLogsBeforeResults.js +18 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/DeleteAuditLogsBeforeResults.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateAttribute.d.ts +12 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateAttribute.js +74 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateAttribute.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateRecords.d.ts +13 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateRecords.js +54 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateRecords.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateSchema.d.ts +6 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateSchema.js +23 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateSchema.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateTable.d.ts +8 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateTable.js +62 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateTable.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteAuditLogsBefore.d.ts +8 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteAuditLogsBefore.js +82 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteAuditLogsBefore.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteRecords.d.ts +11 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteRecords.js +76 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteRecords.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropAttribute.d.ts +9 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropAttribute.js +83 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropAttribute.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropSchema.d.ts +6 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropSchema.js +73 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropSchema.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropTable.d.ts +6 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropTable.js +110 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropTable.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbFlush.d.ts +14 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbFlush.js +33 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbFlush.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetBackup.d.ts +7 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetBackup.js +110 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetBackup.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByHash.d.ts +6 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByHash.js +21 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByHash.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByValue.d.ts +14 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByValue.js +25 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByValue.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbReadAuditLog.d.ts +7 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbReadAuditLog.js +180 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbReadAuditLog.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByConditions.d.ts +8 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByConditions.js +134 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByConditions.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByHash.d.ts +6 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByHash.js +14 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByHash.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByValue.d.ts +15 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByValue.js +26 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByValue.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbTransaction.d.ts +8 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbTransaction.js +17 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbTransaction.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpdateRecords.d.ts +12 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpdateRecords.js +52 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpdateRecords.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpsertRecords.d.ts +15 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpsertRecords.js +56 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpsertRecords.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBCreateAttributeObject.d.ts +16 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBCreateAttributeObject.js +20 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBCreateAttributeObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBDeleteTransactionObject.d.ts +16 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBDeleteTransactionObject.js +22 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBDeleteTransactionObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBInsertTransactionObject.d.ts +16 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBInsertTransactionObject.js +21 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBInsertTransactionObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBTransactionObject.d.ts +19 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBTransactionObject.js +22 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBTransactionObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpdateTransactionObject.d.ts +18 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpdateTransactionObject.js +23 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpdateTransactionObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpsertTransactionObject.d.ts +18 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpsertTransactionObject.js +23 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpsertTransactionObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.d.ts +21 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.js +24 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializeHashSearch.d.ts +7 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializeHashSearch.js +19 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializeHashSearch.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializePaths.d.ts +22 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializePaths.js +137 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializePaths.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCheckForNewAttributes.d.ts +9 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCheckForNewAttributes.js +73 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCheckForNewAttributes.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCreateTransactionsAuditEnvironment.d.ts +8 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCreateTransactionsAuditEnvironment.js +38 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCreateTransactionsAuditEnvironment.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.d.ts +8 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.js +29 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbProcessRows.d.ts +17 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbProcessRows.js +63 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbProcessRows.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbSearch.d.ts +106 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbSearch.js +251 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbSearch.js.map +1 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbWriteTransaction.d.ts +8 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbWriteTransaction.js +66 -0
- package/dist/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbWriteTransaction.js.map +1 -0
- package/dist/dataLayer/hdbInfoController.d.ts +28 -0
- package/dist/dataLayer/hdbInfoController.js +203 -0
- package/dist/dataLayer/hdbInfoController.js.map +1 -0
- package/dist/dataLayer/insert.d.ts +44 -0
- package/dist/dataLayer/insert.js +192 -0
- package/dist/dataLayer/insert.js.map +1 -0
- package/dist/dataLayer/readAuditLog.d.ts +8 -0
- package/dist/dataLayer/readAuditLog.js +37 -0
- package/dist/dataLayer/readAuditLog.js.map +1 -0
- package/dist/dataLayer/schema.d.ts +24 -0
- package/dist/dataLayer/schema.js +225 -0
- package/dist/dataLayer/schema.js.map +1 -0
- package/dist/dataLayer/schemaDescribe.d.ts +26 -0
- package/dist/dataLayer/schemaDescribe.js +265 -0
- package/dist/dataLayer/schemaDescribe.js.map +1 -0
- package/dist/dataLayer/search.d.ts +4 -0
- package/dist/dataLayer/search.js +56 -0
- package/dist/dataLayer/search.js.map +1 -0
- package/dist/dataLayer/transaction.d.ts +8 -0
- package/dist/dataLayer/transaction.js +16 -0
- package/dist/dataLayer/transaction.js.map +1 -0
- package/dist/dataLayer/update.d.ts +15 -0
- package/dist/dataLayer/update.js +107 -0
- package/dist/dataLayer/update.js.map +1 -0
- package/dist/globals.d.ts +7 -0
- package/dist/globals.js +12 -0
- package/dist/globals.js.map +1 -0
- package/dist/index.d.ts +7 -0
- package/dist/index.js +40 -0
- package/dist/index.js.map +1 -0
- package/dist/json/systemSchema.json +373 -0
- package/dist/launchServiceScripts/utility/checkNodeVersion.d.ts +4 -0
- package/dist/launchServiceScripts/utility/checkNodeVersion.js +13 -0
- package/dist/launchServiceScripts/utility/checkNodeVersion.js.map +1 -0
- package/dist/resources/DatabaseTransaction.d.ts +96 -0
- package/dist/resources/DatabaseTransaction.js +354 -0
- package/dist/resources/DatabaseTransaction.js.map +1 -0
- package/dist/resources/ErrorResource.d.ts +26 -0
- package/dist/resources/ErrorResource.js +60 -0
- package/dist/resources/ErrorResource.js.map +1 -0
- package/dist/resources/IterableEventQueue.d.ts +19 -0
- package/dist/resources/IterableEventQueue.js +103 -0
- package/dist/resources/IterableEventQueue.js.map +1 -0
- package/dist/resources/LMDBTransaction.d.ts +43 -0
- package/dist/resources/LMDBTransaction.js +371 -0
- package/dist/resources/LMDBTransaction.js.map +1 -0
- package/dist/resources/RecordEncoder.d.ts +65 -0
- package/dist/resources/RecordEncoder.js +700 -0
- package/dist/resources/RecordEncoder.js.map +1 -0
- package/dist/resources/RequestTarget.d.ts +57 -0
- package/dist/resources/RequestTarget.js +91 -0
- package/dist/resources/RequestTarget.js.map +1 -0
- package/dist/resources/Resource.d.ts +152 -0
- package/dist/resources/Resource.js +727 -0
- package/dist/resources/Resource.js.map +1 -0
- package/dist/resources/ResourceInterface.d.ts +162 -0
- package/dist/resources/ResourceInterface.js +3 -0
- package/dist/resources/ResourceInterface.js.map +1 -0
- package/dist/resources/ResourceInterfaceV2.d.ts +21 -0
- package/dist/resources/ResourceInterfaceV2.js +3 -0
- package/dist/resources/ResourceInterfaceV2.js.map +1 -0
- package/dist/resources/ResourceV2.d.ts +30 -0
- package/dist/resources/ResourceV2.js +27 -0
- package/dist/resources/ResourceV2.js.map +1 -0
- package/dist/resources/Resources.d.ts +36 -0
- package/dist/resources/Resources.js +155 -0
- package/dist/resources/Resources.js.map +1 -0
- package/dist/resources/RocksIndexStore.d.ts +24 -0
- package/dist/resources/RocksIndexStore.js +54 -0
- package/dist/resources/RocksIndexStore.js.map +1 -0
- package/dist/resources/RocksTransactionLogStore.d.ts +60 -0
- package/dist/resources/RocksTransactionLogStore.js +344 -0
- package/dist/resources/RocksTransactionLogStore.js.map +1 -0
- package/dist/resources/Table.d.ts +854 -0
- package/dist/resources/Table.js +4350 -0
- package/dist/resources/Table.js.map +1 -0
- package/dist/resources/analytics/hostnames.d.ts +16 -0
- package/dist/resources/analytics/hostnames.js +72 -0
- package/dist/resources/analytics/hostnames.js.map +1 -0
- package/dist/resources/analytics/metadata.d.ts +9 -0
- package/dist/resources/analytics/metadata.js +12 -0
- package/dist/resources/analytics/metadata.js.map +1 -0
- package/dist/resources/analytics/profile.d.ts +2 -0
- package/dist/resources/analytics/profile.js +144 -0
- package/dist/resources/analytics/profile.js.map +1 -0
- package/dist/resources/analytics/read.d.ts +41 -0
- package/dist/resources/analytics/read.js +189 -0
- package/dist/resources/analytics/read.js.map +1 -0
- package/dist/resources/analytics/write.d.ts +131 -0
- package/dist/resources/analytics/write.js +822 -0
- package/dist/resources/analytics/write.js.map +1 -0
- package/dist/resources/auditStore.d.ts +81 -0
- package/dist/resources/auditStore.js +572 -0
- package/dist/resources/auditStore.js.map +1 -0
- package/dist/resources/blob.d.ts +137 -0
- package/dist/resources/blob.js +1273 -0
- package/dist/resources/blob.js.map +1 -0
- package/dist/resources/crdt.d.ts +19 -0
- package/dist/resources/crdt.js +146 -0
- package/dist/resources/crdt.js.map +1 -0
- package/dist/resources/dataLoader.d.ts +98 -0
- package/dist/resources/dataLoader.js +461 -0
- package/dist/resources/dataLoader.js.map +1 -0
- package/dist/resources/databases.d.ts +131 -0
- package/dist/resources/databases.js +1220 -0
- package/dist/resources/databases.js.map +1 -0
- package/dist/resources/graphql.d.ts +19 -0
- package/dist/resources/graphql.js +223 -0
- package/dist/resources/graphql.js.map +1 -0
- package/dist/resources/indexes/HierarchicalNavigableSmallWorld.d.ts +87 -0
- package/dist/resources/indexes/HierarchicalNavigableSmallWorld.js +598 -0
- package/dist/resources/indexes/HierarchicalNavigableSmallWorld.js.map +1 -0
- package/dist/resources/indexes/customIndexes.d.ts +4 -0
- package/dist/resources/indexes/customIndexes.js +10 -0
- package/dist/resources/indexes/customIndexes.js.map +1 -0
- package/dist/resources/indexes/vector.d.ts +2 -0
- package/dist/resources/indexes/vector.js +40 -0
- package/dist/resources/indexes/vector.js.map +1 -0
- package/dist/resources/jsResource.d.ts +24 -0
- package/dist/resources/jsResource.js +82 -0
- package/dist/resources/jsResource.js.map +1 -0
- package/dist/resources/loadEnv.d.ts +5 -0
- package/dist/resources/loadEnv.js +28 -0
- package/dist/resources/loadEnv.js.map +1 -0
- package/dist/resources/login.d.ts +3 -0
- package/dist/resources/login.js +22 -0
- package/dist/resources/login.js.map +1 -0
- package/dist/resources/openApi.d.ts +27 -0
- package/dist/resources/openApi.js +327 -0
- package/dist/resources/openApi.js.map +1 -0
- package/dist/resources/registrationDeprecated.d.ts +4 -0
- package/dist/resources/registrationDeprecated.js +11 -0
- package/dist/resources/registrationDeprecated.js.map +1 -0
- package/dist/resources/replayLogs.d.ts +2 -0
- package/dist/resources/replayLogs.js +170 -0
- package/dist/resources/replayLogs.js.map +1 -0
- package/dist/resources/roles.d.ts +11 -0
- package/dist/resources/roles.js +102 -0
- package/dist/resources/roles.js.map +1 -0
- package/dist/resources/search.d.ts +39 -0
- package/dist/resources/search.js +1333 -0
- package/dist/resources/search.js.map +1 -0
- package/dist/resources/tracked.d.ts +49 -0
- package/dist/resources/tracked.js +665 -0
- package/dist/resources/tracked.js.map +1 -0
- package/dist/resources/transaction.d.ts +9 -0
- package/dist/resources/transaction.js +89 -0
- package/dist/resources/transaction.js.map +1 -0
- package/dist/resources/transactionBroadcast.d.ts +38 -0
- package/dist/resources/transactionBroadcast.js +263 -0
- package/dist/resources/transactionBroadcast.js.map +1 -0
- package/dist/security/auth.d.ts +9 -0
- package/dist/security/auth.js +408 -0
- package/dist/security/auth.js.map +1 -0
- package/dist/security/certificateVerification/certificateVerificationSource.d.ts +18 -0
- package/dist/security/certificateVerification/certificateVerificationSource.js +78 -0
- package/dist/security/certificateVerification/certificateVerificationSource.js.map +1 -0
- package/dist/security/certificateVerification/configValidation.d.ts +14 -0
- package/dist/security/certificateVerification/configValidation.js +101 -0
- package/dist/security/certificateVerification/configValidation.js.map +1 -0
- package/dist/security/certificateVerification/crlVerification.d.ts +29 -0
- package/dist/security/certificateVerification/crlVerification.js +564 -0
- package/dist/security/certificateVerification/crlVerification.js.map +1 -0
- package/dist/security/certificateVerification/index.d.ts +31 -0
- package/dist/security/certificateVerification/index.js +111 -0
- package/dist/security/certificateVerification/index.js.map +1 -0
- package/dist/security/certificateVerification/ocspVerification.d.ts +23 -0
- package/dist/security/certificateVerification/ocspVerification.js +117 -0
- package/dist/security/certificateVerification/ocspVerification.js.map +1 -0
- package/dist/security/certificateVerification/pkijs-ed25519-patch.d.ts +14 -0
- package/dist/security/certificateVerification/pkijs-ed25519-patch.js +183 -0
- package/dist/security/certificateVerification/pkijs-ed25519-patch.js.map +1 -0
- package/dist/security/certificateVerification/types.d.ts +105 -0
- package/dist/security/certificateVerification/types.js +6 -0
- package/dist/security/certificateVerification/types.js.map +1 -0
- package/dist/security/certificateVerification/verificationConfig.d.ts +29 -0
- package/dist/security/certificateVerification/verificationConfig.js +121 -0
- package/dist/security/certificateVerification/verificationConfig.js.map +1 -0
- package/dist/security/certificateVerification/verificationUtils.d.ts +79 -0
- package/dist/security/certificateVerification/verificationUtils.js +441 -0
- package/dist/security/certificateVerification/verificationUtils.js.map +1 -0
- package/dist/security/cryptoHash.d.ts +2 -0
- package/dist/security/cryptoHash.js +35 -0
- package/dist/security/cryptoHash.js.map +1 -0
- package/dist/security/data_objects/PermissionAttributeResponseObject.d.ts +11 -0
- package/dist/security/data_objects/PermissionAttributeResponseObject.js +14 -0
- package/dist/security/data_objects/PermissionAttributeResponseObject.js.map +1 -0
- package/dist/security/data_objects/PermissionResponseObject.d.ts +57 -0
- package/dist/security/data_objects/PermissionResponseObject.js +105 -0
- package/dist/security/data_objects/PermissionResponseObject.js.map +1 -0
- package/dist/security/data_objects/PermissionTableResponseObject.d.ts +16 -0
- package/dist/security/data_objects/PermissionTableResponseObject.js +19 -0
- package/dist/security/data_objects/PermissionTableResponseObject.js.map +1 -0
- package/dist/security/fastifyAuth.d.ts +2 -0
- package/dist/security/fastifyAuth.js +135 -0
- package/dist/security/fastifyAuth.js.map +1 -0
- package/dist/security/impersonation.d.ts +11 -0
- package/dist/security/impersonation.js +139 -0
- package/dist/security/impersonation.js.map +1 -0
- package/dist/security/jsLoader.d.ts +9 -0
- package/dist/security/jsLoader.js +522 -0
- package/dist/security/jsLoader.js.map +1 -0
- package/dist/security/keys.d.ts +119 -0
- package/dist/security/keys.js +866 -0
- package/dist/security/keys.js.map +1 -0
- package/dist/security/permissionsTranslator.d.ts +9 -0
- package/dist/security/permissionsTranslator.js +269 -0
- package/dist/security/permissionsTranslator.js.map +1 -0
- package/dist/security/role.d.ts +5 -0
- package/dist/security/role.js +160 -0
- package/dist/security/role.js.map +1 -0
- package/dist/security/tokenAuthentication.d.ts +38 -0
- package/dist/security/tokenAuthentication.js +205 -0
- package/dist/security/tokenAuthentication.js.map +1 -0
- package/dist/security/user.d.ts +77 -0
- package/dist/security/user.js +349 -0
- package/dist/security/user.js.map +1 -0
- package/dist/server/DurableSubscriptionsSession.d.ts +74 -0
- package/dist/server/DurableSubscriptionsSession.js +511 -0
- package/dist/server/DurableSubscriptionsSession.js.map +1 -0
- package/dist/server/REST.d.ts +16 -0
- package/dist/server/REST.js +423 -0
- package/dist/server/REST.js.map +1 -0
- package/dist/server/Server.d.ts +62 -0
- package/dist/server/Server.js +27 -0
- package/dist/server/Server.js.map +1 -0
- package/dist/server/fastifyRoutes/helpers/getCORSOptions.d.ts +11 -0
- package/dist/server/fastifyRoutes/helpers/getCORSOptions.js +32 -0
- package/dist/server/fastifyRoutes/helpers/getCORSOptions.js.map +1 -0
- package/dist/server/fastifyRoutes/helpers/getHeaderTimeoutConfig.d.ts +6 -0
- package/dist/server/fastifyRoutes/helpers/getHeaderTimeoutConfig.js +13 -0
- package/dist/server/fastifyRoutes/helpers/getHeaderTimeoutConfig.js.map +1 -0
- package/dist/server/fastifyRoutes/helpers/getServerOptions.d.ts +12 -0
- package/dist/server/fastifyRoutes/helpers/getServerOptions.js +30 -0
- package/dist/server/fastifyRoutes/helpers/getServerOptions.js.map +1 -0
- package/dist/server/fastifyRoutes/plugins/hdbCore.d.ts +2 -0
- package/dist/server/fastifyRoutes/plugins/hdbCore.js +31 -0
- package/dist/server/fastifyRoutes/plugins/hdbCore.js.map +1 -0
- package/dist/server/fastifyRoutes.d.ts +25 -0
- package/dist/server/fastifyRoutes.js +235 -0
- package/dist/server/fastifyRoutes.js.map +1 -0
- package/dist/server/graphqlQuerying.d.ts +1 -0
- package/dist/server/graphqlQuerying.js +630 -0
- package/dist/server/graphqlQuerying.js.map +1 -0
- package/dist/server/http.d.ts +15 -0
- package/dist/server/http.js +650 -0
- package/dist/server/http.js.map +1 -0
- package/dist/server/itc/serverHandlers.d.ts +10 -0
- package/dist/server/itc/serverHandlers.js +153 -0
- package/dist/server/itc/serverHandlers.js.map +1 -0
- package/dist/server/itc/utility/ITCEventObject.d.ts +6 -0
- package/dist/server/itc/utility/ITCEventObject.js +9 -0
- package/dist/server/itc/utility/ITCEventObject.js.map +1 -0
- package/dist/server/jobs/JobObject.d.ts +15 -0
- package/dist/server/jobs/JobObject.js +22 -0
- package/dist/server/jobs/JobObject.js.map +1 -0
- package/dist/server/jobs/jobProcess.d.ts +1 -0
- package/dist/server/jobs/jobProcess.js +66 -0
- package/dist/server/jobs/jobProcess.js.map +1 -0
- package/dist/server/jobs/jobRunner.d.ts +11 -0
- package/dist/server/jobs/jobRunner.js +160 -0
- package/dist/server/jobs/jobRunner.js.map +1 -0
- package/dist/server/jobs/jobs.d.ts +20 -0
- package/dist/server/jobs/jobs.js +267 -0
- package/dist/server/jobs/jobs.js.map +1 -0
- package/dist/server/loadRootComponents.d.ts +5 -0
- package/dist/server/loadRootComponents.js +45 -0
- package/dist/server/loadRootComponents.js.map +1 -0
- package/dist/server/mqtt.d.ts +9 -0
- package/dist/server/mqtt.js +466 -0
- package/dist/server/mqtt.js.map +1 -0
- package/dist/server/nodeName.d.ts +5 -0
- package/dist/server/nodeName.js +84 -0
- package/dist/server/nodeName.js.map +1 -0
- package/dist/server/operationsServer.d.ts +48 -0
- package/dist/server/operationsServer.js +265 -0
- package/dist/server/operationsServer.js.map +1 -0
- package/dist/server/serverHelpers/Headers.d.ts +20 -0
- package/dist/server/serverHelpers/Headers.js +134 -0
- package/dist/server/serverHelpers/Headers.js.map +1 -0
- package/dist/server/serverHelpers/JSONStream.d.ts +14 -0
- package/dist/server/serverHelpers/JSONStream.js +322 -0
- package/dist/server/serverHelpers/JSONStream.js.map +1 -0
- package/dist/server/serverHelpers/OperationFunctionObject.d.ts +9 -0
- package/dist/server/serverHelpers/OperationFunctionObject.js +17 -0
- package/dist/server/serverHelpers/OperationFunctionObject.js.map +1 -0
- package/dist/server/serverHelpers/Request.d.ts +69 -0
- package/dist/server/serverHelpers/Request.js +141 -0
- package/dist/server/serverHelpers/Request.js.map +1 -0
- package/dist/server/serverHelpers/contentTypes.d.ts +57 -0
- package/dist/server/serverHelpers/contentTypes.js +639 -0
- package/dist/server/serverHelpers/contentTypes.js.map +1 -0
- package/dist/server/serverHelpers/requestTimePlugin.d.ts +2 -0
- package/dist/server/serverHelpers/requestTimePlugin.js +56 -0
- package/dist/server/serverHelpers/requestTimePlugin.js.map +1 -0
- package/dist/server/serverHelpers/serverHandlers.d.ts +6 -0
- package/dist/server/serverHelpers/serverHandlers.js +130 -0
- package/dist/server/serverHelpers/serverHandlers.js.map +1 -0
- package/dist/server/serverHelpers/serverUtilities.d.ts +29 -0
- package/dist/server/serverHelpers/serverUtilities.js +356 -0
- package/dist/server/serverHelpers/serverUtilities.js.map +1 -0
- package/dist/server/serverRegistry.d.ts +3 -0
- package/dist/server/serverRegistry.js +11 -0
- package/dist/server/serverRegistry.js.map +1 -0
- package/dist/server/static.d.ts +16 -0
- package/dist/server/static.js +164 -0
- package/dist/server/static.js.map +1 -0
- package/dist/server/status/definitions.d.ts +27 -0
- package/dist/server/status/definitions.js +22 -0
- package/dist/server/status/definitions.js.map +1 -0
- package/dist/server/status/index.d.ts +26 -0
- package/dist/server/status/index.js +89 -0
- package/dist/server/status/index.js.map +1 -0
- package/dist/server/storageReclamation.d.ts +18 -0
- package/dist/server/storageReclamation.js +96 -0
- package/dist/server/storageReclamation.js.map +1 -0
- package/dist/server/threads/itc.d.ts +53 -0
- package/dist/server/threads/itc.js +81 -0
- package/dist/server/threads/itc.js.map +1 -0
- package/dist/server/threads/manageThreads.d.ts +30 -0
- package/dist/server/threads/manageThreads.js +579 -0
- package/dist/server/threads/manageThreads.js.map +1 -0
- package/dist/server/threads/socketRouter.d.ts +6 -0
- package/dist/server/threads/socketRouter.js +395 -0
- package/dist/server/threads/socketRouter.js.map +1 -0
- package/dist/server/threads/threadServer.d.ts +5 -0
- package/dist/server/threads/threadServer.js +288 -0
- package/dist/server/threads/threadServer.js.map +1 -0
- package/dist/server/throttle.d.ts +7 -0
- package/dist/server/throttle.js +71 -0
- package/dist/server/throttle.js.map +1 -0
- package/dist/sqlTranslator/SelectValidator.d.ts +79 -0
- package/dist/sqlTranslator/SelectValidator.js +274 -0
- package/dist/sqlTranslator/SelectValidator.js.map +1 -0
- package/dist/sqlTranslator/alasqlFunctionImporter.d.ts +2 -0
- package/dist/sqlTranslator/alasqlFunctionImporter.js +55 -0
- package/dist/sqlTranslator/alasqlFunctionImporter.js.map +1 -0
- package/dist/sqlTranslator/deleteTranslator.d.ts +2 -0
- package/dist/sqlTranslator/deleteTranslator.js +56 -0
- package/dist/sqlTranslator/deleteTranslator.js.map +1 -0
- package/dist/sqlTranslator/index.d.ts +16 -0
- package/dist/sqlTranslator/index.js +215 -0
- package/dist/sqlTranslator/index.js.map +1 -0
- package/dist/sqlTranslator/sql_statement_bucket.d.ts +46 -0
- package/dist/sqlTranslator/sql_statement_bucket.js +430 -0
- package/dist/sqlTranslator/sql_statement_bucket.js.map +1 -0
- package/dist/upgrade/UpgradeObjects.d.ts +5 -0
- package/dist/upgrade/UpgradeObjects.js +12 -0
- package/dist/upgrade/UpgradeObjects.js.map +1 -0
- package/dist/upgrade/directives/directivesController.d.ts +30 -0
- package/dist/upgrade/directives/directivesController.js +76 -0
- package/dist/upgrade/directives/directivesController.js.map +1 -0
- package/dist/upgrade/directivesManager.d.ts +7 -0
- package/dist/upgrade/directivesManager.js +125 -0
- package/dist/upgrade/directivesManager.js.map +1 -0
- package/dist/upgrade/upgradePrompt.d.ts +13 -0
- package/dist/upgrade/upgradePrompt.js +102 -0
- package/dist/upgrade/upgradePrompt.js.map +1 -0
- package/dist/upgrade/upgradeUtilities.d.ts +10 -0
- package/dist/upgrade/upgradeUtilities.js +26 -0
- package/dist/upgrade/upgradeUtilities.js.map +1 -0
- package/dist/utility/AWS/AWSConnector.d.ts +2 -0
- package/dist/utility/AWS/AWSConnector.js +26 -0
- package/dist/utility/AWS/AWSConnector.js.map +1 -0
- package/dist/utility/OperationFunctionCaller.d.ts +9 -0
- package/dist/utility/OperationFunctionCaller.js +58 -0
- package/dist/utility/OperationFunctionCaller.js.map +1 -0
- package/dist/utility/assignCmdEnvVariables.d.ts +10 -0
- package/dist/utility/assignCmdEnvVariables.js +55 -0
- package/dist/utility/assignCmdEnvVariables.js.map +1 -0
- package/dist/utility/common_utils.d.ts +264 -0
- package/dist/utility/common_utils.js +806 -0
- package/dist/utility/common_utils.js.map +1 -0
- package/dist/utility/environment/environmentManager.d.ts +41 -0
- package/dist/utility/environment/environmentManager.js +179 -0
- package/dist/utility/environment/environmentManager.js.map +1 -0
- package/dist/utility/environment/systemInformation.d.ts +67 -0
- package/dist/utility/environment/systemInformation.js +326 -0
- package/dist/utility/environment/systemInformation.js.map +1 -0
- package/dist/utility/errors/commonErrors.d.ts +171 -0
- package/dist/utility/errors/commonErrors.js +230 -0
- package/dist/utility/errors/commonErrors.js.map +1 -0
- package/dist/utility/errors/hdbError.d.ts +76 -0
- package/dist/utility/errors/hdbError.js +128 -0
- package/dist/utility/errors/hdbError.js.map +1 -0
- package/dist/utility/functions/date/dateFunctions.d.ts +11 -0
- package/dist/utility/functions/date/dateFunctions.js +64 -0
- package/dist/utility/functions/date/dateFunctions.js.map +1 -0
- package/dist/utility/functions/geo.d.ts +74 -0
- package/dist/utility/functions/geo.js +311 -0
- package/dist/utility/functions/geo.js.map +1 -0
- package/dist/utility/functions/sql/alaSQLExtension.d.ts +13 -0
- package/dist/utility/functions/sql/alaSQLExtension.js +96 -0
- package/dist/utility/functions/sql/alaSQLExtension.js.map +1 -0
- package/dist/utility/globalSchema.d.ts +151 -0
- package/dist/utility/globalSchema.js +34 -0
- package/dist/utility/globalSchema.js.map +1 -0
- package/dist/utility/hdbTerms.d.ts +737 -0
- package/dist/utility/hdbTerms.js +756 -0
- package/dist/utility/hdbTerms.js.map +1 -0
- package/dist/utility/install/checkJWTTokensExist.d.ts +5 -0
- package/dist/utility/install/checkJWTTokensExist.js +53 -0
- package/dist/utility/install/checkJWTTokensExist.js.map +1 -0
- package/dist/utility/install/installer.d.ts +17 -0
- package/dist/utility/install/installer.js +569 -0
- package/dist/utility/install/installer.js.map +1 -0
- package/dist/utility/installation.d.ts +12 -0
- package/dist/utility/installation.js +64 -0
- package/dist/utility/installation.js.map +1 -0
- package/dist/utility/lmdb/DBIDefinition.d.ts +16 -0
- package/dist/utility/lmdb/DBIDefinition.js +19 -0
- package/dist/utility/lmdb/DBIDefinition.js.map +1 -0
- package/dist/utility/lmdb/DeleteRecordsResponseObject.d.ts +21 -0
- package/dist/utility/lmdb/DeleteRecordsResponseObject.js +24 -0
- package/dist/utility/lmdb/DeleteRecordsResponseObject.js.map +1 -0
- package/dist/utility/lmdb/InsertRecordsResponseObject.d.ts +18 -0
- package/dist/utility/lmdb/InsertRecordsResponseObject.js +21 -0
- package/dist/utility/lmdb/InsertRecordsResponseObject.js.map +1 -0
- package/dist/utility/lmdb/OpenDBIObject.d.ts +23 -0
- package/dist/utility/lmdb/OpenDBIObject.js +29 -0
- package/dist/utility/lmdb/OpenDBIObject.js.map +1 -0
- package/dist/utility/lmdb/OpenEnvironmentObject.d.ts +22 -0
- package/dist/utility/lmdb/OpenEnvironmentObject.js +40 -0
- package/dist/utility/lmdb/OpenEnvironmentObject.js.map +1 -0
- package/dist/utility/lmdb/UpdateRecordsResponseObject.d.ts +21 -0
- package/dist/utility/lmdb/UpdateRecordsResponseObject.js +24 -0
- package/dist/utility/lmdb/UpdateRecordsResponseObject.js.map +1 -0
- package/dist/utility/lmdb/UpsertRecordsResponseObject.d.ts +18 -0
- package/dist/utility/lmdb/UpsertRecordsResponseObject.js +21 -0
- package/dist/utility/lmdb/UpsertRecordsResponseObject.js.map +1 -0
- package/dist/utility/lmdb/cleanLMDBMap.d.ts +6 -0
- package/dist/utility/lmdb/cleanLMDBMap.js +63 -0
- package/dist/utility/lmdb/cleanLMDBMap.js.map +1 -0
- package/dist/utility/lmdb/commonUtility.d.ts +28 -0
- package/dist/utility/lmdb/commonUtility.js +120 -0
- package/dist/utility/lmdb/commonUtility.js.map +1 -0
- package/dist/utility/lmdb/deleteUtility.d.ts +10 -0
- package/dist/utility/lmdb/deleteUtility.js +115 -0
- package/dist/utility/lmdb/deleteUtility.js.map +1 -0
- package/dist/utility/lmdb/environmentUtility.d.ts +81 -0
- package/dist/utility/lmdb/environmentUtility.js +432 -0
- package/dist/utility/lmdb/environmentUtility.js.map +1 -0
- package/dist/utility/lmdb/searchCursorFunctions.d.ts +93 -0
- package/dist/utility/lmdb/searchCursorFunctions.js +174 -0
- package/dist/utility/lmdb/searchCursorFunctions.js.map +1 -0
- package/dist/utility/lmdb/searchUtility.d.ts +204 -0
- package/dist/utility/lmdb/searchUtility.js +724 -0
- package/dist/utility/lmdb/searchUtility.js.map +1 -0
- package/dist/utility/lmdb/terms.d.ts +34 -0
- package/dist/utility/lmdb/terms.js +52 -0
- package/dist/utility/lmdb/terms.js.map +1 -0
- package/dist/utility/lmdb/writeUtility.d.ts +32 -0
- package/dist/utility/lmdb/writeUtility.js +360 -0
- package/dist/utility/lmdb/writeUtility.js.map +1 -0
- package/dist/utility/logging/harper_logger.d.ts +141 -0
- package/dist/utility/logging/harper_logger.js +862 -0
- package/dist/utility/logging/harper_logger.js.map +1 -0
- package/dist/utility/logging/logRotator.d.ts +19 -0
- package/dist/utility/logging/logRotator.js +146 -0
- package/dist/utility/logging/logRotator.js.map +1 -0
- package/dist/utility/logging/logger.d.ts +11 -0
- package/dist/utility/logging/logger.js +19 -0
- package/dist/utility/logging/logger.js.map +1 -0
- package/dist/utility/logging/readLog.d.ts +8 -0
- package/dist/utility/logging/readLog.js +339 -0
- package/dist/utility/logging/readLog.js.map +1 -0
- package/dist/utility/logging/transactionLog.d.ts +8 -0
- package/dist/utility/logging/transactionLog.js +46 -0
- package/dist/utility/logging/transactionLog.js.map +1 -0
- package/dist/utility/mount_hdb.d.ts +2 -0
- package/dist/utility/mount_hdb.js +51 -0
- package/dist/utility/mount_hdb.js.map +1 -0
- package/dist/utility/npmUtilities.d.ts +6 -0
- package/dist/utility/npmUtilities.js +91 -0
- package/dist/utility/npmUtilities.js.map +1 -0
- package/dist/utility/operationPermissions.d.ts +36 -0
- package/dist/utility/operationPermissions.js +116 -0
- package/dist/utility/operationPermissions.js.map +1 -0
- package/dist/utility/operation_authorization.d.ts +18 -0
- package/dist/utility/operation_authorization.js +667 -0
- package/dist/utility/operation_authorization.js.map +1 -0
- package/dist/utility/packageUtils.d.ts +9 -0
- package/dist/utility/packageUtils.js +52 -0
- package/dist/utility/packageUtils.js.map +1 -0
- package/dist/utility/password.d.ts +20 -0
- package/dist/utility/password.js +119 -0
- package/dist/utility/password.js.map +1 -0
- package/dist/utility/processManagement/processManagement.d.ts +35 -0
- package/dist/utility/processManagement/processManagement.js +188 -0
- package/dist/utility/processManagement/processManagement.js.map +1 -0
- package/dist/utility/processManagement/servicesConfig.d.ts +29 -0
- package/dist/utility/processManagement/servicesConfig.js +52 -0
- package/dist/utility/processManagement/servicesConfig.js.map +1 -0
- package/dist/utility/scripts/restartHdb.d.ts +1 -0
- package/dist/utility/scripts/restartHdb.js +23 -0
- package/dist/utility/scripts/restartHdb.js.map +1 -0
- package/dist/utility/signalling.d.ts +2 -0
- package/dist/utility/signalling.js +35 -0
- package/dist/utility/signalling.js.map +1 -0
- package/dist/utility/terms/certificates.d.ts +46 -0
- package/dist/utility/terms/certificates.js +65 -0
- package/dist/utility/terms/certificates.js.map +1 -0
- package/dist/utility/when.d.ts +3 -0
- package/dist/utility/when.js +18 -0
- package/dist/utility/when.js.map +1 -0
- package/dist/validation/bulkDeleteValidator.d.ts +2 -0
- package/dist/validation/bulkDeleteValidator.js +21 -0
- package/dist/validation/bulkDeleteValidator.js.map +1 -0
- package/dist/validation/check_permissions.d.ts +2 -0
- package/dist/validation/check_permissions.js +20 -0
- package/dist/validation/check_permissions.js.map +1 -0
- package/dist/validation/common_validators.d.ts +19 -0
- package/dist/validation/common_validators.js +76 -0
- package/dist/validation/common_validators.js.map +1 -0
- package/dist/validation/configValidator.d.ts +8 -0
- package/dist/validation/configValidator.js +292 -0
- package/dist/validation/configValidator.js.map +1 -0
- package/dist/validation/deleteValidator.d.ts +2 -0
- package/dist/validation/deleteValidator.js +15 -0
- package/dist/validation/deleteValidator.js.map +1 -0
- package/dist/validation/fileLoadValidator.d.ts +4 -0
- package/dist/validation/fileLoadValidator.js +138 -0
- package/dist/validation/fileLoadValidator.js.map +1 -0
- package/dist/validation/insertValidator.d.ts +2 -0
- package/dist/validation/insertValidator.js +38 -0
- package/dist/validation/insertValidator.js.map +1 -0
- package/dist/validation/installValidator.d.ts +7 -0
- package/dist/validation/installValidator.js +28 -0
- package/dist/validation/installValidator.js.map +1 -0
- package/dist/validation/readLogValidator.d.ts +2 -0
- package/dist/validation/readLogValidator.js +48 -0
- package/dist/validation/readLogValidator.js.map +1 -0
- package/dist/validation/role_validation.d.ts +3 -0
- package/dist/validation/role_validation.js +284 -0
- package/dist/validation/role_validation.js.map +1 -0
- package/dist/validation/schemaMetadataValidator.d.ts +16 -0
- package/dist/validation/schemaMetadataValidator.js +38 -0
- package/dist/validation/schemaMetadataValidator.js.map +1 -0
- package/dist/validation/searchValidator.d.ts +2 -0
- package/dist/validation/searchValidator.js +141 -0
- package/dist/validation/searchValidator.js.map +1 -0
- package/dist/validation/statusValidator.d.ts +19 -0
- package/dist/validation/statusValidator.js +95 -0
- package/dist/validation/statusValidator.js.map +1 -0
- package/dist/validation/transactionLogValidator.d.ts +2 -0
- package/dist/validation/transactionLogValidator.js +28 -0
- package/dist/validation/transactionLogValidator.js.map +1 -0
- package/dist/validation/user_validation.d.ts +3 -0
- package/dist/validation/user_validation.js +52 -0
- package/dist/validation/user_validation.js.map +1 -0
- package/dist/validation/validationWrapper.d.ts +15 -0
- package/dist/validation/validationWrapper.js +95 -0
- package/dist/validation/validationWrapper.js.map +1 -0
- package/package.json +225 -0
- package/static/README.md +13 -0
- package/static/ascii_logo.txt +21 -0
- package/static/defaultConfig.yaml +75 -0
|
@@ -0,0 +1,4350 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* This module provides the main table implementation of the Resource API, providing full access to Harper
|
|
4
|
+
* tables through the interface defined by the Resource class. This module is responsible for handling these
|
|
5
|
+
* table-level interactions, loading records, updating records, querying, and more.
|
|
6
|
+
*/
|
|
7
|
+
// TypeScript-emitted interop helper: re-exports property `k` of module `m` onto
// namespace object `o`, optionally renamed to `k2`. When Object.create is
// available it installs a getter so the binding stays live; otherwise it falls
// back to a one-time copy.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    // Wrap in a getter unless the source is an ES module exposing a frozen
    // (non-writable, non-configurable) data property, which is safe to reuse as-is.
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
|
|
18
|
+
// TypeScript-emitted interop helper: attaches `value` as the `default` export of
// namespace object `target` — as a non-writable defined property when
// Object.defineProperty-era semantics are available, else by plain assignment.
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(target, value) {
    Object.defineProperty(target, "default", { enumerable: true, value: value });
}) : function(target, value) {
    target["default"] = value;
});
|
|
23
|
+
// TypeScript-emitted interop helper: converts a CommonJS module into an
// ES-module-style namespace object. Real ES modules (marked __esModule) pass
// through unchanged; otherwise every own key except "default" is re-bound onto a
// fresh object and the module itself becomes the `default` export.
var __importStar = (this && this.__importStar) || (function () {
    // Self-replacing lazy initializer: on first call, `ownKeys` rebinds itself to
    // the best available key-enumeration strategy and delegates to it.
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
|
|
40
|
+
// TypeScript-emitted interop helper: wraps a CommonJS export so callers can
// consume it as if it were an ES module with a `default` export.
var __importDefault = (this && this.__importDefault) || function (mod) {
    if (mod && mod.__esModule) {
        return mod;
    }
    return { "default": mod };
};
|
|
43
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
44
|
+
exports.EVICTED = exports.INVALIDATED = void 0;
|
|
45
|
+
exports.makeTable = makeTable;
|
|
46
|
+
exports.coerceType = coerceType;
|
|
47
|
+
const hdbTerms_ts_1 = require("../utility/hdbTerms.js");
|
|
48
|
+
const commonUtility_js_1 = require("../utility/lmdb/commonUtility.js");
|
|
49
|
+
const lodash_1 = __importDefault(require("lodash"));
|
|
50
|
+
const extended_iterable_1 = require("@harperfast/extended-iterable");
|
|
51
|
+
const lmdbProcessRows_js_1 = __importDefault(require("../dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbProcessRows.js"));
|
|
52
|
+
const Resource_ts_1 = require("./Resource.js");
|
|
53
|
+
const when_ts_1 = require("../utility/when.js");
|
|
54
|
+
const DatabaseTransaction_ts_1 = require("./DatabaseTransaction.js");
|
|
55
|
+
const envMngr = __importStar(require("../utility/environment/environmentManager.js"));
|
|
56
|
+
const transactionBroadcast_ts_1 = require("./transactionBroadcast.js");
|
|
57
|
+
const hdbError_js_1 = require("../utility/errors/hdbError.js");
|
|
58
|
+
const signalling = __importStar(require("../utility/signalling.js"));
|
|
59
|
+
const itc_js_1 = require("../server/threads/itc.js");
|
|
60
|
+
const databases_ts_1 = require("./databases.js");
|
|
61
|
+
const search_ts_1 = require("./search.js");
|
|
62
|
+
const logger_ts_1 = require("../utility/logging/logger.js");
|
|
63
|
+
const tracked_ts_1 = require("./tracked.js");
|
|
64
|
+
const transaction_ts_1 = require("./transaction.js");
|
|
65
|
+
const ordered_binary_1 = require("ordered-binary");
|
|
66
|
+
const manageThreads_js_1 = require("../server/threads/manageThreads.js");
|
|
67
|
+
const auditStore_ts_1 = require("./auditStore.js");
|
|
68
|
+
const common_utils_js_1 = require("../utility/common_utils.js");
|
|
69
|
+
const RecordEncoder_ts_1 = require("./RecordEncoder.js");
|
|
70
|
+
const write_ts_1 = require("./analytics/write.js");
|
|
71
|
+
const crdt_ts_1 = require("./crdt.js");
|
|
72
|
+
const Headers_ts_1 = require("../server/serverHelpers/Headers.js");
|
|
73
|
+
const node_fs_1 = __importDefault(require("node:fs"));
|
|
74
|
+
const blob_ts_1 = require("./blob.js");
|
|
75
|
+
const storageReclamation_ts_1 = require("../server/storageReclamation.js");
|
|
76
|
+
const harper_logger_js_1 = __importDefault(require("../utility/logging/harper_logger.js"));
|
|
77
|
+
const throttle_ts_1 = require("../server/throttle.js");
|
|
78
|
+
const rocksdb_js_1 = require("@harperfast/rocksdb-js");
|
|
79
|
+
const LMDBTransaction_1 = require("./LMDBTransaction");
|
|
80
|
+
const contentTypes_1 = require("../server/serverHelpers/contentTypes");
|
|
81
|
+
const { sortBy } = lodash_1.default;
const { validateAttribute } = lmdbProcessRows_js_1.default;
// Nine-byte prototype value: an 8-byte timestamp slot followed by a 0xc0 byte
// (marked "null" below — presumably the msgpack nil marker; TODO confirm against
// the record encoder).
const NULL_WITH_TIMESTAMP = new Uint8Array(9);
NULL_WITH_TIMESTAMP[8] = 0xc0; // null
const UNCACHEABLE_TIMESTAMP = Infinity; // we use this when dynamic content is accessed that we can't safely cache, and this prevents earlier timestamps from changing the "last" modification
const RECORD_PRUNING_INTERVAL = 60000; // one minute
// Load configuration synchronously before the env-dependent constants below read it.
envMngr.initSync();
const LMDB_PREFETCH_WRITES = envMngr.get(hdbTerms_ts_1.CONFIG_PARAMS.STORAGE_PREFETCHWRITES);
const LOCK_TIMEOUT = 10000; // presumably milliseconds — confirm at the use site
exports.INVALIDATED = 1;
exports.EVICTED = 8; // note that 2 is reserved for timestamps
// Scratch buffer for test-serializing keys before a write; allocUnsafeSlow keeps
// it out of Buffer's shared pool since it is long-lived.
const TEST_WRITE_KEY_BUFFER = Buffer.allocUnsafeSlow(8192);
const MAX_KEY_BYTES = 1978; // presumably the storage engine's maximum key size — confirm
const EVENT_HIGH_WATER_MARK = 100;
// Permission set granting unrestricted access, used for superuser contexts.
const FULL_PERMISSIONS = {
    read: true,
    insert: true,
    update: true,
    delete: true,
    isSuperUser: true,
};
|
|
102
|
+
/**
|
|
103
|
+
* This returns a Table class for the given table settings (determined from the metadata table)
|
|
104
|
+
* Instances of the returned class are Resource instances, intended to provide a consistent view or transaction of the table
|
|
105
|
+
* @param options
|
|
106
|
+
*/
|
|
107
|
+
function makeTable(options) {
|
|
108
|
+
const { primaryKey, indices, tableId, tableName, primaryStore, databasePath, databaseName, auditStore, schemaDefined, dbisDB: dbisDb, sealed, splitSegments, replicate, } = options;
|
|
109
|
+
let { expirationMS: expirationMs, evictionMS: evictionMs, audit, trackDeletes } = options;
|
|
110
|
+
evictionMs ??= 0;
|
|
111
|
+
let { attributes } = options;
|
|
112
|
+
if (!attributes)
|
|
113
|
+
attributes = [];
|
|
114
|
+
const updateRecord = (0, RecordEncoder_ts_1.recordUpdater)(primaryStore, tableId, auditStore);
|
|
115
|
+
let sourceLoad; // if a source has a load function (replicator), record it here
|
|
116
|
+
let hasSourceGet;
|
|
117
|
+
let primaryKeyAttribute = {};
|
|
118
|
+
let lastEvictionCompletion = Promise.resolve();
|
|
119
|
+
let createdTimeProperty, updatedTimeProperty, expiresAtProperty;
|
|
120
|
+
for (const attribute of attributes) {
|
|
121
|
+
if (attribute.assignCreatedTime || attribute.name === '__createdtime__')
|
|
122
|
+
createdTimeProperty = attribute;
|
|
123
|
+
if (attribute.assignUpdatedTime || attribute.name === '__updatedtime__')
|
|
124
|
+
updatedTimeProperty = attribute;
|
|
125
|
+
if (attribute.expiresAt)
|
|
126
|
+
expiresAtProperty = attribute;
|
|
127
|
+
if (attribute.isPrimaryKey)
|
|
128
|
+
primaryKeyAttribute = attribute;
|
|
129
|
+
}
|
|
130
|
+
let deleteCallbackHandle;
|
|
131
|
+
let prefetchIds = [];
|
|
132
|
+
let prefetchCallbacks = [];
|
|
133
|
+
let untilNextPrefetch = 1;
|
|
134
|
+
let nonPrefetchSequence = 2;
|
|
135
|
+
let cleanupInterval = 86400000;
|
|
136
|
+
let cleanupPriority = 0;
|
|
137
|
+
let lastCleanupInterval;
|
|
138
|
+
let cleanupTimer;
|
|
139
|
+
let propertyResolvers;
|
|
140
|
+
let hasRelationships = false;
|
|
141
|
+
let runningRecordExpiration;
|
|
142
|
+
const isRocksDB = primaryStore instanceof rocksdb_js_1.RocksDatabase;
|
|
143
|
+
let idIncrementer;
|
|
144
|
+
let replicateToCount;
|
|
145
|
+
const databaseReplications = envMngr.get(hdbTerms_ts_1.CONFIG_PARAMS.REPLICATION_DATABASES);
|
|
146
|
+
if (Array.isArray(databaseReplications)) {
|
|
147
|
+
for (const dbReplication of databaseReplications) {
|
|
148
|
+
if (dbReplication.name === databaseName && dbReplication.replicateTo >= 0) {
|
|
149
|
+
replicateToCount = dbReplication.replicateTo;
|
|
150
|
+
break;
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
const MAX_PREFETCH_SEQUENCE = 10;
|
|
155
|
+
const MAX_PREFETCH_BUNDLE = 6;
|
|
156
|
+
if (audit)
|
|
157
|
+
addDeleteRemoval();
|
|
158
|
+
(0, storageReclamation_ts_1.onStorageReclamation)(primaryStore.path, (priority) => {
|
|
159
|
+
if (hasSourceGet)
|
|
160
|
+
return scheduleCleanup(priority);
|
|
161
|
+
});
|
|
162
|
+
// Tracked-object wrapper exposing per-entry metadata (version / expiration) and
// additive numeric updates on properties.
class Updatable extends tracked_ts_1.GenericTrackedObject {
    // Version recorded for this record's database entry, if any.
    getUpdatedTime() {
        const entry = RecordEncoder_ts_1.entryMap.get(this.getRecord());
        return entry?.version;
    }
    // Expiration time recorded for this record's database entry, if any.
    getExpiresAt() {
        const entry = RecordEncoder_ts_1.entryMap.get(this.getRecord());
        return entry?.expiresAt;
    }
    // Record an additive delta for `property`; only numeric deltas are allowed.
    addTo(property, value) {
        const kind = typeof value;
        if (kind !== 'number' && kind !== 'bigint') {
            throw new Error('Can not add or subtract a non-numeric value');
        }
        this.set(property, new tracked_ts_1.Addition(value));
    }
    // Subtraction is addition of the negated delta.
    subtractFrom(property, value) {
        return this.addTo(property, -value);
    }
}
|
|
181
|
+
class TableResource extends Resource_ts_1.Resource {
|
|
182
|
+
#record; // the stored/frozen record from the database and stored in the cache (should not be modified directly)
|
|
183
|
+
#changes; // the changes to the record that have been made (should not be modified directly)
|
|
184
|
+
#version; // version of the record
|
|
185
|
+
#entry; // the entry from the database
|
|
186
|
+
#savingOperation; // operation for the record is currently being saved
|
|
187
|
+
static name = tableName; // for display/debugging purposes
|
|
188
|
+
static primaryStore = primaryStore;
|
|
189
|
+
static auditStore = auditStore;
|
|
190
|
+
static primaryKey = primaryKey;
|
|
191
|
+
static tableName = tableName;
|
|
192
|
+
static tableId = tableId;
|
|
193
|
+
static indices = indices;
|
|
194
|
+
static audit = audit;
|
|
195
|
+
static databasePath = databasePath;
|
|
196
|
+
static databaseName = databaseName;
|
|
197
|
+
static attributes = attributes;
|
|
198
|
+
static replicate = replicate;
|
|
199
|
+
static sealed = sealed;
|
|
200
|
+
static splitSegments = splitSegments ?? true;
|
|
201
|
+
static createdTimeProperty = createdTimeProperty;
|
|
202
|
+
static updatedTimeProperty = updatedTimeProperty;
|
|
203
|
+
static propertyResolvers;
|
|
204
|
+
static userResolvers = {};
|
|
205
|
+
static source;
|
|
206
|
+
static getResidencyById;
|
|
207
|
+
// Current expiration TTL for this table (milliseconds, per the name). Reads the
// enclosing makeTable closure variable rather than a static field so it reflects
// runtime changes — presumably made via setTTLExpiration; confirm there.
static get expirationMS() {
    return expirationMs;
}
|
|
210
|
+
static dbisDB = dbisDb;
|
|
211
|
+
static schemaDefined = schemaDefined;
|
|
212
|
+
/**
 * This defines a source for a table. This effectively makes a table into a cache, where the canonical
 * source of data (or source of truth) is provided here in the Resource argument. Additional options
 * can be provided to indicate how the caching should be handled.
 * @param source The Resource (or resource-like class) that acts as the canonical data source
 * @param options Caching options; `expiration`/`eviction`/`scanInterval` configure TTL behavior,
 *   `intermediateSource` marks the source as an intermediate layer rather than the origin
 * @returns this (the table class, for chaining)
 */
static sourcedFrom(source, options) {
    // define a source for retrieving invalidated entries for caching purposes
    if (options) {
        this.sourceOptions = options;
        if (options.expiration || options.eviction || options.scanInterval)
            this.setTTLExpiration(options);
    }
    if (options?.intermediateSource) {
        source.intermediateSource = true;
        // intermediateSource should register sourceLoad and setup subscription but not assign to this.source
    }
    else {
        if (this.source) {
            if (this.source.name === source.name) {
                // if we are adding a source that is already set, we don't add it again
                return;
            }
            throw new Error('Can not have multiple sources');
        }
        this.source = source;
    }
    hasSourceGet = hasSourceGet || (source.get && (!source.get.reliesOnPrototype || source.prototype.get));
    sourceLoad = sourceLoad || source.load;
    const shouldRevalidateEvents = this.source?.shouldRevalidateEvents;
    // External data source may provide a subscribe method, allowing for real-time proactive delivery
    // of data from the source to this caching table. This is generally greatly superior to expiration-based
    // caching since it much more accurately ensures freshness and maximizes caching time.
    // Here we subscribe the external data source if it is available, getting notification events
    // as they come in, and directly writing them to this table. We use the notification option to ensure
    // that we don't re-broadcast these as "requested" changes back to the source.
    // NOTE: this async IIFE is intentionally fire-and-forget; errors are logged in the catch below.
    (async () => {
        let userRoleUpdate = false;
        let lastSequenceId;
        // perform the write of an individual write event
        const writeUpdate = async (event, context) => {
            const value = event.value;
            // an event may target a sibling table in the same database; default to this table
            const Table = event.table ? databases_ts_1.databases[databaseName][event.table] : TableResource;
            if (databaseName === hdbTerms_ts_1.SYSTEM_SCHEMA_NAME &&
                (event.table === hdbTerms_ts_1.SYSTEM_TABLE_NAMES.ROLE_TABLE_NAME || event.table === hdbTerms_ts_1.SYSTEM_TABLE_NAMES.USER_TABLE_NAME)) {
                userRoleUpdate = true;
            }
            if (event.id === undefined) {
                // derive the id from the record's primary key when the event doesn't carry one
                event.id = value[Table.primaryKey];
                if (event.id === undefined)
                    throw new Error('Replication message without an id ' + JSON.stringify(event));
            }
            event.source = source;
            const options = {
                residencyId: getResidencyId(event.residencyList),
                isNotification: true, // prevents re-broadcasting this change back to the source
                ensureLoaded: false,
                nodeId: event.nodeId,
                viaNodeId: event.viaNodeId,
                async: true,
            };
            const id = event.id;
            const resource = await Table.getResource(id, context, options);
            if (event.finished)
                await event.finished;
            // dispatch to the appropriate internal write method for the event type
            switch (event.type) {
                case 'put':
                    return shouldRevalidateEvents
                        ? resource._writeInvalidate(id, value, options)
                        : resource._writeUpdate(id, value, true, options);
                case 'patch':
                    return shouldRevalidateEvents
                        ? resource._writeInvalidate(id, value, options)
                        : resource._writeUpdate(id, value, false, options);
                case 'delete':
                    return resource._writeDelete(id, options);
                case 'publish':
                case 'message':
                    return resource._writePublish(id, value, options);
                case 'invalidate':
                    return resource._writeInvalidate(id, value, options);
                case 'relocate':
                    return resource._writeRelocate(id, options);
                default:
                    logger_ts_1.logger.error?.('Unknown operation', event.type, event.id);
            }
        };
        try {
            const hasSubscribe = source.subscribe;
            // if subscriptions come in out-of-order, we need to track deletes to ensure consistency
            if (hasSubscribe && trackDeletes == undefined)
                trackDeletes = true;
            const subscriptionOptions = {
                // this is used to indicate that all threads are (presumably) making this subscription
                // and we do not need to propagate events across threads (more efficient)
                crossThreads: false,
                // this is used to indicate that we want, if possible, immediate notification of writes
                // within the process (not supported yet)
                inTransactionUpdates: true,
                // supports transaction operations
                supportsTransactions: true,
                // don't need the current state, should be up-to-date
                omitCurrent: true,
            };
            // by default only worker 0 subscribes; a source may override via subscribeOnThisThread
            const subscribeOnThisThread = source.subscribeOnThisThread
                ? source.subscribeOnThisThread((0, manageThreads_js_1.getWorkerIndex)(), subscriptionOptions)
                : (0, manageThreads_js_1.getWorkerIndex)() === 0;
            const subscription = hasSubscribe && subscribeOnThisThread && (await source.subscribe?.(subscriptionOptions));
            if (subscription) {
                let txnInProgress;
                // we listen for events by iterating through the async iterator provided by the subscription
                for await (const event of subscription) {
                    try {
                        const firstWrite = event.type === 'transaction' ? event.writes[0] : event;
                        if (!firstWrite) {
                            logger_ts_1.logger.error?.('Bad subscription event', event);
                            continue;
                        }
                        event.source = source;
                        if (event.type === 'end_txn') {
                            txnInProgress?.resolve();
                            let updateRecordedSequenceId;
                            if (event.localTime && lastSequenceId !== event.localTime) {
                                if (event.remoteNodeIds?.length > 0) {
                                    updateRecordedSequenceId = () => {
                                        // the key for tracking the sequence ids and txn times received from this node
                                        const seqKey = [Symbol.for('seq'), event.remoteNodeIds[0]];
                                        const existingSeq = dbisDb.get(seqKey);
                                        let nodeStates = existingSeq?.nodes;
                                        if (!nodeStates) {
                                            // if we don't have a list of nodes, we need to create one, with the main one using the existing seqId
                                            nodeStates = [];
                                        }
                                        // if we are not the only node in the list, we are getting proxied subscriptions, and we need
                                        // to track this separately
                                        // track the other nodes in the list
                                        for (const nodeId of event.remoteNodeIds.slice(1)) {
                                            let nodeState = nodeStates.find((existingNode) => existingNode.id === nodeId);
                                            // remove any duplicates
                                            nodeStates = nodeStates.filter((existingNode) => existingNode.id !== nodeId || existingNode === nodeState);
                                            if (!nodeState) {
                                                nodeState = { id: nodeId, seqId: 0 };
                                                nodeStates.push(nodeState);
                                            }
                                            nodeState.seqId = Math.max(existingSeq?.seqId ?? 1, event.localTime);
                                            if (nodeId === txnInProgress?.nodeId) {
                                                nodeState.lastTxnTime = event.timestamp;
                                            }
                                        }
                                        const seqId = Math.max(existingSeq?.seqId ?? 1, event.localTime);
                                        logger_ts_1.logger.trace?.('Received txn', databaseName, seqId, new Date(seqId), event.localTime, new Date(event.localTime), event.remoteNodeIds);
                                        dbisDb.put(seqKey, {
                                            seqId,
                                            nodes: nodeStates,
                                        });
                                    };
                                    lastSequenceId = event.localTime;
                                }
                            }
                            if (event.onCommit) {
                                // if there was an onCommit callback, call that. This function can be async
                                // and if so, we want to delay the recording of the sequence id until it finished
                                // (as it can be used to indicate more associated actions, like blob transfer, are in flight)
                                const onCommitFinished = txnInProgress
                                    ? txnInProgress.committed.then(event.onCommit)
                                    : event.onCommit();
                                if (updateRecordedSequenceId) {
                                    if (onCommitFinished?.then)
                                        onCommitFinished.then(updateRecordedSequenceId);
                                    else
                                        updateRecordedSequenceId();
                                }
                            }
                            else if (updateRecordedSequenceId)
                                updateRecordedSequenceId();
                            continue;
                        }
                        if (txnInProgress) {
                            if (event.beginTxn) {
                                // if we are starting a new transaction, finish the existing one
                                txnInProgress.resolve();
                            }
                            else {
                                // write in the current transaction if one is in progress
                                txnInProgress.writePromises.push(writeUpdate(event, txnInProgress));
                                continue;
                            }
                        }
                        // use the version as the transaction timestamp
                        if (!event.timestamp && event.version)
                            event.timestamp = event.version;
                        const commitResolution = (0, transaction_ts_1.transaction)(event, () => {
                            if (event.type === 'transaction') {
                                // if it is a transaction, we need to individually iterate through each write event
                                const promises = [];
                                for (const write of event.writes) {
                                    try {
                                        promises.push(writeUpdate(write, event));
                                    }
                                    catch (error) {
                                        error.message +=
                                            ' writing ' + JSON.stringify(write) + ' of event ' + JSON.stringify(event);
                                        throw error;
                                    }
                                }
                                return Promise.all(promises);
                            }
                            else if (event.type === 'define_schema') {
                                // ensure table has the provided attributes
                                const updatedAttributes = this.attributes.slice(0);
                                let hasChanges = false;
                                for (const attribute of event.attributes) {
                                    if (!updatedAttributes.find((existing) => existing.name === attribute.name)) {
                                        updatedAttributes.push(attribute);
                                        hasChanges = true;
                                    }
                                }
                                if (hasChanges) {
                                    (0, databases_ts_1.table)({
                                        table: tableName,
                                        database: databaseName,
                                        attributes: updatedAttributes,
                                        origin: 'cluster',
                                    });
                                    signalling.signalSchemaChange(new itc_js_1.SchemaEventMsg(process.pid, hdbTerms_ts_1.OPERATIONS_ENUM.CREATE_TABLE, databaseName, tableName));
                                }
                            }
                            else {
                                if (event.beginTxn) {
                                    // if we are beginning a new transaction, we record the current
                                    // event/context as transaction in progress and then future events
                                    // are applied with that context until the next transaction begins/ends
                                    txnInProgress = event;
                                    txnInProgress.writePromises = [writeUpdate(event, event)];
                                    return new Promise((resolve) => {
                                        // callback for when this transaction is finished (will be called on next txn begin/end).
                                        txnInProgress.resolve = () => resolve(Promise.all(txnInProgress.writePromises)); // and make sure we wait for the write update to finish
                                    });
                                }
                                return writeUpdate(event, event);
                            }
                        });
                        if (txnInProgress)
                            txnInProgress.committed = commitResolution;
                        if (userRoleUpdate && commitResolution && !commitResolution?.waitingForUserChange) {
                            // if the user role changed, asynchronously signal the user change (but don't block this function)
                            commitResolution.then(() => signalling.signalUserChange(new itc_js_1.UserEventMsg(process.pid)));
                            commitResolution.waitingForUserChange = true; // only need to send one signal per transaction
                        }
                        if (event.onCommit) {
                            if (commitResolution)
                                commitResolution.then(event.onCommit);
                            else
                                event.onCommit();
                        }
                    }
                    catch (error) {
                        // keep the subscription loop alive even if a single event fails
                        logger_ts_1.logger.error?.('error in subscription handler', error);
                    }
                }
            }
        }
        catch (error) {
            logger_ts_1.logger.error?.(error);
        }
    })();
    return this;
}
|
|
482
|
+
// A caching table is defined as one whose origin source provides a get method
// (hasSourceGet is set in sourcedFrom when the source supplies a usable get).
static get isCaching() {
    return hasSourceGet;
}
|
|
486
|
+
/** Indicates if the events should be revalidated when they are received. By default we do this if the get
 * method is overridden (i.e. this subclass has its own prototype.get). */
static get shouldRevalidateEvents() {
    return this.prototype.get !== TableResource.prototype.get;
}
|
|
491
|
+
/**
|
|
492
|
+
* Gets a resource instance, as defined by the Resource class, adding the table-specific handling
|
|
493
|
+
* of also loading the stored record into the resource instance.
|
|
494
|
+
* @param target
|
|
495
|
+
* @param request
|
|
496
|
+
* @param resourceOptions An important option is ensureLoaded, which can be used to indicate that it is necessary for a caching table to load data from the source if there is not a local copy of the data in the table (usually not necessary for a delete, for example).
|
|
497
|
+
* @returns
|
|
498
|
+
*/
|
|
499
|
+
static getResource(target, request, resourceOptions) {
|
|
500
|
+
const resource = super.getResource(target, request, resourceOptions);
|
|
501
|
+
if (this.loadAsInstance !== false) {
|
|
502
|
+
return resource._loadRecord(target, request, resourceOptions);
|
|
503
|
+
}
|
|
504
|
+
return resource;
|
|
505
|
+
}
|
|
506
|
+
// Load this resource's stored record (by id) into its private state, optionally
// falling back to the table's source when resourceOptions.ensureLoaded is set.
// Returns `this` (or a promise-like resolving to `this` when a source load is in flight).
_loadRecord(target, request, resourceOptions) {
    const id = target && typeof target === 'object' ? target.id : target;
    if (id == null)
        return this; // nothing to load without an id
    checkValidId(id);
    try {
        if (this.getRecord?.())
            return this; // already loaded, don't reload, current version may have modifications
        if (typeof id === 'object' && id && !Array.isArray(id)) {
            // only primitives and arrays are valid ids here
            throw new Error(`Invalid id ${JSON.stringify(id)}`);
        }
        const sync = target?.sync || primaryStore.cache?.get?.(id);
        const txn = txnForContext(request);
        const readTxn = txn.getReadTxn();
        if (readTxn?.isDone) {
            throw new Error('You can not read from a transaction that has already been committed/aborted');
        }
        return loadLocalRecord(id, request, { transaction: readTxn, ensureLoaded: resourceOptions?.ensureLoaded }, sync, (entry) => {
            if (entry) {
                TableResource._updateResource(this, entry);
            }
            else
                this.#record = null; // explicitly mark as "looked up and not found"
            if (request.onlyIfCached) {
                // don't go into the loading from source condition, but HTTP spec says to
                // return 504 (rather than 404) if there is no content and the cache-control header
                // dictates not to go to source
                if (!this.doesExist())
                    throw new hdbError_js_1.ServerError('Entry is not cached', 504);
            }
            else if (resourceOptions?.ensureLoaded) {
                const loadingFromSource = ensureLoadedFromSource(this.constructor.source, id, entry, request, this);
                if (loadingFromSource) {
                    txn?.disregardReadTxn(); // this could take some time, so don't keep the transaction open if possible
                    target.loadedFromSource = true;
                    return (0, when_ts_1.when)(loadingFromSource, (entry) => {
                        TableResource._updateResource(this, entry);
                        return this;
                    });
                }
                else if (hasSourceGet)
                    target.loadedFromSource = false; // mark it as cached
            }
            return this;
        });
    }
    catch (error) {
        // annotate serialization failures with the offending id for easier diagnosis
        if (error.message.includes('Unable to serialize object'))
            error.message += ': ' + JSON.stringify(id);
        throw error;
    }
}
|
|
558
|
+
// Copy a store entry into a resource's private state: the raw entry, the record
// value (null when the entry has no value), and the entry's version.
static _updateResource(resource, entry) {
    resource.#entry = entry;
    resource.#record = entry?.value ?? null;
    resource.#version = entry?.version;
}
|
|
563
|
+
/**
 * This is a request to explicitly ensure that the record is loaded from source, rather than only using the local record.
 * This will load from source if the current record is expired, missing, or invalidated.
 * @returns A promise-like that resolves once this resource's state has been refreshed from source,
 *   or undefined when no source load was needed.
 */
ensureLoaded() {
    const loadedFromSource = ensureLoadedFromSource(this.constructor.source, this.getId(), this.#entry, this.getContext());
    if (loadedFromSource) {
        // refresh the private entry/record/version once the source load completes
        return (0, when_ts_1.when)(loadedFromSource, (entry) => {
            this.#entry = entry;
            this.#record = entry.value;
            this.#version = entry.version;
        });
    }
}
|
|
578
|
+
// Generate a new primary key. String/ID key types fall through to the base class
// (GUID); numeric key types use an atomically incremented, range-allocated id shared
// across threads via a shared BigInt64Array buffer.
static getNewId() {
    const type = primaryKeyAttribute?.type;
    // the default Resource behavior is to return a GUID, but for a table we can return incrementing numeric keys if the type is (or can be) numeric
    if (type === 'String' || type === 'ID')
        return super.getNewId();
    if (!idIncrementer) {
        // if there is no id incrementer yet, we get or create one
        const idAllocationEntry = primaryStore.getEntry(Symbol.for('id_allocation'));
        let idAllocation = idAllocationEntry?.value;
        let lastKey;
        if (idAllocation &&
            idAllocation.nodeName === server.hostname &&
            (!hasOtherProcesses(primaryStore) || idAllocation.pid === process.pid)) {
            // the database has an existing id allocation that we can continue from
            const startingId = idAllocation.start;
            const endingId = idAllocation.end;
            lastKey = startingId;
            // once it is loaded, we need to find the last key in the allocated range and start from there
            for (const key of primaryStore.getKeys({ start: endingId, end: startingId, limit: 1, reverse: true })) {
                lastKey = key;
            }
        }
        else {
            // we need to create a new id allocation
            idAllocation = createNewAllocation(idAllocationEntry?.version ?? null);
            lastKey = idAllocation.start;
        }
        // all threads will use a shared buffer to atomically increment the id
        // first, we create our proposed incrementer buffer that will be used if we are the first thread to get here
        // and initialize it with the starting id
        idIncrementer = new BigInt64Array([BigInt(lastKey) + 1n]);
        // now get the selected incrementer buffer, this is the shared buffer that was first registered and that all threads will use
        idIncrementer = new BigInt64Array(primaryStore.getUserSharedBuffer('id', idIncrementer.buffer));
        // and we set the maximum safe id to the end of the allocated range before we check for conflicting ids again
        idIncrementer.maxSafeId = idAllocation.end;
    }
    // this is where we actually do the atomic incrementation. All the threads should be pointing to the same
    // memory location of this incrementer, so we can be sure that the id is unique and sequential.
    const nextId = Number(Atomics.add(idIncrementer, 0, 1n));
    const asyncIdExpansionThreshold = type === 'Int' ? 0x200 : 0x100000;
    if (nextId + asyncIdExpansionThreshold >= idIncrementer.maxSafeId) {
        const updateEnd = (inTxn) => {
            // we update the end of the allocation range after verifying we don't have any conflicting ids in front of us
            idIncrementer.maxSafeId = nextId + (type === 'Int' ? 0x3ff : 0x3fffff);
            let idAfter = (type === 'Int' ? Math.pow(2, 31) : Math.pow(2, 49)) - 1;
            const readTxn = inTxn ? undefined : primaryStore.useReadTransaction?.();
            // get the latest id after the read transaction to make sure we aren't reading any new ids that we assigned from this node
            const newestId = Number(idIncrementer[0]);
            for (const key of primaryStore.getKeys({
                start: newestId + 1,
                end: idAfter,
                limit: 1,
                transaction: readTxn,
            })) {
                idAfter = key;
            }
            readTxn?.done();
            const { value: updatedIdAllocation, version } = primaryStore.getEntry(Symbol.for('id_allocation'));
            if (idIncrementer.maxSafeId < idAfter) {
                // note that this is just a noop/direct callback if we are inside the sync transaction
                // first check to see if it actually got updated by another thread
                if (updatedIdAllocation.end > idIncrementer.maxSafeId - 100) {
                    // the allocation was already updated by another thread
                    return;
                }
                logger_ts_1.logger.info?.('New id allocation', nextId, idIncrementer.maxSafeId, version);
                primaryStore.put(Symbol.for('id_allocation'), {
                    start: updatedIdAllocation.start,
                    end: idIncrementer.maxSafeId,
                    nodeName: server.hostname,
                    pid: process.pid,
                }, Date.now(), version);
            }
            else {
                // indicate that we have run out of ids in the allocated range, so we need to allocate a new range
                logger_ts_1.logger.warn?.(`Id conflict detected, starting new id allocation range, attempting to allocate to ${idIncrementer.maxSafeId}, but id of ${idAfter} detected`);
                const idAllocation = createNewAllocation(version);
                // reassign the incrementer to the new range/starting point
                if (!idAllocation.alreadyUpdated)
                    Atomics.store(idIncrementer, 0, BigInt(idAllocation.start + 1));
                // and we set the maximum safe id to the end of the allocated range before we check for conflicting ids again
                idIncrementer.maxSafeId = idAllocation.end;
            }
        };
        if (nextId + asyncIdExpansionThreshold === idIncrementer.maxSafeId) {
            setImmediate(updateEnd); // if we are getting kind of close to the end, we try to update it asynchronously
        }
        else if (nextId + 100 >= idIncrementer.maxSafeId) {
            logger_ts_1.logger.warn?.(`Synchronous id allocation required on table ${tableName}${type == 'Int'
                ? ', it is highly recommended that you use Long or Float as the type for auto-incremented primary keys'
                : ''}`);
            // if we are very close to the end, synchronously update
            primaryStore.transactionSync(() => updateEnd(true));
        }
        //TODO: Add a check to recordUpdate to check if a new id infringes on the allocated id range
    }
    return nextId;
    // Create and persist a fresh id allocation range, probing random starting points
    // until a sufficiently large gap between existing ids is found.
    function createNewAllocation(expectedVersion) {
        // there is no id allocation (or it is for the wrong node name or used up), so we need to create one
        // start by determining the max id for the type
        const maxId = (type === 'Int' ? Math.pow(2, 31) : Math.pow(2, 49)) - 1;
        let safeDistance = maxId / 4; // we want to allocate ids in a range that is at least 1/4 of the total id space from ids in either direction
        let idBefore, idAfter;
        let complained = false;
        let lastKey;
        let idAllocation;
        do {
            // we start with a random id and verify that there is a good gap in the ids to allocate a decent range
            lastKey = Math.floor(Math.random() * maxId);
            idAllocation = {
                start: lastKey,
                end: lastKey + (type === 'Int' ? 0x400 : 0x400000),
                nodeName: server.hostname,
                pid: process.pid,
            };
            idBefore = 0;
            // now find the next id before the last key
            for (const key of primaryStore.getKeys({ start: lastKey, end: true, limit: 1, reverse: true })) {
                idBefore = key;
            }
            idAfter = maxId;
            // and next key after
            for (const key of primaryStore.getKeys({ start: lastKey + 1, end: maxId, limit: 1 })) {
                idAfter = key;
            }
            safeDistance *= 0.875; // if we fail, we try again with a smaller range, looking for a good gap without really knowing how packed the ids are
            if (safeDistance < 1000 && !complained) {
                complained = true;
                logger_ts_1.logger.error?.(`Id allocation in table ${tableName} is very dense, limited safe range of numbers to allocate ids in${type === 'Int'
                    ? ', it is highly recommended that you use Long or Float as the type for auto-incremented primary keys'
                    : ''}`, lastKey, idBefore, idAfter, safeDistance);
            }
            // see if we maintained an adequate distance from the surrounding ids
        } while (!(safeDistance < idAfter - lastKey && (safeDistance < lastKey - idBefore || idBefore === 0)));
        // we have to ensure that the id allocation is atomic and multiple threads don't set different ids, so we use a sync transaction
        return primaryStore.transactionSync(() => {
            // first check to see if it actually got set by another thread
            const updatedIdAllocation = primaryStore.getEntry(Symbol.for('id_allocation'));
            if ((updatedIdAllocation?.version ?? null) == expectedVersion) {
                logger_ts_1.logger.info?.('Allocated new id range', idAllocation);
                primaryStore.put(Symbol.for('id_allocation'), idAllocation, Date.now());
                return idAllocation;
            }
            else {
                logger_ts_1.logger.debug?.('Looks like ids were already allocated');
                return { alreadyUpdated: true, ...updatedIdAllocation.value };
            }
        });
    }
}
|
|
728
|
+
/**
 * Set TTL expiration for records in this table. On retrieval, record timestamps are checked for expiration.
 * This also informs the scheduling for record eviction.
 * @param expiration Either a number (time in seconds until records expire / are stale), or an
 *   options object with `expiration` (seconds until stale), `eviction` (extra seconds until
 *   records are removed), and `scanInterval` (seconds between cleanup scans).
 * @throws {Error} When the argument is neither a number nor an object, or when expiration is negative
 */
static setTTLExpiration(expiration) {
    // we set up a timer to remove expired entries. we only want the timer/reaper to run in one thread,
    // so we use the first one
    if (typeof expiration === 'number') {
        expirationMs = expiration * 1000;
        if (!evictionMs)
            evictionMs = 0; // by default, no extra time for eviction
    }
    else if (expiration && typeof expiration === 'object') {
        // an object with expiration times/options specified
        expirationMs = expiration.expiration * 1000;
        evictionMs = (expiration.eviction || 0) * 1000;
        // NaN when scanInterval is absent, which falls through to the default below
        cleanupInterval = expiration.scanInterval * 1000;
    }
    else
        throw new Error('Invalid expiration value type');
    if (expirationMs < 0)
        throw new Error('Expiration can not be negative');
    // default to one quarter of the total eviction time, and make sure it fits into a 32-bit signed integer
    cleanupInterval = cleanupInterval || (expirationMs + evictionMs) / 4;
    scheduleCleanup();
}
|
|
756
|
+
static getResidencyRecord(id) {
|
|
757
|
+
return dbisDb.get([Symbol.for('residency_by_id'), id]);
|
|
758
|
+
}
|
|
759
|
+
static setResidency(getResidency) {
|
|
760
|
+
TableResource.getResidency =
|
|
761
|
+
getResidency &&
|
|
762
|
+
((record, context) => {
|
|
763
|
+
try {
|
|
764
|
+
return getResidency(record, context);
|
|
765
|
+
}
|
|
766
|
+
catch (error) {
|
|
767
|
+
error.message += ` in residency function for table ${tableName}`;
|
|
768
|
+
throw error;
|
|
769
|
+
}
|
|
770
|
+
});
|
|
771
|
+
}
|
|
772
|
+
static setResidencyById(getResidencyById) {
|
|
773
|
+
TableResource.getResidencyById =
|
|
774
|
+
getResidencyById &&
|
|
775
|
+
((id) => {
|
|
776
|
+
try {
|
|
777
|
+
return getResidencyById(id);
|
|
778
|
+
}
|
|
779
|
+
catch (error) {
|
|
780
|
+
error.message += ` in residency function for table ${tableName}`;
|
|
781
|
+
throw error;
|
|
782
|
+
}
|
|
783
|
+
});
|
|
784
|
+
}
|
|
785
|
+
// Determine which nodes a record should reside on. Precedence: an installed id-based
// residency function, then an explicit replicateTo on the context (list or count),
// then the table's default replicateToCount; undefined means replicate everywhere.
static getResidency(record, context) {
    if (TableResource.getResidencyById) {
        return TableResource.getResidencyById(record[primaryKey]);
    }
    let count = replicateToCount;
    if (context.replicateTo != undefined) {
        // if the context specifies where we are replicating to, use that
        if (Array.isArray(context.replicateTo)) {
            // ensure this node is always included in an explicit node list
            return context.replicateTo.includes(server.hostname)
                ? context.replicateTo
                : [server.hostname, ...context.replicateTo];
        }
        if (context.replicateTo >= 0)
            count = context.replicateTo;
    }
    if (count >= 0 && server.nodes) {
        // if we are given a count, choose nodes and return them
        const replicateTo = [server.hostname]; // start with ourselves, we should always be in the list
        if (context.previousResidency) {
            // if we have a previous residency, we should preserve it
            replicateTo.push(...context.previousResidency.slice(0, count));
        }
        else {
            // otherwise need to create a new list of nodes to replicate to, based on available nodes
            // randomize this to ensure distribution of data
            const nodes = server.nodes.map((node) => node.name);
            const startingIndex = Math.floor(nodes.length * Math.random());
            replicateTo.push(...nodes.slice(startingIndex, startingIndex + count));
            // wrap around to the front of the node list if the random slice ran off the end
            const remainingToAdd = startingIndex + count - nodes.length;
            if (remainingToAdd > 0)
                replicateTo.push(...nodes.slice(0, remainingToAdd));
        }
        return replicateTo;
    }
    return; // returning undefined will return the default residency of replicating everywhere
}
|
|
821
|
+
/**
 * Turn on auditing at runtime
 */
static enableAuditing() {
    if (audit)
        return; // already enabled
    audit = true; // module/closure flag consulted by the write paths
    addDeleteRemoval();
    TableResource.audit = true; // keep the static property in sync with the closure flag
}
|
|
831
|
+
/**
|
|
832
|
+
* Coerce the id as a string to the correct type for the primary key
|
|
833
|
+
* @param id
|
|
834
|
+
* @returns
|
|
835
|
+
*/
|
|
836
|
+
static coerceId(id) {
|
|
837
|
+
if (id === '')
|
|
838
|
+
return null;
|
|
839
|
+
return coerceType(id, primaryKeyAttribute);
|
|
840
|
+
}
|
|
841
|
+
// Permanently remove this table: unregister it, delete blobs referenced by its records,
// drop its indexes and primary store (or, for legacy per-table databases, close and
// unlink the file), then broadcast a DROP_TABLE schema-change signal.
static async dropTable() {
    delete databases_ts_1.databases[databaseName][tableName];
    // delete any blobs referenced by stored records before dropping the data
    for (const entry of primaryStore.getRange({ versions: true, snapshot: false, lazy: true })) {
        if (entry.metadataFlags & auditStore_ts_1.HAS_BLOBS && entry.value) {
            (0, blob_ts_1.deleteBlobsInObject)(entry.value);
        }
    }
    if (databaseName === databasePath) {
        // part of a database
        for (const attribute of attributes) {
            dbisDb.remove(TableResource.tableName + '/' + attribute.name);
            const index = indices[attribute.name];
            index?.drop();
        }
        dbisDb.remove(TableResource.tableName + '/');
        primaryStore.drop();
        await dbisDb.committed;
    }
    else {
        // legacy table per database
        await primaryStore.close();
        node_fs_1.default.unlinkSync(primaryStore.path);
    }
    signalling.signalSchemaChange(new itc_js_1.SchemaEventMsg(process.pid, hdbTerms_ts_1.OPERATIONS_ENUM.DROP_TABLE, databaseName, tableName));
}
|
|
866
|
+
/**
 * Resolve a GET against this resource. Dispatches on the shape of `target`:
 * - string: treated as a property access (when loading as instances)
 * - search target: delegated back to the static search() so subclasses can override
 * - empty/root target: returns a table description (a describe call)
 * - loadAsInstance === false: loads the raw record directly from the store
 * - otherwise: returns the already-loaded record/instance state
 * @param target a property name, search target, or request target
 * @returns record data, a description object, a promise of either, or undefined
 */
get(target) {
    const constructor = this.constructor;
    if (typeof target === 'string' && constructor.loadAsInstance !== false)
        return this.getProperty(target);
    if (isSearchTarget(target)) {
        // go back to the static search method so it gets a chance to override
        return constructor.search(target, this.getContext());
    }
    // An id-less target with an empty string form is a request for the table
    // itself rather than a record: answer with a description of the table.
    if (target && target.id === undefined && !target.toString()) {
        const description = {
            // basically a describe call
            records: './', // an href to the records themselves
            name: tableName,
            database: databaseName,
            auditSize: auditStore?.getStats().entryCount,
            attributes,
            recordCount: undefined,
            estimatedRecordRange: undefined,
        };
        // Record counts require a store scan, so only compute them on request.
        if (this.getContext()?.includeExpensiveRecordCountEstimates) {
            return TableResource.getRecordCount().then((recordCount) => {
                description.recordCount = recordCount.recordCount;
                description.estimatedRecordRange = recordCount.estimatedRange;
                return description;
            });
        }
        return description;
    }
    if (target !== undefined && constructor.loadAsInstance === false) {
        const context = this.getContext();
        const txn = txnForContext(context);
        const readTxn = txn.getReadTxn();
        if (readTxn?.isDone) {
            throw new Error('You can not read from a transaction that has already been committed/aborted');
        }
        const id = requestTargetToId(target);
        checkValidId(id);
        let allowed = true;
        if (target.checkPermission) {
            // requesting authorization verification
            allowed = this.allowRead(context.user, target, context);
        }
        // allowRead may be sync or async; when() handles both uniformly, and
        // promiseNormalize() adapts the final result for the caller/target.
        return (0, when_ts_1.promiseNormalize)((0, when_ts_1.when)((0, when_ts_1.when)(allowed, (allowed) => {
            if (!allowed) {
                throw new hdbError_js_1.AccessViolation(context.user);
            }
            const ensureLoaded = true;
            return loadLocalRecord(id, context, { transaction: readTxn, ensureLoaded }, false, (entry) => {
                if (context.onlyIfCached) {
                    // don't go into the loading from source condition, but HTTP spec says to
                    // return 504 (rather than 404) if there is no content and the cache-control header
                    // dictates not to go to source
                    if (!entry?.value)
                        throw new hdbError_js_1.ServerError('Entry is not cached', 504);
                }
                else if (ensureLoaded) {
                    // Caching table: a miss/stale entry may trigger a fetch from the
                    // canonical source before we can answer.
                    const loadingFromSource = ensureLoadedFromSource(constructor.source, id, entry, context, this);
                    if (loadingFromSource) {
                        txn?.disregardReadTxn(); // this could take some time, so don't keep the transaction open if possible
                        target.loadedFromSource = true;
                        return loadingFromSource.then((entry) => entry?.value);
                    }
                }
                return entry?.value;
            });
        }), (record) => {
            // Post-process the loaded record: apply a select projection or a
            // single-property extraction when the target asked for one.
            const select = target?.select;
            if (select && record != null) {
                const transform = (0, Resource_ts_1.transformForSelect)(select, this.constructor);
                return transform(record);
            }
            if (target?.property) {
                return record[target?.property];
            }
            return record;
        }), target);
    }
    if (target?.property)
        return this.getProperty(target.property);
    if (!constructor.getReturnMutable) {
        // if we are not explicitly using getReturnMutable, return the frozen record
        const record = this.#record;
        const select = target?.select;
        if (select && record != null) {
            const transform = (0, Resource_ts_1.transformForSelect)(select, this.constructor);
            return (0, when_ts_1.promiseNormalize)(transform(record), target);
        }
        return (0, when_ts_1.promiseNormalize)(record, target);
    }
    // Mutable-instance mode: hand back the instance itself when the record
    // exists (or the caller explicitly tolerates a nonexistent record).
    if (this.doesExist() || target?.ensureLoaded === false || this.getContext()?.returnNonexistent) {
        return this;
    }
    return undefined;
}
|
|
960
|
+
/**
 * Determine if the user is allowed to get/read data from the current resource.
 * NOTE(review): this method also *mutates* `target.select` as a side effect,
 * narrowing the projection to attributes the user may read and to related
 * tables the user may access — callers rely on that rewritten select.
 * Returns true when allowed, false/undefined otherwise.
 */
allowRead(user, target, context) {
    const tablePermission = getTablePermissions(user, target);
    if (tablePermission?.read) {
        if (tablePermission.isSuperUser)
            return true;
        const attribute_permissions = tablePermission.attribute_permissions;
        const select = target?.select;
        if (attribute_permissions?.length > 0 || (hasRelationships && select)) {
            // If attribute permissions are defined, we need to ensure there is a select that only returns the attributes the user has permission to
            // or if there are relationships, we need to ensure that the user has permission to read from the related table
            // Note that if we do not have a select, we do not return any relationships by default.
            if (!target)
                target = {};
            if (select) {
                const selectArray = Array.isArray(select) ? select : [select];
                // attrsForType is false when no attribute permissions apply,
                // which disables the per-attribute filter below.
                const attrsForType = attribute_permissions?.length > 0 && attributesAsObject(attribute_permissions, 'read');
                target.select = selectArray
                    .map((property) => {
                    // select entries may be bare names or {name, select, ...} objects
                    const propertyName = property.name || property;
                    if (!attrsForType || attrsForType[propertyName]) {
                        const relatedTable = propertyResolvers[propertyName]?.definition?.tableClass;
                        if (relatedTable) {
                            // if there is a related table, we need to ensure the user has permission to read from that table and that attributes are properly restricted
                            if (!property.name)
                                property = { name: property };
                            if (!property.checkPermission && target.checkPermission)
                                property.checkPermission = target.checkPermission;
                            // Recursively authorize the related table's read (and let
                            // it rewrite the nested select).
                            if (!relatedTable.prototype.allowRead.call(null, user, property, context))
                                return false;
                            if (!property.select)
                                return property.name; // no select was applied, just return the name
                        }
                        return property;
                    }
                    // implicit undefined: attribute not readable, dropped by filter(Boolean)
                })
                    .filter(Boolean);
            }
            else {
                // No select was requested: synthesize one containing exactly the
                // readable, non-relationship attributes.
                target.select = attribute_permissions
                    .filter((attribute) => attribute.read && !propertyResolvers[attribute.attribute_name])
                    .map((attribute) => attribute.attribute_name);
            }
            return true;
        }
        else {
            return true;
        }
    }
    // implicit undefined (not allowed) when there is no read permission
}
|
|
1012
|
+
/**
 * Determine if the user is allowed to update data from the current resource.
 * NOTE(review): mutates `updatedData` — for attributes the user may NOT
 * update, the current value is copied back in so a full put cannot silently
 * strip protected attributes.
 * Returns false when a protected attribute is being written; otherwise the
 * result of the context-level permission check; undefined when no update
 * permission exists at all.
 */
// @ts-expect-error Tables only allow synchronous allowUpdate checks.
// eslint-disable-next-line no-unused-vars
allowUpdate(user, updatedData, context) {
    const tablePermission = getTablePermissions(user);
    if (tablePermission?.update) {
        const attribute_permissions = tablePermission.attribute_permissions;
        if (attribute_permissions?.length > 0) {
            // if attribute permissions are defined, we need to ensure there is a select that only returns the attributes the user has permission to
            const attrsForType = attributesAsObject(attribute_permissions, 'update');
            // Reject the update outright if any incoming key is not updatable.
            for (const key in updatedData) {
                if (!attrsForType[key])
                    return false;
            }
            // if this is a full put operation that removes missing properties, we don't want to remove properties
            // that the user doesn't have permission to remove
            for (const permission of attribute_permissions) {
                const key = permission.attribute_name;
                if (!permission.update && !(key in updatedData)) {
                    updatedData[key] = this.getProperty(key);
                }
            }
        }
        return checkContextPermissions(this.getContext());
    }
    // implicit undefined (not allowed) when there is no update permission
}
|
|
1040
|
+
/**
|
|
1041
|
+
* Determine if the user is allowed to create new data in the current resource
|
|
1042
|
+
*/
|
|
1043
|
+
// @ts-expect-error Tables only allow synchronous allowCreate checks.
|
|
1044
|
+
allowCreate(user, newData, context) {
|
|
1045
|
+
if (this.isCollection) {
|
|
1046
|
+
const tablePermission = getTablePermissions(user);
|
|
1047
|
+
if (tablePermission?.insert) {
|
|
1048
|
+
const attribute_permissions = tablePermission.attribute_permissions;
|
|
1049
|
+
if (attribute_permissions?.length > 0) {
|
|
1050
|
+
// if attribute permissions are defined, we need to ensure there is a select that only returns the attributes the user has permission to
|
|
1051
|
+
const attrsForType = attributesAsObject(attribute_permissions, 'insert');
|
|
1052
|
+
for (const key in newData) {
|
|
1053
|
+
if (!attrsForType[key])
|
|
1054
|
+
return false;
|
|
1055
|
+
}
|
|
1056
|
+
return checkContextPermissions(this.getContext());
|
|
1057
|
+
}
|
|
1058
|
+
else {
|
|
1059
|
+
return checkContextPermissions(this.getContext());
|
|
1060
|
+
}
|
|
1061
|
+
}
|
|
1062
|
+
}
|
|
1063
|
+
else {
|
|
1064
|
+
// creating *within* a record resource just means we are adding some data to a current record, which is
|
|
1065
|
+
// an update to the record, it is not an insert of a new record into the table, so not a table create operation
|
|
1066
|
+
// so does not use table insert permissions
|
|
1067
|
+
return this.allowUpdate(user, newData, context);
|
|
1068
|
+
}
|
|
1069
|
+
}
|
|
1070
|
+
/**
|
|
1071
|
+
* Determine if the user is allowed to delete from the current resource
|
|
1072
|
+
*/
|
|
1073
|
+
allowDelete(user, target, context) {
|
|
1074
|
+
const tablePermission = getTablePermissions(user, target);
|
|
1075
|
+
return !!tablePermission?.delete && checkContextPermissions(context);
|
|
1076
|
+
}
|
|
1077
|
+
/**
 * Register an update to this resource within the current transaction.
 * Supports two calling conventions:
 * - legacy: update(updates?, fullUpdate?) called directly on an instance
 * - modern: update(target, updates) where target identifies the record
 * The write itself is deferred until the transaction commits.
 * @param target request target (modern) or the updates object (legacy)
 * @param updates updates object (modern), or boolean fullUpdate flag (legacy)
 * @returns this (or a promise of this on the async permission/load path)
 */
update(target, updates) {
    let id;
    // determine if it is a legacy call
    const directInstance = typeof updates === 'boolean' ||
        (updates === undefined &&
            (target == undefined || (typeof target === 'object' && !(target instanceof URLSearchParams))));
    let fullUpdate = false;
    if (directInstance) {
        // legacy, shift the arguments
        fullUpdate = updates;
        updates = target;
        id = this.getId();
    }
    else {
        id = requestTargetToId(target);
    }
    const context = this.getContext();
    const envTxn = txnForContext(context);
    if (!envTxn)
        throw new Error('Can not update a table resource outside of a transaction');
    // record in the list of updating records so it can be written to the database when we commit
    if (updates === false) {
        // TODO: Remove from transaction
        return this;
    }
    if (typeof updates === 'object' && updates) {
        if (fullUpdate) {
            // legacy full update where we need to update the entire record, but the instance needs to continue
            // track any further changes
            if (Object.isFrozen(updates))
                updates = { ...updates };
            this.#record = {}; // clear out the existing record
            this.#changes = updates;
        }
        else if (directInstance) {
            // incremental update with legacy arguments
            // merge into any changes already accumulated on this instance
            const ownData = this.#changes;
            if (ownData)
                updates = Object.assign(ownData, updates);
            this.#changes = updates;
        }
        else {
            // standard path, where we retrieve the references record and return an instance, initialized with any
            // updates that were passed into this method
            let allowed = true;
            if (target == undefined)
                throw new TypeError('Can not put a record without a target');
            if (target.checkPermission) {
                // requesting authorization verification
                allowed = this.allowUpdate(context.user, updates, context);
            }
            return (0, when_ts_1.when)(allowed, (allowed) => {
                if (!allowed) {
                    throw new hdbError_js_1.AccessViolation(context.user);
                }
                let loading;
                if (!this.#entry && this.constructor.loadAsInstance === false) {
                    // load the record if it hasn't been done yet
                    loading = this._loadRecord(target, context, { ensureLoaded: true, async: true });
                }
                return (0, when_ts_1.when)(loading, () => {
                    this.#changes = updates;
                    this._writeUpdate(id, this.#changes, false);
                    return this;
                });
            });
        }
    }
    // legacy paths fall through to here and schedule the write directly
    this._writeUpdate(id, this.#changes, fullUpdate);
    return this;
}
|
|
1148
|
+
/**
|
|
1149
|
+
* Save any changes into this instance to the current transaction
|
|
1150
|
+
*/
|
|
1151
|
+
save() {
|
|
1152
|
+
if (this.#savingOperation) {
|
|
1153
|
+
const transaction = txnForContext(this.getContext());
|
|
1154
|
+
if (transaction.save) {
|
|
1155
|
+
try {
|
|
1156
|
+
return transaction.save(this.#savingOperation);
|
|
1157
|
+
}
|
|
1158
|
+
finally {
|
|
1159
|
+
this.#savingOperation = null;
|
|
1160
|
+
}
|
|
1161
|
+
}
|
|
1162
|
+
}
|
|
1163
|
+
}
|
|
1164
|
+
addTo(property, value) {
|
|
1165
|
+
if (typeof value === 'number' || typeof value === 'bigint') {
|
|
1166
|
+
if (this.#savingOperation?.fullUpdate)
|
|
1167
|
+
this.set(property, (+this.getProperty(property) || 0) + value);
|
|
1168
|
+
else {
|
|
1169
|
+
if (!this.#savingOperation)
|
|
1170
|
+
this.update();
|
|
1171
|
+
this.set(property, new tracked_ts_1.Addition(value));
|
|
1172
|
+
}
|
|
1173
|
+
}
|
|
1174
|
+
else {
|
|
1175
|
+
throw new Error('Can not add a non-numeric value');
|
|
1176
|
+
}
|
|
1177
|
+
}
|
|
1178
|
+
subtractFrom(property, value) {
|
|
1179
|
+
if (typeof value === 'number') {
|
|
1180
|
+
return this.addTo(property, -value);
|
|
1181
|
+
}
|
|
1182
|
+
else {
|
|
1183
|
+
throw new Error('Can not subtract a non-numeric value');
|
|
1184
|
+
}
|
|
1185
|
+
}
|
|
1186
|
+
getMetadata() {
|
|
1187
|
+
return this.#entry;
|
|
1188
|
+
}
|
|
1189
|
+
getRecord() {
|
|
1190
|
+
return this.#record;
|
|
1191
|
+
}
|
|
1192
|
+
getChanges() {
|
|
1193
|
+
return this.#changes;
|
|
1194
|
+
}
|
|
1195
|
+
_setChanges(changes) {
|
|
1196
|
+
this.#changes = changes;
|
|
1197
|
+
}
|
|
1198
|
+
setRecord(record) {
|
|
1199
|
+
this.#record = record;
|
|
1200
|
+
}
|
|
1201
|
+
invalidate(target) {
|
|
1202
|
+
let allowed = true;
|
|
1203
|
+
const context = this.getContext();
|
|
1204
|
+
if (target?.checkPermission) {
|
|
1205
|
+
// requesting authorization verification
|
|
1206
|
+
allowed = this.allowDelete(context.user, target, context);
|
|
1207
|
+
}
|
|
1208
|
+
return (0, when_ts_1.when)(allowed, (allowed) => {
|
|
1209
|
+
if (!allowed) {
|
|
1210
|
+
throw new hdbError_js_1.AccessViolation(context.user);
|
|
1211
|
+
}
|
|
1212
|
+
this._writeInvalidate(target ? requestTargetToId(target) : this.getId());
|
|
1213
|
+
});
|
|
1214
|
+
}
|
|
1215
|
+
/**
 * Schedule an invalidation write for the given record id on the current
 * transaction. The actual store update runs in the commit callback, where
 * version precedence is re-checked so out-of-order replicated invalidations
 * are dropped.
 * @param id the record's primary key
 * @param partialRecord optional partial data to retain on the invalidated entry
 * @param options optional replication metadata (nodeId, residencyId, viaNodeId)
 */
_writeInvalidate(id, partialRecord, options) {
    const context = this.getContext();
    checkValidId(id);
    const transaction = txnForContext(this.getContext());
    transaction.addWrite({
        key: id,
        store: primaryStore,
        invalidated: true,
        entry: this.#entry,
        beforeIntermediate: preCommitBlobsForRecordBefore(partialRecord),
        commit: (txnTime, existingEntry, _retry, transaction) => {
            // Skip if a newer version already exists (out-of-order arrival).
            if (precedesExistingVersion(txnTime, existingEntry, options?.nodeId) <= 0)
                return;
            partialRecord ??= null;
            for (const name in indices) {
                if (!partialRecord)
                    partialRecord = {};
                // if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
                if (partialRecord[name] === undefined) {
                    partialRecord[name] = this.getProperty(name);
                }
            }
            logger_ts_1.logger.trace?.(`Invalidating entry in ${tableName} id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);
            updateRecord(id, partialRecord, existingEntry, txnTime, exports.INVALIDATED, audit, {
                user: context?.user,
                residencyId: options?.residencyId,
                nodeId: options?.nodeId,
                viaNodeId: options?.viaNodeId,
                transaction,
                tableToTrack: tableName,
            }, 'invalidate');
            // TODO: recordDeletion?
        },
    });
}
|
|
1250
|
+
/**
 * Schedule a relocation write: the record is moving to a (possibly) different
 * set of resident nodes. If this node is no longer in the residency, the
 * stored value is reduced to the indexed attributes and flagged INVALIDATED;
 * otherwise the full value is retained. The source's relocate hook (if any)
 * runs first, unless the write originated from the source itself.
 * @param id the record's primary key
 * @param options residency/replication metadata (residencyId, nodeId, viaNodeId, expiresAt)
 */
_writeRelocate(id, options) {
    const context = this.getContext();
    checkValidId(id);
    const transaction = txnForContext(this.getContext());
    transaction.addWrite({
        key: id,
        store: primaryStore,
        invalidated: true,
        entry: this.#entry,
        // notify the canonical source of the relocation unless this write
        // came from the source in the first place
        before: this.constructor.source?.relocate && !context?.source
            ? this.constructor.source.relocate.bind(this.constructor.source, id, undefined, context)
            : undefined,
        commit: (txnTime, existingEntry, _retry, transaction) => {
            // Skip if a newer version already exists (out-of-order arrival).
            if (precedesExistingVersion(txnTime, existingEntry, options?.nodeId) <= 0)
                return;
            const residency = TableResource.getResidencyRecord(options.residencyId);
            let metadata = 0;
            let newRecord = null;
            const existingRecord = existingEntry?.value;
            if (residency && !residency.includes(server.hostname)) {
                // This node is not a resident: keep only what searching needs.
                for (const name in indices) {
                    if (!newRecord)
                        newRecord = {};
                    // if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
                    newRecord[name] = existingRecord[name];
                }
                metadata = exports.INVALIDATED;
            }
            else {
                // Still resident here: keep the full record value.
                newRecord = existingRecord;
            }
            logger_ts_1.logger.trace?.(`Relocating entry id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);
            updateRecord(id, newRecord, existingEntry, txnTime, metadata, audit, {
                user: context.user,
                residencyId: options.residencyId,
                nodeId: options.nodeId,
                viaNodeId: options?.viaNodeId,
                expiresAt: options.expiresAt,
                transaction,
            }, 'relocate', false, null);
        },
    });
}
|
|
1293
|
+
/**
 * Record the relocation of an entry (when a record is moved to a different node), return true if it is now located locally
 * @param existingEntry the current local entry (provides key/version/residency)
 * @param entry the downloaded entry whose value should now be stored locally
 * @returns true when the record was written locally; false when relocation
 *          does not apply (id-based residency, or this node is not resident)
 */
static _recordRelocate(existingEntry, entry) {
    if (this.getResidencyById)
        return false; // we don't want to relocate entries that are located by id
    const context = {
        previousResidency: this.getResidencyRecord(existingEntry.residencyId),
        isRelocation: true,
    };
    // Recompute where this record should live based on its (new) value.
    const residency = residencyFromFunction(this.getResidency(entry.value, context));
    let residencyId;
    if (residency) {
        if (!residency.includes(server.hostname))
            return false; // if we aren't in the residency, we don't need to do anything, we are not responsible for storing this record
        residencyId = getResidencyId(residency);
    }
    const metadata = 0;
    logger_ts_1.logger.debug?.('Performing a relocate of an entry', existingEntry.key, entry.value, residency);
    updateRecord(existingEntry.key, entry.value, // store the record we downloaded
    existingEntry, existingEntry.version, // version number should not change
    metadata, true, { residencyId, expiresAt: entry.expiresAt, transaction: txnForContext(context).transaction }, 'relocate', false, null // the audit record value should be empty since there are no changes to the actual data
    );
    return true;
}
|
|
1320
|
+
/**
 * Evicting a record will remove it from a caching table. This is not considered a canonical data change, and it is assumed that retrieving this record from the source will still yield the same record, this is only removing the local copy of the record.
 * @param id the record's primary key
 * @param existingRecord the record value believed to be current
 * @param existingVersion the version the caller observed; eviction is skipped
 *        if the stored version has since changed (optimistic check)
 */
static evict(id, existingRecord, existingVersion) {
    let entry;
    if (hasSourceGet || audit) {
        if (!existingRecord)
            return;
        entry = primaryStore.getEntry(id);
        // bail if the entry vanished or was rewritten since the caller read it
        if (!entry || !existingRecord)
            return;
        if (entry.version !== existingVersion)
            return;
    }
    if (hasSourceGet) {
        // if there is a resolution in-progress, abandon the eviction
        if (primaryStore.hasLock(id, entry.version))
            return;
        // if there is a source, we are not "deleting" the record, just removing our local copy, but preserving what we need for indexing
        let partialRecord;
        for (const name in indices) {
            // if there are any indices, we need to preserve a partial evicted record to ensure we can still do searches
            if (!partialRecord)
                partialRecord = {};
            partialRecord[name] = existingRecord[name];
        }
        // if we are evicting and not deleting, need to preserve the partial record
        if (partialRecord) {
            // treat this as a record resolution (so previous version is checked) with no audit record
            return updateRecord(id, partialRecord, entry, existingVersion, exports.EVICTED, null, null, null, true);
        }
    }
    // No partial record to keep: drop index entries for this record first
    // (guarded by version so we don't clobber a concurrent update).
    primaryStore.ifVersion?.(id, existingVersion, () => {
        updateIndices(id, existingRecord, null);
    });
    // evictions never go in the audit log, so we can not record a deletion entry for the eviction
    // as there is no corresponding audit entry and it would never get cleaned up. So we must simply
    // removed the entry entirely
    return (0, RecordEncoder_ts_1.removeEntry)(primaryStore, entry ?? primaryStore.getEntry(id), existingVersion);
}
|
|
1360
|
+
/**
 * This is intended to acquire a lock on a record from the whole cluster.
 * Currently a stub; always throws.
 * @throws Error always — cluster-wide record locking is not implemented yet
 */
lock() {
    // TODO: implement cluster-wide record locking
    throw new Error('Not yet implemented');
}
|
|
1366
|
+
static operation(operation, context) {
|
|
1367
|
+
operation.table ||= tableName;
|
|
1368
|
+
operation.schema ||= databaseName;
|
|
1369
|
+
return global.operation(operation, context);
|
|
1370
|
+
}
|
|
1371
|
+
/**
 * Store the provided record data into the current resource. This is not written
 * until the corresponding transaction is committed.
 * Supports a legacy single-argument form (record only, or URLSearchParams
 * target) that routes through update(); the modern form takes (target, record)
 * and also accepts an array of records, fanning out one write per element.
 * @param target request target identifying the record (or the record, legacy)
 * @param record the record data, or an array of records
 */
put(target, record) {
    if (record === undefined || record instanceof URLSearchParams) {
        // legacy argument position, shift the arguments and go through the update method for back-compat
        this.update(target, true);
        return this.save();
    }
    else {
        let allowed = true;
        if (target == undefined)
            throw new TypeError('Can not put a record without a target');
        const context = this.getContext();
        if (target.checkPermission) {
            // requesting authorization verification
            allowed = this.allowUpdate(context.user, record, context);
        }
        return (0, when_ts_1.when)(allowed, (allowed) => {
            if (!allowed) {
                throw new hdbError_js_1.AccessViolation(context.user);
            }
            // standard path, handle arrays as multiple updates, and otherwise do a direct update
            if (Array.isArray(record)) {
                // each element supplies its own primary key
                return Promise.all(record.map((element) => {
                    const id = element[primaryKey];
                    this._writeUpdate(id, element, true);
                    return this.save();
                }));
            }
            else {
                const id = requestTargetToId(target);
                this._writeUpdate(id, record, true);
                return this.save();
            }
        });
    }
    // always return undefined
}
|
|
1411
|
+
/**
 * Create a new record in this table. Accepts a single-argument form
 * (record only) by shifting arguments. When no id is supplied a new one is
 * generated; when an id is supplied, a 409 conflict is raised if a record
 * with that id already exists. The write is deferred to the transaction.
 * @param target optional request target (or the record, single-arg form)
 * @param record the record data to create
 * @returns the record (with its primary key assigned), possibly via a promise
 * @throws TypeError when no usable record object is provided
 * @throws ClientError (409) when the id already exists
 */
create(target, record) {
    let allowed = true;
    const context = this.getContext();
    if (!record && !(target instanceof URLSearchParams)) {
        // single argument, shift arguments
        record = target;
        target = undefined;
    }
    if (!record || typeof record !== 'object' || Array.isArray(record)) {
        throw new TypeError('Can not create a record without an object');
    }
    if (target?.checkPermission) {
        // requesting authorization verification
        allowed = this.allowCreate(context.user, record, context);
    }
    return (0, when_ts_1.when)(allowed, (allowed) => {
        if (!allowed) {
            throw new hdbError_js_1.AccessViolation(context.user);
        }
        // prefer an id from the target, then from the record itself
        let id = requestTargetToId(target) ?? record[primaryKey];
        if (id === undefined) {
            id = this.constructor.getNewId();
            record[primaryKey] = id; // make this immediately available
        }
        else {
            // explicit id: create must not overwrite an existing record
            const existing = primaryStore.getSync(id);
            if (existing) {
                throw new hdbError_js_1.ClientError('Record already exists', 409);
            }
        }
        this._writeUpdate(id, record, true);
        return record;
    });
}
|
|
1445
|
+
// @ts-expect-error The implementation handles the possibility of target and recordUpdate being swapped
|
|
1446
|
+
patch(target, recordUpdate) {
|
|
1447
|
+
if (recordUpdate === undefined || recordUpdate instanceof URLSearchParams) {
|
|
1448
|
+
// legacy argument position, shift the arguments and go through the update method for back-compat
|
|
1449
|
+
this.update(target, false);
|
|
1450
|
+
return this.save();
|
|
1451
|
+
}
|
|
1452
|
+
else {
|
|
1453
|
+
// standard path, ensure there is no return object
|
|
1454
|
+
return (0, when_ts_1.when)(this.update(target, recordUpdate), () => {
|
|
1455
|
+
return (0, when_ts_1.when)(this.save(), () => undefined); // wait for the update and save, but return undefined
|
|
1456
|
+
});
|
|
1457
|
+
}
|
|
1458
|
+
}
|
|
1459
|
+
// perform the actual write operation; this may come from a user request to write (put, post, etc.), or
|
|
1460
|
+
// a notification that a write has already occurred in the canonical data source, we need to update our
|
|
1461
|
+
// local copy
|
|
1462
|
+
_writeUpdate(id, recordUpdate, fullUpdate, options) {
|
|
1463
|
+
const context = this.getContext();
|
|
1464
|
+
const transaction = txnForContext(context);
|
|
1465
|
+
checkValidId(id);
|
|
1466
|
+
const entry = this.#entry ?? primaryStore.getEntry(id, { transaction: transaction.getReadTxn() });
|
|
1467
|
+
const writeToSource = () => {
|
|
1468
|
+
if (!this.constructor.source || context?.source)
|
|
1469
|
+
return;
|
|
1470
|
+
if (fullUpdate) {
|
|
1471
|
+
// full update is a put
|
|
1472
|
+
if (this.constructor.source.put) {
|
|
1473
|
+
return () => this.constructor.source.put(id, recordUpdate, context);
|
|
1474
|
+
}
|
|
1475
|
+
}
|
|
1476
|
+
else {
|
|
1477
|
+
// incremental update
|
|
1478
|
+
if (this.constructor.source.patch) {
|
|
1479
|
+
return () => this.constructor.source.patch(id, recordUpdate, context);
|
|
1480
|
+
}
|
|
1481
|
+
else if (this.constructor.source.put) {
|
|
1482
|
+
// if this is incremental, but only have put, we can use that by generating the full record (at least the expected one)
|
|
1483
|
+
return () => this.constructor.source.put(id, (0, tracked_ts_1.updateAndFreeze)(this), context);
|
|
1484
|
+
}
|
|
1485
|
+
}
|
|
1486
|
+
};
|
|
1487
|
+
const write = {
|
|
1488
|
+
key: id,
|
|
1489
|
+
store: primaryStore,
|
|
1490
|
+
entry,
|
|
1491
|
+
nodeName: context?.nodeName,
|
|
1492
|
+
fullUpdate,
|
|
1493
|
+
deferSave: true,
|
|
1494
|
+
validate: (txnTime) => {
|
|
1495
|
+
if (!recordUpdate)
|
|
1496
|
+
recordUpdate = this.#changes;
|
|
1497
|
+
if (fullUpdate || (recordUpdate && (0, tracked_ts_1.hasChanges)(this.#changes === recordUpdate ? this : recordUpdate))) {
|
|
1498
|
+
if (!context?.source) {
|
|
1499
|
+
transaction.checkOverloaded();
|
|
1500
|
+
this.validate(recordUpdate, !fullUpdate);
|
|
1501
|
+
if (updatedTimeProperty) {
|
|
1502
|
+
recordUpdate[updatedTimeProperty.name] =
|
|
1503
|
+
updatedTimeProperty.type === 'Date'
|
|
1504
|
+
? new Date(txnTime)
|
|
1505
|
+
: updatedTimeProperty.type === 'String'
|
|
1506
|
+
? new Date(txnTime).toISOString()
|
|
1507
|
+
: txnTime;
|
|
1508
|
+
}
|
|
1509
|
+
if (createdTimeProperty) {
|
|
1510
|
+
if (entry?.value) {
|
|
1511
|
+
if (fullUpdate || recordUpdate[createdTimeProperty.name]) {
|
|
1512
|
+
// make sure to retain original created time
|
|
1513
|
+
recordUpdate[createdTimeProperty.name] = entry?.value[createdTimeProperty.name];
|
|
1514
|
+
}
|
|
1515
|
+
}
|
|
1516
|
+
else {
|
|
1517
|
+
// new entry, set created time
|
|
1518
|
+
recordUpdate[createdTimeProperty.name] =
|
|
1519
|
+
createdTimeProperty.type === 'Date'
|
|
1520
|
+
? new Date(txnTime)
|
|
1521
|
+
: createdTimeProperty.type === 'String'
|
|
1522
|
+
? new Date(txnTime).toISOString()
|
|
1523
|
+
: txnTime;
|
|
1524
|
+
}
|
|
1525
|
+
}
|
|
1526
|
+
if (primaryKey && recordUpdate[primaryKey] !== id && (fullUpdate || primaryKey in recordUpdate)) {
|
|
1527
|
+
// ensure that the primary key is correct, if there is supposed to be one
|
|
1528
|
+
recordUpdate[primaryKey] = id;
|
|
1529
|
+
}
|
|
1530
|
+
if (fullUpdate) {
|
|
1531
|
+
recordUpdate = (0, tracked_ts_1.updateAndFreeze)(recordUpdate); // this flatten and freeze the record
|
|
1532
|
+
}
|
|
1533
|
+
// TODO: else freeze after we have applied the changes
|
|
1534
|
+
}
|
|
1535
|
+
}
|
|
1536
|
+
else {
|
|
1537
|
+
transaction.removeWrite?.(write);
|
|
1538
|
+
return false;
|
|
1539
|
+
}
|
|
1540
|
+
},
|
|
1541
|
+
before: writeToSource(),
|
|
1542
|
+
beforeIntermediate: preCommitBlobsForRecordBefore(recordUpdate),
|
|
1543
|
+
commit: (txnTime, existingEntry, retry, transaction) => {
|
|
1544
|
+
if (retry) {
|
|
1545
|
+
if (context && existingEntry?.version > (context.lastModified || 0))
|
|
1546
|
+
context.lastModified = existingEntry.version;
|
|
1547
|
+
this.#entry = existingEntry;
|
|
1548
|
+
if (existingEntry?.value && existingEntry.value.getRecord)
|
|
1549
|
+
throw new Error('Can not assign a record to a record, check for circular references');
|
|
1550
|
+
if (!fullUpdate)
|
|
1551
|
+
this.#record = existingEntry?.value ?? null;
|
|
1552
|
+
}
|
|
1553
|
+
this.#changes = undefined; // once we are committing to write this update, we no longer should track the changes, and want to avoid double application (of any CRDTs)
|
|
1554
|
+
this.#version = txnTime;
|
|
1555
|
+
const existingRecord = existingEntry?.value;
|
|
1556
|
+
let incrementalUpdateToApply;
|
|
1557
|
+
this.#savingOperation = null;
|
|
1558
|
+
let omitLocalRecord = false;
|
|
1559
|
+
// we use optimistic locking to only commit if the existing record state still holds true.
|
|
1560
|
+
// this is superior to using an async transaction since it doesn't require JS execution
|
|
1561
|
+
// during the write transaction.
|
|
1562
|
+
let precedesExisting = precedesExistingVersion(txnTime, existingEntry, options?.nodeId);
|
|
1563
|
+
let auditRecordToStore; // what to store in the audit record. For a full update, this can be left undefined in which case it is the same as full record update and optimized to use a binary copy
|
|
1564
|
+
const type = fullUpdate ? 'put' : 'patch';
|
|
1565
|
+
let residencyId;
|
|
1566
|
+
if (options?.residencyId != undefined)
|
|
1567
|
+
residencyId = options.residencyId;
|
|
1568
|
+
const expiresAt = context?.expiresAt ?? (expirationMs ? expirationMs + Date.now() : -1);
|
|
1569
|
+
let additionalAuditRefs = []; // track additional audit refs to store
|
|
1570
|
+
if (precedesExisting <= 0) {
|
|
1571
|
+
// This block is to handle the case of saving an update where the transaction timestamp is older than the
|
|
1572
|
+
// existing timestamp, which means that we received updates out of order, and must resequence the application
|
|
1573
|
+
// of the updates to the record to ensure consistency across the cluster
|
|
1574
|
+
// TODO: can the previous version be older, but even more previous version be newer?
|
|
1575
|
+
if (audit) {
|
|
1576
|
+
// incremental CRDT updates are only available with audit logging on
|
|
1577
|
+
let localTime = existingEntry.localTime;
|
|
1578
|
+
let auditedVersion = existingEntry.version;
|
|
1579
|
+
logger_ts_1.logger.debug?.('Applying CRDT update to record with id: ', id, 'txn time', new Date(txnTime), 'applying later update from:', new Date(auditedVersion), 'local recorded time', new Date(localTime));
|
|
1580
|
+
let nodeId = existingEntry.nodeId;
|
|
1581
|
+
const succeedingUpdates = []; // record the "future" updates, as we need to apply the updates in reverse order
|
|
1582
|
+
const auditRefsToVisit = existingEntry.additionalAuditRefs
|
|
1583
|
+
? existingEntry.additionalAuditRefs.map((ref) => ({ localTime: ref.version, nodeId: ref.nodeId }))
|
|
1584
|
+
: [];
|
|
1585
|
+
// Collect any existing audit refs that should be preserved (those older than current transaction)
|
|
1586
|
+
if (existingEntry.additionalAuditRefs) {
|
|
1587
|
+
for (const ref of existingEntry.additionalAuditRefs) {
|
|
1588
|
+
if (ref.version <= txnTime) {
|
|
1589
|
+
additionalAuditRefs.push(ref);
|
|
1590
|
+
}
|
|
1591
|
+
}
|
|
1592
|
+
}
|
|
1593
|
+
let addedAuditRef = false;
|
|
1594
|
+
let nextRef;
|
|
1595
|
+
do {
|
|
1596
|
+
while (localTime > txnTime || (auditedVersion >= txnTime && localTime > 0)) {
|
|
1597
|
+
const auditRecord = auditStore.get(localTime, tableId, id, nodeId);
|
|
1598
|
+
if (!auditRecord)
|
|
1599
|
+
break;
|
|
1600
|
+
auditedVersion = auditRecord.version;
|
|
1601
|
+
if (auditedVersion >= txnTime) {
|
|
1602
|
+
if (auditedVersion === txnTime) {
|
|
1603
|
+
precedesExisting = precedesExistingVersion(txnTime, { version: auditedVersion, localTime: localTime, key: id, nodeId: auditRecord.nodeId }, options?.nodeId);
|
|
1604
|
+
if (precedesExisting === 0) {
|
|
1605
|
+
logger_ts_1.logger.debug?.('The transaction time is equal to the existing version, treating as duplicate', id);
|
|
1606
|
+
return; // treat a tie as a duplicate and drop it
|
|
1607
|
+
}
|
|
1608
|
+
if (precedesExisting > 0) {
|
|
1609
|
+
// if the existing version is older, we can skip this update
|
|
1610
|
+
localTime = auditRecord.previousVersion;
|
|
1611
|
+
nodeId = auditRecord.previousNodeId;
|
|
1612
|
+
continue;
|
|
1613
|
+
}
|
|
1614
|
+
}
|
|
1615
|
+
if (auditRecord.type === 'patch') {
|
|
1616
|
+
logger_ts_1.logger.debug?.('out of order patch will be applied', id, auditRecord);
|
|
1617
|
+
// record patches so we can reply in order
|
|
1618
|
+
succeedingUpdates.push(auditRecord);
|
|
1619
|
+
auditRecordToStore = recordUpdate; // use the original update for the audit record
|
|
1620
|
+
}
|
|
1621
|
+
else if (auditRecord.type === 'put' || auditRecord.type === 'delete') {
|
|
1622
|
+
// There is newer full record update, so this incremental update is completely superseded
|
|
1623
|
+
return;
|
|
1624
|
+
}
|
|
1625
|
+
}
|
|
1626
|
+
if (!addedAuditRef && isRocksDB) {
|
|
1627
|
+
addedAuditRef = true;
|
|
1628
|
+
// Add a reference to this older audit record if we had out-of-order writes
|
|
1629
|
+
additionalAuditRefs.push({ version: txnTime, nodeId: options?.nodeId });
|
|
1630
|
+
logger_ts_1.logger.debug?.('Adding additional audit ref for out-of-order write', {
|
|
1631
|
+
version: txnTime,
|
|
1632
|
+
nodeId: options?.nodeId,
|
|
1633
|
+
});
|
|
1634
|
+
}
|
|
1635
|
+
// Collect any additional audit refs from this audit record to traverse other branches
|
|
1636
|
+
if (auditRecord.previousAdditionalAuditRefs) {
|
|
1637
|
+
for (const ref of auditRecord.previousAdditionalAuditRefs) {
|
|
1638
|
+
auditRefsToVisit.push({ localTime: ref.version, nodeId: ref.nodeId });
|
|
1639
|
+
logger_ts_1.logger.debug?.('Adding audit ref from audit record to visit queue', {
|
|
1640
|
+
version: ref.version,
|
|
1641
|
+
nodeId: ref.nodeId,
|
|
1642
|
+
});
|
|
1643
|
+
}
|
|
1644
|
+
}
|
|
1645
|
+
localTime = auditRecord.previousVersion;
|
|
1646
|
+
nodeId = auditRecord.previousNodeId;
|
|
1647
|
+
}
|
|
1648
|
+
// Check if we need to scan additional audit refs from this record
|
|
1649
|
+
nextRef = auditRefsToVisit.shift();
|
|
1650
|
+
if (nextRef) {
|
|
1651
|
+
localTime = auditedVersion = nextRef.localTime;
|
|
1652
|
+
nodeId = nextRef.nodeId;
|
|
1653
|
+
logger_ts_1.logger.debug?.('Following additional audit ref to continue scanning', { localTime, nodeId });
|
|
1654
|
+
}
|
|
1655
|
+
} while (nextRef);
|
|
1656
|
+
if (!localTime) {
|
|
1657
|
+
// if we reached the end of the audit trail, we can just apply the update
|
|
1658
|
+
logger_ts_1.logger.debug?.('No further audit history, applying incremental updates based on available history', id, 'existing version preserved', existingEntry);
|
|
1659
|
+
}
|
|
1660
|
+
succeedingUpdates.sort((a, b) => a.version - b.version); // order the patches
|
|
1661
|
+
for (const auditRecord of succeedingUpdates) {
|
|
1662
|
+
const newerUpdate = auditRecord.getValue(primaryStore);
|
|
1663
|
+
logger_ts_1.logger.debug?.('Rebuilding update with future patch:', new Date(auditRecord.version), newerUpdate, auditRecord);
|
|
1664
|
+
incrementalUpdateToApply = (0, crdt_ts_1.rebuildUpdateBefore)(incrementalUpdateToApply ?? recordUpdate, newerUpdate, fullUpdate);
|
|
1665
|
+
if (!incrementalUpdateToApply)
|
|
1666
|
+
return writeCommit(false); // if all changes are overwritten, nothing left to do
|
|
1667
|
+
}
|
|
1668
|
+
}
|
|
1669
|
+
else if (fullUpdate) {
|
|
1670
|
+
// if no audit, we can't accurately do incremental updates, so we just assume the last update
|
|
1671
|
+
// was the same type. Assuming a full update this record update loses and there are no changes
|
|
1672
|
+
return writeCommit(false);
|
|
1673
|
+
}
|
|
1674
|
+
else {
|
|
1675
|
+
// no audit, assume updates are overwritten except CRDT operations or properties that didn't exist
|
|
1676
|
+
incrementalUpdateToApply = (0, crdt_ts_1.rebuildUpdateBefore)(incrementalUpdateToApply ?? recordUpdate, existingRecord, fullUpdate);
|
|
1677
|
+
logger_ts_1.logger.debug?.('Rebuilding update without audit:', incrementalUpdateToApply);
|
|
1678
|
+
}
|
|
1679
|
+
logger_ts_1.logger.trace?.('Rebuilt record to save:', incrementalUpdateToApply, ' is full update:', fullUpdate);
|
|
1680
|
+
}
|
|
1681
|
+
let recordToStore;
|
|
1682
|
+
if (fullUpdate && !incrementalUpdateToApply)
|
|
1683
|
+
recordToStore = recordUpdate;
|
|
1684
|
+
else {
|
|
1685
|
+
if (this.constructor.loadAsInstance === false)
|
|
1686
|
+
recordToStore = (0, tracked_ts_1.updateAndFreeze)(existingRecord, incrementalUpdateToApply ?? recordUpdate);
|
|
1687
|
+
else {
|
|
1688
|
+
this.#record = existingRecord;
|
|
1689
|
+
recordToStore = (0, tracked_ts_1.updateAndFreeze)(this, incrementalUpdateToApply ?? recordUpdate);
|
|
1690
|
+
}
|
|
1691
|
+
}
|
|
1692
|
+
this.#record = recordToStore;
|
|
1693
|
+
if (recordToStore && recordToStore.getRecord)
|
|
1694
|
+
throw new Error('Can not assign a record to a record, check for circular references');
|
|
1695
|
+
if (residencyId == undefined) {
|
|
1696
|
+
if (entry?.residencyId)
|
|
1697
|
+
context.previousResidency = TableResource.getResidencyRecord(entry.residencyId);
|
|
1698
|
+
const residency = residencyFromFunction(TableResource.getResidency(recordToStore, context));
|
|
1699
|
+
if (residency) {
|
|
1700
|
+
if (!residency.includes(server.hostname)) {
|
|
1701
|
+
// if we aren't in the residency list, specify that our local record should be omitted or be partial
|
|
1702
|
+
auditRecordToStore ??= recordToStore;
|
|
1703
|
+
omitLocalRecord = true;
|
|
1704
|
+
if (TableResource.getResidencyById) {
|
|
1705
|
+
// complete omission of the record that doesn't belong here
|
|
1706
|
+
recordToStore = undefined;
|
|
1707
|
+
}
|
|
1708
|
+
else {
|
|
1709
|
+
// store the partial record
|
|
1710
|
+
recordToStore = null;
|
|
1711
|
+
for (const name in indices) {
|
|
1712
|
+
if (!recordToStore) {
|
|
1713
|
+
recordToStore = {};
|
|
1714
|
+
}
|
|
1715
|
+
// if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
|
|
1716
|
+
recordToStore[name] = auditRecordToStore[name];
|
|
1717
|
+
}
|
|
1718
|
+
}
|
|
1719
|
+
}
|
|
1720
|
+
}
|
|
1721
|
+
residencyId = getResidencyId(residency);
|
|
1722
|
+
}
|
|
1723
|
+
if (!fullUpdate) {
|
|
1724
|
+
// we use our own data as the basis for the audit record, which will include information about the incremental updates, even if it was overwritten by CRDT resolution
|
|
1725
|
+
auditRecordToStore = recordUpdate;
|
|
1726
|
+
}
|
|
1727
|
+
logger_ts_1.logger.trace?.(`Saving record with id: ${id}, timestamp: ${new Date(txnTime).toISOString()}${expiresAt ? ', expires at: ' + new Date(expiresAt).toISOString() : ''}${existingEntry?.version
|
|
1728
|
+
? ', replaces entry from: ' + new Date(existingEntry.version).toISOString()
|
|
1729
|
+
: ', new entry'}`, (() => {
|
|
1730
|
+
try {
|
|
1731
|
+
return JSON.stringify(recordToStore).slice(0, 100);
|
|
1732
|
+
}
|
|
1733
|
+
catch {
|
|
1734
|
+
return '';
|
|
1735
|
+
}
|
|
1736
|
+
})());
|
|
1737
|
+
updateIndices(id, existingRecord, recordToStore, transaction && { transaction });
|
|
1738
|
+
writeCommit(true);
|
|
1739
|
+
if (context.expiresAt)
|
|
1740
|
+
scheduleCleanup();
|
|
1741
|
+
function writeCommit(storeRecord) {
|
|
1742
|
+
// we need to write the commit. if storeRecord then we need to store the record, otherwise we just need to store the audit record
|
|
1743
|
+
updateRecord(id, storeRecord ? recordToStore : undefined, storeRecord ? existingEntry : { ...existingEntry, value: undefined }, isRocksDB
|
|
1744
|
+
? Math.max(txnTime, existingEntry?.version ?? 0) // RocksDB uses a singular version/local time, so it must be most recent
|
|
1745
|
+
: txnTime, omitLocalRecord ? exports.INVALIDATED : 0, audit, {
|
|
1746
|
+
omitLocalRecord,
|
|
1747
|
+
user: context?.user,
|
|
1748
|
+
residencyId,
|
|
1749
|
+
expiresAt,
|
|
1750
|
+
nodeId: options?.nodeId,
|
|
1751
|
+
viaNodeId: options?.viaNodeId,
|
|
1752
|
+
originatingOperation: context?.originatingOperation,
|
|
1753
|
+
transaction,
|
|
1754
|
+
tableToTrack: databaseName === 'system' ? null : options?.replay ? null : tableName, // don't track analytics on system tables
|
|
1755
|
+
additionalAuditRefs: additionalAuditRefs.length > 0 ? additionalAuditRefs : undefined,
|
|
1756
|
+
}, type, false, storeRecord ? auditRecordToStore : (auditRecordToStore ?? recordUpdate));
|
|
1757
|
+
}
|
|
1758
|
+
},
|
|
1759
|
+
};
|
|
1760
|
+
this.#savingOperation = write;
|
|
1761
|
+
return transaction.addWrite(write);
|
|
1762
|
+
}
|
|
1763
|
+
async delete(target) {
|
|
1764
|
+
if (isSearchTarget(target)) {
|
|
1765
|
+
target.select = ['$id']; // just get the primary key of each record so we can delete them
|
|
1766
|
+
for await (const entry of this.search(target)) {
|
|
1767
|
+
this._writeDelete(entry.$id);
|
|
1768
|
+
}
|
|
1769
|
+
return true;
|
|
1770
|
+
}
|
|
1771
|
+
if (target) {
|
|
1772
|
+
let allowed = true;
|
|
1773
|
+
const context = this.getContext();
|
|
1774
|
+
if (target.checkPermission) {
|
|
1775
|
+
// requesting authorization verification
|
|
1776
|
+
allowed = this.allowDelete(context.user, target, context);
|
|
1777
|
+
}
|
|
1778
|
+
return (0, when_ts_1.when)(allowed, (allowed) => {
|
|
1779
|
+
if (!allowed) {
|
|
1780
|
+
throw new hdbError_js_1.AccessViolation(context.user);
|
|
1781
|
+
}
|
|
1782
|
+
const id = requestTargetToId(target);
|
|
1783
|
+
this._writeDelete(id);
|
|
1784
|
+
return true;
|
|
1785
|
+
});
|
|
1786
|
+
}
|
|
1787
|
+
this._writeDelete(this.getId());
|
|
1788
|
+
return Boolean(this.#record);
|
|
1789
|
+
}
|
|
1790
|
+
_writeDelete(id, options) {
    // Queues a delete of the record with the given id on the context's current transaction.
    // `options` may carry replication metadata (nodeId/viaNodeId) — presumably set on
    // replicated/forwarded deletes; TODO confirm against callers.
    const context = this.getContext();
    const transaction = txnForContext(context);
    checkValidId(id);
    // Use the already-loaded entry if this resource has one, otherwise read it from the
    // primary store within the transaction's read snapshot.
    const entry = this.#entry ?? primaryStore.getEntry(id, { transaction: transaction.getReadTxn() });
    transaction.addWrite({
        key: id,
        store: primaryStore,
        entry,
        nodeName: context?.nodeName,
        // If this table has an external source with a delete handler, and this delete did not
        // itself originate from the source, propagate the delete to the source first.
        before: this.constructor.source?.delete && !context?.source
            ? this.constructor.source.delete.bind(this.constructor.source, id, undefined, context)
            : undefined,
        // Commit handler run by the transaction machinery; may be re-invoked with `retry` when
        // the optimistic write needs to be re-applied against a newer existing entry.
        commit: (txnTime, existingEntry, retry, transaction) => {
            const existingRecord = existingEntry?.value;
            if (retry) {
                // On retry, refresh the context's last-modified stamp and this resource's
                // cached entry from the freshly observed existing entry.
                if (context && existingEntry?.version > (context.lastModified || 0))
                    context.lastModified = existingEntry.version;
                TableResource._updateResource(this, existingEntry);
            }
            if (precedesExistingVersion(txnTime, existingEntry, options?.nodeId) <= 0)
                return; // a newer record exists locally
            // Remove this record's index entries before removing/overwriting the record itself.
            updateIndices(id, existingRecord);
            logger_ts_1.logger.trace?.(`Deleting record with id: ${id}, txn timestamp: ${new Date(txnTime).toISOString()}`);
            if (audit || trackDeletes) {
                // With auditing (or delete-tracking) enabled, record a tombstone-style 'delete'
                // entry rather than physically removing the record immediately.
                updateRecord(id, null, existingEntry, txnTime, 0, audit, {
                    user: context?.user,
                    nodeId: options?.nodeId,
                    viaNodeId: options?.viaNodeId,
                    transaction,
                    tableToTrack: tableName,
                }, 'delete');
                // Without full audit (or on RocksDB), schedule cleanup of the leftover entry.
                if (!audit || isRocksDB)
                    scheduleCleanup();
            }
            else {
                // No audit/tracking: physically remove the entry from the primary store.
                (0, RecordEncoder_ts_1.removeEntry)(primaryStore, existingEntry);
            }
        },
    });
    return true;
}
|
|
1832
|
+
search(target) {
    // Executes a query against this table: validates/normalizes the conditions, plans an
    // index-aligned ordering where possible, then returns a lazy iterable of selected results.
    const context = this.getContext();
    const txn = txnForContext(context);
    if (!target)
        throw new Error('No query provided');
    if (target.parseError)
        throw target.parseError; // if there was a parse error, we can throw it now
    if (target.checkPermission) {
        // requesting authorization verification
        const allowed = this.allowRead(context.user, target, context);
        if (!allowed) {
            throw new hdbError_js_1.AccessViolation(context.user);
        }
    }
    // Search results can change between requests, so mark the response uncacheable.
    if (context)
        context.lastModified = UNCACHEABLE_TIMESTAMP;
    // Normalize conditions to an array: the target itself may be an array/iterable of
    // conditions, or `conditions` may be a single condition object.
    let conditions = target.conditions;
    if (!conditions)
        conditions = Array.isArray(target) ? target : target[Symbol.iterator] ? Array.from(target) : [];
    else if (conditions.length === undefined) {
        conditions = conditions[Symbol.iterator] ? Array.from(conditions) : [conditions];
    }
    // If an id (or id prefix) is in play, prepend a primary-key condition to scope the search.
    const id = target.id ?? this.getId();
    if (id) {
        conditions = [
            {
                attribute: null,
                comparator: Array.isArray(id) ? 'prefix' : 'starts_with',
                value: id,
            },
        ].concat(conditions);
    }
    let orderAlignedCondition;
    const filtered = {};
    // Validates operator/condition arity, resolves attributes, coerces typed values, and
    // collapses single chained range conditions into combined range comparators.
    function prepareConditions(conditions, operator) {
        // some validation:
        switch (operator) {
            case 'and':
            case undefined:
                if (conditions.length < 1)
                    throw new Error('An "and" operator requires at least one condition');
                break;
            case 'or':
                if (conditions.length < 2)
                    throw new Error('An "or" operator requires at least two conditions');
                break;
            default:
                throw new Error('Invalid operator ' + operator);
        }
        for (const condition of conditions) {
            // Nested condition groups are prepared recursively.
            if (condition.conditions) {
                condition.conditions = prepareConditions(condition.conditions, condition.operator);
                continue;
            }
            // Conditions may be tuple-style ([attribute, value]) or object-style ({attribute, value}).
            const attribute_name = condition[0] ?? condition.attribute;
            const attribute = attribute_name == null ? primaryKeyAttribute : (0, search_ts_1.findAttribute)(attributes, attribute_name);
            if (!attribute) {
                if (attribute_name != null && !target.allowConditionsOnDynamicAttributes)
                    throw (0, hdbError_js_1.handleHDBError)(new Error(), `${attribute_name} is not a defined attribute`, 404);
            }
            else if (attribute.type || search_ts_1.COERCIBLE_OPERATORS[condition.comparator]) {
                // Do auto-coercion or coercion as required by the attribute type
                if (condition[1] === undefined)
                    condition.value = coerceTypedValues(condition.value, attribute);
                else
                    condition[1] = coerceTypedValues(condition[1], attribute);
            }
            if (condition.chainedConditions) {
                if (condition.chainedConditions.length === 1 && (!condition.operator || condition.operator == 'and')) {
                    const chained = condition.chainedConditions[0];
                    // Determine which of the pair is the lower bound (gt/ge) vs upper bound (lt/le).
                    let upper, lower;
                    if (chained.comparator === 'gt' ||
                        chained.comparator === 'greater_than' ||
                        chained.comparator === 'ge' ||
                        chained.comparator === 'greater_than_equal') {
                        upper = condition;
                        lower = chained;
                    }
                    else {
                        upper = chained;
                        lower = condition;
                    }
                    if (upper.comparator !== 'lt' &&
                        upper.comparator !== 'less_than' &&
                        upper.comparator !== 'le' &&
                        upper.comparator !== 'less_than_equal') {
                        throw new Error('Invalid chained condition, only less than and greater than conditions can be chained together');
                    }
                    // Collapse the pair into a single range comparator ('gtlt'/'gtle'/'gele'/'gelt')
                    // with a [lower, upper] bounds value.
                    const isGe = lower.comparator === 'ge' || lower.comparator === 'greater_than_equal';
                    const isLe = upper.comparator === 'le' || upper.comparator === 'less_than_equal';
                    condition.comparator = (isGe ? 'ge' : 'gt') + (isLe ? 'le' : 'lt');
                    condition.value = [lower.value, upper.value];
                }
                else
                    throw new Error('Multiple chained conditions are not currently supported');
            }
        }
        return conditions;
    }
    // Recursively reorders 'and' groups from most to least selective (per the estimator),
    // unless the caller opted into their own execution order.
    function orderConditions(conditions, operator) {
        if (target.enforceExecutionOrder)
            return conditions; // don't rearrange conditions
        for (const condition of conditions) {
            if (condition.conditions)
                condition.conditions = orderConditions(condition.conditions, condition.operator);
        }
        // Sort the query by narrowest to broadest, so we can use the fastest index as possible with minimal filtering.
        // Note, that we do allow users to disable condition re-ordering, in case they have knowledge of a preferred
        // order for their query.
        if (conditions.length > 1 && operator !== 'or')
            return sortBy(conditions, (0, search_ts_1.estimateCondition)(TableResource));
        else
            return conditions;
    }
    // Coerces a condition value (or each element of an array value) to the attribute's type.
    function coerceTypedValues(value, attribute) {
        if (Array.isArray(value)) {
            return value.map((value) => coerceType(value, attribute));
        }
        return coerceType(value, attribute);
    }
    const operator = target.operator;
    if (conditions.length > 0 || operator)
        conditions = prepareConditions(conditions, operator);
    const sort = typeof target.sort === 'object' && target.sort;
    let postOrdering;
    if (sort) {
        // TODO: Support index-assisted sorts of unions, which will require potentially recursively adding/modifying an order aligned condition and be able to recursively undo it if necessary
        if (operator !== 'or') {
            const attribute_name = sort.attribute;
            if (attribute_name == undefined)
                throw new hdbError_js_1.ClientError('Sort requires an attribute');
            // Look for an existing condition on the sort attribute that the index order can satisfy.
            orderAlignedCondition = conditions.find((condition) => (0, search_ts_1.flattenKey)(condition.attribute) === (0, search_ts_1.flattenKey)(attribute_name));
            if (orderAlignedCondition) {
                // if there is a condition on the same attribute as the first sort, we can use it to align the sort
                // and avoid a sort operation
            }
            else {
                const attribute = (0, search_ts_1.findAttribute)(attributes, attribute_name);
                if (!attribute)
                    throw (0, hdbError_js_1.handleHDBError)(new Error(), `${Array.isArray(attribute_name) ? attribute_name.join('.') : attribute_name} is not a defined attribute`, 404);
                if (attribute.indexed) {
                    // if it is indexed, we add a pseudo-condition to align with the natural sort order of the index
                    orderAlignedCondition = { ...sort, comparator: 'sort' };
                    conditions.push(orderAlignedCondition);
                }
                else if (conditions.length === 0 && !target.allowFullScan)
                    throw (0, hdbError_js_1.handleHDBError)(new Error(), `${Array.isArray(attribute_name) ? attribute_name.join('.') : attribute_name} is not indexed and not combined with any other conditions`, 404);
            }
            if (orderAlignedCondition)
                orderAlignedCondition.descending = Boolean(sort.descending);
        }
    }
    conditions = orderConditions(conditions, operator);
    if (sort) {
        if (orderAlignedCondition && conditions[0] === orderAlignedCondition) {
            // The db index is providing the order for the first sort, may need post ordering next sort order
            if (sort.next) {
                postOrdering = {
                    dbOrderedAttribute: sort.attribute,
                    attribute: sort.next.attribute,
                    descending: sort.next.descending,
                    next: sort.next.next,
                };
            }
        }
        else {
            // if we had to add an aligned condition that isn't first, we remove it and do ordering later
            if (orderAlignedCondition)
                conditions.splice(conditions.indexOf(orderAlignedCondition), 1);
            postOrdering = sort;
        }
    }
    const select = target.select;
    // No conditions at all: fall back to a full primary-key scan condition.
    if (conditions.length === 0) {
        conditions = [{ attribute: primaryKey, comparator: 'greater_than', value: true }];
    }
    // Explain mode: return the query plan rather than executing it.
    if (target.explain) {
        return {
            conditions,
            operator,
            postOrdering,
            selectApplied: Boolean(select),
        };
    }
    // we mark the read transaction as in use (necessary for a stable read
    // transaction, and we really don't care if the
    // counts are done in the same read transaction because they are just estimates) until the search
    // results have been iterated and finished.
    const readTxn = txn.useReadTxn();
    const entries = (0, search_ts_1.executeConditions)(conditions, operator, TableResource, readTxn, target, context, (results, filters) => transformToEntries(results, select, context, readTxn, filters), filtered);
    const ensure_loaded = target.ensureLoaded !== false;
    const transformToRecord = TableResource.transformEntryForSelect(select, context, readTxn, filtered, ensure_loaded, true);
    let results = TableResource.transformToOrderedSelect(entries, select, postOrdering, context, readTxn, transformToRecord);
    // apply any offset/limit after all the sorting and filtering
    if (target.offset || target.limit !== undefined)
        results = results.slice(target.offset, target.limit !== undefined ? (target.offset || 0) + target.limit : undefined);
    // Release the read transaction once iteration completes.
    results.onDone = () => {
        results.onDone = null; // ensure that it isn't called twice
        txn.doneReadTxn();
    };
    results.selectApplied = true;
    // Reports the column names the result rows will contain, expanding '*' to all attributes.
    results.getColumns = () => {
        if (select) {
            const columns = [];
            for (const column of select) {
                if (column === '*')
                    columns.push(...attributes.map((attribute) => attribute.name));
                else
                    columns.push(column.name || column);
            }
            return columns;
        }
        return attributes
            .filter((attribute) => !attribute.computed && !attribute.relationship)
            .map((attribute) => attribute.name);
    };
    return results;
}
|
|
2050
|
+
/**
 * Orders (when a post-ordering sort is required) and select()s the attributes/properties
 * from the returned entries, producing the final iterable of results.
 * @param entries - the iterable of matched entries produced by condition execution
 * @param select - the requested attribute selection (array of names/column descriptors), if any
 * @param sort - the post-ordering descriptor ({ attribute, descending, next, dbOrderedAttribute }), if any
 * @param context - the request context
 * @param readTxn - the read transaction the entries are iterated under
 * @param transformToRecord - function that converts an entry to its selected record form
 * @returns an iterable of the transformed (and possibly re-ordered) results
 */
|
|
2059
|
+
static transformToOrderedSelect(entries, select, sort, context, readTxn, transformToRecord) {
    // Produces the final result iterable. With a `sort`, entries are buffered (per index-aligned
    // group when possible) and sorted before transformation; without one, entries stream straight
    // through the record transform.
    let results = new extended_iterable_1.ExtendedIterable();
    if (sort) {
        // there might be some situations where we don't need to transform to entries for sorting, not sure
        entries = transformToEntries(entries, select, context, readTxn, null);
        let ordered;
        // if we are doing post-ordering, we need to get records first, then sort them
        results.iterate = function () {
            // Iterator state: `sortedArrayIterator` walks the current sorted batch, while the
            // db iterator supplies the next group of entries to buffer and sort.
            let sortedArrayIterator;
            const dbIterator = entries[Symbol.asyncIterator]
                ? entries[Symbol.asyncIterator]()
                : entries[Symbol.iterator]();
            let dbDone;
            // When the index already ordered on this attribute, only each group of equal values
            // needs sorting (rather than the entire result set).
            const dbOrderedAttribute = sort.dbOrderedAttribute;
            let enqueuedEntryForNextGroup;
            let lastGroupingValue;
            let firstEntry = true;
            // Builds a comparator for the (possibly chained) sort order; ties fall through to
            // the next sort level.
            function createComparator(order) {
                const nextComparator = order.next && createComparator(order.next);
                const descending = order.descending;
                context.sort = order; // make sure this is set to the current sort order
                return (entryA, entryB) => {
                    const a = getAttributeValue(entryA, order.attribute, context);
                    const b = getAttributeValue(entryB, order.attribute, context);
                    const diff = descending ? (0, ordered_binary_1.compareKeys)(b, a) : (0, ordered_binary_1.compareKeys)(a, b);
                    if (diff === 0)
                        return nextComparator?.(entryA, entryB) || 0;
                    return diff;
                };
            }
            const comparator = createComparator(sort);
            return {
                async next() {
                    let iteration;
                    // Drain the currently sorted batch first.
                    if (sortedArrayIterator) {
                        iteration = sortedArrayIterator.next();
                        if (iteration.done) {
                            if (dbDone) {
                                if (results.onDone)
                                    results.onDone();
                                return iteration;
                            }
                        }
                        else
                            return {
                                value: await transformToRecord.call(this, iteration.value),
                            };
                    }
                    // Start a new batch; an entry held back from the previous group seeds it.
                    ordered = [];
                    if (enqueuedEntryForNextGroup)
                        ordered.push(enqueuedEntryForNextGroup);
                    // need to load all the entries into ordered
                    do {
                        iteration = await dbIterator.next();
                        if (iteration.done) {
                            dbDone = true;
                            if (!ordered.length) {
                                if (results.onDone)
                                    results.onDone();
                                return iteration;
                            }
                            else
                                break;
                        }
                        else {
                            let entry = iteration.value;
                            if (entry?.then)
                                entry = await entry;
                            // if the index has already provided the first order of sorting, we only need to sort
                            // within each grouping
                            if (dbOrderedAttribute) {
                                const groupingValue = getAttributeValue(entry, dbOrderedAttribute, context);
                                if (firstEntry) {
                                    firstEntry = false;
                                    lastGroupingValue = groupingValue;
                                }
                                else if (groupingValue !== lastGroupingValue) {
                                    // Group boundary: hold this entry for the next batch and sort what we have.
                                    lastGroupingValue = groupingValue;
                                    enqueuedEntryForNextGroup = entry;
                                    break;
                                }
                            }
                            // we store the value we will sort on, for fast sorting, and the entry so the records can be GC'ed if necessary
                            // before the sorting is completed
                            ordered.push(entry);
                        }
                    } while (true);
                    if (sort.isGrouped) {
                        // TODO: Return grouped results
                    }
                    ordered.sort(comparator);
                    sortedArrayIterator = ordered[Symbol.iterator]();
                    iteration = sortedArrayIterator.next();
                    if (!iteration.done)
                        return {
                            value: await transformToRecord.call(this, iteration.value),
                        };
                    if (results.onDone)
                        results.onDone();
                    return iteration;
                },
                // Early termination / error paths still release the read transaction via onDone.
                return() {
                    if (results.onDone)
                        results.onDone();
                    return dbIterator.return();
                },
                throw() {
                    if (results.onDone)
                        results.onDone();
                    return dbIterator.throw();
                },
            };
        };
        // Pushes the remainder of a multi-part (array path) sort attribute down onto the
        // matching select column so nested values are sorted within that column's selection.
        const applySortingOnSelect = (sort) => {
            if (typeof select === 'object' && Array.isArray(sort.attribute)) {
                for (let i = 0; i < select.length; i++) {
                    const column = select[i];
                    let columnSort;
                    if (column.name === sort.attribute[0]) {
                        // Existing column descriptor: append to the end of its sort chain.
                        columnSort = column.sort || (column.sort = {});
                        while (columnSort.next)
                            columnSort = columnSort.next;
                        columnSort.attribute = sort.attribute.slice(1);
                        columnSort.descending = sort.descending;
                    }
                    else if (column === sort.attribute[0]) {
                        // Bare column name: promote it to a descriptor carrying the nested sort.
                        select[i] = columnSort = {
                            name: column,
                            sort: {
                                attribute: sort.attribute.slice(1),
                                descending: sort.descending,
                            },
                        };
                    }
                }
            }
            if (sort.next)
                applySortingOnSelect(sort.next);
        };
        applySortingOnSelect(sort);
    }
    else {
        // No post-ordering: stream entries directly through the record transform.
        results.iterate = (entries[Symbol.asyncIterator] || entries[Symbol.iterator]).bind(entries);
        results = results.map(function (entry) {
            try {
                // because this is a part of a stream of results, we will often be continuing to iterate over the results when there are errors,
                // but to improve the legibility of the error, we attach the primary key to the error
                const result = transformToRecord.call(this, entry);
                // if it is a catchable thenable (promise)
                if (typeof result?.catch === 'function')
                    return result.catch((error) => {
                        error.partialObject = { [primaryKey]: entry.key };
                        throw error;
                    });
                return result;
            }
            catch (error) {
                error.partialObject = { [primaryKey]: entry.key };
                throw error;
            }
        });
    }
    return results;
}
|
|
2223
|
+
/**
 * Builds and returns a transform function that converts a store entry (or bare key)
 * into the record shape requested by `select`, resolving related/computed properties
 * via `propertyResolvers` and loading missing record values on demand.
 * The returned function may return a Promise (when loads/resolvers are async) or
 * `extended_iterable_1.SKIP` (when `canSkip` is set and the record is absent/skippable).
 * Note: the returned `transform` is intended to be invoked with *this* bound to the iterator.
 * @param select attribute selection: a string, an array of names/descriptors, or null for the whole record
 * @param context request context (transaction, user, caching flags, etc.)
 * @param readTxn read transaction used when lazily loading record values
 * @param filtered optional per-attribute filter maps used instead of running resolvers
 * @param ensure_loaded when true, entries flagged INVALIDATED/EVICTED are (re)loaded from the source
 * @param canSkip when true, missing/skippable records yield SKIP instead of null/undefined
 * @returns {Function} entry -> record (or Promise thereof, or SKIP)
 */
static transformEntryForSelect(select, context, readTxn, filtered, ensure_loaded, canSkip) {
    let checkLoaded;
    if (ensure_loaded &&
        hasSourceGet &&
        // determine if we need to fully load the records ahead of time; every selected
        // attribute being indexed (or the primary key) means the full record is not needed:
        !(typeof select === 'string' ? [select] : select)?.every((attribute) => {
            let attribute_name;
            if (typeof attribute === 'object') {
                attribute_name = attribute.name;
            }
            else
                attribute_name = attribute;
            // TODO: Resolvers may not need a full record, either because they are not using the record, or because they are a redirected property
            return indices[attribute_name] || attribute_name === primaryKey;
        })) {
        checkLoaded = true;
    }
    // caches nested per-attribute transforms so related-table transforms are built once
    let transformCache;
    const source = this.source;
    // Transform an entry to a record. Note that *this* instance is intended to be the iterator.
    const transform = function (entry) {
        let record;
        if (context?.transaction?.stale)
            context.transaction.stale = false;
        if (entry != undefined) {
            record = entry.deref ? entry.deref() : entry.value;
            if (entry.metadataFlags & exports.INVALIDATED && context.replicateFrom === false && canSkip && entry.residencyId) {
                return extended_iterable_1.SKIP;
            }
            if (!record && (entry.key === undefined || entry.deref)) {
                // if the record is not loaded, either due to the entry actually being a key, or the entry's value
                // being GC'ed, we need to load it now
                entry = loadLocalRecord(entry.key ?? entry, context, {
                    transaction: readTxn,
                    lazy: select?.length < 4,
                    ensureLoaded: ensure_loaded,
                }, this?.isSync, (entry) => entry);
                if (entry?.then)
                    return entry.then(transform.bind(this));
                record = entry?.value;
            }
            if ((checkLoaded && entry?.metadataFlags & (exports.INVALIDATED | exports.EVICTED)) || // invalidated or evicted should go to load from source
                (entry?.expiresAt != undefined && entry?.expiresAt < Date.now())) {
                // should expiration really apply?
                if (context.onlyIfCached) {
                    return {
                        [primaryKey]: entry.key,
                        message: 'This entry has expired',
                    };
                }
                const loadingFromSource = ensureLoadedFromSource(source, entry.key ?? entry, entry, context);
                if (loadingFromSource?.then) {
                    return loadingFromSource.then(transform);
                }
            }
        }
        if (record == null)
            return canSkip ? extended_iterable_1.SKIP : record;
        if (select && !(select[0] === '*' && select.length === 1)) {
            // promises accumulated while resolving attributes; awaited together at the end
            let promises;
            // resolves one attribute (plain property or resolver-backed relation) and
            // delivers the value through `callback(value, attribute_name)`
            const selectAttribute = (attribute, callback) => {
                let attribute_name;
                if (typeof attribute === 'object') {
                    attribute_name = attribute.name;
                }
                else
                    attribute_name = attribute;
                const resolver = propertyResolvers?.[attribute_name];
                let value;
                if (resolver) {
                    const filterMap = filtered?.[attribute_name];
                    if (filterMap) {
                        if (filterMap.hasMappings) {
                            const key = resolver.from ? record[resolver.from] : (0, search_ts_1.flattenKey)(entry.key);
                            value = filterMap.get(key);
                            if (!value)
                                value = [];
                        }
                        else {
                            value = filterMap.fromRecord?.(record);
                        }
                    }
                    else {
                        value = resolver(record, context, entry, true);
                    }
                    const handleResolvedValue = (value) => {
                        if (resolver.directReturn)
                            return callback(value, attribute_name);
                        if (value && typeof value === 'object') {
                            const targetTable = resolver.definition?.tableClass || TableResource;
                            if (!transformCache)
                                transformCache = {};
                            const transform = transformCache[attribute_name] ||
                                (transformCache[attribute_name] = targetTable.transformEntryForSelect(
                                // if it is a simple string, there is no select for the next level,
                                // otherwise pass along the nested selected
                                attribute_name === attribute
                                    ? null
                                    : attribute.select || (Array.isArray(attribute) ? attribute : null), context, readTxn, filterMap, ensure_loaded));
                            if (Array.isArray(value)) {
                                const results = [];
                                const iterator = targetTable
                                    .transformToOrderedSelect(value, attribute.select, typeof attribute.sort === 'object' && attribute.sort, context, readTxn, transform)[this.isSync ? Symbol.iterator : Symbol.asyncIterator]();
                                // drains the (possibly async) iterator into `results`
                                const nextValue = (iteration) => {
                                    while (!iteration.done) {
                                        if (iteration?.then)
                                            return iteration.then(nextValue);
                                        results.push(iteration.value);
                                        iteration = iterator.next();
                                    }
                                    callback(results, attribute_name);
                                };
                                const promised = nextValue(iterator.next());
                                if (promised) {
                                    if (!promises)
                                        promises = [];
                                    promises.push(promised);
                                }
                                return;
                            }
                            else {
                                value = transform.call(this, value);
                                if (value?.then) {
                                    if (!promises)
                                        promises = [];
                                    promises.push(value.then((value) => callback(value, attribute_name)));
                                    return;
                                }
                            }
                        }
                        callback(value, attribute_name);
                    };
                    if (value?.then) {
                        if (!promises)
                            promises = [];
                        promises.push(value.then(handleResolvedValue));
                    }
                    else
                        handleResolvedValue(value);
                    return;
                }
                else {
                    value = record[attribute_name];
                    if (value && typeof value === 'object' && attribute_name !== attribute) {
                        value = TableResource.transformEntryForSelect(attribute.select || attribute, context, readTxn, null)({ value });
                    }
                }
                callback(value, attribute_name);
            };
            let selected;
            if (typeof select === 'string') {
                selectAttribute(select, (value) => {
                    selected = value;
                });
            }
            else if (Array.isArray(select)) {
                if (select.asArray) {
                    selected = [];
                    select.forEach((attribute, index) => {
                        if (attribute === '*')
                            // NOTE(review): this writes into `select` (the selection spec), not
                            // `selected` (the output row), mutating the shared spec for later
                            // entries — looks like it may have been intended as
                            // `selected[index] = record`; confirm before changing
                            select[index] = record;
                        else
                            selectAttribute(attribute, (value) => (selected[index] = value));
                    });
                }
                else {
                    selected = {};
                    const forceNulls = select.forceNulls;
                    for (const attribute of select) {
                        if (attribute === '*')
                            for (const key in record) {
                                selected[key] = record[key];
                            }
                        else
                            selectAttribute(attribute, (value, attribute_name) => {
                                if (value === undefined && forceNulls)
                                    value = null;
                                selected[attribute_name] = value;
                            });
                    }
                }
            }
            else
                // NOTE(review): message is missing a space before the interpolated value
                throw new hdbError_js_1.ClientError('Invalid select' + select);
            if (promises) {
                return Promise.all(promises).then(() => selected);
            }
            return selected;
        }
        return record;
    };
    return transform;
}
|
|
2425
|
+
/**
 * Subscribes to change/message events for this table (collection) or for a single
 * record, optionally replaying history first (by `startTime` or `previousCount`)
 * and/or the current value, then streaming real-time events. Real-time events that
 * arrive during history replay are queued (`pendingRealTimeQueue`) and flushed in
 * order once replay completes.
 * Requires an audit store; enables auditing on the table if not already enabled.
 * @param request subscription options: startTime, previousCount, omitCurrent,
 *   rawEvents, isCollection, onlyChildren, supportsTransactions, listener, …
 * @returns the subscription object (an event emitter with a send queue)
 * @throws {Error} when the table has no audit log
 */
async subscribe(request) {
    if (!auditStore)
        throw new Error('Can not subscribe to a table without an audit log');
    if (!audit) {
        // enable auditing on this table so future events are recorded
        (0, databases_ts_1.table)({ table: tableName, database: databaseName, schemaDefined, attributes, audit: true });
    }
    if (!request)
        request = {};
    const getFullRecord = !request.rawEvents;
    let pendingRealTimeQueue = []; // while we are servicing a loop for older messages, we have to queue up real-time messages and deliver them in order
    const thisId = requestTargetToId(request) ?? null; // treat undefined and null as the root
    const subscription = (0, transactionBroadcast_ts_1.addSubscription)(TableResource, thisId, function (id, auditRecord, localTime, beginTxn) {
        // real-time listener: shapes an event from the audit record and either queues
        // it (during history replay) or sends it immediately
        try {
            let type = auditRecord.type;
            let value;
            if (type === 'message' || request.rawEvents) {
                // we only send the full message, these are individual messages that can be sent out of order
                // TODO: Do we want to have a limit to how far out-of-order we are willing to send?
                value = auditRecord.getValue?.(primaryStore, getFullRecord);
            }
            else if (type !== 'end_txn') {
                // these are events that indicate that the primary record has changed. I believe we always want to simply
                // send the latest value. Note that it is fine to synchronously access these records, they should have just
                // been written, so are fresh in memory.
                const entry = primaryStore.getEntry(id);
                if (entry) {
                    if (entry.version !== auditRecord.version)
                        return; // out of order event, with old update, don't send anything
                    value = entry.value;
                    type = entry.metadataFlags & exports.INVALIDATED ? 'invalidate' : value ? 'put' : 'delete';
                }
                else {
                    type = 'delete';
                }
            }
            const event = {
                id,
                localTime,
                value,
                version: auditRecord.version,
                type,
                beginTxn,
            };
            if (pendingRealTimeQueue)
                pendingRealTimeQueue.push(event);
            else {
                if (databaseName !== 'system') {
                    (0, write_ts_1.recordAction)(auditRecord.size ?? 1, 'db-message', tableName, null);
                }
                this.send(event);
            }
        }
        catch (error) {
            logger_ts_1.logger.error?.(error);
        }
    }, request.startTime || 0, request);
    // history replay runs asynchronously; errors are reported through the subscription
    const result = (async () => {
        const isCollection = request.isCollection ?? thisId == null;
        if (isCollection) {
            subscription.includeDescendants = true;
            if (request.onlyChildren)
                subscription.onlyChildren = true;
        }
        if (request.supportsTransactions)
            subscription.supportsTransactions = true;
        let count = request.previousCount;
        if (count > 1000)
            count = 1000; // don't allow too many, we have to hold these in memory
        let startTime = request.startTime;
        if (isCollection) {
            // a collection should retrieve all descendant ids
            if (startTime) {
                if (count)
                    throw new hdbError_js_1.ClientError('startTime and previousCount can not be combined for a table level subscription');
                // start time specified, get the audit history for this time range
                for (const auditRecord of auditStore.getRange({
                    start: startTime,
                    exclusiveStart: true,
                    snapshot: false, // no need for a snapshot, audits don't change
                })) {
                    if (auditRecord.tableId !== tableId)
                        continue;
                    const id = auditRecord.recordId;
                    if (thisId == null || isDescendantId(thisId, id)) {
                        const value = auditRecord.getValue(primaryStore, getFullRecord, auditRecord.localTime);
                        send({
                            id,
                            localTime: auditRecord.localTime,
                            value,
                            version: auditRecord.version,
                            type: auditRecord.type,
                            size: auditRecord.size,
                        });
                        if (subscription.queue?.length > EVENT_HIGH_WATER_MARK) {
                            // if we have too many messages, we need to pause and let the client catch up
                            if ((await subscription.waitForDrain()) === false)
                                return;
                        }
                    }
                    // TODO: Would like to do this asynchronously, but would need to catch up on anything published during iteration
                    //await rest(); // yield for fairness
                    subscription.startTime = auditRecord.localTime; // update so we don't double send
                }
            }
            else if (count) {
                const history = [];
                // we are collecting the history in reverse order to get the right count, then reversing to send
                for (const auditRecord of auditStore.getRange({ start: 'z', end: false, reverse: true })) {
                    try {
                        if (auditRecord.tableId !== tableId)
                            continue;
                        const id = auditRecord.recordId;
                        if (thisId == null || isDescendantId(thisId, id)) {
                            const value = auditRecord.getValue(primaryStore, getFullRecord, auditRecord.localTime);
                            history.push({
                                id,
                                localTime: auditRecord.localTime,
                                value,
                                version: auditRecord.version,
                                type: auditRecord.type,
                            });
                            if (--count <= 0)
                                break;
                        }
                    }
                    catch (error) {
                        logger_ts_1.logger.error?.('Error getting history entry', auditRecord.localTime, error);
                    }
                    // TODO: Would like to do this asynchronously, but would need to catch up on anything published during iteration
                    //await rest(); // yield for fairness
                }
                for (let i = history.length; i > 0;) {
                    send(history[--i]);
                }
                if (history[0])
                    subscription.startTime = history[0].localTime; // update so don't double send
            }
            else if (!request.omitCurrent) {
                // no history requested: stream the current contents of the range
                for (const { key: id, value, version, localTime, size } of primaryStore.getRange({
                    start: thisId ?? false,
                    end: thisId == null ? undefined : [thisId, ordered_binary_1.MAXIMUM_KEY],
                    versions: true,
                    snapshot: false, // no need for a snapshot, just want the latest data
                })) {
                    if (!value)
                        continue;
                    send({ id, localTime, value, version, type: 'put', size });
                    if (subscription.queue?.length > EVENT_HIGH_WATER_MARK) {
                        // if we have too many messages, we need to pause and let the client catch up
                        if ((await subscription.waitForDrain()) === false)
                            return;
                    }
                }
            }
        }
        else {
            // single-record subscription
            if (count && !startTime)
                startTime = 0;
            let entry = this.#entry;
            let localTime = entry?.localTime;
            if (!entry) {
                entry = primaryStore.getEntry(thisId);
                localTime = entry?.localTime;
            }
            else if (localTime === RecordEncoder_ts_1.PENDING_LOCAL_TIME) {
                // we can't use the pending commit because it doesn't have the local audit time yet,
                // so try to retrieve the previous/committed record
                primaryStore.cache?.delete(thisId);
                entry = primaryStore.getEntry(thisId);
                logger_ts_1.logger.trace?.('re-retrieved record', localTime, this.#entry?.localTime);
                localTime = entry?.localTime;
            }
            logger_ts_1.logger.trace?.('Subscription from', startTime, 'from', thisId, localTime);
            if (startTime < localTime) {
                // start time specified, get the audit history for this record by walking
                // the previousVersion/previousNodeId chain backwards
                const history = [];
                let nextTime = localTime;
                let nodeId = entry?.nodeId;
                do {
                    //TODO: Would like to do this asynchronously, but we will need to run catch after this to ensure we didn't miss anything
                    //await auditStore.prefetch([key]); // do it asynchronously for better fairness/concurrency and avoid page faults
                    const auditRecord = auditStore.getSync(nextTime, tableId, thisId, nodeId);
                    if (auditRecord) {
                        if (startTime < nextTime) {
                            request.omitCurrent = true; // we are sending the current version from history, so don't double send
                            const value = auditRecord.getValue(primaryStore, getFullRecord, nextTime);
                            if (getFullRecord)
                                auditRecord.type = 'put';
                            history.push({
                                id: thisId,
                                value,
                                localTime: nextTime,
                                ...auditRecord,
                            });
                        }
                        nextTime = auditRecord.previousVersion;
                        nodeId = auditRecord.previousNodeId;
                    }
                    else
                        break;
                    if (count)
                        count--;
                } while (nextTime > startTime && count !== 0);
                for (let i = history.length; i > 0;) {
                    send(history[--i]);
                }
                subscription.startTime = localTime; // make sure we don't re-broadcast the current version that we already sent
            }
            if (!request.omitCurrent && entry?.value) {
                // if retain and it exists, send the current value first
                send({
                    id: thisId,
                    ...entry,
                    type: 'put',
                });
            }
        }
        // now send any queued messages
        for (const event of pendingRealTimeQueue) {
            send(event);
        }
        // switch the real-time listener to direct delivery
        pendingRealTimeQueue = null;
    })();
    result.catch((error) => {
        harper_logger_js_1.default.error?.('Error in real-time subscription:', error);
        subscription.send(error);
    });
    // hoisted helper: records usage metrics (outside the system database) and delivers the event
    function send(event) {
        if (databaseName !== 'system') {
            (0, write_ts_1.recordAction)(event.size ?? 1, 'db-message', tableName, null);
        }
        subscription.send(event);
    }
    if (request.listener)
        subscription.on('data', request.listener);
    return subscription;
}
|
|
2662
|
+
/**
|
|
2663
|
+
* Subscribe on one thread unless this is a per-thread subscription
|
|
2664
|
+
* @param workerIndex
|
|
2665
|
+
* @param options
|
|
2666
|
+
*/
|
|
2667
|
+
static subscribeOnThisThread(workerIndex, options) {
|
|
2668
|
+
return workerIndex === 0 || options?.crossThreads === false;
|
|
2669
|
+
}
|
|
2670
|
+
doesExist() {
|
|
2671
|
+
return Boolean(this.#record || this.#savingOperation);
|
|
2672
|
+
}
|
|
2673
|
+
/**
 * Publishing a message to a record adds an (observable) entry in the audit log, but does not change
 * the record at all. These entries should be replicated and trigger subscription listeners.
 * @param target the request target identifying the record/topic (legacy form: the message itself)
 * @param message the message to publish (legacy form: a URLSearchParams query, or undefined)
 * @param options publish options passed through to _writePublish (residencyId, nodeId, viaNodeId)
 * @returns result of the permission-checked write, or undefined on the legacy path
 */
publish(target, message, options) {
    if (message === undefined || message instanceof URLSearchParams) {
        // legacy arg format, shift the args: `target` is actually the message and
        // `message` the (optional) query
        // NOTE(review): the legacy path does not return the write result — confirm intentional
        this._writePublish(this.getId(), target, message);
    }
    else {
        let allowed = true;
        const context = this.getContext();
        if (target.checkPermission) {
            // requesting authorization verification (result may be async; resolved via when())
            allowed = this.allowCreate(context.user, message, context);
        }
        return (0, when_ts_1.when)(allowed, (allowed) => {
            if (!allowed) {
                throw new hdbError_js_1.AccessViolation(context.user);
            }
            const id = requestTargetToId(target);
            this._writePublish(id, message, options);
        });
    }
}
|
|
2701
|
+
/**
 * Queues the actual publish write on the current transaction. The write appends an
 * audit entry of type 'message' for `id` without changing the record's value
 * (updateRecord is called with the existing value and the existing version preserved).
 * @param id target record id; null publishes to the root topic
 * @param message the message payload (validated unless the context has a source)
 * @param options optional write metadata: residencyId, nodeId, viaNodeId
 */
_writePublish(id, message, options) {
    const transaction = txnForContext(this.getContext());
    id ??= null;
    if (id !== null)
        checkValidId(id); // note that we allow the null id for publishing so that you can publish to the root topic
    const context = this.getContext();
    transaction.addWrite({
        key: id,
        store: primaryStore,
        entry: this.#entry,
        nodeName: context?.nodeName,
        // validation is skipped for source-originated (e.g. replicated) writes
        validate: () => {
            if (!context?.source) {
                transaction.checkOverloaded();
                this.validate(message);
            }
        },
        // forward the publish to the upstream source first, when one exists and
        // this write did not itself originate from a source
        before: this.constructor.source?.publish && !context?.source
            ? this.constructor.source.publish.bind(this.constructor.source, id, message, context)
            : undefined,
        beforeIntermediate: preCommitBlobsForRecordBefore(message, undefined, true // because transaction log entries can be deleted at any point, we must save the blobs in the record, there is no cleanup of them
        ),
        commit: (txnTime, existingEntry, _retry, transaction) => {
            // just need to update the version number of the record so it points to the latest audit record
            // but have to update the version number of the record
            // TODO: would be faster to use getBinaryFast here and not have the record loaded
            if (existingEntry === undefined && trackDeletes && !audit) {
                scheduleCleanup();
            }
            logger_ts_1.logger.trace?.(`Publishing message to id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);
            // always audit this, but don't change existing version
            // TODO: Use direct writes in the future (copying binary data is hard because it invalidates the cache)
            updateRecord(id, existingEntry?.value ?? null, existingEntry, txnTime, 0, true, {
                user: context?.user,
                residencyId: options?.residencyId,
                expiresAt: context?.expiresAt,
                nodeId: options?.nodeId,
                viaNodeId: options?.viaNodeId,
                transaction,
                tableToTrack: tableName,
            }, 'message', false, message);
        },
    });
}
|
|
2745
|
+
validate(record, patch) {
|
|
2746
|
+
let validationErrors;
|
|
2747
|
+
const validateValue = (value, attribute, name) => {
|
|
2748
|
+
if (attribute.type && value != null) {
|
|
2749
|
+
if (patch && value.__op__)
|
|
2750
|
+
value = value.value;
|
|
2751
|
+
if (attribute.properties) {
|
|
2752
|
+
if (typeof value !== 'object') {
|
|
2753
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be an object${attribute.type ? ' (' + attribute.type + ')' : ''}`);
|
|
2754
|
+
}
|
|
2755
|
+
const properties = attribute.properties;
|
|
2756
|
+
for (let i = 0, l = properties.length; i < l; i++) {
|
|
2757
|
+
const attribute = properties[i];
|
|
2758
|
+
if (attribute.relationship || attribute.computed) {
|
|
2759
|
+
if (record.hasOwnProperty(attribute.name)) {
|
|
2760
|
+
(validationErrors || (validationErrors = [])).push(`Computed property ${name}.${attribute.name} may not be directly assigned a value`);
|
|
2761
|
+
}
|
|
2762
|
+
continue;
|
|
2763
|
+
}
|
|
2764
|
+
const updated = validateValue(value[attribute.name], attribute, name + '.' + attribute.name);
|
|
2765
|
+
if (updated)
|
|
2766
|
+
value[attribute.name] = updated;
|
|
2767
|
+
}
|
|
2768
|
+
if (attribute.sealed && value != null && typeof value === 'object') {
|
|
2769
|
+
for (const key in value) {
|
|
2770
|
+
if (!properties.find((property) => property.name === key)) {
|
|
2771
|
+
(validationErrors || (validationErrors = [])).push(`Property ${key} is not allowed within object in property ${name}`);
|
|
2772
|
+
}
|
|
2773
|
+
}
|
|
2774
|
+
}
|
|
2775
|
+
}
|
|
2776
|
+
else {
|
|
2777
|
+
switch (attribute.type) {
|
|
2778
|
+
case 'Int':
|
|
2779
|
+
if (typeof value !== 'number' || value >> 0 !== value)
|
|
2780
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be an integer (from -2147483648 to 2147483647)`);
|
|
2781
|
+
break;
|
|
2782
|
+
case 'Long':
|
|
2783
|
+
if (typeof value !== 'number' || !(Math.floor(value) === value && Math.abs(value) <= 9007199254740992))
|
|
2784
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be an integer (from -9007199254740992 to 9007199254740992)`);
|
|
2785
|
+
break;
|
|
2786
|
+
case 'Float':
|
|
2787
|
+
if (typeof value !== 'number')
|
|
2788
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a number`);
|
|
2789
|
+
break;
|
|
2790
|
+
case 'ID':
|
|
2791
|
+
if (!(typeof value === 'string' ||
|
|
2792
|
+
(value?.length > 0 && value.every?.((value) => typeof value === 'string'))))
|
|
2793
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a string, or an array of strings`);
|
|
2794
|
+
break;
|
|
2795
|
+
case 'String':
|
|
2796
|
+
if (typeof value !== 'string')
|
|
2797
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a string`);
|
|
2798
|
+
break;
|
|
2799
|
+
case 'Boolean':
|
|
2800
|
+
if (typeof value !== 'boolean')
|
|
2801
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a boolean`);
|
|
2802
|
+
break;
|
|
2803
|
+
case 'Date':
|
|
2804
|
+
if (!(value instanceof Date)) {
|
|
2805
|
+
if (typeof value === 'string' || typeof value === 'number')
|
|
2806
|
+
return new Date(value);
|
|
2807
|
+
else
|
|
2808
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a Date`);
|
|
2809
|
+
}
|
|
2810
|
+
break;
|
|
2811
|
+
case 'BigInt':
|
|
2812
|
+
if (typeof value !== 'bigint') {
|
|
2813
|
+
// do coercion because otherwise it is rather difficult to get numbers to consistently be bigints
|
|
2814
|
+
if (typeof value === 'string' || typeof value === 'number')
|
|
2815
|
+
return BigInt(value);
|
|
2816
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a bigint`);
|
|
2817
|
+
}
|
|
2818
|
+
break;
|
|
2819
|
+
case 'Bytes':
|
|
2820
|
+
if (!(value instanceof Uint8Array)) {
|
|
2821
|
+
if (typeof value === 'string')
|
|
2822
|
+
return Buffer.from(value);
|
|
2823
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a Buffer or Uint8Array`);
|
|
2824
|
+
}
|
|
2825
|
+
break;
|
|
2826
|
+
case 'Blob':
|
|
2827
|
+
if (!(value instanceof blob_ts_1.Blob)) {
|
|
2828
|
+
if (typeof value === 'string')
|
|
2829
|
+
value = Buffer.from(value);
|
|
2830
|
+
if (value instanceof Buffer) {
|
|
2831
|
+
return createBlob(value, { type: 'text/plain' });
|
|
2832
|
+
}
|
|
2833
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be a Blob`);
|
|
2834
|
+
}
|
|
2835
|
+
break;
|
|
2836
|
+
case 'array':
|
|
2837
|
+
if (Array.isArray(value)) {
|
|
2838
|
+
if (attribute.elements) {
|
|
2839
|
+
for (let i = 0, l = value.length; i < l; i++) {
|
|
2840
|
+
const element = value[i];
|
|
2841
|
+
const updated = validateValue(element, attribute.elements, name + '[*]');
|
|
2842
|
+
if (updated)
|
|
2843
|
+
value[i] = updated;
|
|
2844
|
+
}
|
|
2845
|
+
}
|
|
2846
|
+
}
|
|
2847
|
+
else
|
|
2848
|
+
(validationErrors || (validationErrors = [])).push(`Value ${stringify(value)} in property ${name} must be an Array`);
|
|
2849
|
+
break;
|
|
2850
|
+
}
|
|
2851
|
+
}
|
|
2852
|
+
}
|
|
2853
|
+
if (attribute.nullable === false && value == null) {
|
|
2854
|
+
(validationErrors || (validationErrors = [])).push(`Property ${name} is required (and not does not allow null values)`);
|
|
2855
|
+
}
|
|
2856
|
+
};
|
|
2857
|
+
for (let i = 0, l = attributes.length; i < l; i++) {
|
|
2858
|
+
const attribute = attributes[i];
|
|
2859
|
+
if (attribute.relationship || attribute.computed) {
|
|
2860
|
+
if (Object.hasOwn(record, attribute.name)) {
|
|
2861
|
+
(validationErrors || (validationErrors = [])).push(`Computed property ${attribute.name} may not be directly assigned a value`);
|
|
2862
|
+
}
|
|
2863
|
+
continue;
|
|
2864
|
+
}
|
|
2865
|
+
if (!patch || attribute.name in record) {
|
|
2866
|
+
const updated = validateValue(record[attribute.name], attribute, attribute.name);
|
|
2867
|
+
if (updated !== undefined)
|
|
2868
|
+
record[attribute.name] = updated;
|
|
2869
|
+
}
|
|
2870
|
+
}
|
|
2871
|
+
if (sealed) {
|
|
2872
|
+
for (const key in record) {
|
|
2873
|
+
if (!attributes.find((attribute) => attribute.name === key)) {
|
|
2874
|
+
(validationErrors || (validationErrors = [])).push(`Property ${key} is not allowed`);
|
|
2875
|
+
}
|
|
2876
|
+
}
|
|
2877
|
+
}
|
|
2878
|
+
if (validationErrors) {
|
|
2879
|
+
throw new hdbError_js_1.ClientError(validationErrors.join('. '));
|
|
2880
|
+
}
|
|
2881
|
+
}
|
|
2882
|
+
getUpdatedTime() {
|
|
2883
|
+
return this.#version;
|
|
2884
|
+
}
|
|
2885
|
+
static async addAttributes(attributesToAdd) {
|
|
2886
|
+
const new_attributes = attributes.slice(0);
|
|
2887
|
+
for (const attribute of attributesToAdd) {
|
|
2888
|
+
if (!attribute.name)
|
|
2889
|
+
throw new hdbError_js_1.ClientError('Attribute name is required');
|
|
2890
|
+
if (attribute.name.match(/[`/]/))
|
|
2891
|
+
throw new hdbError_js_1.ClientError('Attribute names cannot include backticks or forward slashes');
|
|
2892
|
+
validateAttribute(attribute.name);
|
|
2893
|
+
new_attributes.push(attribute);
|
|
2894
|
+
}
|
|
2895
|
+
(0, databases_ts_1.table)({
|
|
2896
|
+
table: tableName,
|
|
2897
|
+
database: databaseName,
|
|
2898
|
+
schemaDefined,
|
|
2899
|
+
attributes: new_attributes,
|
|
2900
|
+
});
|
|
2901
|
+
return TableResource.indexingOperation;
|
|
2902
|
+
}
|
|
2903
|
+
static async removeAttributes(names) {
|
|
2904
|
+
const new_attributes = attributes.filter((attribute) => !names.includes(attribute.name));
|
|
2905
|
+
(0, databases_ts_1.table)({
|
|
2906
|
+
table: tableName,
|
|
2907
|
+
database: databaseName,
|
|
2908
|
+
schemaDefined,
|
|
2909
|
+
attributes: new_attributes,
|
|
2910
|
+
});
|
|
2911
|
+
return TableResource.indexingOperation;
|
|
2912
|
+
}
|
|
2913
|
+
/**
|
|
2914
|
+
* Get the size of the table in bytes (based on amount of pages stored in the database)
|
|
2915
|
+
* @param options
|
|
2916
|
+
*/
|
|
2917
|
+
static getSize() {
|
|
2918
|
+
if (isRocksDB) {
|
|
2919
|
+
return primaryStore.getDBIntProperty('rocksdb.estimate-live-data-size') ?? 0;
|
|
2920
|
+
}
|
|
2921
|
+
const stats = primaryStore.getStats();
|
|
2922
|
+
return (stats.treeBranchPageCount + stats.treeLeafPageCount + stats.overflowPages) * stats.pageSize;
|
|
2923
|
+
}
|
|
2924
|
+
static getAuditSize() {
|
|
2925
|
+
const stats = auditStore?.getStats();
|
|
2926
|
+
return (stats &&
|
|
2927
|
+
(stats.totalSize ??
|
|
2928
|
+
(stats.treeBranchPageCount + stats.treeLeafPageCount + stats.overflowPages) * stats.pageSize));
|
|
2929
|
+
}
|
|
2930
|
+
static getStorageStats() {
|
|
2931
|
+
const stats = node_fs_1.default.statfsSync(primaryStore.path);
|
|
2932
|
+
return {
|
|
2933
|
+
available: stats.bavail * stats.bsize,
|
|
2934
|
+
free: stats.bfree * stats.bsize,
|
|
2935
|
+
size: stats.blocks * stats.bsize,
|
|
2936
|
+
};
|
|
2937
|
+
}
|
|
2938
|
+
/**
 * Count the records in the table, excluding metadata entries and deletion
 * tombstones (entries with null values). By default the scan is time-boxed:
 * if counting is on pace to exceed ~1 second, the method falls back to an
 * estimate computed from a sample at the start and end of the key range,
 * returning the estimate plus a confidence interval.
 * @param {{exactCount?: boolean}} [options] - pass exactCount to force a full scan
 * @returns {Promise<{recordCount: number, estimatedRange?: [number, number]}>}
 */
static async getRecordCount(options) {
    // iterate through the metadata entries to exclude their count and exclude the deletion counts
    const entryCount = primaryStore.getStats().entryCount;
    const TIME_LIMIT = 1000 / 2; // one second time limit, enforced by seeing if we are halfway through at 500ms
    const start = performance.now();
    const halfway = Math.floor(entryCount / 2);
    const exactCount = options?.exactCount;
    let recordCount = 0;
    let entriesScanned = 0;
    let limit;
    // forward scan from the beginning of the key space; lazy + no snapshot keeps this cheap
    for (const { value } of primaryStore.getRange({ start: true, lazy: true, snapshot: false })) {
        if (value != null)
            recordCount++;
        entriesScanned++;
        await rest();
        if (!exactCount && entriesScanned < halfway && performance.now() - start > TIME_LIMIT) {
            // it is taking too long, so we will just take this sample and a sample from the end to estimate
            limit = entriesScanned;
            break;
        }
    }
    if (limit) {
        // in this case we are going to make an estimate of the table count using the first thousand
        // entries and last thousand entries
        const firstRecordCount = recordCount;
        recordCount = 0;
        // reverse scan of the same number of entries from the end of the key space
        for (const { value } of primaryStore.getRange({
            start: '\uffff',
            reverse: true,
            lazy: true,
            limit,
            snapshot: false,
        })) {
            if (value != null)
                recordCount++;
            await rest();
        }
        const sampleSize = limit * 2;
        // fraction of scanned entries that are live records
        const recordRate = (recordCount + firstRecordCount) / sampleSize;
        const variance = Math.pow((recordCount - firstRecordCount + 1) / limit / 2, 2) + // variance between samples
            (recordRate * (1 - recordRate)) / sampleSize;
        // standard deviation scaled to the full table, floored at 1
        const sd = Math.max(Math.sqrt(variance) * entryCount, 1);
        const estimatedRecordCount = Math.round(recordRate * entryCount);
        // TODO: This uses a normal/Wald interval, but a binomial confidence interval is probably better calculated using
        // Wilson score interval or Agresti-Coull interval (I think the latter is a little easier to calculate/implement).
        const lowerCiLimit = Math.max(estimatedRecordCount - 1.96 * sd, recordCount + firstRecordCount);
        const upperCiLimit = Math.min(estimatedRecordCount + 1.96 * sd, entryCount);
        // round the estimate to a significance level implied by the standard deviation
        let significantUnit = Math.pow(10, Math.round(Math.log10(sd)));
        if (significantUnit > estimatedRecordCount)
            significantUnit = significantUnit / 10;
        recordCount = Math.round(estimatedRecordCount / significantUnit) * significantUnit;
        return {
            recordCount,
            estimatedRange: [Math.round(lowerCiLimit), Math.round(upperCiLimit)],
        };
    }
    return {
        recordCount,
    };
}
|
|
2998
|
+
/**
 * When attributes have been changed, we update the accessors that are assigned to this table.
 * Rebuilds the propertyResolvers map (special $-prefixed pseudo-properties plus
 * per-attribute resolvers for relationships, computed attributes and custom
 * indices), then reinstalls tracked accessors and prototype property definitions
 * on the store's struct prototype.
 */
static updatedAttributes() {
    // built-in pseudo-properties resolvable on any record
    propertyResolvers = this.propertyResolvers = {
        $id: (object, context, entry) => ({ value: entry.key }),
        $updatedtime: (object, context, entry) => entry.version,
        $updatedTime: (object, context, entry) => entry.version,
        $expiresAt: (object, context, entry) => entry.expiresAt,
        $record: (object, context, entry) => (entry ? { value: object } : object),
        $distance: (object, context, entry) => {
            return entry && (entry.distance ?? context?.vectorDistances?.get(entry));
        },
    };
    for (const attribute of this.attributes) {
        if (attribute.isPrimaryKey)
            primaryKeyAttribute = attribute;
        attribute.resolve = null; // reset this
        const relationship = attribute.relationship;
        const computed = attribute.computed;
        if (relationship) {
            if (attribute.indexed) {
                console.error(`A relationship property can not be directly indexed, (but you may want to index the foreign key attribute)`);
            }
            if (computed) {
                console.error(`A relationship property is already computed and can not be combined with a computed function (the relationship will be given precedence)`);
            }
            hasRelationships = true;
            if (relationship.to) {
                // one-to-many/many-to-many: resolve by searching the related table's index on relationship.to
                if (attribute.elements?.definition) {
                    propertyResolvers[attribute.name] = attribute.resolve = (object, context, entry, returnEntry) => {
                        // TODO: Get raw record/entry?
                        const id = object[relationship.from ? relationship.from : primaryKey];
                        const relatedTable = attribute.elements.definition.tableClass;
                        if (returnEntry) {
                            return (0, search_ts_1.searchByIndex)({ attribute: relationship.to, value: id }, txnForContext(context).getReadTxn(), false, relatedTable, false).map((entry) => {
                                if (entry && entry.key !== undefined)
                                    return entry;
                                return relatedTable.primaryStore.getEntry(entry, {
                                    transaction: txnForContext(context).getReadTxn(),
                                });
                            }).asArray;
                        }
                        return relatedTable.search([{ attribute: relationship.to, value: id }], context).asArray;
                    };
                    attribute.set = () => {
                        // ideally we want to throw an error here, but if the user had (accidently?) set a property into storage
                        // conflicts with this attribute, we don't want to prevent loading
                        // throw new Error('Setting a one-to-many relationship property is not supported');
                    };
                    attribute.resolve.definition = attribute.elements.definition;
                    // preserve relationship information for searching
                    attribute.resolve.to = relationship.to;
                    if (relationship.from)
                        attribute.resolve.from = relationship.from;
                }
                else
                    console.error(`The one-to-many/many-to-many relationship property "${attribute.name}" in table "${tableName}" must have an array type referencing a table as the elements`);
            }
            else if (relationship.from) {
                // many-to-one/one-to-one: resolve by direct lookup of the foreign key(s) stored on this record
                const definition = attribute.definition || attribute.elements?.definition;
                if (definition) {
                    propertyResolvers[attribute.name] = attribute.resolve = (object, context, entry, returnEntry) => {
                        const ids = object[relationship.from];
                        if (ids === undefined)
                            return undefined;
                        if (attribute.elements) {
                            // array of foreign keys — fetch each related record
                            let hasPromises;
                            const results = ids?.map((id) => {
                                const value = definition.tableClass.primaryStore[returnEntry ? 'getEntry' : 'get'](id, {
                                    transaction: txnForContext(context).getReadTxn(),
                                });
                                if (value?.then)
                                    hasPromises = true;
                                // for now, we shouldn't be getting promises until rocksdb
                                if (TableResource.loadAsInstance === false)
                                    Object.freeze(returnEntry ? value?.value : value);
                                return value;
                            });
                            return relationship.filterMissing
                                ? hasPromises
                                    ? Promise.all(results).then((results) => results.filter(exists))
                                    : results.filter(exists)
                                : hasPromises
                                    ? Promise.all(results)
                                    : results;
                        }
                        const value = definition.tableClass.primaryStore[returnEntry ? 'getEntry' : 'getSync'](ids, {
                            transaction: txnForContext(context).getReadTxn(),
                        });
                        // for now, we shouldn't be getting promises until rocksdb
                        if (TableResource.loadAsInstance === false)
                            Object.freeze(returnEntry ? value?.value : value);
                        return value;
                    };
                    // assigning related record(s) writes their primary key(s) into the foreign-key property
                    attribute.set = (object, related) => {
                        if (Array.isArray(related)) {
                            const targetIds = related.map((related) => related.getId?.() || related[definition.tableClass.primaryKey]);
                            object[relationship.from] = targetIds;
                        }
                        else {
                            const targetId = related.getId?.() || related[definition.tableClass.primaryKey];
                            object[relationship.from] = targetId;
                        }
                    };
                    attribute.resolve.definition = attribute.definition || attribute.elements?.definition;
                    attribute.resolve.from = relationship.from;
                }
                else {
                    console.error(`The relationship property "${attribute.name}" in table "${tableName}" must be a type that references a table`);
                }
            }
            else {
                console.error(`The relationship directive on "${attribute.name}" in table "${tableName}" must use either "from" or "to" arguments`);
            }
        }
        else if (computed) {
            if (typeof computed.from === 'function') {
                this.setComputedAttribute(attribute.name, computed.from);
            }
            propertyResolvers[attribute.name] = attribute.resolve = (object, context, entry) => {
                const value = typeof computed.from === 'string' ? object[computed.from] : object;
                const userResolver = this.userResolvers[attribute.name];
                if (userResolver)
                    return userResolver(value, context, entry);
                else {
                    logger_ts_1.logger.warn?.(`Computed attribute "${attribute.name}" does not have a function assigned to it. Please use setComputedAttribute('${attribute.name}', resolver) to assign a resolver function.`);
                    // silence future warnings but just returning undefined
                    this.userResolvers[attribute.name] = () => { };
                }
            };
            attribute.resolve.directReturn = true;
        }
        else if (indices[attribute.name]?.customIndex?.propertyResolver) {
            // custom index supplies its own property resolver
            const customIndex = indices[attribute.name].customIndex;
            propertyResolvers[attribute.name] = (object, context, entry) => {
                const value = object[attribute.name];
                return customIndex.propertyResolver(value, context, entry);
            };
            propertyResolvers[attribute.name].directReturn = true;
        }
    }
    (0, tracked_ts_1.assignTrackedAccessors)(this, this);
    (0, tracked_ts_1.assignTrackedAccessors)(Updatable, this, true);
    // install resolver-backed accessors on the store's decoded-record prototype
    for (const attribute of attributes) {
        const name = attribute.name;
        if (attribute.resolve) {
            Object.defineProperty(primaryStore.encoder.structPrototype, name, {
                get() {
                    return attribute.resolve(this, transaction_ts_1.contextStorage.getStore()); // it is only possible to get the context from ALS, we don't have a direct reference to the current context
                },
                set(related) {
                    return attribute.set(this, related);
                },
                configurable: true,
                enumerable: attribute.enumerable,
            });
            if (attribute.enumerable && !primaryStore.encoder.structPrototype.toJSON) {
                // ensure JSON serialization picks up enumerable prototype accessors
                Object.defineProperty(primaryStore.encoder.structPrototype, 'toJSON', {
                    configurable: true,
                    value() {
                        const json = {};
                        for (const key in this) {
                            // copy all enumerable properties, including from prototype
                            json[key] = this[key];
                        }
                        return json;
                    },
                });
            }
        }
    }
}
|
|
3171
|
+
static setComputedAttribute(attribute_name, resolver) {
|
|
3172
|
+
const attribute = (0, search_ts_1.findAttribute)(attributes, attribute_name);
|
|
3173
|
+
if (!attribute) {
|
|
3174
|
+
console.error(`The attribute "${attribute_name}" does not exist in the table "${tableName}"`);
|
|
3175
|
+
return;
|
|
3176
|
+
}
|
|
3177
|
+
if (!attribute.computed) {
|
|
3178
|
+
console.error(`The attribute "${attribute_name}" is not defined as computed in the table "${tableName}"`);
|
|
3179
|
+
return;
|
|
3180
|
+
}
|
|
3181
|
+
this.userResolvers[attribute_name] = resolver;
|
|
3182
|
+
}
|
|
3183
|
+
/**
 * Deletes audit-log entries for this table older than endTime, and optionally
 * sweeps orphaned deletion tombstones from the primary store.
 * NOTE(review): only the last removal's promise is awaited; presumably the
 * underlying store queues writes in order so awaiting the last implies the
 * rest have committed — confirm against the store's write semantics.
 * @param {number} [endTime=0] - delete audit entries with timestamps before this value
 * @param {boolean} [cleanupDeletedRecords=false] - also remove stale deletion markers from the primary store
 */
static async deleteHistory(endTime = 0, cleanupDeletedRecords = false) {
    let completion;
    for (const auditRecord of auditStore.getRange({
        start: 0,
        end: endTime,
    })) {
        await rest(); // yield to other async operations
        if (auditRecord.tableId !== tableId)
            continue; // audit store is shared; skip other tables' entries
        completion = (0, auditStore_ts_1.removeAuditEntry)(auditStore, auditRecord);
    }
    if (cleanupDeletedRecords) {
        // this is separate procedure we can do if the records are not being cleaned up by the audit log. This shouldn't
        // ever happen, but if there are cleanup failures for some reason, we can run this to clean up the records
        for (const entry of primaryStore.getRange({ start: 0, versions: true })) {
            const { value, localTime } = entry;
            await rest(); // yield to other async operations
            if (value === null && localTime < endTime) {
                // null value = deletion tombstone older than the cutoff
                completion = (0, RecordEncoder_ts_1.removeEntry)(primaryStore, entry);
            }
        }
    }
    await completion;
}
|
|
3207
|
+
static async *getHistory(startTime = 0, endTime = Infinity) {
|
|
3208
|
+
for (const auditRecord of auditStore.getRange({
|
|
3209
|
+
start: startTime || 1, // if startTime is 0, we actually want to shift to 1 because 0 is encoded as all zeros with audit store's special encoder, and will include symbols
|
|
3210
|
+
end: endTime,
|
|
3211
|
+
})) {
|
|
3212
|
+
await rest(); // yield to other async operations
|
|
3213
|
+
if (auditRecord.tableId !== tableId)
|
|
3214
|
+
continue;
|
|
3215
|
+
yield {
|
|
3216
|
+
id: auditRecord.recordId,
|
|
3217
|
+
localTime: auditRecord.version,
|
|
3218
|
+
version: auditRecord.version,
|
|
3219
|
+
type: auditRecord.type,
|
|
3220
|
+
value: auditRecord.getValue(primaryStore, true, auditRecord.version),
|
|
3221
|
+
user: auditRecord.user,
|
|
3222
|
+
operation: auditRecord.originatingOperation,
|
|
3223
|
+
};
|
|
3224
|
+
}
|
|
3225
|
+
}
|
|
3226
|
+
static async getHistoryOfRecord(id) {
|
|
3227
|
+
const history = [];
|
|
3228
|
+
if (id == undefined)
|
|
3229
|
+
throw new Error('An id is required');
|
|
3230
|
+
const entry = primaryStore.getEntry(id);
|
|
3231
|
+
if (!entry)
|
|
3232
|
+
return history;
|
|
3233
|
+
let nextVersion = entry.localTime;
|
|
3234
|
+
if (!nextVersion)
|
|
3235
|
+
throw new Error('The entry does not have a local audit time');
|
|
3236
|
+
const count = 0;
|
|
3237
|
+
const auditWindow = 100;
|
|
3238
|
+
do {
|
|
3239
|
+
await rest(); // yield to other async operations
|
|
3240
|
+
let insertionPoint = history.length;
|
|
3241
|
+
let highestPreviousVersion = 0;
|
|
3242
|
+
const start = nextVersion - auditWindow;
|
|
3243
|
+
for (const auditRecord of auditStore.getRange({ start, end: nextVersion + 0.001 })) {
|
|
3244
|
+
if (auditRecord.tableId === tableId && (0, ordered_binary_1.compareKeys)(auditRecord.recordId, id) === 0) {
|
|
3245
|
+
history.splice(insertionPoint, 0, {
|
|
3246
|
+
id: auditRecord.recordId,
|
|
3247
|
+
localTime: nextVersion,
|
|
3248
|
+
version: auditRecord.version,
|
|
3249
|
+
type: auditRecord.type,
|
|
3250
|
+
value: auditRecord.getValue(primaryStore, true, nextVersion),
|
|
3251
|
+
user: auditRecord.user,
|
|
3252
|
+
operation: auditRecord.originatingOperation,
|
|
3253
|
+
});
|
|
3254
|
+
if (auditRecord.previousVersion > highestPreviousVersion && auditRecord.previousVersion < start) {
|
|
3255
|
+
highestPreviousVersion = auditRecord.previousVersion;
|
|
3256
|
+
}
|
|
3257
|
+
}
|
|
3258
|
+
}
|
|
3259
|
+
nextVersion = highestPreviousVersion;
|
|
3260
|
+
} while (count < 1000 && nextVersion);
|
|
3261
|
+
return history.reverse();
|
|
3262
|
+
}
|
|
3263
|
+
// Removes all entries from the primary store for this table.
static clear() {
    return primaryStore.clear();
}
|
|
3266
|
+
// Detaches the delete-callback registration for this table, if one was installed.
static cleanup() {
    deleteCallbackHandle?.remove();
}
|
|
3269
|
+
// Returns the read transaction associated with the given request context.
static _readTxnForContext(context) {
    return txnForContext(context).getReadTxn();
}
|
|
3272
|
+
}
|
|
3273
|
+
// Throttled wrapper for resolving a cache miss from the table's external data
// source. When the throttle's queue limit is exceeded, the overflow handler
// throws a 503 ServerError instead of queuing more work.
const throttledCallToSource = (0, throttle_ts_1.throttle)(async (source, id, sourceContext, existingEntry) => {
    // call the data source if it exists and will fulfill our request for data
    if (source && source.get && (!source.get.reliesOnPrototype || source.prototype.get)) {
        if (source.available?.(existingEntry) !== false) {
            sourceContext.source = source;
            const resolvedData = await source.get(id, sourceContext);
            if (resolvedData)
                return resolvedData;
        }
    }
}, () => {
    throw new hdbError_js_1.ServerError('Service unavailable, exceeded request queue limit for resolving cache record', 503);
});
TableResource.updatedAttributes(); // on creation, update accessors as well
// configure table-level TTL expiration if an expiration interval was defined
if (expirationMs)
    TableResource.setTTLExpiration(expirationMs / 1000);
// start the eviction sweep when records carry their own expiresAt property
if (expiresAtProperty)
    runRecordExpirationEviction();
return TableResource;
|
|
3292
|
+
/**
 * Synchronizes all secondary indices for one record write, diffing the old and
 * new indexed values so only changed index entries are removed/added. Runs
 * inside a write transaction, so it is written for minimal overhead.
 * @param {*} id - the record's primary key
 * @param {object|null} existingRecord - record value before the write (null/undefined on insert)
 * @param {object|null} record - record value after the write (null/undefined on delete)
 * @param {object} options - store put/remove options for this transaction
 * @returns {boolean|undefined} truthy when any non-custom index was modified
 */
function updateIndices(id, existingRecord, record, options) {
    let hasChanges;
    // iterate the entries from the record
    // for-in is about 5x as fast as for-of Object.entries, and this is extremely time sensitive since it can be
    // inside a write transaction
    // TODO: Make an array version of indices that is faster
    for (const key in indices) {
        const index = indices[key];
        const isIndexing = index.isIndexing;
        const resolver = propertyResolvers[key];
        const value = record && (resolver ? resolver(record) : record[key]);
        const existingValue = existingRecord && (resolver ? resolver(existingRecord) : existingRecord[key]);
        if (value === existingValue && !isIndexing) {
            continue; // unchanged and not mid-reindex: nothing to do for this index
        }
        if (index.customIndex) {
            // custom indices manage their own storage
            index.customIndex.index(id, value, existingValue, options);
            continue;
        }
        hasChanges = true;
        const indexNulls = index.indexNulls;
        // determine what index values need to be removed and added
        let valuesToAdd = (0, commonUtility_js_1.getIndexedValues)(value, indexNulls);
        let valuesToRemove = (0, commonUtility_js_1.getIndexedValues)(existingValue, indexNulls);
        if (valuesToRemove?.length > 0) {
            // put this in a conditional so we can do a faster version for new records
            // determine the changes/diff from new values and old values
            const setToRemove = new Set(valuesToRemove);
            valuesToAdd = valuesToAdd
                ? valuesToAdd.filter((value) => {
                    if (setToRemove.has(value)) {
                        // if the value is retained, we don't need to remove or add it, so remove it from the set
                        setToRemove.delete(value);
                    }
                    else {
                        // keep in the list of values to add to index
                        return true;
                    }
                })
                : [];
            valuesToRemove = Array.from(setToRemove);
            if ((valuesToRemove.length > 0 || valuesToAdd.length > 0) && LMDB_PREFETCH_WRITES) {
                // prefetch any values that have been removed or added
                const valuesToPrefetch = valuesToRemove.concat(valuesToAdd).map((v) => ({ key: v, value: id }));
                index.prefetch?.(valuesToPrefetch, noop);
            }
            //if the update cleared out the attribute value we need to delete it from the index
            for (let i = 0, l = valuesToRemove.length; i < l; i++) {
                index.remove(valuesToRemove[i], id, options);
            }
        }
        else if (valuesToAdd?.length > 0 && LMDB_PREFETCH_WRITES) {
            // no old values, just new
            index.prefetch?.(valuesToAdd.map((v) => ({ key: v, value: id })), noop);
        }
        if (valuesToAdd) {
            for (let i = 0, l = valuesToAdd.length; i < l; i++) {
                index.put(valuesToAdd[i], id, options);
            }
        }
    }
    return hasChanges;
}
|
|
3355
|
+
/**
 * Validates that a value may be used as a primary key and that its serialized
 * form will fit within the maximum key size.
 * Bug fix: the oversize error thrown after serialization reported `id.length`,
 * which is undefined for bigint/object keys; it now reports the serialized
 * key length.
 * @param {*} id - candidate primary key (number, string, bigint, or object key)
 * @returns {boolean} true when the key is valid
 * @throws {Error} for null keys, unsupported types (boolean/undefined/etc.), or oversized keys
 */
function checkValidId(id) {
    switch (typeof id) {
        case 'number':
            return true;
        case 'string':
            if (id.length < 659)
                return true; // max number of characters that can't expand our key size limit
            if (id.length > MAX_KEY_BYTES) {
                // we can quickly determine this is too big
                throw new Error('Primary key size is too large: ' + id.length);
            }
            // TODO: We could potentially have a faster test here, Buffer.byteLength is close, but we have to handle characters < 4 that are escaped in ordered-binary
            break; // otherwise we have to test it, in this range, unicode characters could put it over the limit
        case 'object':
            if (id === null) {
                throw new Error('Invalid primary key of null');
            }
            break; // otherwise we have to test it
        case 'bigint':
            if (id < 2n ** 64n && id > -(2n ** 64n))
                return true;
            break; // otherwise we have to test it
        default:
            throw new Error('Invalid primary key type: ' + typeof id);
    }
    // otherwise it is difficult to determine if the key size is too large
    // without actually attempting to serialize it
    const length = (0, ordered_binary_1.writeKey)(id, TEST_WRITE_KEY_BUFFER, 0);
    if (length > MAX_KEY_BYTES)
        // report the serialized length; `id.length` is undefined for bigint/object keys
        throw new Error('Primary key size is too large: ' + length);
    return true;
}
|
|
3387
|
+
/**
 * Normalizes a request target to a record id: non-null objects yield their
 * `id` property; primitives (and null) are returned unchanged.
 * @param {*} target - request target (object or primitive id)
 * @returns {*} the record id
 */
function requestTargetToId(target) {
    const isObjectTarget = target !== null && typeof target === 'object';
    return isObjectTarget ? target.id : target;
}
|
|
3390
|
+
/**
 * Determines whether a request target represents a search/collection query
 * rather than a single-record lookup.
 * @param {*} target - request target
 * @returns {*} truthy when target is a non-null object flagged with isCollection
 */
function isSearchTarget(target) {
    if (typeof target !== 'object') {
        return false;
    }
    return target && target.isCollection;
}
|
|
3393
|
+
/**
 * Loads a record entry from the local primary store, with optional remote
 * loading for sharded/invalidated entries and an adaptive prefetch heuristic
 * that decides whether to prefetch (async, batched) or read directly (sync).
 * @param {*} id - primary key to load
 * @param {object} context - request context (transaction, replication hints, freshness tracking)
 * @param {object} options - store get options (type, ensureLoaded, transaction, ...)
 * @param {boolean} sync - when true, always read directly without prefetching
 * @param {Function} withEntry - continuation invoked with (entry, id)
 * @returns {*} the continuation's result, possibly wrapped in a Promise
 */
function loadLocalRecord(id, context, options, sync, withEntry) {
    if (TableResource.getResidencyById && options.ensureLoaded && context?.replicateFrom !== false) {
        // this is a special case for when the residency can be determined from the id alone (hash-based sharding),
        // allow for a fast path to load the record from the correct node
        const residency = residencyFromFunction(TableResource.getResidencyById(id));
        if (residency) {
            if (!residency.includes(server.hostname) && sourceLoad) {
                // this record is not on this node, so we shouldn't load it here
                return sourceLoad({ key: id, residency }).then(withEntry);
            }
        }
    }
    // TODO: determine if we use lazy access properties
    // performs the actual local read once any prefetch has completed
    const whenPrefetched = () => {
        if (context?.transaction?.stale)
            context.transaction.stale = false;
        // if the transaction was closed, which can happen if we are iterating
        // through query results and the iterator ends (abruptly)
        if (options.transaction?.isDone)
            return withEntry(null, id);
        if (!sync && options) {
            options.async = true;
            return (0, when_ts_1.when)(primaryStore.getEntry(id, options), withLocalEntry);
        }
        else {
            return withLocalEntry(primaryStore.getEntry(id, options));
        }
    };
    // handles a locally-read entry: analytics, freezing, remote refresh, freshness tracking
    function withLocalEntry(entry) {
        // skip recording reads for most system tables except hdb_analytics
        // we want to track analytics reads in licensing, etc.
        if (databaseName !== 'system' && (options.type === 'read' || !options.type)) {
            harper_logger_js_1.default.trace?.('Recording db-read action for', `${databaseName}.${tableName}`);
            (0, write_ts_1.recordAction)(entry?.size ?? 1, 'db-read', tableName, null);
        }
        // we need to freeze entry records to ensure the integrity of the cache;
        // but we only do this when users have opted into loadAsInstance/freezeRecords to avoid back-compat
        // issues
        Object.freeze(entry?.value);
        if (entry?.residencyId &&
            entry.metadataFlags & exports.INVALIDATED &&
            sourceLoad &&
            options.ensureLoaded &&
            context?.replicateFrom !== false) {
            // load from other node
            return sourceLoad(entry).then((entry) => withEntry(entry, id), (error) => {
                logger_ts_1.logger.error?.('Error loading remote record', id, entry, options, error);
                return withEntry(null, id);
            });
        }
        if (entry && context) {
            // propagate freshness metadata onto the request context
            if (entry?.version > (context.lastModified || 0))
                context.lastModified = entry.version;
            if (entry?.localTime && !context.lastRefreshed)
                context.lastRefreshed = entry.localTime;
        }
        return withEntry(entry, id);
    }
    // To prefetch or not to prefetch is one of the biggest questions Harper has to make.
    // Prefetching has important benefits as it allows any page fault to be executed asynchronously
    // in the work threads, and it provides event turn yielding, allowing other async functions
    // to execute. However, prefetching is expensive, and the cost of enqueuing a task with the
    // worker threads and enqueuing the callback on the JS thread and the downstream promise handling
    // is usually at least several times more expensive than skipping the prefetch and just directly
    // getting the entry.
    // Determining if we should prefetch is challenging. It is not possible to determine if a page
    // fault will happen, OSes intentionally hide that information. So here we use some heuristics
    // to evaluate if prefetching is a good idea.
    // First, the caller can tell us. If the record is in our local cache, we use that as indication
    // that we can get the value very quickly without a page fault.
    if (sync || isRocksDB)
        return whenPrefetched();
    // Next, we allow for non-prefetch mode where we can execute some gets without prefetching,
    // but we will limit the number before we do another prefetch
    if (untilNextPrefetch > 0) {
        untilNextPrefetch--;
        return whenPrefetched();
    }
    // Now, we are going to prefetch before loading, so need a promise:
    return new Promise((resolve, reject) => {
        if (untilNextPrefetch === 0) {
            // If we were in non-prefetch mode and used up our non-prefetch gets, we immediately trigger
            // a prefetch for the current id
            untilNextPrefetch--;
            primaryStore.prefetch([id], () => {
                prefetch();
                load();
            });
        }
        else {
            // If there is a prefetch in flight, we accumulate ids so we can attempt to batch prefetch
            // requests into a single or just a few async operations, reducing the cost of async queuing.
            prefetchIds.push(id);
            prefetchCallbacks.push(load);
            if (prefetchIds.length > MAX_PREFETCH_BUNDLE) {
                untilNextPrefetch--;
                prefetch();
            }
        }
        // flushes the accumulated prefetch batch and updates the adaptive counters
        function prefetch() {
            if (prefetchIds.length > 0) {
                const callbacks = prefetchCallbacks;
                primaryStore.prefetch(prefetchIds, () => {
                    if (untilNextPrefetch === -1) {
                        prefetch();
                    }
                    else {
                        // if there is another prefetch callback pending, we don't need to trigger another prefetch
                        untilNextPrefetch++;
                    }
                    for (const callback of callbacks)
                        callback();
                });
                prefetchIds = [];
                prefetchCallbacks = [];
                // Here is the where the feedback mechanism informs future execution. If we were able
                // to enqueue multiple prefetch requests, this is an indication that we have concurrency
                // and/or page fault/slow data retrieval, and the prefetches are valuable to us, so
                // we stay in prefetch mode.
                // We also reduce the number of non-prefetches we allow in next non-prefetch sequence
                if (nonPrefetchSequence > 2)
                    nonPrefetchSequence--;
            }
            else {
                // If we have not enqueued any prefetch requests, this is a hint that prefetching may
                // not have been that advantageous, so we let it go back to the non-prefetch mode,
                // for the next few requests. We also increment the number of non-prefetches that
                // we allow so there is a "memory" of how well prefetch vs non-prefetch is going.
                untilNextPrefetch = nonPrefetchSequence;
                if (nonPrefetchSequence < MAX_PREFETCH_SEQUENCE)
                    nonPrefetchSequence++;
            }
        }
        // resolves the outer promise with the post-prefetch read
        function load() {
            try {
                resolve(whenPrefetched());
            }
            catch (error) {
                reject(error);
            }
        }
    });
}
|
|
3536
|
+
/**
 * Determine the permission set that applies to this table for the given request.
 *
 * The request target may carry its own `checkPermission` object, which takes
 * precedence; otherwise the permissions come from the user's role. Returns the
 * table-level permission object, FULL_PERMISSIONS for super users, or undefined
 * when no applicable permission exists.
 *
 * @param user - the requesting user; expected to have `role.permission` when used
 * @param target - the request target, which may provide `checkPermission`
 * @returns the table permission object, FULL_PERMISSIONS, or undefined
 */
function getTablePermissions(user, target) {
    // first check to see the request target specifically provides the permissions to authorize
    let permission = target?.checkPermission;
    // `typeof null === 'object'`, so test null explicitly; otherwise a null
    // checkPermission would skip the role fallback and crash on `permission.super_user`.
    if (permission === null || typeof permission !== 'object') {
        if (!user?.role)
            return;
        permission = user.role.permission;
    }
    // A role without a permission object grants nothing (previously this crashed).
    if (!permission)
        return;
    if (permission.super_user)
        return FULL_PERMISSIONS;
    const dbPermission = permission[databaseName];
    let table;
    const tables = dbPermission?.tables;
    if (tables) {
        return tables[tableName];
    }
    // legacy/shorthand form: table permissions keyed directly on the permission
    // object when the database is the default 'data' database
    else if (databaseName === 'data' && (table = permission[tableName]) && !table.tables) {
        return table;
    }
}
|
|
3555
|
+
/**
 * Ensure the cached entry for `id` is current, loading it from the table's source
 * when required.
 *
 * Return values (all observed in the code below):
 *  - undefined when the cached entry can be used as-is — including the
 *    stale-while-revalidate case, where a background refresh is started first;
 *  - a promise for the freshly loaded entry when the caller must wait for the source;
 *  - a synchronous thenable yielding the (now emptied) entry when an expired entry
 *    was evicted and there is no source to reload from.
 *
 * @param source - external data source; only consulted when `hasSourceGet` is set
 *   (module-level flag — presumably true when the table's source defines a get; TODO confirm)
 * @param id - record key
 * @param entry - current cache entry, if any
 * @param context - request context; `noCache`, `onlyIfCached`, `lastModified` and
 *   `lastRefreshed` are honored/updated
 * @param resource - resource instance; may provide `allowStaleWhileRevalidate` and `doesExist`
 */
function ensureLoadedFromSource(source, id, entry, context, resource) {
    if (hasSourceGet) {
        let needsSourceData = false;
        if (context.noCache)
            needsSourceData = true;
        else {
            if (entry) {
                // Missing value, invalidated/evicted flags, or a passed expiration all
                // force a reload from the source.
                if (!entry.value ||
                    entry.metadataFlags & (exports.INVALIDATED | exports.EVICTED) || // invalidated or evicted should go to load from source
                    (entry.expiresAt != undefined && entry.expiresAt < Date.now()))
                    needsSourceData = true;
                // else needsSourceData is left falsy
                // TODO: Allow getEntryByVariation to find a sub-variation of this record and determine if
                // it still needs to be loaded from source
            }
            else
                needsSourceData = true;
            // Record cache hit/miss metrics (only on the non-noCache path).
            (0, write_ts_1.recordActionBinary)(!needsSourceData, 'cache-hit', tableName);
        }
        if (needsSourceData) {
            const loadingFromSource = getFromSource(source, id, entry, context).then((entry) => {
                if (entry?.value && entry?.value.getRecord?.())
                    logger_ts_1.logger.error?.('Can not assign a record that is already a resource');
                if (context) {
                    // Propagate the freshest version/refresh time to the request context.
                    if (entry?.version > (context.lastModified || 0))
                        context.lastModified = entry.version;
                    context.lastRefreshed = Date.now(); // localTime is probably not available yet
                }
                return entry;
            });
            // if the resource defines a method for indicating if stale-while-revalidate is allowed for a record
            if (context?.onlyIfCached || (entry?.value && resource?.allowStaleWhileRevalidate?.(entry, id))) {
                // since we aren't waiting for it any errors won't propagate so we should at least log them
                loadingFromSource.catch((error) => logger_ts_1.logger.warn?.(error));
                // onlyIfCached with nothing cached is a 504 per HTTP cache semantics
                if (context?.onlyIfCached && !resource.doesExist())
                    throw new hdbError_js_1.ServerError('Entry is not cached', 504);
                return; // go ahead and return and let the current stale value be used while we re-validate
            }
            else
                return loadingFromSource; // return the promise for the resolved value
        }
    }
    else if (entry?.value) {
        // if we don't have a source, but we have an entry, we check the expiration
        if (entry.expiresAt != undefined && entry.expiresAt < Date.now()) {
            // if it has expired and there is no source, we evict it and then return null, using a fake promise to indicate that this is providing the response
            TableResource.evict(entry.key, entry.value, entry.version);
            entry.value = null;
            return {
                // synchronous thenable: signals "response provided" without a real async hop
                then(callback) {
                    return callback(entry); // return undefined, no source to get data from
                },
            };
        }
    }
}
|
|
3611
|
+
/**
 * Obtain the database transaction to use for the given context.
 *
 * If the context already carries a transaction chain, reuse the link that belongs
 * to this database (matched on store path), claiming an uninitialized RocksDB
 * transaction or appending a new link when none matches. Without a context
 * transaction, create an immediate transaction and attach it to the context.
 *
 * @param context - request context; may carry `transaction` and `timestamp`
 * @returns the transaction bound to this table's primary store
 */
function txnForContext(context) {
    let txn = context?.transaction;
    if (!txn) {
        // No transaction on the context yet: create an immediate transaction
        // for this store and remember it on the context (when there is one).
        const created = isRocksDB
            ? new DatabaseTransaction_ts_1.ImmediateTransaction(primaryStore)
            : new LMDBTransaction_1.ImmediateTransaction(primaryStore);
        if (context) {
            context.transaction = created;
            if (context.timestamp)
                created.timestamp = context.timestamp;
        }
        return created;
    }
    if (!txn.db && isRocksDB) {
        // An uninitialized DatabaseTransaction — claim it for this database.
        txn.db = primaryStore;
        if (context?.timestamp)
            txn.timestamp = context.timestamp;
        return txn;
    }
    // Walk the linked list of per-database transactions looking for ours.
    for (;;) {
        if (txn.db?.path === primaryStore.path)
            return txn;
        const following = txn.next;
        if (!following) {
            // Reached the end without a match: append a link for this database.
            const appended = isRocksDB
                ? new DatabaseTransaction_ts_1.DatabaseTransaction()
                : new LMDBTransaction_1.LMDBTransaction();
            txn.next = appended;
            appended.db = primaryStore;
            return appended;
        }
        txn = following;
    }
}
|
|
3646
|
+
/**
 * Read an attribute — or a nested attribute path — from a cache entry's record,
 * applying any registered property resolvers along the way.
 *
 * @param entry - the cache entry; falsy yields undefined
 * @param attribute_name - a single attribute name, or an array forming a nested path
 * @param context - request context forwarded to resolvers
 * @returns the resolved attribute value, or undefined
 */
function getAttributeValue(entry, attribute_name, context) {
    if (!entry)
        return;
    // Dereference weakly-held values, falling back to a fresh store read.
    const held = entry.deref ? entry.deref() : entry.value;
    const record = held ?? primaryStore.getEntry(entry.key)?.value;
    if (typeof attribute_name !== 'object') {
        // Plain single-attribute lookup.
        const directResolver = propertyResolvers[attribute_name];
        return directResolver ? directResolver(record, context, entry) : record[attribute_name];
    }
    // attribute_name is an array of attributes pointing at a nested attribute;
    // follow the path, switching resolver tables as we cross table boundaries.
    let resolvers = propertyResolvers;
    let current = record;
    for (const attribute of attribute_name) {
        const resolver = resolvers?.[attribute];
        current = resolver && current ? resolver(current, context, entry) : current?.[attribute];
        entry = null; // the entry only describes the top-level record
        resolvers = resolver?.definition?.tableClass?.propertyResolvers;
    }
    return current;
}
|
|
3667
|
+
/**
 * Transform a sequence of ids (or already-loaded entries) into entries, applying
 * the given record-level filters and load options derived from `select`.
 *
 * @param ids - mappable sequence of ids or entry objects; `ids.hasEntries` marks
 *   sequences whose elements are already entries
 * @param select - selection spec (string attribute or array); used only to decide
 *   whether lazy loading is worthwhile
 * @param context - request context passed through to record loading
 * @param readTxn - read transaction used for loads
 * @param filters - optional predicates `(record, entry) => boolean`; a filter may
 *   expose `idFilter` so it can reject by id before the record is loaded
 * @returns a sequence of matching entries (tagged `hasEntries`), or `ids`
 *   unchanged when no filtering/loading is needed
 */
function transformToEntries(ids, select, context, readTxn, filters) {
    // TODO: Test and ensure that we break out of these loops when a connection is lost
    const filtersLength = filters?.length;
    const loadOptions = {
        transaction: readTxn,
        // lazy-load when filtering or when the selection is narrow enough that
        // full deserialization would be wasted work
        lazy: filtersLength > 0 || typeof select === 'string' || select?.length < 4,
        alwaysPrefetch: true,
    };
    // Shared closure state: reset per mapped id, consumed by processEntry so that
    // filters already satisfied via idFilter are not re-applied to the record.
    // NOTE(review): this assumes processEntry runs before the next map iteration
    // resets it (synchronous callback from loadLocalRecord) — confirm.
    let idFiltersApplied;
    // for filter operations, we intentionally use async and yield the event turn so that scanning queries
    // do not hog resources and give more processing opportunity for more efficient index-driven queries.
    // this also gives an opportunity to prefetch and ensure any page faults happen in a different thread
    function processEntry(entry, id) {
        const record = entry?.value;
        if (!record)
            return extended_iterable_1.SKIP;
        // apply the record-level filters
        for (let i = 0; i < filtersLength; i++) {
            if (idFiltersApplied?.includes(i))
                continue; // already applied
            if (!filters[i](record, entry))
                return extended_iterable_1.SKIP; // didn't match filters
        }
        if (id !== undefined)
            entry.key = id;
        return entry;
    }
    if (filtersLength > 0 || !ids.hasEntries) {
        let results = ids.map((idOrEntry) => {
            idFiltersApplied = null;
            if (typeof idOrEntry === 'object' && idOrEntry?.key !== undefined)
                return filtersLength > 0 ? processEntry(idOrEntry) : idOrEntry; // already an entry
            if (idOrEntry == undefined) {
                return extended_iterable_1.SKIP;
            }
            // it is an id, so we can try to use id any filters that are available (note that these can come into existence later, during the query)
            for (let i = 0; i < filtersLength; i++) {
                const filter = filters[i];
                const idFilter = filter.idFilter;
                if (idFilter) {
                    if (!idFilter(idOrEntry))
                        return extended_iterable_1.SKIP; // didn't match filters
                    // remember which filters were satisfied by id alone
                    if (!idFiltersApplied)
                        idFiltersApplied = [];
                    idFiltersApplied.push(i);
                }
            }
            return loadLocalRecord(idOrEntry, context, loadOptions, false, processEntry);
        });
        if (Array.isArray(ids))
            results = results.filter((entry) => entry !== extended_iterable_1.SKIP);
        results.hasEntries = true;
        return results;
    }
    return ids;
}
|
|
3723
|
+
/**
 * Compare an incoming transaction time against an existing entry's version to
 * decide whether the update precedes (loses to) the existing record.
 *
 * @param txnTime - timestamp of the incoming update
 * @param existingEntry - the stored entry being compared against, if any
 * @param nodeId - originating node id of the update; defaults to this node
 * @returns 1 when the update wins, -1 when it precedes the existing version,
 *   0 on an exact tie
 */
function precedesExistingVersion(txnTime, existingEntry, nodeId) {
    if (nodeId === undefined) {
        nodeId = server.replication?.getThisNodeId(auditStore);
    }
    const existingVersion = existingEntry?.version;
    if (!(txnTime <= existingVersion)) {
        // Newer than the existing version (or nothing comparable): the update wins.
        return 1;
    }
    if (existingVersion === txnTime && nodeId !== undefined) {
        // Timestamp tie: break it by comparing node names of the existing
        // entry and the update (alphabetically greater name wins).
        const nameToId = server.replication?.exportIdMapping(auditStore);
        const existingNodeId = existingEntry.nodeId;
        if (nodeId === existingNodeId) {
            return 0; // same origin — an outright tie
        }
        let updatedName;
        let existingName;
        for (const name in nameToId) {
            if (nameToId[name] === nodeId)
                updatedName = name;
            if (nameToId[name] === existingNodeId)
                existingName = name;
        }
        if (updatedName > existingName)
            return 1;
        if (updatedName === existingName)
            return 0; // a tie
    }
    // The transaction time is older than the existing version, so the update
    // loses to the existing record version.
    return -1;
}
|
|
3754
|
+
/**
 * Resolve a record from the table's external source and persist the result into
 * the local cache store.
 *
 * Only one resolution per id runs at a time (via `primaryStore.tryLock`); other
 * callers wait on the lock and reuse the winner's stored result. On success the
 * resolved entry is returned immediately and the cache write commits in the
 * background. On source failure, stale data may be served when the error is a
 * transient connection failure (unless `mustRevalidate`) or a 5xx covered by
 * `staleIfError`.
 *
 * @param source - the external data source to resolve from
 * @param id - record key being resolved
 * @param existingEntry - the current cache entry, if any
 * @param context - requesting context (headers, cache directives, resource cache)
 * @returns a promise for `{ key, version, value }` of the resolved entry
 */
async function getFromSource(source, id, existingEntry, context) {
    const metadataFlags = existingEntry?.metadataFlags;
    const existingVersion = existingEntry?.version;
    let whenResolved, timer;
    // We start by locking the record so that there is only one resolution happening at once;
    // if there is already a resolution in process, we want to use the results of that resolution
    // tryLock() will return true if we got the lock, and the callback won't be called.
    // If another thread has the lock it returns false and then the callback is called once
    // the other thread releases the lock.
    const callback = () => {
        // This is called when another thread releases the lock on resolution. Hopefully
        // it should be resolved now and we can use the value it saved.
        clearTimeout(timer);
        const entry = primaryStore.getEntry(id);
        if (!entry || !entry.value || entry.metadataFlags & (exports.INVALIDATED | exports.EVICTED))
            // try again
            whenResolved(getFromSource(source, id, primaryStore.getEntry(id), context));
        else
            whenResolved(entry);
    };
    const lockAcquired = primaryStore.tryLock(id, callback);
    if (!lockAcquired) {
        // Another thread is resolving: wait for it, but force the lock open after
        // LOCK_TIMEOUT so waiters are never stuck indefinitely.
        // NOTE(review): the timeout unlocks a lock held by another thread — confirm intended.
        return new Promise((resolve) => {
            whenResolved = resolve;
            timer = setTimeout(() => {
                primaryStore.unlock(id);
            }, LOCK_TIMEOUT);
        });
    }
    const existingRecord = existingEntry?.value;
    // it is important to remember that this is _NOT_ part of the current transaction; nothing is changing
    // with the canonical data, we are simply fulfilling our local copy of the canonical data, but still don't
    // want a timestamp later than the current transaction
    // we create a new context for the source, we want to determine the timestamp and don't want to
    // attribute this to the current user
    const sourceContext = {
        requestContext: context,
        // provide access to previous data
        replacingRecord: existingRecord,
        replacingEntry: existingEntry,
        replacingVersion: existingVersion,
        noCacheStore: false,
        source: null,
        // use the same resource cache as a parent context so that if modifications are made to resources,
        // they are visible in the parent requesting context
        resourceCache: context?.resourceCache,
        transaction: undefined,
        expiresAt: undefined,
        lastModified: undefined,
    };
    const responseHeaders = context?.responseHeaders;
    return new Promise((resolve, reject) => {
        // we don't want to wait for the transaction because we want to return as fast as possible
        // and let the transaction commit in the background
        let resolved;
        (0, when_ts_1.when)((0, transaction_ts_1.transaction)(sourceContext, async (_txn) => {
            const start = performance.now();
            let updatedRecord;
            let hasChanges, invalidated;
            try {
                updatedRecord = await throttledCallToSource(source, id, sourceContext, existingEntry);
                invalidated = metadataFlags & exports.INVALIDATED;
                // an invalidated entry that resolves with no newer timestamp keeps its version
                let version = sourceContext.lastModified || (invalidated && existingVersion);
                hasChanges = invalidated || version > existingVersion || !existingRecord;
                const resolveDuration = performance.now() - start;
                (0, write_ts_1.recordAction)(resolveDuration, 'cache-resolution', tableName, null, 'success');
                if (responseHeaders)
                    (0, Headers_ts_1.appendHeader)(responseHeaders, 'Server-Timing', `cache-resolve;dur=${resolveDuration.toFixed(2)}`, true);
                // apply the table's default expiration unless the source set one
                if (expirationMs && sourceContext.expiresAt == undefined)
                    sourceContext.expiresAt = Date.now() + expirationMs;
                if (updatedRecord) {
                    if (typeof updatedRecord !== 'object')
                        throw new Error('Only objects can be cached and stored in tables');
                    if (updatedRecord.status > 0 && updatedRecord.headers) {
                        // if the source has a status code and headers, treat it as a response
                        if (updatedRecord.status >= 300) {
                            if (updatedRecord.status === 304) {
                                // revalidation of our current cached record
                                updatedRecord = existingRecord;
                                version = existingVersion;
                            }
                            else {
                                // if the source has an error status, we need to throw an error
                                throw new hdbError_js_1.ServerError(updatedRecord.body || 'Error from source', updatedRecord.status);
                            } // there are definitely more status codes to handle
                        }
                        else {
                            let headers;
                            const sourceHeaders = updatedRecord.headers;
                            if (sourceHeaders[Symbol.iterator]) {
                                // Headers-like iterable: flatten into a plain lower-cased map
                                headers = {};
                                for (let [name, value] of sourceHeaders) {
                                    headers[name.toLowerCase()] = value;
                                }
                            }
                            else {
                                headers = sourceHeaders; // just a plain object
                            }
                            const contentType = sourceHeaders.get?.('Content-Type');
                            let data;
                            if (contentType === 'application/json' && updatedRecord.json) {
                                // use native .json() if possible
                                data = await updatedRecord.json();
                            }
                            else {
                                const contentTypeHandler = contentType && contentTypes_1.contentTypes.get(contentType);
                                if (contentTypeHandler?.deserialize) {
                                    data = contentTypeHandler.deserialize(await (contentType.startsWith('text/') ? updatedRecord.text() : updatedRecord.bytes()));
                                }
                            }
                            if (data !== undefined) {
                                // we have structured data that we have parsed
                                delete headers['content-type']; // don't store the content type if we have already parsed it
                                updatedRecord = {
                                    headers,
                                    data,
                                };
                            }
                            else {
                                // unparseable body: store it as a blob alongside the headers
                                updatedRecord = {
                                    headers,
                                    body: createBlob(updatedRecord.body),
                                };
                            }
                        }
                    }
                    if (typeof updatedRecord.toJSON === 'function')
                        updatedRecord = updatedRecord.toJSON();
                    // keep the primary key consistent with the requested id
                    if (primaryKey && updatedRecord[primaryKey] !== id)
                        updatedRecord[primaryKey] = id;
                }
                resolved = true;
                resolve({
                    key: id,
                    version,
                    value: updatedRecord,
                });
            }
            catch (error) {
                error.message += ` while resolving record ${id} for ${tableName}`;
                // stale-on-error: transient connection failures (unless mustRevalidate)
                // or staleIfError-covered 5xx responses fall back to the cached record
                if (existingRecord &&
                    (((error.code === 'ECONNRESET' || error.code === 'ECONNREFUSED' || error.code === 'EAI_AGAIN') &&
                        !context?.mustRevalidate) ||
                        (context?.staleIfError &&
                            (error.statusCode === 500 ||
                                error.statusCode === 502 ||
                                error.statusCode === 503 ||
                                error.statusCode === 504)))) {
                    // these are conditions under which we can use stale data after an error
                    resolve({
                        key: id,
                        version: existingVersion,
                        value: existingRecord,
                    });
                    logger_ts_1.logger.trace?.(error.message, '(returned stale record)');
                }
                else
                    reject(error);
                const resolveDuration = performance.now() - start;
                (0, write_ts_1.recordAction)(resolveDuration, 'cache-resolution', tableName, null, 'fail');
                if (responseHeaders)
                    (0, Headers_ts_1.appendHeader)(responseHeaders, 'Server-Timing', `cache-resolve;dur=${resolveDuration.toFixed(2)}`, true);
                sourceContext.transaction.abort();
                return;
            }
            if (context?.noCacheStore || sourceContext.noCacheStore) {
                // abort before we write any change
                sourceContext.transaction.abort();
                return;
            }
            const dbTxn = txnForContext(sourceContext);
            dbTxn.addWrite({
                key: id,
                store: primaryStore,
                entry: existingEntry,
                nodeName: 'source',
                before: preCommitBlobsForRecordBefore(updatedRecord),
                commit: (txnTime, existingEntry, _retry, transaction) => {
                    if (existingEntry?.version !== existingVersion) {
                        // don't do anything if the version has changed
                        return;
                    }
                    updateIndices(id, existingRecord, updatedRecord);
                    if (updatedRecord) {
                        if (existingEntry) {
                            context.previousResidency = TableResource.getResidencyRecord(existingEntry.residencyId);
                        }
                        let auditRecord;
                        let omitLocalRecord = false;
                        let residencyId;
                        const residency = residencyFromFunction(TableResource.getResidency(updatedRecord, context));
                        if (residency) {
                            if (!residency.includes(server.hostname)) {
                                // if we aren't in the residency list, specify that our local record should be omitted or be partial
                                auditRecord = updatedRecord;
                                omitLocalRecord = true;
                                if (TableResource.getResidencyById) {
                                    // complete omission of the record that doesn't belong here
                                    updatedRecord = undefined;
                                }
                                else {
                                    // store the partial record
                                    updatedRecord = null;
                                    for (const name in indices) {
                                        if (!updatedRecord) {
                                            updatedRecord = {};
                                        }
                                        // if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
                                        updatedRecord[name] = auditRecord[name];
                                    }
                                }
                            }
                            residencyId = getResidencyId(residency);
                        }
                        logger_ts_1.logger.trace?.(`Writing resolved record from source with id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);
                        // TODO: We are doing a double check for ifVersion that should probably be cleaned out
                        updateRecord(id, updatedRecord, existingEntry, txnTime, omitLocalRecord ? exports.INVALIDATED : 0, (audit && (hasChanges || omitLocalRecord)) || null, {
                            user: sourceContext?.user,
                            expiresAt: sourceContext.expiresAt,
                            residencyId,
                            transaction,
                            tableToTrack: tableName,
                        }, 'put', Boolean(invalidated), auditRecord);
                    }
                    else if (existingEntry) {
                        logger_ts_1.logger.trace?.(`Deleting resolved record from source with id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);
                        if (audit || trackDeletes) {
                            updateRecord(id, null, existingEntry, txnTime, 0, (audit && hasChanges) || null, { user: sourceContext?.user, transaction, tableToTrack: tableName }, 'delete', Boolean(invalidated));
                        }
                        else {
                            (0, RecordEncoder_ts_1.removeEntry)(primaryStore, existingEntry, existingVersion);
                        }
                    }
                },
            });
        }), () => {
            // transaction committed — release the resolution lock
            primaryStore.unlock(id);
        }, (error) => {
            primaryStore.unlock(id);
            if (resolved)
                logger_ts_1.logger.error?.('Error committing cache update', error);
            // else the error was already propagated as part of the promise that we returned
        });
    });
}
|
|
4002
|
+
/**
 * Ensure a non-privileged context is not carrying replication directives.
 *
 * Absent contexts and super-user contexts are always acceptable; otherwise the
 * presence of `replicateTo` or `replicatedConfirmation` is rejected.
 *
 * @param context - request context (may be null/undefined)
 * @returns true when the context is allowed
 * @throws {ClientError} 403 when replication parameters are present without super-user rights
 */
function checkContextPermissions(context) {
    // No context at all, or a super user, is always permitted.
    if (!context || context.user?.role?.permission?.super_user) {
        return true;
    }
    if (context.replicateTo) {
        throw new hdbError_js_1.ClientError('Can not specify replication parameters without super user permissions', 403);
    }
    if (context.replicatedConfirmation) {
        throw new hdbError_js_1.ClientError('Can not specify replication confirmation without super user permissions', 403);
    }
    return true;
}
|
|
4017
|
+
/**
 * (Re)schedule the periodic cleanup scan that evicts expired records and removes
 * aged deletion tombstones.
 *
 * Scheduling runs only on the last worker thread, aligned to regular intervals
 * from the start of the year so restarts continue the same cadence. A `priority`
 * shortens the interval and lowers the eviction threshold; a jump of more than 1
 * over the current priority triggers an immediate run.
 *
 * @param priority - optional urgency level; persists in module-level `cleanupPriority`
 *   until a scan completes and resets it to 0
 * @returns a promise resolving after the first scheduled scan completes (on the
 *   scheduling worker), otherwise undefined
 */
function scheduleCleanup(priority) {
    let runImmediately = false;
    if (priority) {
        // run immediately if there is a big increase in priority
        if (priority - cleanupPriority > 1)
            runImmediately = true;
        cleanupPriority = priority;
    }
    // Periodically evict expired records and deleted records searching for records who expiresAt timestamp is before now
    if (cleanupInterval === lastCleanupInterval && !runImmediately)
        return;
    lastCleanupInterval = cleanupInterval;
    if ((0, manageThreads_js_1.getWorkerIndex)() === (0, manageThreads_js_1.getWorkerCount)() - 1) {
        // run on the last thread so we aren't overloading lower-numbered threads
        if (cleanupTimer)
            clearTimeout(cleanupTimer);
        if (!cleanupInterval)
            return;
        return new Promise((resolve) => {
            // anchor the schedule to midnight Jan 1 of the current year
            const startOfYear = new Date();
            startOfYear.setMonth(0);
            startOfYear.setDate(1);
            startOfYear.setHours(0);
            startOfYear.setMinutes(0);
            startOfYear.setSeconds(0);
            const nextInterval = cleanupInterval / (1 + cleanupPriority);
            // find the next scheduled run based on regular cycles from the beginning of the year (if we restart, this enables a good continuation of scheduling)
            const nextScheduled = runImmediately
                ? Date.now()
                : Math.ceil((Date.now() - startOfYear.getTime()) / nextInterval) * nextInterval + startOfYear.getTime();
            // NOTE: the parameter deliberately shadows the outer `nextScheduled`;
            // each run reschedules itself via startNextTimer below.
            const startNextTimer = (nextScheduled) => {
                logger_ts_1.logger.trace?.(`Scheduled next cleanup scan at ${new Date(nextScheduled)}`);
                // noinspection JSVoidFunctionReturnValueUsed
                // chain onto lastEvictionCompletion so scans never overlap
                cleanupTimer = setTimeout(() => (lastEvictionCompletion = lastEvictionCompletion.then(async () => {
                    // schedule the next run for when the next cleanup interval should occur (or now if it is in the past)
                    startNextTimer(Math.max(nextScheduled + cleanupInterval, Date.now()));
                    const rootStore = primaryStore.rootStore;
                    if (rootStore.status !== 'open') {
                        // database closed: stop rescheduling
                        clearTimeout(cleanupTimer);
                        return;
                    }
                    // bound the number of in-flight eviction/removal operations
                    const MAX_CLEANUP_CONCURRENCY = 50;
                    const outstandingCleanupOperations = new Array(MAX_CLEANUP_CONCURRENCY);
                    let cleanupIndex = 0;
                    // higher priority raises the threshold steeply (priority^8), making
                    // early eviction much more likely
                    const evictThreshold = Math.pow(cleanupPriority, 8) *
                        (envMngr.get(hdbTerms_ts_1.CONFIG_PARAMS.STORAGE_RECLAMATION_EVICTIONFACTOR) ?? 100000);
                    // higher priority also shrinks the grace period after expiration
                    const adjustedEviction = evictionMs / Math.pow(Math.max(cleanupPriority, 1), 4);
                    logger_ts_1.logger.debug?.(`Starting cleanup scan for ${tableName}, evict threshold ${evictThreshold}, adjusted eviction ${adjustedEviction}ms`);
                    function shouldEvict(expiresAt, version, metadataFlags, record) {
                        const evictWhen = expiresAt + adjustedEviction - Date.now();
                        if (evictWhen < 0)
                            return true;
                        else if (cleanupPriority) {
                            let size = primaryStore.lastSize;
                            if (metadataFlags & auditStore_ts_1.HAS_BLOBS) {
                                // blobs count toward the record's effective size
                                (0, blob_ts_1.findBlobsInObject)(record, (blob) => {
                                    if (blob.size)
                                        size += blob.size;
                                });
                            }
                            logger_ts_1.logger.trace?.(`shouldEvict adjusted ${evictWhen} ${size}, ${(evictWhen * (expiresAt - version)) / size} < ${evictThreshold}`);
                            // heuristic to determine if we should perform early eviction based on priority
                            return (evictWhen * (expiresAt - version)) / size < evictThreshold;
                        }
                        return false;
                    }
                    try {
                        let count = 0;
                        let removeDeletedRecords = !audit || isRocksDB;
                        // iterate through all entries to find expired records and deleted records
                        for (const entry of primaryStore.getRange({
                            start: false,
                            snapshot: false, // we don't want to keep read transaction snapshots open
                            versions: true,
                            lazy: true, // only want to access metadata most of the time
                        })) {
                            const { key, value: record, version, expiresAt, metadataFlags } = entry;
                            // if there is no auditing cleanup and we are tracking deletion, need to do cleanup of
                            // these deletion entries (LMDB audit cleanup has its own scheduled job for this)
                            let resolution;
                            if (record === null && removeDeletedRecords && version + auditStore_ts_1.auditRetention < Date.now()) {
                                // make sure it is still deleted when we do the removal
                                resolution = (0, RecordEncoder_ts_1.removeEntry)(primaryStore, entry, version);
                            }
                            else if (expiresAt != undefined && shouldEvict(expiresAt, version, metadataFlags, record)) {
                                // evict!
                                resolution = TableResource.evict(key, record, version);
                                count++;
                            }
                            if (resolution) {
                                // ring buffer of pending operations: wait for the slot we are
                                // about to reuse, capping concurrency at MAX_CLEANUP_CONCURRENCY
                                await outstandingCleanupOperations[cleanupIndex];
                                outstandingCleanupOperations[cleanupIndex] = resolution.catch((error) => {
                                    logger_ts_1.logger.error?.('Cleanup error', error);
                                });
                                if (++cleanupIndex >= MAX_CLEANUP_CONCURRENCY)
                                    cleanupIndex = 0;
                            }
                            // yield between entries so the scan doesn't hog the thread
                            await rest();
                        }
                        logger_ts_1.logger.debug?.(`Finished cleanup scan for ${tableName}, evicted ${count} entries`);
                    }
                    catch (error) {
                        logger_ts_1.logger.warn?.(`Error in cleanup scan for ${tableName}:`, error);
                    }
                    resolve(undefined);
                    cleanupPriority = 0; // reset the priority
                })), Math.min(nextScheduled - Date.now(), 0x7fffffff) // make sure it can fit in 32-bit signed number
                ).unref(); // don't let this prevent closing the thread
            };
            startNextTimer(nextScheduled);
        });
    }
}
|
|
4130
|
+
/**
 * Register a delete-removal callback with the audit store for this table, so that
 * when the audit store reports a delete removal for an id/version, the matching
 * entry is removed from the primary store. The returned handle is kept in the
 * module-level `deleteCallbackHandle`.
 */
function addDeleteRemoval() {
    // auditStore may be absent — optional chaining leaves the handle undefined then.
    deleteCallbackHandle = auditStore?.addDeleteRemovalCallback(tableId, primaryStore, (removedId, removedVersion) => {
        primaryStore.remove(removedId, removedVersion);
    });
}
|
|
4135
|
+
/**
 * Start the periodic eviction job for records with a table-defined expiration
 * attribute (`expiresAtProperty`).
 *
 * Runs only on worker 0 to avoid concurrent eviction conflicts. Each tick scans
 * the index on the expiration attribute for values up to "now", evicting expired
 * records and cleaning dangling index entries for records that no longer exist.
 * A module-level `runningRecordExpiration` flag prevents overlapping runs, and
 * the interval is unref'd so it does not keep the thread alive.
 */
function runRecordExpirationEviction() {
    // Periodically evict expired records, searching for records who expiresAt timestamp is before now
    if ((0, manageThreads_js_1.getWorkerIndex)() === 0) {
        // we want to run the pruning of expired records on only one thread so we don't have conflicts in evicting
        setInterval(async () => {
            // go through each database and table and then search for expired entries
            // find any entries that are set to expire before now
            if (runningRecordExpiration)
                return;
            runningRecordExpiration = true;
            try {
                const expiresAtName = expiresAtProperty.name;
                const index = indices[expiresAtName];
                // eviction relies entirely on the index; without it the scan is impossible
                if (!index)
                    throw new Error(`expiresAt attribute ${expiresAtProperty} must be indexed`);
                for (const key of index.getRange({
                    start: true,
                    values: false,
                    end: Date.now(),
                    snapshot: false,
                })) {
                    for (const id of index.getValues(key)) {
                        const recordEntry = primaryStore.getEntry(id);
                        if (!recordEntry?.value) {
                            // cleanup the index if the record is gone
                            primaryStore.ifVersion(id, recordEntry?.version, () => index.remove(key, id));
                        }
                        else if (recordEntry.value[expiresAtName] < Date.now()) {
                            // make sure the record hasn't changed and won't change while removing
                            TableResource.evict(id, recordEntry.value, recordEntry.version);
                        }
                    }
                    // yield between index keys so the scan doesn't hog the event loop
                    await rest();
                }
            }
            catch (error) {
                logger_ts_1.logger.error?.('Error in evicting old records', error);
            }
            finally {
                runningRecordExpiration = false;
            }
        }, RECORD_PRUNING_INTERVAL).unref();
    }
}
|
|
4179
|
+
/**
 * Resolves a shard number or an explicit residency list into a list of node
 * hostnames. Arrays pass through untouched; shard numbers are looked up in
 * server.shards. Returns undefined for null/undefined input.
 * @throws {Error} for oversized shard ids, unknown shards, or invalid types
 */
function residencyFromFunction(shardOrResidencyList) {
    if (shardOrResidencyList == undefined)
        return;
    if (Array.isArray(shardOrResidencyList))
        return shardOrResidencyList; // already an explicit residency list of hostnames
    if (typeof shardOrResidencyList !== 'number')
        throw new Error(`Shard or residency list ${shardOrResidencyList} is not a valid type, must be a shard number or residency list of node hostnames`);
    if (shardOrResidencyList >= 65536)
        throw new Error(`Shard id ${shardOrResidencyList} must be below 65536`);
    const residencyList = server.shards?.get?.(shardOrResidencyList);
    if (!residencyList)
        throw new Error(`Shard ${shardOrResidencyList} is not defined`);
    const nodeNames = residencyList.map((node) => node.name);
    logger_ts_1.logger.trace?.(`Shard ${shardOrResidencyList} mapped to ${nodeNames.join(', ')}`);
    return nodeNames;
}
|
|
4196
|
+
/**
 * Maps an ordered list of owner node names to a stable numeric residency id,
 * generating and persisting a new random id (above the 16-bit shard range)
 * the first time a given node set is seen. Returns undefined for falsy input.
 */
function getResidencyId(ownerNodeNames) {
    if (!ownerNodeNames)
        return;
    const setKey = ownerNodeNames.join(',');
    const bySetKey = [Symbol.for('residency_by_set'), setKey];
    let residencyId = dbisDb.get(bySetKey);
    if (!residencyId) {
        // first time for this node set: record both the set->id and id->set mappings
        residencyId = Math.floor(Math.random() * 0x7fff0000) + 0xffff;
        dbisDb.put(bySetKey, residencyId);
        dbisDb.put([Symbol.for('residency_by_id'), residencyId], ownerNodeNames);
    }
    return residencyId;
}
|
|
4207
|
+
/**
 * Starts saving any blobs contained in the record and, if there are any,
 * returns a new before-commit callback that waits for them to finish
 * (chained after the existing `before` callback, when one is present).
 * When there are no blobs the original `before` callback is returned as-is.
 */
function preCommitBlobsForRecordBefore(record, before, saveInRecord) {
    const blobCompletion = (0, blob_ts_1.startPreCommitBlobsForRecord)(record, primaryStore.rootStore, saveInRecord);
    if (!blobCompletion)
        return before; // no blobs started; keep the existing before-commit behavior
    if (!before)
        return () => blobCompletion();
    // run the existing sources callback first, then wait for blob completion, in order
    return async () => {
        await before();
        await blobCompletion();
    };
}
|
|
4223
|
+
}
|
|
4224
|
+
/**
 * Builds (and caches, directly on the permissions array) a prototype-less map
 * from attribute name to that attribute's permission flag for the given type.
 */
function attributesAsObject(attribute_permissions, type) {
    const cache = attribute_permissions.attr_object || (attribute_permissions.attr_object = {});
    const existing = cache[type];
    if (existing)
        return existing;
    // Object.create(null) avoids prototype keys leaking into the lookup
    const attrsForType = (cache[type] = Object.create(null));
    for (const permission of attribute_permissions) {
        attrsForType[permission.attribute_name] = permission[type];
    }
    return attrsForType;
}
|
|
4235
|
+
// Shared do-nothing function, used as a prefetch callback placeholder.
function noop() {
    // intentionally empty
}
|
|
4238
|
+
const ENDS_WITH_TIMEZONE = /[+-][0-9]{2}:[0-9]{2}|[a-zA-Z]$/;
|
|
4239
|
+
/**
|
|
4240
|
+
* Coerce a string to the type defined by the attribute
|
|
4241
|
+
* @param value
|
|
4242
|
+
* @param attribute
|
|
4243
|
+
* @returns
|
|
4244
|
+
*/
|
|
4245
|
+
function coerceType(value, attribute) {
|
|
4246
|
+
const type = attribute?.type;
|
|
4247
|
+
//if a type is String is it safe to execute a .toString() on the value and return? Does not work for Array/Object so we would need to detect if is either of those first
|
|
4248
|
+
if (value === null) {
|
|
4249
|
+
return value;
|
|
4250
|
+
}
|
|
4251
|
+
else if (value === '' && type && type !== 'String' && type !== 'Any') {
|
|
4252
|
+
return null;
|
|
4253
|
+
}
|
|
4254
|
+
try {
|
|
4255
|
+
switch (type) {
|
|
4256
|
+
case 'Int':
|
|
4257
|
+
case 'Long':
|
|
4258
|
+
// allow $ prefix as special syntax for more compact numeric representations and then use parseInt to force being an integer (might consider Math.floor, which is a little faster, but rounds in a different way with negative numbers).
|
|
4259
|
+
if (value[0] === '$')
|
|
4260
|
+
return rejectNaN(parseInt(value.slice(1), 36));
|
|
4261
|
+
if (value === 'null')
|
|
4262
|
+
return null;
|
|
4263
|
+
// strict check to make sure it is really an integer (there is also a sensible conversion from dates)
|
|
4264
|
+
if (!/^-?[0-9]+$/.test(value) && !(value instanceof Date))
|
|
4265
|
+
throw new SyntaxError();
|
|
4266
|
+
return rejectNaN(+value); // numeric conversion is stricter than parseInt
|
|
4267
|
+
case 'Float':
|
|
4268
|
+
return value === 'null' ? null : rejectNaN(+value); // numeric conversion is stricter than parseFloat
|
|
4269
|
+
case 'BigInt':
|
|
4270
|
+
return value === 'null' ? null : BigInt(value);
|
|
4271
|
+
case 'Boolean':
|
|
4272
|
+
return (0, common_utils_js_1.autoCastBooleanStrict)(value);
|
|
4273
|
+
case 'Date':
|
|
4274
|
+
if (isNaN(value)) {
|
|
4275
|
+
if (value === 'null')
|
|
4276
|
+
return null;
|
|
4277
|
+
//if the value is not an integer (to handle epoch values) and does not end in a timezone we suffiz with 'Z' tom make sure the Date is GMT timezone
|
|
4278
|
+
if (!ENDS_WITH_TIMEZONE.test(value)) {
|
|
4279
|
+
value += 'Z';
|
|
4280
|
+
}
|
|
4281
|
+
const date = new Date(value);
|
|
4282
|
+
rejectNaN(date.getTime());
|
|
4283
|
+
return date;
|
|
4284
|
+
}
|
|
4285
|
+
return new Date(+value); // epoch ms number
|
|
4286
|
+
case undefined:
|
|
4287
|
+
case 'Any':
|
|
4288
|
+
return (0, common_utils_js_1.autoCast)(value);
|
|
4289
|
+
default:
|
|
4290
|
+
return value;
|
|
4291
|
+
}
|
|
4292
|
+
}
|
|
4293
|
+
catch (error) {
|
|
4294
|
+
error.message = `Invalid value for attribute ${attribute.name}: "${value}", expecting ${type}`;
|
|
4295
|
+
error.statusCode = 400;
|
|
4296
|
+
throw error;
|
|
4297
|
+
}
|
|
4298
|
+
}
|
|
4299
|
+
// This is a simple function to throw on NaNs that can come out of parseInt, parseFloat, etc.
function rejectNaN(value) {
    if (!isNaN(value))
        return value;
    // the caller's catch block fills in a message with more context
    throw new SyntaxError();
}
|
|
4305
|
+
/**
 * Determines whether descendantId equals, or is hierarchically below, ancestorId.
 * Ids may be scalars (string ancestry is prefix-based) or arrays of path
 * segments; a trailing null segment in an array ancestor acts as a wildcard.
 * Fixed to always return a boolean — previously the scalar-ancestor /
 * non-matching-array-descendant path and non-string scalar descendants fell
 * through returning undefined (a backward-compatible falsy value).
 * @returns {boolean} true when descendantId is within ancestorId's subtree
 */
function isDescendantId(ancestorId, descendantId) {
    if (ancestorId == null)
        return true; // null/undefined is the ancestor of all ids
    if (!Array.isArray(descendantId)) {
        // scalar descendant: equal, or a string extending the ancestor as a prefix
        return ancestorId === descendantId || (descendantId.startsWith?.(ancestorId) ?? false);
    }
    if (Array.isArray(ancestorId)) {
        let length = ancestorId.length;
        if (ancestorId[length - 1] === null)
            length--; // trailing null is a wildcard for deeper segments
        if (descendantId.length < length)
            return false;
        for (let i = 0; i < length; i++) {
            if (descendantId[i] !== ancestorId[i])
                return false;
        }
        return true;
    }
    // scalar ancestor vs array descendant: match on the first path segment
    return descendantId[0] === ancestorId;
}
|
|
4326
|
+
// wait for an event turn (via a promise) so long scans can yield the event loop
const rest = () => new Promise((resolve) => setImmediate(resolve));
|
|
4328
|
+
// for filtering: true for any value that is neither null nor undefined
function exists(value) {
    return value !== null && value !== undefined;
}
|
|
4332
|
+
// Best-effort JSON serialization: returns the raw value unchanged when it
// cannot be stringified (e.g. circular references or BigInt values).
function stringify(value) {
    let text;
    try {
        text = JSON.stringify(value);
    }
    catch {
        text = value; // unstringifiable; fall back to the value itself
    }
    return text;
}
|
|
4340
|
+
// Returns truthy when the store's reader list shows a pid other than our own,
// i.e. another OS process currently has the database open. Returns undefined
// when the environment exposes no readerList.
function hasOtherProcesses(store) {
    const ownPid = process.pid;
    const readers = store.env.readerList?.();
    // skip the header line, then look for any reader line whose leading pid differs from ours
    return readers?.slice(1).some((line) => +line.match(/\d+/)?.[0] != ownPid);
}
|
|
4350
|
+
//# sourceMappingURL=Table.js.map
|