@harperfast/harper 5.0.0-alpha.10 → 5.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (444)
  1. package/bin/BinObjects.js +17 -0
  2. package/bin/cliOperations.js +157 -0
  3. package/bin/copyDb.ts +280 -0
  4. package/bin/harper.js +156 -0
  5. package/bin/install.js +15 -0
  6. package/bin/lite.js +5 -0
  7. package/bin/restart.js +201 -0
  8. package/bin/run.js +409 -0
  9. package/bin/status.js +65 -0
  10. package/bin/stop.js +22 -0
  11. package/bin/upgrade.js +134 -0
  12. package/components/Application.ts +646 -0
  13. package/components/ApplicationScope.ts +49 -0
  14. package/components/Component.ts +53 -0
  15. package/components/ComponentV1.ts +342 -0
  16. package/components/DEFAULT_CONFIG.ts +18 -0
  17. package/components/EntryHandler.ts +227 -0
  18. package/components/Logger.ts +14 -0
  19. package/components/OptionsWatcher.ts +354 -0
  20. package/components/PluginModule.ts +6 -0
  21. package/components/Scope.ts +329 -0
  22. package/components/componentLoader.ts +529 -0
  23. package/components/deriveCommonPatternBase.ts +31 -0
  24. package/components/deriveGlobOptions.ts +44 -0
  25. package/components/deriveURLPath.ts +57 -0
  26. package/components/operations.js +658 -0
  27. package/components/operationsValidation.js +246 -0
  28. package/components/packageComponent.ts +39 -0
  29. package/components/requestRestart.ts +26 -0
  30. package/components/resolveBaseURLPath.ts +38 -0
  31. package/components/status/ComponentStatus.ts +110 -0
  32. package/components/status/ComponentStatusRegistry.ts +251 -0
  33. package/components/status/api.ts +153 -0
  34. package/components/status/crossThread.ts +405 -0
  35. package/components/status/errors.ts +152 -0
  36. package/components/status/index.ts +44 -0
  37. package/components/status/internal.ts +65 -0
  38. package/components/status/registry.ts +12 -0
  39. package/components/status/types.ts +96 -0
  40. package/config/RootConfigWatcher.ts +59 -0
  41. package/config/configHelpers.ts +11 -0
  42. package/config/configUtils.js +967 -0
  43. package/config/harperConfigEnvVars.ts +641 -0
  44. package/dataLayer/CreateAttributeObject.js +25 -0
  45. package/dataLayer/CreateTableObject.js +11 -0
  46. package/dataLayer/DataLayerObjects.js +43 -0
  47. package/dataLayer/DeleteBeforeObject.js +22 -0
  48. package/dataLayer/DeleteObject.js +25 -0
  49. package/dataLayer/DropAttributeObject.js +11 -0
  50. package/dataLayer/GetBackupObject.js +22 -0
  51. package/dataLayer/InsertObject.js +24 -0
  52. package/dataLayer/ReadAuditLogObject.js +24 -0
  53. package/dataLayer/SQLSearch.js +1335 -0
  54. package/dataLayer/SearchByConditionsObject.js +61 -0
  55. package/dataLayer/SearchByHashObject.js +21 -0
  56. package/dataLayer/SearchObject.js +45 -0
  57. package/dataLayer/SqlSearchObject.js +14 -0
  58. package/dataLayer/UpdateObject.js +23 -0
  59. package/dataLayer/UpsertObject.js +23 -0
  60. package/dataLayer/bulkLoad.js +813 -0
  61. package/dataLayer/dataObjects/BulkLoadObjects.js +27 -0
  62. package/dataLayer/dataObjects/UpsertObject.js +23 -0
  63. package/dataLayer/delete.js +164 -0
  64. package/dataLayer/export.js +381 -0
  65. package/dataLayer/getBackup.js +40 -0
  66. package/dataLayer/harperBridge/BridgeMethods.js +81 -0
  67. package/dataLayer/harperBridge/ResourceBridge.ts +633 -0
  68. package/dataLayer/harperBridge/bridgeUtility/insertUpdateReturnObj.js +28 -0
  69. package/dataLayer/harperBridge/bridgeUtility/insertUpdateValidate.js +88 -0
  70. package/dataLayer/harperBridge/harperBridge.js +21 -0
  71. package/dataLayer/harperBridge/lmdbBridge/LMDBBridge.js +119 -0
  72. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/DeleteAuditLogsBeforeResults.js +19 -0
  73. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateAttribute.js +112 -0
  74. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateRecords.js +67 -0
  75. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateSchema.js +31 -0
  76. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateTable.js +94 -0
  77. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteAuditLogsBefore.js +98 -0
  78. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteRecords.js +89 -0
  79. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropAttribute.js +109 -0
  80. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropSchema.js +107 -0
  81. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropTable.js +137 -0
  82. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbFlush.js +35 -0
  83. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetBackup.js +111 -0
  84. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByHash.js +28 -0
  85. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByValue.js +29 -0
  86. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbReadAuditLog.js +207 -0
  87. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByConditions.js +156 -0
  88. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByHash.js +21 -0
  89. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByValue.js +30 -0
  90. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbTransaction.js +19 -0
  91. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpdateRecords.js +64 -0
  92. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpsertRecords.js +70 -0
  93. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBCreateAttributeObject.js +22 -0
  94. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBDeleteTransactionObject.js +23 -0
  95. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBInsertTransactionObject.js +22 -0
  96. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBTransactionObject.js +23 -0
  97. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpdateTransactionObject.js +24 -0
  98. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpsertTransactionObject.js +24 -0
  99. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.js +25 -0
  100. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializeHashSearch.js +21 -0
  101. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializePaths.js +157 -0
  102. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCheckForNewAttributes.js +94 -0
  103. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCreateTransactionsAuditEnvironment.js +39 -0
  104. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.js +34 -0
  105. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbProcessRows.js +100 -0
  106. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbSearch.js +371 -0
  107. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbWriteTransaction.js +109 -0
  108. package/dataLayer/hdbInfoController.js +254 -0
  109. package/dataLayer/insert.js +266 -0
  110. package/dataLayer/readAuditLog.js +59 -0
  111. package/dataLayer/schema.js +366 -0
  112. package/dataLayer/schemaDescribe.js +289 -0
  113. package/dataLayer/search.js +60 -0
  114. package/dataLayer/transaction.js +17 -0
  115. package/dataLayer/update.js +124 -0
  116. package/dist/components/Logger.d.ts +12 -0
  117. package/dist/components/Logger.js +3 -0
  118. package/dist/components/Logger.js.map +1 -0
  119. package/dist/components/Scope.d.ts +14 -4
  120. package/dist/components/Scope.js +18 -10
  121. package/dist/components/Scope.js.map +1 -1
  122. package/dist/components/componentLoader.js +16 -9
  123. package/dist/components/componentLoader.js.map +1 -1
  124. package/dist/components/operations.js +2 -2
  125. package/dist/components/operations.js.map +1 -1
  126. package/dist/config/configUtils.d.ts +1 -1
  127. package/dist/config/configUtils.js +1 -1
  128. package/dist/config/configUtils.js.map +1 -1
  129. package/dist/dataLayer/CreateTableObject.d.ts +2 -2
  130. package/dist/dataLayer/CreateTableObject.js +2 -2
  131. package/dist/dataLayer/CreateTableObject.js.map +1 -1
  132. package/dist/dataLayer/delete.d.ts +1 -1
  133. package/dist/dataLayer/schema.js +6 -5
  134. package/dist/dataLayer/schema.js.map +1 -1
  135. package/dist/dataLayer/schemaDescribe.js +1 -1
  136. package/dist/dataLayer/schemaDescribe.js.map +1 -1
  137. package/dist/index.d.ts +1 -1
  138. package/dist/index.js +2 -0
  139. package/dist/index.js.map +1 -1
  140. package/dist/resources/DatabaseTransaction.d.ts +1 -1
  141. package/dist/resources/IterableEventQueue.d.ts +1 -1
  142. package/dist/resources/LMDBTransaction.d.ts +5 -1
  143. package/dist/resources/Resource.d.ts +1 -1
  144. package/dist/resources/RocksIndexStore.d.ts +3 -3
  145. package/dist/resources/RocksTransactionLogStore.d.ts +6 -3
  146. package/dist/resources/Table.d.ts +15 -6
  147. package/dist/resources/Table.js +4 -1
  148. package/dist/resources/Table.js.map +1 -1
  149. package/dist/resources/analytics/read.js +32 -22
  150. package/dist/resources/analytics/read.js.map +1 -1
  151. package/dist/resources/analytics/write.js +3 -6
  152. package/dist/resources/analytics/write.js.map +1 -1
  153. package/dist/resources/auditStore.d.ts +3 -3
  154. package/dist/resources/blob.d.ts +25 -2
  155. package/dist/resources/databases.d.ts +12 -2
  156. package/dist/resources/databases.js +22 -19
  157. package/dist/resources/databases.js.map +1 -1
  158. package/dist/resources/search.js +11 -5
  159. package/dist/resources/search.js.map +1 -1
  160. package/dist/resources/transaction.d.ts +2 -1
  161. package/dist/security/auth.js +1 -1
  162. package/dist/security/auth.js.map +1 -1
  163. package/dist/security/cryptoHash.d.ts +2 -2
  164. package/dist/security/jsLoader.js +243 -66
  165. package/dist/security/jsLoader.js.map +1 -1
  166. package/dist/security/keys.js +4 -5
  167. package/dist/security/keys.js.map +1 -1
  168. package/dist/security/user.js +3 -3
  169. package/dist/security/user.js.map +1 -1
  170. package/dist/server/REST.js +16 -2
  171. package/dist/server/REST.js.map +1 -1
  172. package/dist/server/Server.d.ts +2 -1
  173. package/dist/server/Server.js.map +1 -1
  174. package/dist/server/fastifyRoutes/plugins/hdbCore.d.ts +6 -1
  175. package/dist/server/fastifyRoutes.js +2 -0
  176. package/dist/server/fastifyRoutes.js.map +1 -1
  177. package/dist/server/http.js +12 -6
  178. package/dist/server/http.js.map +1 -1
  179. package/dist/server/jobs/JobObject.d.ts +3 -3
  180. package/dist/server/loadRootComponents.js +1 -0
  181. package/dist/server/loadRootComponents.js.map +1 -1
  182. package/dist/server/operationsServer.js +3 -1
  183. package/dist/server/operationsServer.js.map +1 -1
  184. package/dist/server/serverHelpers/JSONStream.d.ts +3 -3
  185. package/dist/server/serverHelpers/Request.d.ts +5 -5
  186. package/dist/server/serverHelpers/requestTimePlugin.d.ts +1 -1
  187. package/dist/server/threads/manageThreads.d.ts +2 -2
  188. package/dist/server/threads/manageThreads.js +50 -35
  189. package/dist/server/threads/manageThreads.js.map +1 -1
  190. package/dist/server/threads/socketRouter.d.ts +1 -1
  191. package/dist/sqlTranslator/deleteTranslator.d.ts +1 -1
  192. package/dist/utility/AWS/AWSConnector.d.ts +3 -2
  193. package/dist/utility/common_utils.d.ts +3 -3
  194. package/dist/utility/environment/systemInformation.d.ts +1 -0
  195. package/dist/utility/functions/date/dateFunctions.d.ts +11 -11
  196. package/dist/utility/globalSchema.d.ts +1 -1
  197. package/dist/utility/hdbTerms.d.ts +3 -0
  198. package/dist/utility/hdbTerms.js +3 -0
  199. package/dist/utility/hdbTerms.js.map +1 -1
  200. package/dist/utility/installation.d.ts +2 -4
  201. package/dist/utility/installation.js.map +1 -1
  202. package/dist/utility/lmdb/commonUtility.d.ts +1 -0
  203. package/dist/utility/lmdb/deleteUtility.d.ts +1 -0
  204. package/dist/utility/lmdb/environmentUtility.d.ts +1 -0
  205. package/dist/utility/lmdb/searchUtility.d.ts +2 -1
  206. package/dist/utility/lmdb/writeUtility.d.ts +1 -0
  207. package/dist/utility/logging/harper_logger.d.ts +6 -6
  208. package/dist/utility/processManagement/processManagement.d.ts +1 -1
  209. package/dist/utility/processManagement/servicesConfig.d.ts +12 -6
  210. package/dist/validation/common_validators.d.ts +4 -3
  211. package/dist/validation/configValidator.d.ts +3 -2
  212. package/index.d.ts +56 -0
  213. package/index.js +41 -0
  214. package/json/systemSchema.json +373 -0
  215. package/launchServiceScripts/launchHarperDB.js +3 -0
  216. package/launchServiceScripts/utility/checkNodeVersion.js +15 -0
  217. package/package.json +21 -3
  218. package/resources/DatabaseTransaction.ts +378 -0
  219. package/resources/ErrorResource.ts +57 -0
  220. package/resources/IterableEventQueue.ts +94 -0
  221. package/resources/LMDBTransaction.ts +349 -0
  222. package/resources/RecordEncoder.ts +702 -0
  223. package/resources/RequestTarget.ts +134 -0
  224. package/resources/Resource.ts +789 -0
  225. package/resources/ResourceInterface.ts +221 -0
  226. package/resources/ResourceInterfaceV2.ts +53 -0
  227. package/resources/ResourceV2.ts +67 -0
  228. package/resources/Resources.ts +162 -0
  229. package/resources/RocksIndexStore.ts +70 -0
  230. package/resources/RocksTransactionLogStore.ts +352 -0
  231. package/resources/Table.ts +4527 -0
  232. package/resources/analytics/hostnames.ts +72 -0
  233. package/resources/analytics/metadata.ts +10 -0
  234. package/resources/analytics/read.ts +252 -0
  235. package/resources/analytics/write.ts +803 -0
  236. package/resources/auditStore.ts +556 -0
  237. package/resources/blob.ts +1268 -0
  238. package/resources/crdt.ts +125 -0
  239. package/resources/dataLoader.ts +527 -0
  240. package/resources/databases.ts +1290 -0
  241. package/resources/graphql.ts +221 -0
  242. package/resources/indexes/HierarchicalNavigableSmallWorld.ts +638 -0
  243. package/resources/indexes/customIndexes.ts +7 -0
  244. package/resources/indexes/vector.ts +38 -0
  245. package/resources/jsResource.ts +86 -0
  246. package/resources/loadEnv.ts +22 -0
  247. package/resources/login.ts +18 -0
  248. package/resources/openApi.ts +409 -0
  249. package/resources/registrationDeprecated.ts +8 -0
  250. package/resources/replayLogs.ts +136 -0
  251. package/resources/roles.ts +98 -0
  252. package/resources/search.ts +1301 -0
  253. package/resources/tracked.ts +584 -0
  254. package/resources/transaction.ts +89 -0
  255. package/resources/transactionBroadcast.ts +258 -0
  256. package/security/auth.ts +376 -0
  257. package/security/certificateVerification/certificateVerificationSource.ts +84 -0
  258. package/security/certificateVerification/configValidation.ts +107 -0
  259. package/security/certificateVerification/crlVerification.ts +623 -0
  260. package/security/certificateVerification/index.ts +121 -0
  261. package/security/certificateVerification/ocspVerification.ts +148 -0
  262. package/security/certificateVerification/pkijs-ed25519-patch.ts +188 -0
  263. package/security/certificateVerification/types.ts +128 -0
  264. package/security/certificateVerification/verificationConfig.ts +138 -0
  265. package/security/certificateVerification/verificationUtils.ts +447 -0
  266. package/security/cryptoHash.js +42 -0
  267. package/security/data_objects/PermissionAttributeResponseObject.js +15 -0
  268. package/security/data_objects/PermissionResponseObject.js +115 -0
  269. package/security/data_objects/PermissionTableResponseObject.js +20 -0
  270. package/security/fastifyAuth.js +169 -0
  271. package/security/impersonation.ts +160 -0
  272. package/security/jsLoader.ts +716 -0
  273. package/security/keys.js +948 -0
  274. package/security/permissionsTranslator.js +300 -0
  275. package/security/role.js +218 -0
  276. package/security/tokenAuthentication.ts +228 -0
  277. package/security/user.ts +449 -0
  278. package/server/DurableSubscriptionsSession.ts +503 -0
  279. package/server/REST.ts +407 -0
  280. package/server/Server.ts +89 -0
  281. package/server/fastifyRoutes/helpers/getCORSOptions.js +36 -0
  282. package/server/fastifyRoutes/helpers/getHeaderTimeoutConfig.js +15 -0
  283. package/server/fastifyRoutes/helpers/getServerOptions.js +33 -0
  284. package/server/fastifyRoutes/plugins/hdbCore.js +39 -0
  285. package/server/fastifyRoutes.ts +205 -0
  286. package/server/graphqlQuerying.ts +700 -0
  287. package/server/http.ts +640 -0
  288. package/server/itc/serverHandlers.js +161 -0
  289. package/server/itc/utility/ITCEventObject.js +10 -0
  290. package/server/jobs/JobObject.js +24 -0
  291. package/server/jobs/jobProcess.js +69 -0
  292. package/server/jobs/jobRunner.js +162 -0
  293. package/server/jobs/jobs.js +304 -0
  294. package/server/loadRootComponents.js +44 -0
  295. package/server/mqtt.ts +485 -0
  296. package/server/nodeName.ts +75 -0
  297. package/server/operationsServer.ts +313 -0
  298. package/server/serverHelpers/Headers.ts +108 -0
  299. package/server/serverHelpers/JSONStream.ts +269 -0
  300. package/server/serverHelpers/OperationFunctionObject.ts +13 -0
  301. package/server/serverHelpers/Request.ts +158 -0
  302. package/server/serverHelpers/contentTypes.ts +637 -0
  303. package/server/serverHelpers/requestTimePlugin.js +57 -0
  304. package/server/serverHelpers/serverHandlers.js +148 -0
  305. package/server/serverHelpers/serverUtilities.ts +473 -0
  306. package/server/serverRegistry.ts +8 -0
  307. package/server/static.ts +187 -0
  308. package/server/status/definitions.ts +37 -0
  309. package/server/status/index.ts +125 -0
  310. package/server/storageReclamation.ts +93 -0
  311. package/server/threads/itc.js +89 -0
  312. package/server/threads/manageThreads.js +594 -0
  313. package/server/threads/socketRouter.ts +360 -0
  314. package/server/threads/threadServer.js +279 -0
  315. package/server/throttle.ts +73 -0
  316. package/sqlTranslator/SelectValidator.js +330 -0
  317. package/sqlTranslator/alasqlFunctionImporter.js +62 -0
  318. package/sqlTranslator/deleteTranslator.js +67 -0
  319. package/sqlTranslator/index.js +242 -0
  320. package/sqlTranslator/sql_statement_bucket.js +472 -0
  321. package/static/defaultConfig.yaml +3 -0
  322. package/studio/web/HDBDogOnly.svg +78 -0
  323. package/studio/web/assets/PPRadioGrotesk-Bold-DDaUYG8E.woff +0 -0
  324. package/studio/web/assets/fa-brands-400-CEJbCg16.woff +0 -0
  325. package/studio/web/assets/fa-brands-400-CSYNqBb_.ttf +0 -0
  326. package/studio/web/assets/fa-brands-400-DnkPfk3o.eot +0 -0
  327. package/studio/web/assets/fa-brands-400-UxlILjvJ.woff2 +0 -0
  328. package/studio/web/assets/fa-brands-400-cH1MgKbP.svg +3717 -0
  329. package/studio/web/assets/fa-regular-400-BhTwtT8w.eot +0 -0
  330. package/studio/web/assets/fa-regular-400-D1vz6WBx.ttf +0 -0
  331. package/studio/web/assets/fa-regular-400-DFnMcJPd.woff +0 -0
  332. package/studio/web/assets/fa-regular-400-DGzu1beS.woff2 +0 -0
  333. package/studio/web/assets/fa-regular-400-gwj8Pxq-.svg +801 -0
  334. package/studio/web/assets/fa-solid-900-B4ZZ7kfP.svg +5034 -0
  335. package/studio/web/assets/fa-solid-900-B6Axprfb.eot +0 -0
  336. package/studio/web/assets/fa-solid-900-BUswJgRo.woff2 +0 -0
  337. package/studio/web/assets/fa-solid-900-DOXgCApm.woff +0 -0
  338. package/studio/web/assets/fa-solid-900-mxuxnBEa.ttf +0 -0
  339. package/studio/web/assets/index-BTgXJX9d.js +235 -0
  340. package/studio/web/assets/index-BTgXJX9d.js.map +1 -0
  341. package/studio/web/assets/index-C-GXfcup.js +37 -0
  342. package/studio/web/assets/index-C-GXfcup.js.map +1 -0
  343. package/studio/web/assets/index-PFlNdimM.js +2 -0
  344. package/studio/web/assets/index-PFlNdimM.js.map +1 -0
  345. package/studio/web/assets/index-Y2g_iFpU.css +1 -0
  346. package/studio/web/assets/index-jiPwkrsB.css +1 -0
  347. package/studio/web/assets/index.lazy-C3TJZJ4o.js +266 -0
  348. package/studio/web/assets/index.lazy-C3TJZJ4o.js.map +1 -0
  349. package/studio/web/assets/profiler-DotzgiCJ.js +2 -0
  350. package/studio/web/assets/profiler-DotzgiCJ.js.map +1 -0
  351. package/studio/web/assets/react-redux-VxUEx_mU.js +6 -0
  352. package/studio/web/assets/react-redux-VxUEx_mU.js.map +1 -0
  353. package/studio/web/assets/startRecording-B_9J9Csd.js +3 -0
  354. package/studio/web/assets/startRecording-B_9J9Csd.js.map +1 -0
  355. package/studio/web/fabric-signup-background.webp +0 -0
  356. package/studio/web/fabric-signup-text.png +0 -0
  357. package/studio/web/favicon_purple.png +0 -0
  358. package/studio/web/github-icon.svg +15 -0
  359. package/studio/web/harper-fabric_black.png +0 -0
  360. package/studio/web/harper-fabric_white.png +0 -0
  361. package/studio/web/harper-studio_white.png +0 -0
  362. package/studio/web/index.html +16 -0
  363. package/studio/web/running.css +148 -0
  364. package/studio/web/running.html +147 -0
  365. package/studio/web/running.js +111 -0
  366. package/upgrade/UpgradeObjects.js +13 -0
  367. package/upgrade/directives/directivesController.js +90 -0
  368. package/upgrade/directivesManager.js +139 -0
  369. package/upgrade/upgradePrompt.js +124 -0
  370. package/upgrade/upgradeUtilities.js +28 -0
  371. package/utility/AWS/AWSConnector.js +29 -0
  372. package/utility/OperationFunctionCaller.js +63 -0
  373. package/utility/assignCmdEnvVariables.js +62 -0
  374. package/utility/common_utils.js +867 -0
  375. package/utility/environment/environmentManager.js +208 -0
  376. package/utility/environment/systemInformation.js +355 -0
  377. package/utility/errors/commonErrors.js +267 -0
  378. package/utility/errors/hdbError.js +146 -0
  379. package/utility/functions/date/dateFunctions.js +65 -0
  380. package/utility/functions/geo.js +355 -0
  381. package/utility/functions/sql/alaSQLExtension.js +104 -0
  382. package/utility/globalSchema.js +35 -0
  383. package/utility/hdbTerms.ts +819 -0
  384. package/utility/install/checkJWTTokensExist.js +62 -0
  385. package/utility/install/harperdb.conf +15 -0
  386. package/utility/install/harperdb.service +14 -0
  387. package/utility/install/installer.js +635 -0
  388. package/utility/installation.ts +30 -0
  389. package/utility/lmdb/DBIDefinition.js +20 -0
  390. package/utility/lmdb/DeleteRecordsResponseObject.js +25 -0
  391. package/utility/lmdb/InsertRecordsResponseObject.js +22 -0
  392. package/utility/lmdb/OpenDBIObject.js +31 -0
  393. package/utility/lmdb/OpenEnvironmentObject.js +41 -0
  394. package/utility/lmdb/UpdateRecordsResponseObject.js +25 -0
  395. package/utility/lmdb/UpsertRecordsResponseObject.js +22 -0
  396. package/utility/lmdb/cleanLMDBMap.js +65 -0
  397. package/utility/lmdb/commonUtility.js +119 -0
  398. package/utility/lmdb/deleteUtility.js +128 -0
  399. package/utility/lmdb/environmentUtility.js +477 -0
  400. package/utility/lmdb/searchCursorFunctions.js +187 -0
  401. package/utility/lmdb/searchUtility.js +918 -0
  402. package/utility/lmdb/terms.js +57 -0
  403. package/utility/lmdb/writeUtility.js +407 -0
  404. package/utility/logging/harper_logger.js +876 -0
  405. package/utility/logging/logRotator.js +157 -0
  406. package/utility/logging/logger.ts +24 -0
  407. package/utility/logging/readLog.js +355 -0
  408. package/utility/logging/transactionLog.js +57 -0
  409. package/utility/mount_hdb.js +59 -0
  410. package/utility/npmUtilities.js +102 -0
  411. package/utility/operationPermissions.ts +112 -0
  412. package/utility/operation_authorization.js +836 -0
  413. package/utility/packageUtils.js +55 -0
  414. package/utility/password.ts +99 -0
  415. package/utility/processManagement/processManagement.js +187 -0
  416. package/utility/processManagement/servicesConfig.js +56 -0
  417. package/utility/scripts/restartHdb.js +24 -0
  418. package/utility/scripts/user_data.sh +13 -0
  419. package/utility/signalling.js +36 -0
  420. package/utility/terms/certificates.js +81 -0
  421. package/utility/when.ts +20 -0
  422. package/v1.d.ts +39 -0
  423. package/v1.js +41 -0
  424. package/v2.d.ts +39 -0
  425. package/v2.js +41 -0
  426. package/validation/bulkDeleteValidator.js +24 -0
  427. package/validation/check_permissions.js +19 -0
  428. package/validation/common_validators.js +95 -0
  429. package/validation/configValidator.js +331 -0
  430. package/validation/deleteValidator.js +15 -0
  431. package/validation/fileLoadValidator.js +153 -0
  432. package/validation/insertValidator.js +40 -0
  433. package/validation/installValidator.js +37 -0
  434. package/validation/readLogValidator.js +64 -0
  435. package/validation/role_validation.js +320 -0
  436. package/validation/schemaMetadataValidator.js +42 -0
  437. package/validation/searchValidator.js +166 -0
  438. package/validation/statusValidator.ts +66 -0
  439. package/validation/transactionLogValidator.js +33 -0
  440. package/validation/user_validation.js +55 -0
  441. package/validation/validationWrapper.js +105 -0
  442. package/dist/resources/analytics/profile.d.ts +0 -2
  443. package/dist/resources/analytics/profile.js +0 -144
  444. package/dist/resources/analytics/profile.js.map +0 -1
@@ -0,0 +1,4527 @@
1
+ /**
2
+ * This module provides the main table implementation of the Resource API, providing full access to Harper
3
+ * tables through the interface defined by the Resource class. This module is responsible for handling these
4
+ * table-level interactions, loading records, updating records, querying, and more.
5
+ */
6
+
7
+ import { CONFIG_PARAMS, OPERATIONS_ENUM, SYSTEM_TABLE_NAMES, SYSTEM_SCHEMA_NAME } from '../utility/hdbTerms.ts';
8
+ import { type Database } from 'lmdb';
9
+ import { getIndexedValues } from '../utility/lmdb/commonUtility.js';
10
+ import lodash from 'lodash';
11
+ import { ExtendedIterable, SKIP } from '@harperfast/extended-iterable';
12
+ import type {
13
+ ResourceInterface,
14
+ SubscriptionRequest,
15
+ Id,
16
+ Context,
17
+ Condition,
18
+ Sort,
19
+ SubSelect,
20
+ RequestTargetOrId,
21
+ } from './ResourceInterface.ts';
22
+ import type { User } from '../security/user.ts';
23
+ import lmdbProcessRows from '../dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbProcessRows.js';
24
+ import { Resource, transformForSelect } from './Resource.ts';
25
+ import { when, promiseNormalize } from '../utility/when.ts';
26
+ import { DatabaseTransaction, ImmediateTransaction } from './DatabaseTransaction.ts';
27
+ import * as envMngr from '../utility/environment/environmentManager.js';
28
+ import { addSubscription } from './transactionBroadcast.ts';
29
+ import { handleHDBError, ClientError, ServerError, AccessViolation } from '../utility/errors/hdbError.js';
30
+ import * as signalling from '../utility/signalling.js';
31
+ import { SchemaEventMsg, UserEventMsg } from '../server/threads/itc.js';
32
+ import { databases, table } from './databases.ts';
33
+ import {
34
+ searchByIndex,
35
+ findAttribute,
36
+ estimateCondition,
37
+ flattenKey,
38
+ COERCIBLE_OPERATORS,
39
+ executeConditions,
40
+ } from './search.ts';
41
+ import { logger } from '../utility/logging/logger.ts';
42
+ import { Addition, assignTrackedAccessors, updateAndFreeze, hasChanges, GenericTrackedObject } from './tracked.ts';
43
+ import { transaction, contextStorage } from './transaction.ts';
44
+ import { MAXIMUM_KEY, writeKey, compareKeys } from 'ordered-binary';
45
+ import { getWorkerIndex, getWorkerCount } from '../server/threads/manageThreads.js';
46
+ import { HAS_BLOBS, auditRetention, removeAuditEntry } from './auditStore.ts';
47
+ import { autoCast, autoCastBooleanStrict } from '../utility/common_utils.js';
48
+ import {
49
+ recordUpdater,
50
+ removeEntry,
51
+ PENDING_LOCAL_TIME,
52
+ type RecordObject,
53
+ type Entry,
54
+ entryMap,
55
+ } from './RecordEncoder.ts';
56
+ import { recordAction, recordActionBinary } from './analytics/write.ts';
57
+ import { rebuildUpdateBefore } from './crdt.ts';
58
+ import { appendHeader } from '../server/serverHelpers/Headers.ts';
59
+ import fs from 'node:fs';
60
+ import { Blob, deleteBlobsInObject, findBlobsInObject, startPreCommitBlobsForRecord } from './blob.ts';
61
+ import { onStorageReclamation } from '../server/storageReclamation.ts';
62
+ import { RequestTarget } from './RequestTarget.ts';
63
+ import harperLogger from '../utility/logging/harper_logger.js';
64
+ import { throttle } from '../server/throttle.ts';
65
+ import { RocksDatabase } from '@harperfast/rocksdb-js';
66
+ import { LMDBTransaction, ImmediateTransaction as ImmediateLMDBTransaction } from './LMDBTransaction';
67
+ import { contentTypes } from '../server/serverHelpers/contentTypes';
68
+
69
+ const { sortBy } = lodash;
70
+ const { validateAttribute } = lmdbProcessRows;
71
+
72
+ export type Attribute = {
73
+ name: string;
74
+ type: 'ID' | 'Int' | 'Float' | 'Long' | 'String' | 'Boolean' | 'Date' | 'Bytes' | 'Any' | 'BigInt' | 'Blob' | string;
75
+ assignCreatedTime?: boolean;
76
+ assignUpdatedTime?: boolean;
77
+ nullable?: boolean;
78
+ expiresAt?: boolean;
79
+ isPrimaryKey?: boolean;
80
+ indexed?: unknown;
81
+ relationship?: unknown;
82
+ computed?: unknown;
83
+ properties?: Array<Attribute>;
84
+ elements?: Attribute;
85
+ };
86
+
87
+ type MaybePromise<T> = T | Promise<T>;
88
+
89
+ const NULL_WITH_TIMESTAMP = new Uint8Array(9);
90
+ NULL_WITH_TIMESTAMP[8] = 0xc0; // null
91
+ const UNCACHEABLE_TIMESTAMP = Infinity; // we use this when dynamic content is accessed that we can't safely cache, and this prevents earlier timestamps from change the "last" modification
92
+ const RECORD_PRUNING_INTERVAL = 60000; // one minute
93
+ envMngr.initSync();
94
+ const LMDB_PREFETCH_WRITES = envMngr.get(CONFIG_PARAMS.STORAGE_PREFETCHWRITES);
95
+ const LOCK_TIMEOUT = 10000;
96
+ export const INVALIDATED = 1;
97
+ export const EVICTED = 8; // note that 2 is reserved for timestamps
98
+ const TEST_WRITE_KEY_BUFFER = Buffer.allocUnsafeSlow(8192);
99
+ const MAX_KEY_BYTES = 1978;
100
+ const EVENT_HIGH_WATER_MARK = 100;
101
+ const FULL_PERMISSIONS = {
102
+ read: true,
103
+ insert: true,
104
+ update: true,
105
+ delete: true,
106
+ isSuperUser: true,
107
+ };
108
+ export interface Table {
109
+ primaryStore: Database;
110
+ auditStore: Database;
111
+ indices: {};
112
+ databasePath: string;
113
+ tableName: string;
114
+ databaseName: string;
115
+ attributes: Attribute[];
116
+ primaryKey: string;
117
+ splitSegments?: boolean;
118
+ replicate?: boolean;
119
+ subscriptions: Map<any, Function[]>;
120
+ expirationMS: number;
121
+ indexingOperations?: Promise<void>;
122
+ source?: new () => ResourceInterface;
123
+ Transaction: ReturnType<typeof makeTable>;
124
+ }
125
+ type ResidencyDefinition = number | string[] | void;
126
+
127
/**
 * This returns a Table class for the given table settings (determined from the metadata table)
 * Instances of the returned class are Resource instances, intended to provide a consistent view or transaction of the table
 * @param options Table settings: store handles, key/index metadata, expiration, auditing and replication settings
 */
export function makeTable(options) {
	// immutable per-table settings captured in the closure for all methods of the generated class
	const {
		primaryKey,
		indices,
		tableId,
		tableName,
		primaryStore,
		databasePath,
		databaseName,
		auditStore,
		schemaDefined,
		dbisDB: dbisDb,
		sealed,
		splitSegments,
		replicate,
	} = options;
	// mutable settings: these can be changed at runtime (e.g. setTTLExpiration, enableAuditing)
	let { expirationMS: expirationMs, evictionMS: evictionMs, audit, trackDeletes } = options;
	evictionMs ??= 0;
	let { attributes }: { attributes: Attribute[] } = options;
	if (!attributes) attributes = [];
	const updateRecord = recordUpdater(primaryStore, tableId, auditStore);
	let sourceLoad: any; // if a source has a load function (replicator), record it here
	let hasSourceGet: any;
	let primaryKeyAttribute: Attribute = {};
	let lastEvictionCompletion: Promise<void> = Promise.resolve();
	// well-known attributes discovered by scanning the declared attributes below
	let createdTimeProperty: Attribute, updatedTimeProperty: Attribute, expiresAtProperty: Attribute;
	for (const attribute of attributes) {
		if (attribute.assignCreatedTime || attribute.name === '__createdtime__') createdTimeProperty = attribute;
		if (attribute.assignUpdatedTime || attribute.name === '__updatedtime__') updatedTimeProperty = attribute;
		if (attribute.expiresAt) expiresAtProperty = attribute;
		if (attribute.isPrimaryKey) primaryKeyAttribute = attribute;
	}
	let deleteCallbackHandle: { remove: () => void };
	// prefetch batching state — confirm semantics at use sites (not visible in this chunk)
	let prefetchIds = [];
	let prefetchCallbacks = [];
	let untilNextPrefetch = 1;
	let nonPrefetchSequence = 2;
	// expiration/eviction sweep scheduling state (see setTTLExpiration/scheduleCleanup)
	let cleanupInterval = 86400000;
	let cleanupPriority = 0;
	let lastCleanupInterval: number;
	let cleanupTimer: NodeJS.Timeout;
	let propertyResolvers: any;
	let hasRelationships = false;
	let runningRecordExpiration: boolean;
	const isRocksDB = primaryStore instanceof RocksDatabase;
	type BigInt64ArrayAndMaxSafeId = BigInt64Array & { maxSafeId: number };
	// shared atomic counter for getNewId (lazily initialized there)
	let idIncrementer: BigInt64ArrayAndMaxSafeId;
	let replicateToCount;
	// pick up a configured per-database replication fan-out count, if one matches this database
	const databaseReplications = envMngr.get(CONFIG_PARAMS.REPLICATION_DATABASES);
	if (Array.isArray(databaseReplications)) {
		for (const dbReplication of databaseReplications) {
			if (dbReplication.name === databaseName && dbReplication.replicateTo >= 0) {
				replicateToCount = dbReplication.replicateTo;
				break;
			}
		}
	}
	const MAX_PREFETCH_SEQUENCE = 10;
	const MAX_PREFETCH_BUNDLE = 6;
	if (audit) addDeleteRemoval();
	// when the storage layer asks us to reclaim space, schedule an eviction pass — but only for
	// caching tables (ones with a source get), since only cached data can be safely evicted
	onStorageReclamation(primaryStore.path, (priority: number) => {
		if (hasSourceGet) return scheduleCleanup(priority);
	});
195
+
196
+ class Updatable extends GenericTrackedObject implements RecordObject {
197
+ declare set: (property: string, value: any) => void;
198
+ declare getProperty: (property: string) => any;
199
+ getUpdatedTime(): number {
200
+ return entryMap.get(this.getRecord())?.version;
201
+ }
202
+ getExpiresAt(): number {
203
+ return entryMap.get(this.getRecord())?.expiresAt;
204
+ }
205
+ addTo(property: string, value: number | bigint) {
206
+ if (typeof value === 'number' || typeof value === 'bigint') {
207
+ this.set(property, new Addition(value));
208
+ } else {
209
+ throw new Error('Can not add or subtract a non-numeric value');
210
+ }
211
+ }
212
+ subtractFrom(property: string, value: number | bigint) {
213
+ return this.addTo(property, -value);
214
+ }
215
+ }
216
class TableResource<Record extends object = any> extends Resource<Record> {
	#record: any; // the stored/frozen record from the database and stored in the cache (should not be modified directly)
	#changes: any; // the changes to the record that have been made (should not be modified directly)
	#version?: number; // version of the record
	#entry?: Entry; // the entry from the database
	#savingOperation?: any; // operation for the record is currently being saved

	declare getProperty: (name: string) => any;
	// the statics below re-expose the makeTable closure settings on the generated class
	static name = tableName; // for display/debugging purposes
	static primaryStore = primaryStore;
	static auditStore = auditStore;
	static primaryKey = primaryKey;
	static tableName = tableName;
	static tableId = tableId;
	static indices = indices;
	static audit = audit;
	static databasePath = databasePath;
	static databaseName = databaseName;
	static attributes = attributes;
	static replicate = replicate;
	static sealed = sealed;
	static splitSegments = splitSegments ?? true; // split record segments by default
	static createdTimeProperty = createdTimeProperty;
	static updatedTimeProperty = updatedTimeProperty;
	static propertyResolvers;
	static userResolvers = {};
	static source?: typeof TableResource; // canonical data source, if this table is a cache
	declare static sourceOptions: any;
	declare static intermediateSource: boolean;
	static getResidencyById: (id: Id) => number | void;
	// getter so runtime changes via setTTLExpiration are always reflected
	static get expirationMS() {
		return expirationMs;
	}
	static dbisDB = dbisDb;
	static schemaDefined = schemaDefined;
251
	/**
	 * This defines a source for a table. This effectively makes a table into a cache, where the canonical
	 * source of data (or source of truth) is provided here in the Resource argument. Additional options
	 * can be provided to indicate how the caching should be handled.
	 * @param source Resource class providing canonical data; may expose get/load/subscribe
	 * @param options caching options (expiration, eviction, scanInterval, intermediateSource)
	 * @returns this class, for chaining
	 */
	static sourcedFrom(source, options) {
		// define a source for retrieving invalidated entries for caching purposes
		if (options) {
			this.sourceOptions = options;
			if (options.expiration || options.eviction || options.scanInterval) this.setTTLExpiration(options);
		}
		if (options?.intermediateSource) {
			source.intermediateSource = true;
			// intermediateSource should register sourceLoad and setup subscription but not assign to this.source
		} else {
			if (this.source) {
				if (this.source.name === source.name) {
					// if we are adding a source that is already set, we don't add it again
					return;
				}
				throw new Error('Can not have multiple sources');
			}
			this.source = source;
		}
		hasSourceGet = hasSourceGet || (source.get && (!source.get.reliesOnPrototype || source.prototype.get));
		sourceLoad = sourceLoad || source.load;
		const shouldRevalidateEvents = this.source?.shouldRevalidateEvents;

		// External data source may provide a subscribe method, allowing for real-time proactive delivery
		// of data from the source to this caching table. This is generally greatly superior to expiration-based
		// caching since it much more accurately ensures freshness and maximizes caching time.
		// Here we subscribe the external data source if it is available, getting notification events
		// as they come in, and directly writing them to this table. We use the notification option to ensure
		// that we don't re-broadcast these as "requested" changes back to the source.
		(async () => {
			let userRoleUpdate = false;
			let lastSequenceId;
			// perform the write of an individual write event
			const writeUpdate = async (event, context) => {
				const value = event.value;
				// events may target another table in the same database; default to this table
				const Table = event.table ? databases[databaseName][event.table] : TableResource;
				if (
					databaseName === SYSTEM_SCHEMA_NAME &&
					(event.table === SYSTEM_TABLE_NAMES.ROLE_TABLE_NAME || event.table === SYSTEM_TABLE_NAMES.USER_TABLE_NAME)
				) {
					userRoleUpdate = true;
				}
				if (event.id === undefined) {
					// derive the id from the record when the event omits it
					event.id = value[Table.primaryKey];
					if (event.id === undefined) throw new Error('Replication message without an id ' + JSON.stringify(event));
				}
				event.source = source;
				const options = {
					residencyId: getResidencyId(event.residencyList),
					isNotification: true, // notification: don't re-broadcast back to source
					ensureLoaded: false,
					nodeId: event.nodeId,
					viaNodeId: event.viaNodeId,
					async: true,
				};
				const id = event.id;
				const resource: TableResource = await Table.getResource(id, context, options);
				if (event.finished) await event.finished;
				// dispatch to the internal write method matching the event type; when the table
				// overrides get(), puts/patches are downgraded to invalidations so get() can revalidate
				switch (event.type) {
					case 'put':
						return shouldRevalidateEvents
							? resource._writeInvalidate(id, value, options)
							: resource._writeUpdate(id, value, true, options);
					case 'patch':
						return shouldRevalidateEvents
							? resource._writeInvalidate(id, value, options)
							: resource._writeUpdate(id, value, false, options);
					case 'delete':
						return resource._writeDelete(id, options);
					case 'publish':
					case 'message':
						return resource._writePublish(id, value, options);
					case 'invalidate':
						return resource._writeInvalidate(id, value, options);
					case 'relocate':
						return resource._writeRelocate(id, options);
					default:
						logger.error?.('Unknown operation', event.type, event.id);
				}
			};

			try {
				const hasSubscribe = source.subscribe;
				// if subscriptions come in out-of-order, we need to track deletes to ensure consistency
				if (hasSubscribe && trackDeletes == undefined) trackDeletes = true;
				const subscriptionOptions = {
					// this is used to indicate that all threads are (presumably) making this subscription
					// and we do not need to propagate events across threads (more efficient)
					crossThreads: false,
					// this is used to indicate that we want, if possible, immediate notification of writes
					// within the process (not supported yet)
					inTransactionUpdates: true,
					// supports transaction operations
					supportsTransactions: true,
					// don't need the current state, should be up-to-date
					omitCurrent: true,
				};
				const subscribeOnThisThread = source.subscribeOnThisThread
					? source.subscribeOnThisThread(getWorkerIndex(), subscriptionOptions)
					: getWorkerIndex() === 0;
				const subscription = hasSubscribe && subscribeOnThisThread && (await source.subscribe?.(subscriptionOptions));
				if (subscription) {
					let txnInProgress;
					// we listen for events by iterating through the async iterator provided by the subscription
					for await (const event of subscription) {
						try {
							const firstWrite = event.type === 'transaction' ? event.writes[0] : event;
							if (!firstWrite) {
								logger.error?.('Bad subscription event', event);
								continue;
							}
							event.source = source;
							if (event.type === 'end_txn') {
								txnInProgress?.resolve();
								let updateRecordedSequenceId: () => void;
								if (event.localTime && lastSequenceId !== event.localTime) {
									if (event.remoteNodeIds?.length > 0) {
										updateRecordedSequenceId = () => {
											// the key for tracking the sequence ids and txn times received from this node
											const seqKey = [Symbol.for('seq'), event.remoteNodeIds[0]];
											const existingSeq = dbisDb.get(seqKey);
											let nodeStates = existingSeq?.nodes;
											if (!nodeStates) {
												// if we don't have a list of nodes, we need to create one, with the main one using the existing seqId
												nodeStates = [];
											}
											// if we are not the only node in the list, we are getting proxied subscriptions, and we need
											// to track this separately
											// track the other nodes in the list
											for (const nodeId of event.remoteNodeIds.slice(1)) {
												let nodeState = nodeStates.find((existingNode) => existingNode.id === nodeId);
												// remove any duplicates
												nodeStates = nodeStates.filter(
													(existingNode) => existingNode.id !== nodeId || existingNode === nodeState
												);
												if (!nodeState) {
													nodeState = { id: nodeId, seqId: 0 };
													nodeStates.push(nodeState);
												}
												nodeState.seqId = Math.max(existingSeq?.seqId ?? 1, event.localTime);
												if (nodeId === txnInProgress?.nodeId) {
													nodeState.lastTxnTime = event.timestamp;
												}
											}
											const seqId = Math.max(existingSeq?.seqId ?? 1, event.localTime);
											logger.trace?.(
												'Received txn',
												databaseName,
												seqId,
												new Date(seqId),
												event.localTime,
												new Date(event.localTime),
												event.remoteNodeIds
											);
											dbisDb.put(seqKey, {
												seqId,
												nodes: nodeStates,
											});
										};
										lastSequenceId = event.localTime;
									}
								}
								if (event.onCommit) {
									// if there was an onCommit callback, call that. This function can be async
									// and if so, we want to delay the recording of the sequence id until it finished
									// (as it can be used to indicate more associated actions, like blob transfer, are in flight)
									const onCommitFinished = txnInProgress
										? txnInProgress.committed.then(event.onCommit)
										: event.onCommit();
									if (updateRecordedSequenceId) {
										if (onCommitFinished?.then) onCommitFinished.then(updateRecordedSequenceId);
										else updateRecordedSequenceId();
									}
								} else if (updateRecordedSequenceId) updateRecordedSequenceId();
								continue;
							}
							if (txnInProgress) {
								if (event.beginTxn) {
									// if we are starting a new transaction, finish the existing one
									txnInProgress.resolve();
								} else {
									// write in the current transaction if one is in progress
									txnInProgress.writePromises.push(writeUpdate(event, txnInProgress));
									continue;
								}
							}
							// use the version as the transaction timestamp
							if (!event.timestamp && event.version) event.timestamp = event.version;
							const commitResolution = transaction(event, () => {
								if (event.type === 'transaction') {
									// if it is a transaction, we need to individually iterate through each write event
									const promises: Promise<any>[] = [];
									for (const write of event.writes) {
										try {
											promises.push(writeUpdate(write, event));
										} catch (error) {
											(error as Error).message +=
												' writing ' + JSON.stringify(write) + ' of event ' + JSON.stringify(event);
											throw error;
										}
									}
									return Promise.all(promises);
								} else if (event.type === 'define_schema') {
									// ensure table has the provided attributes
									const updatedAttributes = this.attributes.slice(0);
									let hasChanges = false;
									for (const attribute of event.attributes) {
										if (!updatedAttributes.find((existing) => existing.name === attribute.name)) {
											updatedAttributes.push(attribute);
											hasChanges = true;
										}
									}
									if (hasChanges) {
										table({
											table: tableName,
											database: databaseName,
											attributes: updatedAttributes,
											origin: 'cluster',
										});
										signalling.signalSchemaChange(
											new SchemaEventMsg(process.pid, OPERATIONS_ENUM.CREATE_TABLE, databaseName, tableName)
										);
									}
								} else {
									if (event.beginTxn) {
										// if we are beginning a new transaction, we record the current
										// event/context as transaction in progress and then future events
										// are applied with that context until the next transaction begins/ends
										txnInProgress = event;
										txnInProgress.writePromises = [writeUpdate(event, event)];
										return new Promise((resolve) => {
											// callback for when this transaction is finished (will be called on next txn begin/end).
											txnInProgress.resolve = () => resolve(Promise.all(txnInProgress.writePromises)); // and make sure we wait for the write update to finish
										});
									}
									return writeUpdate(event, event);
								}
							});
							if (txnInProgress) txnInProgress.committed = commitResolution;
							if (userRoleUpdate && commitResolution && !commitResolution?.waitingForUserChange) {
								// if the user role changed, asynchronously signal the user change (but don't block this function)
								commitResolution.then(() => signalling.signalUserChange(new UserEventMsg(process.pid)));
								commitResolution.waitingForUserChange = true; // only need to send one signal per transaction
							}

							if (event.onCommit) {
								if (commitResolution) commitResolution.then(event.onCommit);
								else event.onCommit();
							}
						} catch (error) {
							// keep consuming the subscription even if one event fails
							logger.error?.('error in subscription handler', error);
						}
					}
				}
			} catch (error) {
				logger.error?.(error);
			}
		})();
		return this;
	}
519
	// define a caching table as one that has an origin source with a get
	static get isCaching() {
		return hasSourceGet;
	}
523
+
524
	/** Indicates if the events should be revalidated when they are received. By default we do this if the get
	 * method is overridden (a custom get implies derived data that must be recomputed). */
	static get shouldRevalidateEvents() {
		return this.prototype.get !== TableResource.prototype.get;
	}
529
+
530
	/**
	 * Gets a resource instance, as defined by the Resource class, adding the table-specific handling
	 * of also loading the stored record into the resource instance.
	 * @param target identifier/query that specifies the requested target to retrieve and query
	 * @param request request/transaction context
	 * @param resourceOptions An important option is ensureLoaded, which can be used to indicate that it is necessary for a caching table to load data from the source if there is not a local copy of the data in the table (usually not necessary for a delete, for example).
	 * @returns the resource, or a promise for it when the record load is asynchronous
	 */
	static getResource<Record extends object = any>(
		target: RequestTarget,
		request: Context,
		resourceOptions?: any
	): Promise<TableResource<Record>> | TableResource<Record> {
		const resource: TableResource = super.getResource(target, request, resourceOptions) as any;
		// loadAsInstance === false opts out of eagerly attaching the stored record
		if (this.loadAsInstance !== false) {
			return resource._loadRecord(target, request, resourceOptions);
		}
		return resource;
	}
549
	/**
	 * Loads the stored record (and entry metadata) for this resource's id into its private fields,
	 * optionally ensuring the record is (re)loaded from the table's source when stale or missing.
	 * @param target id, or object carrying an id (and optionally a sync hint)
	 * @param request request/transaction context
	 * @param resourceOptions ensureLoaded forces a source fetch for caching tables when the local copy is absent/stale
	 * @returns this resource, or a promise for it when loading is asynchronous
	 */
	_loadRecord<Record extends object = any>(
		target: RequestTarget,
		request: Context,
		resourceOptions?: any
	): MaybePromise<TableResource<Record>> {
		const id = target && typeof target === 'object' ? target.id : target;
		if (id == null) return this; // no id: collection-level resource, nothing to load
		checkValidId(id);
		try {
			if (this.getRecord?.()) return this; // already loaded, don't reload, current version may have modifications
			if (typeof id === 'object' && id && !Array.isArray(id)) {
				throw new Error(`Invalid id ${JSON.stringify(id)}`);
			}
			const sync = target?.sync || primaryStore.cache?.get?.(id);
			const txn = txnForContext(request);
			const readTxn = txn.getReadTxn();
			if (readTxn?.isDone) {
				throw new Error('You can not read from a transaction that has already been committed/aborted');
			}
			return loadLocalRecord(
				id,
				request,
				{ transaction: readTxn, ensureLoaded: resourceOptions?.ensureLoaded },
				sync,
				(entry) => {
					if (entry) {
						TableResource._updateResource(this, entry);
					} else this.#record = null;
					if (request.onlyIfCached) {
						// don't go into the loading from source condition, but HTTP spec says to
						// return 504 (rather than 404) if there is no content and the cache-control header
						// dictates not to go to source
						if (!this.doesExist()) throw new ServerError('Entry is not cached', 504);
					} else if (resourceOptions?.ensureLoaded) {
						const loadingFromSource = ensureLoadedFromSource(this.constructor.source, id, entry, request, this);
						if (loadingFromSource) {
							txn?.disregardReadTxn(); // this could take some time, so don't keep the transaction open if possible
							target.loadedFromSource = true;
							return when(loadingFromSource, (entry) => {
								TableResource._updateResource(this, entry);
								return this;
							});
						} else if (hasSourceGet) target.loadedFromSource = false; // mark it as cached
					}
					return this;
				}
			);
		} catch (error) {
			// NOTE(review): assumes error is an Error with a string message — confirm upstream throw sites
			if (error.message.includes('Unable to serialize object')) error.message += ': ' + JSON.stringify(id);
			throw error;
		}
	}
601
	/**
	 * Syncs a resource's private fields (entry, record, version) from a database entry.
	 * Static so it can reach the #-private fields of any TableResource instance.
	 */
	static _updateResource(resource, entry) {
		resource.#entry = entry;
		resource.#record = entry?.value ?? null; // null (not undefined) marks a known-missing record
		resource.#version = entry?.version;
	}
606
+ /**
607
+ * This is a request to explicitly ensure that the record is loaded from source, rather than only using the local record.
608
+ * This will load from source if the current record is expired, missing, or invalidated.
609
+ * @returns
610
+ */
611
+ ensureLoaded() {
612
+ const loadedFromSource = ensureLoadedFromSource(
613
+ this.constructor.source,
614
+ this.getId(),
615
+ this.#entry,
616
+ this.getContext()
617
+ );
618
+ if (loadedFromSource) {
619
+ return when(loadedFromSource, (entry) => {
620
+ this.#entry = entry;
621
+ this.#record = entry.value;
622
+ this.#version = entry.version;
623
+ });
624
+ }
625
+ }
626
	/**
	 * Generates a new primary key. String/ID keys fall back to the default Resource behavior (GUID);
	 * numeric keys are produced by an atomic, cross-thread incrementer backed by a shared buffer,
	 * with ranges allocated in the store so multiple nodes/processes don't collide.
	 */
	static getNewId(): any {
		const type = primaryKeyAttribute?.type;
		// the default Resource behavior is to return a GUID, but for a table we can return incrementing numeric keys if the type is (or can be) numeric
		if (type === 'String' || type === 'ID') return super.getNewId();
		if (!idIncrementer) {
			// if there is no id incrementer yet, we get or create one
			const idAllocationEntry = primaryStore.getEntry(Symbol.for('id_allocation'));
			let idAllocation = idAllocationEntry?.value;
			let lastKey;
			if (
				idAllocation &&
				idAllocation.nodeName === server.hostname &&
				(!hasOtherProcesses(primaryStore) || idAllocation.pid === process.pid)
			) {
				// the database has an existing id allocation that we can continue from
				const startingId = idAllocation.start;
				const endingId = idAllocation.end;
				lastKey = startingId;
				// once it is loaded, we need to find the last key in the allocated range and start from there
				for (const key of primaryStore.getKeys({ start: endingId, end: startingId, limit: 1, reverse: true })) {
					lastKey = key;
				}
			} else {
				// we need to create a new id allocation
				idAllocation = createNewAllocation(idAllocationEntry?.version ?? null);
				lastKey = idAllocation.start;
			}
			// all threads will use a shared buffer to atomically increment the id
			// first, we create our proposed incrementer buffer that will be used if we are the first thread to get here
			// and initialize it with the starting id
			idIncrementer = new BigInt64Array([BigInt(lastKey) + 1n]) as BigInt64ArrayAndMaxSafeId;
			// now get the selected incrementer buffer, this is the shared buffer was first registered and that all threads will use
			idIncrementer = new BigInt64Array(
				primaryStore.getUserSharedBuffer('id', idIncrementer.buffer)
			) as BigInt64ArrayAndMaxSafeId;
			// and we set the maximum safe id to the end of the allocated range before we check for conflicting ids again
			idIncrementer.maxSafeId = idAllocation.end;
		}
		// this is where we actually do the atomic incrementation. All the threads should be pointing to the same
		// memory location of this incrementer, so we can be sure that the id is unique and sequential.
		const nextId = Number(Atomics.add(idIncrementer, 0, 1n));
		const asyncIdExpansionThreshold = type === 'Int' ? 0x200 : 0x100000;
		if (nextId + asyncIdExpansionThreshold >= idIncrementer.maxSafeId) {
			const updateEnd = (inTxn) => {
				// we update the end of the allocation range after verifying we don't have any conflicting ids in front of us
				idIncrementer.maxSafeId = nextId + (type === 'Int' ? 0x3ff : 0x3fffff);
				let idAfter = (type === 'Int' ? Math.pow(2, 31) : Math.pow(2, 49)) - 1;
				const readTxn = inTxn ? undefined : primaryStore.useReadTransaction?.();
				// get the latest id after the read transaction to make sure we aren't reading any new ids that we assigned from this node
				const newestId = Number(idIncrementer[0]);
				for (const key of primaryStore.getKeys({
					start: newestId + 1,
					end: idAfter,
					limit: 1,
					transaction: readTxn,
				})) {
					idAfter = key;
				}
				readTxn?.done();
				// NOTE(review): assumes the id_allocation entry exists here (it was created above) — a missing
				// entry would make this destructure throw; confirm no concurrent removal is possible
				const { value: updatedIdAllocation, version } = primaryStore.getEntry(Symbol.for('id_allocation'));
				if (idIncrementer.maxSafeId < idAfter) {
					// note that this is just a noop/direct callback if we are inside the sync transaction
					// first check to see if it actually got updated by another thread
					if (updatedIdAllocation.end > idIncrementer.maxSafeId - 100) {
						// the allocation was already updated by another thread
						return;
					}
					logger.info?.('New id allocation', nextId, idIncrementer.maxSafeId, version);
					primaryStore.put(
						Symbol.for('id_allocation'),
						{
							start: updatedIdAllocation.start,
							end: idIncrementer.maxSafeId,
							nodeName: server.hostname,
							pid: process.pid,
						},
						Date.now(),
						version
					);
				} else {
					// indicate that we have run out of ids in the allocated range, so we need to allocate a new range
					logger.warn?.(
						`Id conflict detected, starting new id allocation range, attempting to allocate to ${idIncrementer.maxSafeId}, but id of ${idAfter} detected`
					);
					const idAllocation = createNewAllocation(version);
					// reassign the incrementer to the new range/starting point
					if (!idAllocation.alreadyUpdated) Atomics.store(idIncrementer, 0, BigInt(idAllocation.start + 1));
					// and we set the maximum safe id to the end of the allocated range before we check for conflicting ids again
					idIncrementer.maxSafeId = idAllocation.end;
				}
			};
			if (nextId + asyncIdExpansionThreshold === idIncrementer.maxSafeId) {
				setImmediate(updateEnd); // if we are getting kind of close to the end, we try to update it asynchronously
			} else if (nextId + 100 >= idIncrementer.maxSafeId) {
				logger.warn?.(
					`Synchronous id allocation required on table ${tableName}${
						type == 'Int'
							? ', it is highly recommended that you use Long or Float as the type for auto-incremented primary keys'
							: ''
					}`
				);
				// if we are very close to the end, synchronously update
				primaryStore.transactionSync(() => updateEnd(true));
			}
			//TODO: Add a check to recordUpdate to check if a new id infringes on the allocated id range
		}
		return nextId;
		// Picks a fresh random range for id allocation, verifying a large enough gap around it,
		// and records it atomically (or adopts another thread's allocation if one raced ahead).
		function createNewAllocation(expectedVersion) {
			// there is no id allocation (or it is for the wrong node name or used up), so we need to create one
			// start by determining the max id for the type
			const maxId = (type === 'Int' ? Math.pow(2, 31) : Math.pow(2, 49)) - 1;
			let safeDistance = maxId / 4; // we want to allocate ids in a range that is at least 1/4 of the total id space from ids in either direction
			let idBefore: number, idAfter: number;
			let complained = false;
			let lastKey;
			let idAllocation;
			do {
				// we start with a random id and verify that there is a good gap in the ids to allocate a decent range
				lastKey = Math.floor(Math.random() * maxId);
				idAllocation = {
					start: lastKey,
					end: lastKey + (type === 'Int' ? 0x400 : 0x400000),
					nodeName: server.hostname,
					pid: process.pid,
				};
				idBefore = 0;
				// now find the next id before the last key
				for (const key of primaryStore.getKeys({ start: lastKey, end: true, limit: 1, reverse: true })) {
					idBefore = key;
				}
				idAfter = maxId;
				// and next key after
				for (const key of primaryStore.getKeys({ start: lastKey + 1, end: maxId, limit: 1 })) {
					idAfter = key;
				}
				safeDistance *= 0.875; // if we fail, we try again with a smaller range, looking for a good gap without really knowing how packed the ids are
				if (safeDistance < 1000 && !complained) {
					complained = true;
					logger.error?.(
						`Id allocation in table ${tableName} is very dense, limited safe range of numbers to allocate ids in${
							type === 'Int'
								? ', it is highly recommended that you use Long or Float as the type for auto-incremented primary keys'
								: ''
						}`,
						lastKey,
						idBefore,
						idAfter,
						safeDistance
					);
				}
				// see if we maintained an adequate distance from the surrounding ids
			} while (!(safeDistance < idAfter - lastKey && (safeDistance < lastKey - idBefore || idBefore === 0)));
			// we have to ensure that the id allocation is atomic and multiple threads don't set different ids, so we use a sync transaction
			return primaryStore.transactionSync(() => {
				// first check to see if it actually got set by another thread
				const updatedIdAllocation = primaryStore.getEntry(Symbol.for('id_allocation'));
				if ((updatedIdAllocation?.version ?? null) == expectedVersion) {
					logger.info?.('Allocated new id range', idAllocation);
					primaryStore.put(Symbol.for('id_allocation'), idAllocation, Date.now());
					return idAllocation;
				} else {
					logger.debug?.('Looks like ids were already allocated');
					return { alreadyUpdated: true, ...updatedIdAllocation.value };
				}
			});
		}
	}
793
+
794
	/**
	 * Set TTL expiration for records in this table. On retrieval, record timestamps are checked for expiration.
	 * This also informs the scheduling for record eviction.
	 * @param expiration Either time in seconds until records expire (are stale), or an options object:
	 *   expiration (seconds until stale), eviction (extra seconds until removal), scanInterval (seconds between sweeps)
	 * @throws Error if the value is not a number/object or the expiration is negative
	 */
	static setTTLExpiration(expiration: number | { expiration: number; eviction?: number; scanInterval?: number }) {
		// we set up a timer to remove expired entries. we only want the timer/reaper to run in one thread,
		// so we use the first one
		if (typeof expiration === 'number') {
			expirationMs = expiration * 1000;
			if (!evictionMs) evictionMs = 0; // by default, no extra time for eviction
		} else if (expiration && typeof expiration === 'object') {
			// an object with expiration times/options specified
			expirationMs = expiration.expiration * 1000;
			evictionMs = (expiration.eviction || 0) * 1000;
			// NaN when scanInterval is omitted — falls through to the default below (NaN is falsy)
			cleanupInterval = expiration.scanInterval * 1000;
		} else throw new Error('Invalid expiration value type');
		if (expirationMs < 0) throw new Error('Expiration can not be negative');
		// default to one quarter of the total expiration + eviction time
		// NOTE(review): no clamp to the 32-bit setTimeout range here — confirm scheduleCleanup handles very large intervals
		cleanupInterval = cleanupInterval || (expirationMs + evictionMs) / 4;
		scheduleCleanup();
	}
817
+
818
	/** Look up the stored residency record for an id (which nodes hold the record). */
	static getResidencyRecord(id: Id) {
		return dbisDb.get([Symbol.for('residency_by_id'), id]);
	}
821
+
822
+ static setResidency(getResidency?: (record: object, context: Context) => ResidencyDefinition) {
823
+ TableResource.getResidency =
824
+ getResidency &&
825
+ ((record: object, context: Context) => {
826
+ try {
827
+ return getResidency(record, context);
828
+ } catch (error: unknown) {
829
+ (error as Error).message += ` in residency function for table ${tableName}`;
830
+ throw error;
831
+ }
832
+ });
833
+ }
834
+ static setResidencyById(getResidencyById?: (id: Id) => number | void) {
835
+ TableResource.getResidencyById =
836
+ getResidencyById &&
837
+ ((id: Id) => {
838
+ try {
839
+ return getResidencyById(id);
840
+ } catch (error: unknown) {
841
+ (error as Error).message += ` in residency function for table ${tableName}`;
842
+ throw error;
843
+ }
844
+ });
845
+ }
846
	/**
	 * Default residency computation: decides which nodes should hold a record.
	 * Precedence: a user setResidencyById function, then context.replicateTo (list or count),
	 * then the configured per-database replicateTo count; undefined means replicate everywhere.
	 */
	static getResidency(record: object, context: Context) {
		if (TableResource.getResidencyById) {
			return TableResource.getResidencyById(record[primaryKey]);
		}
		let count = replicateToCount;
		if (context.replicateTo != undefined) {
			// if the context specifies where we are replicating to, use that
			if (Array.isArray(context.replicateTo)) {
				// always include ourselves in an explicit node list
				return context.replicateTo.includes(server.hostname)
					? context.replicateTo
					: [server.hostname, ...context.replicateTo];
			}
			if (context.replicateTo >= 0) count = context.replicateTo;
		}
		if (count >= 0 && server.nodes) {
			// if we are given a count, choose nodes and return them
			const replicateTo = [server.hostname]; // start with ourselves, we should always be in the list
			if (context.previousResidency) {
				// if we have a previous residency, we should preserve it
				replicateTo.push(...context.previousResidency.slice(0, count));
			} else {
				// otherwise need to create a new list of nodes to replicate to, based on available nodes
				// randomize this to ensure distribution of data
				const nodes = server.nodes.map((node) => node.name);
				const startingIndex = Math.floor(nodes.length * Math.random());
				replicateTo.push(...nodes.slice(startingIndex, startingIndex + count));
				// wrap around the node list if the random window ran off the end
				const remainingToAdd = startingIndex + count - nodes.length;
				if (remainingToAdd > 0) replicateTo.push(...nodes.slice(0, remainingToAdd));
			}
			return replicateTo;
		}
		return; // returning undefined will return the default residency of replicating everywhere
	}
879
+
880
	/**
	 * Turn on auditing at runtime.
	 * Safe to call repeatedly; subsequent calls are no-ops once auditing is active.
	 */
	static enableAuditing() {
		if (audit) return; // already enabled
		audit = true; // module-level flag consulted by the write paths in this file
		addDeleteRemoval();
		TableResource.audit = true; // also expose the flag on the class itself
	}
889
+ /**
890
+ * Coerce the id as a string to the correct type for the primary key
891
+ * @param id
892
+ * @returns
893
+ */
894
+ static coerceId(id: string): number | string {
895
+ if (id === '') return null;
896
+ return coerceType(id, primaryKeyAttribute);
897
+ }
898
+
899
	/**
	 * Permanently remove this table: unregister it, delete blob data referenced by
	 * its records, drop all indices and the primary store, and broadcast a
	 * DROP_TABLE schema-change event to other processes.
	 */
	static async dropTable() {
		delete databases[databaseName][tableName];
		// scan every entry so blobs referenced from record values are cleaned up first
		for (const entry of primaryStore.getRange({ versions: true, snapshot: false, lazy: true })) {
			if (entry.metadataFlags & HAS_BLOBS && entry.value) {
				deleteBlobsInObject(entry.value);
			}
		}
		if (databaseName === databasePath) {
			// part of a database
			for (const attribute of attributes) {
				// remove the per-attribute dbi registration and drop its index, if any
				dbisDb.remove(TableResource.tableName + '/' + attribute.name);
				const index = indices[attribute.name];
				index?.drop();
			}
			dbisDb.remove(TableResource.tableName + '/');
			primaryStore.drop();
			await dbisDb.committed;
		} else {
			// legacy table per database
			await primaryStore.close();
			fs.unlinkSync(primaryStore.path);
		}
		signalling.signalSchemaChange(
			new SchemaEventMsg(process.pid, OPERATIONS_ENUM.DROP_TABLE, databaseName, tableName)
		);
	}
925
	/**
	 * This retrieves the data of this resource. By default, with no argument, just return `this`.
	 */
	get(): TableResource<Record> | undefined;
	/**
	 * This retrieves the data of this resource.
	 * @param target - If included, is an identifier/query that specifies the requested target to retrieve and query
	 */
	get(target: RequestTargetOrId): Record | AsyncIterable<Record> | Promise<Record | AsyncIterable<Record>>;
	get(
		target?: RequestTargetOrId
	): TableResource<Record> | undefined | Record | AsyncIterable<Record> | Promise<Record | AsyncIterable<Record>> {
		const constructor: Resource = this.constructor;
		// a bare string target on an instance-loaded resource is a property access
		if (typeof target === 'string' && constructor.loadAsInstance !== false) return this.getProperty(target);
		if (isSearchTarget(target)) {
			// go back to the static search method so it gets a chance to override
			return constructor.search(target, this.getContext());
		}
		// a target with no id and an empty string form is a request to describe the table
		if (target && target.id === undefined && !target.toString()) {
			const description = {
				// basically a describe call
				records: './', // an href to the records themselves
				name: tableName,
				database: databaseName,
				auditSize: auditStore?.getStats().entryCount,
				attributes,
				recordCount: undefined,
				estimatedRecordRange: undefined,
			};
			if (this.getContext()?.includeExpensiveRecordCountEstimates) {
				// record counting can be expensive, so it is opt-in and async
				return TableResource.getRecordCount().then((recordCount) => {
					description.recordCount = recordCount.recordCount;
					description.estimatedRecordRange = recordCount.estimatedRange;
					return description;
				});
			}
			return description;
		}
		// non-instance-loaded path: read the record directly from the store
		if (target !== undefined && constructor.loadAsInstance === false) {
			const context = this.getContext();
			const txn = txnForContext(context);
			const readTxn = txn.getReadTxn();
			if (readTxn?.isDone) {
				throw new Error('You can not read from a transaction that has already been committed/aborted');
			}
			const id = requestTargetToId(target);
			checkValidId(id);
			let allowed = true;
			if (target.checkPermission) {
				// requesting authorization verification
				allowed = this.allowRead(context.user, target, context);
			}
			return promiseNormalize(
				when(
					when(allowed, (allowed: boolean) => {
						if (!allowed) {
							throw new AccessViolation(context.user);
						}
						const ensureLoaded = true;
						return loadLocalRecord(id, context, { transaction: readTxn, ensureLoaded }, false, (entry) => {
							if (context.onlyIfCached) {
								// don't go into the loading from source condition, but HTTP spec says to
								// return 504 (rather than 404) if there is no content and the cache-control header
								// dictates not to go to source
								if (!entry?.value) throw new ServerError('Entry is not cached', 504);
							} else if (ensureLoaded) {
								const loadingFromSource = ensureLoadedFromSource(constructor.source, id, entry, context, this);
								if (loadingFromSource) {
									txn?.disregardReadTxn(); // this could take some time, so don't keep the transaction open if possible
									target.loadedFromSource = true;
									return loadingFromSource.then((entry) => entry?.value);
								}
							}
							return entry?.value;
						});
					}),
					(record) => {
						// apply any select projection or single-property extraction to the loaded record
						const select = target?.select;
						if (select && record != null) {
							const transform = transformForSelect(select, this.constructor);
							return transform(record);
						}
						if (target?.property) {
							return record[target?.property];
						}
						return record;
					}
				),
				target
			);
		}
		if (target?.property) return this.getProperty(target.property);
		if (!constructor.getReturnMutable) {
			// if we are not explicitly using getReturnMutable, return the frozen record
			const record = this.#record;
			const select = target?.select;
			if (select && record != null) {
				const transform = transformForSelect(select, this.constructor);
				return promiseNormalize(transform(record), target);
			}
			return promiseNormalize(record, target);
		}
		// mutable mode: hand back the live instance, unless the record does not exist
		if (this.doesExist() || target?.ensureLoaded === false || this.getContext()?.returnNonexistent) {
			return this;
		}
		return undefined;
	}
1032
	/**
	 * Determine if the user is allowed to get/read data from the current resource.
	 * May rewrite `target.select` in place to restrict the returned attributes to
	 * those the user can read (including recursing into related tables).
	 * Returns true when allowed; a falsy (undefined) result means no read permission.
	 */
	allowRead(user: User, target: RequestTarget, context: Context): boolean {
		const tablePermission = getTablePermissions(user, target);
		if (tablePermission?.read) {
			if (tablePermission.isSuperUser) return true;
			const attribute_permissions = tablePermission.attribute_permissions;
			const select = target?.select;
			if (attribute_permissions?.length > 0 || (hasRelationships && select)) {
				// If attribute permissions are defined, we need to ensure there is a select that only returns the attributes the user has permission to
				// or if there are relationships, we need to ensure that the user has permission to read from the related table
				// Note that if we do not have a select, we do not return any relationships by default.
				if (!target) target = {};
				if (select) {
					const selectArray = Array.isArray(select) ? select : [select];
					const attrsForType = attribute_permissions?.length > 0 && attributesAsObject(attribute_permissions, 'read');
					target.select = selectArray
						.map((property) => {
							// a select entry may be a plain attribute name or a {name, select, ...} object
							const propertyName = property.name || property;
							if (!attrsForType || attrsForType[propertyName]) {
								const relatedTable = propertyResolvers[propertyName]?.definition?.tableClass;
								if (relatedTable) {
									// if there is a related table, we need to ensure the user has permission to read from that table and that attributes are properly restricted
									if (!property.name) property = { name: property };
									if (!property.checkPermission && target.checkPermission)
										property.checkPermission = target.checkPermission;
									// NOTE(review): a disallowed related property is silently dropped from the
									// select (via filter below) rather than denying the whole read — confirm intended
									if (!relatedTable.prototype.allowRead.call(null, user, property, context)) return false;
									if (!property.select) return property.name; // no select was applied, just return the name
								}
								return property;
							}
						})
						.filter(Boolean);
				} else {
					// no select given: synthesize one from the readable, non-relationship attributes
					target.select = attribute_permissions
						.filter((attribute) => attribute.read && !propertyResolvers[attribute.attribute_name])
						.map((attribute) => attribute.attribute_name);
				}
				return true;
			} else {
				return true;
			}
		}
		// implicit undefined (falsy) when the user has no read permission on this table
	}
1077
+
1078
	/**
	 * Determine if the user is allowed to update data from the current resource.
	 * When per-attribute permissions apply, this also mutates `updatedData` to carry
	 * forward values of attributes the user may not update, so a full put does not
	 * silently erase them. A falsy (undefined) result means no update permission.
	 */
	// @ts-expect-error Tables only allow synchronous allowUpdate checks.
	// eslint-disable-next-line no-unused-vars
	allowUpdate(user: User, updatedData: Record, context: Context): boolean {
		const tablePermission = getTablePermissions(user);
		if (tablePermission?.update) {
			const attribute_permissions = tablePermission.attribute_permissions;
			if (attribute_permissions?.length > 0) {
				// if attribute permissions are defined, we need to ensure there is a select that only returns the attributes the user has permission to
				const attrsForType = attributesAsObject(attribute_permissions, 'update');
				for (const key in updatedData) {
					// any write to a non-updatable attribute denies the whole update
					if (!attrsForType[key]) return false;
				}
				// if this is a full put operation that removes missing properties, we don't want to remove properties
				// that the user doesn't have permission to remove
				for (const permission of attribute_permissions) {
					const key = permission.attribute_name;
					if (!permission.update && !(key in updatedData)) {
						updatedData[key] = this.getProperty(key);
					}
				}
			}
			return checkContextPermissions(this.getContext());
		}
		// implicit undefined (falsy) when the user has no update permission on this table
	}
1105
+
1106
+ /**
1107
+ * Determine if the user is allowed to create new data in the current resource
1108
+ */
1109
+ // @ts-expect-error Tables only allow synchronous allowCreate checks.
1110
+ allowCreate(user: User, newData: Record, context: Context): boolean {
1111
+ if (this.isCollection) {
1112
+ const tablePermission = getTablePermissions(user);
1113
+ if (tablePermission?.insert) {
1114
+ const attribute_permissions = tablePermission.attribute_permissions;
1115
+ if (attribute_permissions?.length > 0) {
1116
+ // if attribute permissions are defined, we need to ensure there is a select that only returns the attributes the user has permission to
1117
+ const attrsForType = attributesAsObject(attribute_permissions, 'insert');
1118
+ for (const key in newData) {
1119
+ if (!attrsForType[key]) return false;
1120
+ }
1121
+ return checkContextPermissions(this.getContext());
1122
+ } else {
1123
+ return checkContextPermissions(this.getContext());
1124
+ }
1125
+ }
1126
+ } else {
1127
+ // creating *within* a record resource just means we are adding some data to a current record, which is
1128
+ // an update to the record, it is not an insert of a new record into the table, so not a table create operation
1129
+ // so does not use table insert permissions
1130
+ return this.allowUpdate(user, newData, context);
1131
+ }
1132
+ }
1133
+
1134
+ /**
1135
+ * Determine if the user is allowed to delete from the current resource
1136
+ */
1137
+ allowDelete(user: User, target: RequestTarget, context: Context): boolean {
1138
+ const tablePermission = getTablePermissions(user, target);
1139
+ return !!tablePermission?.delete && checkContextPermissions(context);
1140
+ }
1141
+
1142
	/**
	 * Start updating a record. The returned resource will record changes which are written
	 * once the corresponding transaction is committed. These changes can (eventually) include CRDT type operations.
	 * Supports both the legacy (updates-first) and the standard (target-first) argument order.
	 */
	update(updates: Record & RecordObject, fullUpdate: true);
	update(updates: Partial<Record & RecordObject>, target?: RequestTarget);
	update(target: RequestTarget, updates?: any);
	update(target: any, updates?: any) {
		let id: Id;
		// determine if it is a legacy call
		// legacy shape: update(updates, fullUpdate?) — detected by a boolean second arg,
		// or a lone object first arg that is not a URLSearchParams target
		const directInstance =
			typeof updates === 'boolean' ||
			(updates === undefined &&
				(target == undefined || (typeof target === 'object' && !(target instanceof URLSearchParams))));
		let fullUpdate: boolean = false;
		if (directInstance) {
			// legacy, shift the arguments
			fullUpdate = updates;
			updates = target;
			id = this.getId();
		} else {
			id = requestTargetToId(target);
		}

		const context = this.getContext();
		const envTxn = txnForContext(context);
		if (!envTxn) throw new Error('Can not update a table resource outside of a transaction');
		// record in the list of updating records so it can be written to the database when we commit
		if (updates === false) {
			// TODO: Remove from transaction
			return this;
		}
		if (typeof updates === 'object' && updates) {
			if (fullUpdate) {
				// legacy full update where we need to update the entire record, but the instance needs to continue
				// track any further changes
				if (Object.isFrozen(updates)) updates = { ...updates };
				this.#record = {}; // clear out the existing record
				this.#changes = updates;
			} else if (directInstance) {
				// incremental update with legacy arguments
				// merge into any changes already accumulated on this instance
				const ownData = this.#changes;
				if (ownData) updates = Object.assign(ownData, updates);
				this.#changes = updates;
			} else {
				// standard path, where we retrieve the references record and return an instance, initialized with any
				// updates that were passed into this method
				let allowed = true;
				if (target == undefined) throw new TypeError('Can not put a record without a target');
				if (target.checkPermission) {
					// requesting authorization verification
					allowed = this.allowUpdate(context.user, updates, context);
				}
				return when(allowed, (allowed) => {
					if (!allowed) {
						throw new AccessViolation(context.user);
					}
					let loading: Promise<any>;
					if (!this.#entry && this.constructor.loadAsInstance === false) {
						// load the record if it hasn't been done yet
						loading = this._loadRecord(target, context, { ensureLoaded: true, async: true }) as Promise<any>;
					}
					return when(loading, () => {
						this.#changes = updates;
						this._writeUpdate(id, this.#changes, false);
						return this;
					});
				});
			}
		}
		// legacy paths fall through here: queue the accumulated changes for commit
		this._writeUpdate(id, this.#changes, fullUpdate);
		return this;
	}
1215
+
1216
+ /**
1217
+ * Save any changes into this instance to the current transaction
1218
+ */
1219
+ save() {
1220
+ if (this.#savingOperation) {
1221
+ const transaction = txnForContext(this.getContext());
1222
+ if (transaction.save) {
1223
+ try {
1224
+ return transaction.save(this.#savingOperation);
1225
+ } finally {
1226
+ this.#savingOperation = null;
1227
+ }
1228
+ }
1229
+ }
1230
+ }
1231
+
1232
+ addTo(property, value) {
1233
+ if (typeof value === 'number' || typeof value === 'bigint') {
1234
+ if (this.#savingOperation?.fullUpdate) this.set(property, (+this.getProperty(property) || 0) + value);
1235
+ else {
1236
+ if (!this.#savingOperation) this.update();
1237
+ this.set(property, new Addition(value));
1238
+ }
1239
+ } else {
1240
+ throw new Error('Can not add a non-numeric value');
1241
+ }
1242
+ }
1243
+ subtractFrom(property, value) {
1244
+ if (typeof value === 'number') {
1245
+ return this.addTo(property, -value);
1246
+ } else {
1247
+ throw new Error('Can not subtract a non-numeric value');
1248
+ }
1249
+ }
1250
	/**
	 * Return the store entry backing this resource (carries version/metadata), if loaded.
	 */
	getMetadata() {
		return this.#entry;
	}
1253
	/**
	 * Return the underlying record data currently held by this resource instance.
	 */
	getRecord() {
		return this.#record;
	}
1256
	/**
	 * Return the uncommitted changes accumulated on this instance, if any.
	 */
	getChanges() {
		return this.#changes;
	}
1259
	/**
	 * Internal: replace the pending change set tracked by this instance.
	 */
	_setChanges(changes) {
		this.#changes = changes;
	}
1262
	/**
	 * Replace the record data held by this resource instance.
	 */
	setRecord(record) {
		this.#record = record;
	}
1265
+
1266
+ invalidate(target: RequestTargetOrId) {
1267
+ let allowed = true;
1268
+ const context = this.getContext();
1269
+ if ((target as RequestTarget)?.checkPermission) {
1270
+ // requesting authorization verification
1271
+ allowed = this.allowDelete(context.user, target as RequestTarget, context);
1272
+ }
1273
+ return when(allowed, (allowed: boolean) => {
1274
+ if (!allowed) {
1275
+ throw new AccessViolation(context.user);
1276
+ }
1277
+ this._writeInvalidate(target ? requestTargetToId(target) : this.getId());
1278
+ });
1279
+ }
1280
	/**
	 * Queue a write on the current transaction that marks the record as invalidated.
	 * At commit, indexed attribute values are preserved in a partial record so
	 * searches keep working, and the store entry is rewritten with the INVALIDATED flag.
	 * @param id - primary key of the record to invalidate
	 * @param partialRecord - optional partial record to retain; index values are filled in
	 * @param options - optional replication metadata (residencyId, nodeId, viaNodeId)
	 */
	_writeInvalidate(id: Id, partialRecord?: any, options?: any) {
		const context = this.getContext();
		checkValidId(id);
		const transaction = txnForContext(this.getContext());
		transaction.addWrite({
			key: id,
			store: primaryStore,
			invalidated: true,
			entry: this.#entry,
			beforeIntermediate: preCommitBlobsForRecordBefore(partialRecord),
			commit: (txnTime, existingEntry, _retry, transaction: any) => {
				// skip if the store already holds a version at-or-after this transaction's time
				if (precedesExistingVersion(txnTime, existingEntry, options?.nodeId) <= 0) return;
				partialRecord ??= null;
				for (const name in indices) {
					if (!partialRecord) partialRecord = {};
					// if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
					if (partialRecord[name] === undefined) {
						partialRecord[name] = this.getProperty(name);
					}
				}
				logger.trace?.(`Invalidating entry in ${tableName} id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);
				updateRecord(
					id,
					partialRecord,
					existingEntry,
					txnTime,
					INVALIDATED,
					audit,
					{
						user: context?.user,
						residencyId: options?.residencyId,
						nodeId: options?.nodeId,
						viaNodeId: options?.viaNodeId,
						transaction,
						tableToTrack: tableName,
					},
					'invalidate'
				);
				// TODO: recordDeletion?
			},
		});
	}
1322
	/**
	 * Queue a write that relocates a record to a new residency. If this node is no
	 * longer in the record's residency, the full value is replaced by a partial,
	 * INVALIDATED record that retains only indexed attributes; otherwise the
	 * existing value is kept. A source-level relocate hook runs first when present.
	 * @param id - primary key of the record to relocate
	 * @param options - relocation metadata (residencyId required; nodeId, viaNodeId, expiresAt)
	 */
	_writeRelocate(id: Id, options: any) {
		const context = this.getContext();
		checkValidId(id);
		const transaction = txnForContext(this.getContext());
		transaction.addWrite({
			key: id,
			store: primaryStore,
			invalidated: true,
			entry: this.#entry,
			before:
				// give the canonical source a chance to perform its own relocation first,
				// unless this write itself originated from the source
				this.constructor.source?.relocate && !context?.source
					? this.constructor.source.relocate.bind(this.constructor.source, id, undefined, context)
					: undefined,
			commit: (txnTime, existingEntry, _retry, transaction: any) => {
				// skip if the store already holds a version at-or-after this transaction's time
				if (precedesExistingVersion(txnTime, existingEntry, options?.nodeId) <= 0) return;
				const residency = TableResource.getResidencyRecord(options.residencyId);
				let metadata = 0;
				let newRecord = null;
				const existingRecord = existingEntry?.value;
				if (residency && !residency.includes(server.hostname)) {
					// this node is leaving the residency: keep only indexed attributes
					for (const name in indices) {
						if (!newRecord) newRecord = {};
						// if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
						newRecord[name] = existingRecord[name];
					}
					metadata = INVALIDATED;
				} else {
					newRecord = existingRecord;
				}

				logger.trace?.(`Relocating entry id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);

				updateRecord(
					id,
					newRecord,
					existingEntry,
					txnTime,
					metadata,
					audit,
					{
						user: context.user,
						residencyId: options.residencyId,
						nodeId: options.nodeId,
						viaNodeId: options?.viaNodeId,
						expiresAt: options.expiresAt,
						transaction,
					},
					'relocate',
					false,
					null
				);
			},
		});
	}
1376
+
1377
	/**
	 * Record the relocation of an entry (when a record is moved to a different node), return true if it is now located locally.
	 * Recomputes the residency for the downloaded value; when this node is part of the
	 * new residency the full value is stored locally (without changing the version).
	 * @param existingEntry - the local store entry being relocated (key/version/residencyId read)
	 * @param entry - the downloaded entry whose value/expiresAt are stored locally
	 */
	static _recordRelocate(existingEntry, entry): boolean {
		if (this.getResidencyById) return false; // we don't want to relocate entries that are located by id
		const context = {
			previousResidency: this.getResidencyRecord(existingEntry.residencyId),
			isRelocation: true,
		};
		const residency = residencyFromFunction(this.getResidency(entry.value, context));
		let residencyId: number;
		if (residency) {
			if (!residency.includes(server.hostname)) return false; // if we aren't in the residency, we don't need to do anything, we are not responsible for storing this record
			residencyId = getResidencyId(residency);
		}
		const metadata = 0;
		logger.debug?.('Performing a relocate of an entry', existingEntry.key, entry.value, residency);
		updateRecord(
			existingEntry.key,
			entry.value, // store the record we downloaded
			existingEntry,
			existingEntry.version, // version number should not change
			metadata,
			true,
			{ residencyId, expiresAt: entry.expiresAt, transaction: txnForContext(context).transaction },
			'relocate',
			false,
			null // the audit record value should be empty since there are no changes to the actual data
		);
		return true;
	}
1410
	/**
	 * Evicting a record will remove it from a caching table. This is not considered a canonical data change, and it is assumed that retrieving this record from the source will still yield the same record, this is only removing the local copy of the record.
	 * The eviction only proceeds if the entry's version still matches `existingVersion`.
	 * @param id - primary key of the record to evict
	 * @param existingRecord - the record value as known at eviction time (used for index cleanup)
	 * @param existingVersion - the version guard; a mismatch aborts the eviction
	 */
	static evict(id, existingRecord, existingVersion) {
		let entry;
		if (hasSourceGet || audit) {
			if (!existingRecord) return;
			entry = primaryStore.getEntry(id);
			if (!entry || !existingRecord) return;
			// the record changed since eviction was scheduled; do not evict newer data
			if (entry.version !== existingVersion) return;
		}
		if (hasSourceGet) {
			// if there is a resolution in-progress, abandon the eviction
			if (primaryStore.hasLock(id, entry.version)) return;
			// if there is a source, we are not "deleting" the record, just removing our local copy, but preserving what we need for indexing
			let partialRecord;
			for (const name in indices) {
				// if there are any indices, we need to preserve a partial evicted record to ensure we can still do searches
				if (!partialRecord) partialRecord = {};
				partialRecord[name] = existingRecord[name];
			}
			// if we are evicting and not deleting, need to preserve the partial record
			if (partialRecord) {
				// treat this as a record resolution (so previous version is checked) with no audit record
				return updateRecord(id, partialRecord, entry, existingVersion, EVICTED, null, null, null, true);
			}
		}
		// clear index entries for the record, guarded by the version check when supported
		primaryStore.ifVersion?.(id, existingVersion, () => {
			updateIndices(id, existingRecord, null);
		});
		// evictions never go in the audit log, so we can not record a deletion entry for the eviction
		// as there is no corresponding audit entry and it would never get cleaned up. So we must simply
		// removed the entry entirely
		return removeEntry(primaryStore, entry ?? primaryStore.getEntry(id), existingVersion);
	}
1445
	/**
	 * This is intended to acquire a lock on a record from the whole cluster.
	 * @throws always - cluster-wide record locking is not yet implemented
	 */
	lock() {
		throw new Error('Not yet implemented');
	}
1451
+ static operation(operation, context) {
1452
+ operation.table ||= tableName;
1453
+ operation.schema ||= databaseName;
1454
+ return global.operation(operation, context);
1455
+ }
1456
+
1457
	/**
	 * Store the provided record data into the current resource. This is not written
	 * until the corresponding transaction is committed.
	 * Supports the legacy (record-first) argument order; arrays are treated as
	 * multiple full-record writes.
	 */
	put(
		target: RequestTarget,
		record: Record & RecordObject
	): void | (Record & Partial<RecordObject>) | Promise<void | (Record & Partial<RecordObject>)> {
		if (record === undefined || record instanceof URLSearchParams) {
			// legacy argument position, shift the arguments and go through the update method for back-compat
			this.update(target, true);
			return this.save();
		} else {
			let allowed = true;
			if (target == undefined) throw new TypeError('Can not put a record without a target');
			const context = this.getContext();
			if (target.checkPermission) {
				// requesting authorization verification
				allowed = this.allowUpdate(context.user, record, context);
			}
			return when(allowed, (allowed) => {
				if (!allowed) {
					throw new AccessViolation(context.user);
				}
				// standard path, handle arrays as multiple updates, and otherwise do a direct update
				if (Array.isArray(record)) {
					return Promise.all(
						record.map((element) => {
							// each array element supplies its own primary key
							// NOTE(review): elements missing the primary key yield an undefined id — confirm
							// checkValidId in _writeUpdate rejects that case
							const id = element[primaryKey];
							this._writeUpdate(id, element, true);
							return this.save();
						})
					);
				} else {
					const id = requestTargetToId(target);
					this._writeUpdate(id, record, true);
					return this.save();
				}
			});
		}
		// always return undefined
	}
1499
+
1500
	/**
	 * Create a new record in this table. If no id is supplied (via target or the
	 * record's primary key), a new id is generated and assigned onto the record;
	 * otherwise an existing record with the same id is rejected with a 409.
	 * Supports the single-argument form create(record).
	 * @throws TypeError when record is missing or not a plain object
	 * @throws ClientError (409) when a record with the given id already exists
	 */
	create(
		target: RequestTarget,
		record: Partial<Record & RecordObject>
	): void | (Record & Partial<RecordObject>) | Promise<Record & Partial<RecordObject>> {
		let allowed = true;
		const context = this.getContext();
		if (!record && !(target instanceof URLSearchParams)) {
			// single argument, shift arguments
			record = target;
			target = undefined;
		}
		if (!record || typeof record !== 'object' || Array.isArray(record)) {
			throw new TypeError('Can not create a record without an object');
		}
		if (target?.checkPermission) {
			// requesting authorization verification
			allowed = this.allowCreate(context.user, record, context);
		}
		return when(allowed, (allowed) => {
			if (!allowed) {
				throw new AccessViolation(context.user);
			}
			let id = requestTargetToId(target) ?? record[primaryKey];
			if (id === undefined) {
				id = this.constructor.getNewId();
				record[primaryKey] = id; // make this immediately available
			} else {
				// NOTE(review): existence check happens before the transactional write commits —
				// confirm concurrent creates of the same id are resolved at commit time
				const existing = primaryStore.getSync(id);
				if (existing) {
					throw new ClientError('Record already exists', 409);
				}
			}
			this._writeUpdate(id, record, true);
			return record;
		});
	}
1536
+
1537
+ // @ts-expect-error The implementation handles the possibility of target and recordUpdate being swapped
1538
+ patch(
1539
+ target: RequestTarget,
1540
+ recordUpdate: Partial<Record & RecordObject>
1541
+ ): void | (Record & Partial<RecordObject>) | Promise<void | (Record & Partial<RecordObject>)> {
1542
+ if (recordUpdate === undefined || recordUpdate instanceof URLSearchParams) {
1543
+ // legacy argument position, shift the arguments and go through the update method for back-compat
1544
+ this.update(target, false);
1545
+ return this.save();
1546
+ } else {
1547
+ // standard path, ensure there is no return object
1548
+ return when(this.update(target, recordUpdate), () => {
1549
+ return when(this.save(), () => undefined); // wait for the update and save, but return undefined
1550
+ });
1551
+ }
1552
+ }
1553
+ // perform the actual write operation; this may come from a user request to write (put, post, etc.), or
1554
+ // a notification that a write has already occurred in the canonical data source, we need to update our
1555
+ // local copy
1556
+ _writeUpdate(id: Id, recordUpdate: any, fullUpdate: boolean, options?: any) {
1557
+ const context = this.getContext();
1558
+ const transaction = txnForContext(context);
1559
+
1560
+ checkValidId(id);
1561
+ const entry = this.#entry ?? primaryStore.getEntry(id, { transaction: transaction.getReadTxn() });
1562
+ const writeToSource = () => {
1563
+ if (!this.constructor.source || context?.source) return;
1564
+ if (fullUpdate) {
1565
+ // full update is a put
1566
+ if (this.constructor.source.put) {
1567
+ return () => this.constructor.source.put(id, recordUpdate, context);
1568
+ }
1569
+ } else {
1570
+ // incremental update
1571
+ if (this.constructor.source.patch) {
1572
+ return () => this.constructor.source.patch(id, recordUpdate, context);
1573
+ } else if (this.constructor.source.put) {
1574
+ // if this is incremental, but only have put, we can use that by generating the full record (at least the expected one)
1575
+ return () => this.constructor.source.put(id, updateAndFreeze(this), context);
1576
+ }
1577
+ }
1578
+ };
1579
+
1580
+ const write = {
1581
+ key: id,
1582
+ store: primaryStore,
1583
+ entry,
1584
+ nodeName: context?.nodeName,
1585
+ fullUpdate,
1586
+ deferSave: true,
1587
+ validate: (txnTime) => {
1588
+ if (!recordUpdate) recordUpdate = this.#changes;
1589
+ if (fullUpdate || (recordUpdate && hasChanges(this.#changes === recordUpdate ? this : recordUpdate))) {
1590
+ if (!context?.source) {
1591
+ transaction.checkOverloaded();
1592
+ this.validate(recordUpdate, !fullUpdate);
1593
+ if (updatedTimeProperty) {
1594
+ recordUpdate[updatedTimeProperty.name] =
1595
+ updatedTimeProperty.type === 'Date'
1596
+ ? new Date(txnTime)
1597
+ : updatedTimeProperty.type === 'String'
1598
+ ? new Date(txnTime).toISOString()
1599
+ : txnTime;
1600
+ }
1601
+ if (createdTimeProperty) {
1602
+ if (entry?.value) {
1603
+ if (fullUpdate || recordUpdate[createdTimeProperty.name]) {
1604
+ // make sure to retain original created time
1605
+ recordUpdate[createdTimeProperty.name] = entry?.value[createdTimeProperty.name];
1606
+ }
1607
+ } else {
1608
+ // new entry, set created time
1609
+ recordUpdate[createdTimeProperty.name] =
1610
+ createdTimeProperty.type === 'Date'
1611
+ ? new Date(txnTime)
1612
+ : createdTimeProperty.type === 'String'
1613
+ ? new Date(txnTime).toISOString()
1614
+ : txnTime;
1615
+ }
1616
+ }
1617
+ if (primaryKey && recordUpdate[primaryKey] !== id && (fullUpdate || primaryKey in recordUpdate)) {
1618
+ // ensure that the primary key is correct, if there is supposed to be one
1619
+ recordUpdate[primaryKey] = id;
1620
+ }
1621
+ if (fullUpdate) {
1622
+ recordUpdate = updateAndFreeze(recordUpdate); // this flatten and freeze the record
1623
+ }
1624
+ // TODO: else freeze after we have applied the changes
1625
+ }
1626
+ } else {
1627
+ transaction.removeWrite?.(write);
1628
+ return false;
1629
+ }
1630
+ },
1631
+ before: writeToSource(),
1632
+ beforeIntermediate: preCommitBlobsForRecordBefore(recordUpdate),
1633
+ commit: (txnTime: number, existingEntry: Entry, retry: boolean, transaction: any) => {
1634
+ if (retry) {
1635
+ if (context && existingEntry?.version > (context.lastModified || 0))
1636
+ context.lastModified = existingEntry.version;
1637
+ this.#entry = existingEntry;
1638
+ if (existingEntry?.value && existingEntry.value.getRecord)
1639
+ throw new Error('Can not assign a record to a record, check for circular references');
1640
+ if (!fullUpdate) this.#record = existingEntry?.value ?? null;
1641
+ }
1642
+ this.#changes = undefined; // once we are committing to write this update, we no longer should track the changes, and want to avoid double application (of any CRDTs)
1643
+ this.#version = txnTime;
1644
+ const existingRecord = existingEntry?.value;
1645
+ let incrementalUpdateToApply: boolean;
1646
+
1647
+ this.#savingOperation = null;
1648
+ let omitLocalRecord = false;
1649
+ // we use optimistic locking to only commit if the existing record state still holds true.
1650
+ // this is superior to using an async transaction since it doesn't require JS execution
1651
+ // during the write transaction.
1652
+ let precedesExisting = precedesExistingVersion(txnTime, existingEntry, options?.nodeId);
1653
+ let auditRecordToStore: any; // what to store in the audit record. For a full update, this can be left undefined in which case it is the same as full record update and optimized to use a binary copy
1654
+ const type = fullUpdate ? 'put' : 'patch';
1655
+ let residencyId: number | undefined;
1656
+ if (options?.residencyId != undefined) residencyId = options.residencyId;
1657
+ const expiresAt = context?.expiresAt ?? (expirationMs ? expirationMs + Date.now() : -1);
1658
+ let additionalAuditRefs: Array<{ version: number; nodeId: number }> = []; // track additional audit refs to store
1659
+
1660
+ if (precedesExisting <= 0) {
1661
+ // This block is to handle the case of saving an update where the transaction timestamp is older than the
1662
+ // existing timestamp, which means that we received updates out of order, and must resequence the application
1663
+ // of the updates to the record to ensure consistency across the cluster
1664
+ // TODO: can the previous version be older, but even more previous version be newer?
1665
+ if (audit) {
1666
+ // incremental CRDT updates are only available with audit logging on
1667
+ let localTime = existingEntry.localTime;
1668
+ let auditedVersion = existingEntry.version;
1669
+ logger.debug?.(
1670
+ 'Applying CRDT update to record with id: ',
1671
+ id,
1672
+ 'txn time',
1673
+ new Date(txnTime),
1674
+ 'applying later update from:',
1675
+ new Date(auditedVersion),
1676
+ 'local recorded time',
1677
+ new Date(localTime)
1678
+ );
1679
+
1680
+ let nodeId = existingEntry.nodeId;
1681
+ const succeedingUpdates = []; // record the "future" updates, as we need to apply the updates in reverse order
1682
+ const auditRefsToVisit: Array<{ localTime: number; nodeId: number }> = existingEntry.additionalAuditRefs
1683
+ ? existingEntry.additionalAuditRefs.map((ref) => ({ localTime: ref.version, nodeId: ref.nodeId }))
1684
+ : [];
1685
+
1686
+ // Collect any existing audit refs that should be preserved (those older than current transaction)
1687
+ if (existingEntry.additionalAuditRefs) {
1688
+ for (const ref of existingEntry.additionalAuditRefs) {
1689
+ if (ref.version <= txnTime) {
1690
+ additionalAuditRefs.push(ref);
1691
+ }
1692
+ }
1693
+ }
1694
+ let addedAuditRef = false;
1695
+ let nextRef: { localTime: number; nodeId: number };
1696
+ do {
1697
+ while (localTime > txnTime || (auditedVersion >= txnTime && localTime > 0)) {
1698
+ const auditRecord = auditStore.get(localTime, tableId, id, nodeId);
1699
+ if (!auditRecord) break;
1700
+ auditedVersion = auditRecord.version;
1701
+ if (auditedVersion >= txnTime) {
1702
+ if (auditedVersion === txnTime) {
1703
+ precedesExisting = precedesExistingVersion(
1704
+ txnTime,
1705
+ { version: auditedVersion, localTime: localTime, key: id, nodeId: auditRecord.nodeId },
1706
+ options?.nodeId
1707
+ );
1708
+ if (precedesExisting === 0) {
1709
+ logger.debug?.(
1710
+ 'The transaction time is equal to the existing version, treating as duplicate',
1711
+ id
1712
+ );
1713
+ return; // treat a tie as a duplicate and drop it
1714
+ }
1715
+ if (precedesExisting > 0) {
1716
+ // if the existing version is older, we can skip this update
1717
+ localTime = auditRecord.previousVersion;
1718
+ nodeId = auditRecord.previousNodeId;
1719
+ continue;
1720
+ }
1721
+ }
1722
+ if (auditRecord.type === 'patch') {
1723
+ logger.debug?.('out of order patch will be applied', id, auditRecord);
1724
+ // record patches so we can reply in order
1725
+ succeedingUpdates.push(auditRecord);
1726
+ auditRecordToStore = recordUpdate; // use the original update for the audit record
1727
+ } else if (auditRecord.type === 'put' || auditRecord.type === 'delete') {
1728
+ // There is newer full record update, so this incremental update is completely superseded
1729
+ return;
1730
+ }
1731
+ }
1732
+ if (!addedAuditRef && isRocksDB) {
1733
+ addedAuditRef = true;
1734
+ // Add a reference to this older audit record if we had out-of-order writes
1735
+ additionalAuditRefs.push({ version: txnTime, nodeId: options?.nodeId });
1736
+ logger.debug?.('Adding additional audit ref for out-of-order write', {
1737
+ version: txnTime,
1738
+ nodeId: options?.nodeId,
1739
+ });
1740
+ }
1741
+ // Collect any additional audit refs from this audit record to traverse other branches
1742
+ if (auditRecord.previousAdditionalAuditRefs) {
1743
+ for (const ref of auditRecord.previousAdditionalAuditRefs) {
1744
+ auditRefsToVisit.push({ localTime: ref.version, nodeId: ref.nodeId });
1745
+ logger.debug?.('Adding audit ref from audit record to visit queue', {
1746
+ version: ref.version,
1747
+ nodeId: ref.nodeId,
1748
+ });
1749
+ }
1750
+ }
1751
+
1752
+ localTime = auditRecord.previousVersion;
1753
+ nodeId = auditRecord.previousNodeId;
1754
+ }
1755
+ // Check if we need to scan additional audit refs from this record
1756
+ nextRef = auditRefsToVisit.shift();
1757
+ if (nextRef) {
1758
+ localTime = auditedVersion = nextRef.localTime;
1759
+ nodeId = nextRef.nodeId;
1760
+ logger.debug?.('Following additional audit ref to continue scanning', { localTime, nodeId });
1761
+ }
1762
+ } while (nextRef);
1763
+ if (!localTime) {
1764
+ // if we reached the end of the audit trail, we can just apply the update
1765
+ logger.debug?.(
1766
+ 'No further audit history, applying incremental updates based on available history',
1767
+ id,
1768
+ 'existing version preserved',
1769
+ existingEntry
1770
+ );
1771
+ }
1772
+ succeedingUpdates.sort((a, b) => a.version - b.version); // order the patches
1773
+ for (const auditRecord of succeedingUpdates) {
1774
+ const newerUpdate = auditRecord.getValue(primaryStore);
1775
+ logger.debug?.(
1776
+ 'Rebuilding update with future patch:',
1777
+ new Date(auditRecord.version),
1778
+ newerUpdate,
1779
+ auditRecord
1780
+ );
1781
+ incrementalUpdateToApply = rebuildUpdateBefore(
1782
+ incrementalUpdateToApply ?? recordUpdate,
1783
+ newerUpdate,
1784
+ fullUpdate
1785
+ );
1786
+ if (!incrementalUpdateToApply) return writeCommit(false); // if all changes are overwritten, nothing left to do
1787
+ }
1788
+ } else if (fullUpdate) {
1789
+ // if no audit, we can't accurately do incremental updates, so we just assume the last update
1790
+ // was the same type. Assuming a full update this record update loses and there are no changes
1791
+ return writeCommit(false);
1792
+ } else {
1793
+ // no audit, assume updates are overwritten except CRDT operations or properties that didn't exist
1794
+ incrementalUpdateToApply = rebuildUpdateBefore(
1795
+ incrementalUpdateToApply ?? recordUpdate,
1796
+ existingRecord,
1797
+ fullUpdate
1798
+ );
1799
+ logger.debug?.('Rebuilding update without audit:', incrementalUpdateToApply);
1800
+ }
1801
+ logger.trace?.('Rebuilt record to save:', incrementalUpdateToApply, ' is full update:', fullUpdate);
1802
+ }
1803
+ let recordToStore: any;
1804
+ if (fullUpdate && !incrementalUpdateToApply) recordToStore = recordUpdate;
1805
+ else {
1806
+ if (this.constructor.loadAsInstance === false)
1807
+ recordToStore = updateAndFreeze(existingRecord, incrementalUpdateToApply ?? recordUpdate);
1808
+ else {
1809
+ this.#record = existingRecord;
1810
+ recordToStore = updateAndFreeze(this, incrementalUpdateToApply ?? recordUpdate);
1811
+ }
1812
+ }
1813
+ this.#record = recordToStore;
1814
+ if (recordToStore && recordToStore.getRecord)
1815
+ throw new Error('Can not assign a record to a record, check for circular references');
1816
+ if (residencyId == undefined) {
1817
+ if (entry?.residencyId) context.previousResidency = TableResource.getResidencyRecord(entry.residencyId);
1818
+ const residency = residencyFromFunction(TableResource.getResidency(recordToStore, context));
1819
+ if (residency) {
1820
+ if (!residency.includes(server.hostname)) {
1821
+ // if we aren't in the residency list, specify that our local record should be omitted or be partial
1822
+ auditRecordToStore ??= recordToStore;
1823
+ omitLocalRecord = true;
1824
+ if (TableResource.getResidencyById) {
1825
+ // complete omission of the record that doesn't belong here
1826
+ recordToStore = undefined;
1827
+ } else {
1828
+ // store the partial record
1829
+ recordToStore = null;
1830
+ for (const name in indices) {
1831
+ if (!recordToStore) {
1832
+ recordToStore = {};
1833
+ }
1834
+ // if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
1835
+ recordToStore[name] = auditRecordToStore[name];
1836
+ }
1837
+ }
1838
+ }
1839
+ }
1840
+ residencyId = getResidencyId(residency);
1841
+ }
1842
+ if (!fullUpdate) {
1843
+ // we use our own data as the basis for the audit record, which will include information about the incremental updates, even if it was overwritten by CRDT resolution
1844
+ auditRecordToStore = recordUpdate;
1845
+ }
1846
+ logger.trace?.(
1847
+ `Saving record with id: ${id}, timestamp: ${new Date(txnTime).toISOString()}${
1848
+ expiresAt ? ', expires at: ' + new Date(expiresAt).toISOString() : ''
1849
+ }${
1850
+ existingEntry?.version
1851
+ ? ', replaces entry from: ' + new Date(existingEntry.version).toISOString()
1852
+ : ', new entry'
1853
+ }`,
1854
+ (() => {
1855
+ try {
1856
+ return JSON.stringify(recordToStore).slice(0, 100);
1857
+ } catch {
1858
+ return '';
1859
+ }
1860
+ })()
1861
+ );
1862
+ updateIndices(id, existingRecord, recordToStore, transaction && { transaction });
1863
+
1864
+ writeCommit(true);
1865
+ if (context.expiresAt) scheduleCleanup();
1866
+ function writeCommit(storeRecord: boolean) {
1867
+ // we need to write the commit. if storeRecord then we need to store the record, otherwise we just need to store the audit record
1868
+ updateRecord(
1869
+ id,
1870
+ storeRecord ? recordToStore : undefined,
1871
+ storeRecord ? existingEntry : { ...existingEntry, value: undefined },
1872
+ isRocksDB
1873
+ ? Math.max(txnTime, existingEntry?.version ?? 0) // RocksDB uses a singular version/local time, so it must be most recent
1874
+ : txnTime,
1875
+ omitLocalRecord ? INVALIDATED : 0,
1876
+ audit,
1877
+ {
1878
+ omitLocalRecord,
1879
+ user: context?.user,
1880
+ residencyId,
1881
+ expiresAt,
1882
+ nodeId: options?.nodeId,
1883
+ viaNodeId: options?.viaNodeId,
1884
+ originatingOperation: context?.originatingOperation,
1885
+ transaction,
1886
+ tableToTrack: databaseName === 'system' ? null : options?.replay ? null : tableName, // don't track analytics on system tables
1887
+ additionalAuditRefs: additionalAuditRefs.length > 0 ? additionalAuditRefs : undefined,
1888
+ },
1889
+ type,
1890
+ false,
1891
+ storeRecord ? auditRecordToStore : (auditRecordToStore ?? recordUpdate)
1892
+ );
1893
+ }
1894
+ },
1895
+ };
1896
+ this.#savingOperation = write;
1897
+ return transaction.addWrite(write);
1898
+ }
1899
+
1900
/**
 * Deletes one record, or every record matching a search target.
 * - Search target: iterates matches (selecting only the primary key) and queues a delete for each.
 * - Id/request target: optionally verifies delete permission, then queues the delete.
 * - No target: deletes the record this resource instance refers to.
 * @param target a search target, a request target/id, or undefined for the current record
 * @returns true when a delete was queued (for the no-target case, whether a record was present)
 */
async delete(target: RequestTargetOrId): Promise<boolean> {
	// A search target means "delete everything that matches the query"
	if (isSearchTarget(target)) {
		target.select = ['$id']; // only the primary key of each match is needed
		for await (const match of this.search(target)) this._writeDelete(match.$id);
		return true;
	}
	if (!target) {
		// No target: delete the record this resource instance points at
		this._writeDelete(this.getId());
		return Boolean(this.#record);
	}
	const context = this.getContext();
	// Only consult allowDelete when authorization verification was requested;
	// the result may be a boolean or a promise, which `when` handles uniformly
	const permission = target.checkPermission ? this.allowDelete(context.user, target, context) : true;
	return when(permission, (isAllowed: boolean) => {
		if (!isAllowed) {
			throw new AccessViolation(context.user);
		}
		this._writeDelete(requestTargetToId(target));
		return true;
	});
}
1927
/**
 * Queues a delete of the record with the given id on the current context's transaction.
 * The actual removal happens in the commit callback, where version-precedence checks
 * decide whether this delete still applies (a newer local write wins).
 * @param id primary key of the record to delete
 * @param options optional replication metadata — assumes { nodeId, viaNodeId } from replicating callers; TODO confirm
 * @returns true (the write has been queued; the commit itself may still be a no-op)
 */
_writeDelete(id: Id, options?: any) {
	const context = this.getContext();
	const transaction = txnForContext(context);
	checkValidId(id);
	// Prefer the entry already loaded on this resource; otherwise read it in this txn's read snapshot
	const entry = this.#entry ?? primaryStore.getEntry(id, { transaction: transaction.getReadTxn() });

	transaction.addWrite({
		key: id,
		store: primaryStore,
		entry,
		nodeName: context?.nodeName,
		// If the table has an external source with a delete handler, and this write did not
		// itself originate from the source, notify the source before committing locally
		before:
			this.constructor.source?.delete && !context?.source
				? this.constructor.source.delete.bind(this.constructor.source, id, undefined, context)
				: undefined,
		commit: (txnTime, existingEntry, retry, transaction: any) => {
			const existingRecord = existingEntry?.value;
			if (retry) {
				// On retry the entry may have changed underneath us: refresh last-modified and this resource
				if (context && existingEntry?.version > (context.lastModified || 0))
					context.lastModified = existingEntry.version;
				TableResource._updateResource(this, existingEntry);
			}
			if (precedesExistingVersion(txnTime, existingEntry, options?.nodeId) <= 0) return; // a newer record exists locally
			updateIndices(id, existingRecord);
			logger.trace?.(`Deleting record with id: ${id}, txn timestamp: ${new Date(txnTime).toISOString()}`);
			if (audit || trackDeletes) {
				// Record a delete tombstone (value null) so audit/replication can observe the removal
				updateRecord(
					id,
					null,
					existingEntry,
					txnTime,
					0,
					audit,
					{
						user: context?.user,
						nodeId: options?.nodeId,
						viaNodeId: options?.viaNodeId,
						transaction,
						tableToTrack: tableName,
					},
					'delete'
				);
				if (!audit || isRocksDB) scheduleCleanup();
			} else {
				// No audit or delete tracking: the entry can be removed outright
				removeEntry(primaryStore, existingEntry);
			}
		},
	});
	return true;
}
1977
+
1978
/**
 * Executes a query against this table: validates/prepares conditions, coerces typed
 * values, folds chained range conditions, aligns an indexed sort with the scan order
 * when possible, then streams results through select/ordering transforms.
 * @param target the request target describing conditions, sort, select, offset/limit, etc.
 * @returns an async iterable of (possibly partial) records; if target.explain is set,
 *          returns the prepared query plan instead
 * @throws AccessViolation when permission checking is requested and denied
 */
search(target: RequestTarget): AsyncIterable<Record & Partial<RecordObject>> {
	const context = this.getContext();
	const txn = txnForContext(context);
	if (!target) throw new Error('No query provided');
	if (target.parseError) throw target.parseError; // if there was a parse error, we can throw it now
	if (target.checkPermission) {
		// requesting authorization verification
		const allowed = this.allowRead(context.user, target, context);
		if (!allowed) {
			throw new AccessViolation(context.user);
		}
	}
	// search results are treated as uncacheable for last-modified purposes
	if (context) context.lastModified = UNCACHEABLE_TIMESTAMP;

	// normalize conditions to an array: target may itself be the conditions array/iterable,
	// or conditions may be a single condition object or an iterable
	let conditions = target.conditions;
	if (!conditions) conditions = Array.isArray(target) ? target : target[Symbol.iterator] ? Array.from(target) : [];
	else if (conditions.length === undefined) {
		conditions = conditions[Symbol.iterator] ? Array.from(conditions) : [conditions];
	}
	const id = target.id ?? this.getId();
	if (id) {
		// restrict the search to the id/prefix scope by prepending a primary-key condition
		conditions = [
			{
				attribute: null,
				comparator: Array.isArray(id) ? 'prefix' : 'starts_with',
				value: id,
			},
		].concat(conditions);
	}
	let orderAlignedCondition;
	const filtered = {};

	// Validates operator/condition structure, coerces condition values to the attribute's
	// type, and folds a single chained gt/ge+lt/le pair into one range comparator.
	function prepareConditions(conditions: Condition[], operator: string) {
		// some validation:
		switch (operator) {
			case 'and':
			case undefined:
				if (conditions.length < 1) throw new Error('An "and" operator requires at least one condition');
				break;
			case 'or':
				if (conditions.length < 2) throw new Error('An "or" operator requires at least two conditions');
				break;
			default:
				throw new Error('Invalid operator ' + operator);
		}
		for (const condition of conditions) {
			if (condition.conditions) {
				// nested group: recurse
				condition.conditions = prepareConditions(condition.conditions, condition.operator);
				continue;
			}
			const attribute_name = condition[0] ?? condition.attribute;
			const attribute = attribute_name == null ? primaryKeyAttribute : findAttribute(attributes, attribute_name);
			if (!attribute) {
				if (attribute_name != null && !target.allowConditionsOnDynamicAttributes)
					throw handleHDBError(new Error(), `${attribute_name} is not a defined attribute`, 404);
			} else if (attribute.type || COERCIBLE_OPERATORS[condition.comparator]) {
				// Do auto-coercion or coercion as required by the attribute type
				if (condition[1] === undefined) condition.value = coerceTypedValues(condition.value, attribute);
				else condition[1] = coerceTypedValues(condition[1], attribute);
			}
			if (condition.chainedConditions) {
				if (condition.chainedConditions.length === 1 && (!condition.operator || condition.operator == 'and')) {
					const chained = condition.chainedConditions[0];
					let upper: any, lower: any;
					// decide which side of the pair is the lower bound (gt/ge) vs upper bound (lt/le)
					if (
						chained.comparator === 'gt' ||
						chained.comparator === 'greater_than' ||
						chained.comparator === 'ge' ||
						chained.comparator === 'greater_than_equal'
					) {
						upper = condition;
						lower = chained;
					} else {
						upper = chained;
						lower = condition;
					}
					if (
						upper.comparator !== 'lt' &&
						upper.comparator !== 'less_than' &&
						upper.comparator !== 'le' &&
						upper.comparator !== 'less_than_equal'
					) {
						throw new Error(
							'Invalid chained condition, only less than and greater than conditions can be chained together'
						);
					}
					// fold the pair into a single range comparator: 'gtlt', 'gele', 'gtle', or 'gelt'
					const isGe = lower.comparator === 'ge' || lower.comparator === 'greater_than_equal';
					const isLe = upper.comparator === 'le' || upper.comparator === 'less_than_equal';
					condition.comparator = (isGe ? 'ge' : 'gt') + (isLe ? 'le' : 'lt');
					condition.value = [lower.value, upper.value];
				} else throw new Error('Multiple chained conditions are not currently supported');
			}
		}
		return conditions;
	}
	// Reorders 'and' conditions so the most selective (narrowest) index is scanned first.
	function orderConditions(conditions: Condition[], operator: string) {
		if (target.enforceExecutionOrder) return conditions; // don't rearrange conditions
		for (const condition of conditions) {
			if (condition.conditions) condition.conditions = orderConditions(condition.conditions, condition.operator);
		}
		// Sort the query by narrowest to broadest, so we can use the fastest index as possible with minimal filtering.
		// Note, that we do allow users to disable condition re-ordering, in case they have knowledge of a preferred
		// order for their query.
		if (conditions.length > 1 && operator !== 'or') return sortBy(conditions, estimateCondition(TableResource));
		else return conditions;
	}
	// Coerces a condition value (or each element of an array value) to the attribute's type.
	function coerceTypedValues(value: any, attribute: Attribute) {
		if (Array.isArray(value)) {
			return value.map((value) => coerceType(value, attribute));
		}
		return coerceType(value, attribute);
	}
	const operator = target.operator;
	if (conditions.length > 0 || operator) conditions = prepareConditions(conditions, operator);
	const sort = typeof target.sort === 'object' && target.sort;
	let postOrdering;
	if (sort) {
		// TODO: Support index-assisted sorts of unions, which will require potentially recursively adding/modifying an order aligned condition and be able to recursively undo it if necessary
		if (operator !== 'or') {
			const attribute_name = sort.attribute;
			if (attribute_name == undefined) throw new ClientError('Sort requires an attribute');
			orderAlignedCondition = conditions.find(
				(condition) => flattenKey(condition.attribute) === flattenKey(attribute_name)
			);
			if (orderAlignedCondition) {
				// if there is a condition on the same attribute as the first sort, we can use it to align the sort
				// and avoid a sort operation
			} else {
				const attribute = findAttribute(attributes, attribute_name);
				if (!attribute)
					throw handleHDBError(
						new Error(),
						`${
							Array.isArray(attribute_name) ? attribute_name.join('.') : attribute_name
						} is not a defined attribute`,
						404
					);
				if (attribute.indexed) {
					// if it is indexed, we add a pseudo-condition to align with the natural sort order of the index
					orderAlignedCondition = { ...sort, comparator: 'sort' };
					conditions.push(orderAlignedCondition);
				} else if (conditions.length === 0 && !target.allowFullScan)
					throw handleHDBError(
						new Error(),
						`${
							Array.isArray(attribute_name) ? attribute_name.join('.') : attribute_name
						} is not indexed and not combined with any other conditions`,
						404
					);
			}
			if (orderAlignedCondition) orderAlignedCondition.descending = Boolean(sort.descending);
		}
	}
	conditions = orderConditions(conditions, operator);
	if (sort) {
		if (orderAlignedCondition && conditions[0] === orderAlignedCondition) {
			// The db index is providing the order for the first sort, may need post ordering next sort order
			if (sort.next) {
				postOrdering = {
					dbOrderedAttribute: sort.attribute,
					attribute: sort.next.attribute,
					descending: sort.next.descending,
					next: sort.next.next,
				};
			}
		} else {
			// if we had to add an aligned condition that isn't first, we remove it and do ordering later
			if (orderAlignedCondition) conditions.splice(conditions.indexOf(orderAlignedCondition), 1);
			postOrdering = sort;
		}
	}
	const select = target.select;
	if (conditions.length === 0) {
		// no conditions at all: fall back to a full primary-key scan
		conditions = [{ attribute: primaryKey, comparator: 'greater_than', value: true }];
	}
	if (target.explain) {
		// return the prepared plan instead of executing it
		return {
			conditions,
			operator,
			postOrdering,
			selectApplied: Boolean(select),
		};
	}
	// we mark the read transaction as in use (necessary for a stable read
	// transaction, and we really don't care if the
	// counts are done in the same read transaction because they are just estimates) until the search
	// results have been iterated and finished.
	const readTxn = txn.useReadTxn();
	const entries = executeConditions(
		conditions,
		operator,
		TableResource,
		readTxn,
		target,
		context,
		(results: any[], filters: Function[]) => transformToEntries(results, select, context, readTxn, filters),
		filtered
	);
	const ensure_loaded = target.ensureLoaded !== false;
	const transformToRecord = TableResource.transformEntryForSelect(
		select,
		context,
		readTxn,
		filtered,
		ensure_loaded,
		true
	);
	let results = TableResource.transformToOrderedSelect(
		entries,
		select,
		postOrdering,
		context,
		readTxn,
		transformToRecord
	);
	// apply any offset/limit after all the sorting and filtering
	if (target.offset || target.limit !== undefined)
		results = results.slice(
			target.offset,
			target.limit !== undefined ? (target.offset || 0) + target.limit : undefined
		);
	// release the read transaction once iteration finishes
	results.onDone = () => {
		results.onDone = null; // ensure that it isn't called twice
		txn.doneReadTxn();
	};
	results.selectApplied = true;
	// expose the column names the results will contain (for tabular consumers)
	results.getColumns = () => {
		if (select) {
			const columns = [];
			for (const column of select) {
				if (column === '*') columns.push(...attributes.map((attribute) => attribute.name));
				else columns.push(column.name || column);
			}
			return columns;
		}
		// default: all stored (non-computed, non-relationship) attributes
		return attributes
			.filter((attribute) => !attribute.computed && !attribute.relationship)
			.map((attribute) => attribute.name);
	};
	return results;
}
2219
/**
 * This is responsible for ordering (when a sort is requested) and select()ing the
 * attributes/properties from returned entries.
 * @param entries the entries (or keys) produced by condition execution
 * @param select the requested attribute selection
 * @param sort the post-ordering specification, if any
 * @param context the request context
 * @param readTxn the read transaction the entries were loaded in
 * @param transformToRecord function that maps an entry to a record per the select
 * @returns an iterable of transformed (and possibly sorted) results
 */
2228
static transformToOrderedSelect(
	entries: any[],
	select: (string | SubSelect)[],
	sort: Sort,
	context: Context,
	readTxn: any,
	transformToRecord: Function
) {
	let results = new ExtendedIterable();
	if (sort) {
		// there might be some situations where we don't need to transform to entries for sorting, not sure
		entries = transformToEntries(entries, select, context, readTxn, null);
		let ordered;
		// if we are doing post-ordering, we need to get records first, then sort them
		results.iterate = function () {
			let sortedArrayIterator: IterableIterator<any>;
			// the underlying entries may be sync- or async-iterable; handle both
			const dbIterator = entries[Symbol.asyncIterator]
				? entries[Symbol.asyncIterator]()
				: entries[Symbol.iterator]();
			let dbDone: boolean;
			// when set, the db index already ordered by this attribute, so sorting only
			// needs to happen within runs ("groups") sharing the same value for it
			const dbOrderedAttribute = sort.dbOrderedAttribute;
			let enqueuedEntryForNextGroup: any;
			let lastGroupingValue: any;
			let firstEntry = true;
			// Builds a comparator for the (possibly multi-level) sort; ties fall through
			// to the next sort level's comparator.
			function createComparator(order: Sort) {
				const nextComparator = order.next && createComparator(order.next);
				const descending = order.descending;
				context.sort = order; // make sure this is set to the current sort order
				return (entryA, entryB) => {
					const a = getAttributeValue(entryA, order.attribute, context);
					const b = getAttributeValue(entryB, order.attribute, context);
					const diff = descending ? compareKeys(b, a) : compareKeys(a, b);
					if (diff === 0) return nextComparator?.(entryA, entryB) || 0;
					return diff;
				};
			}
			const comparator = createComparator(sort);
			return {
				async next() {
					let iteration: IteratorResult<any>;
					// first drain the currently-sorted batch, if there is one
					if (sortedArrayIterator) {
						iteration = sortedArrayIterator.next();
						if (iteration.done) {
							if (dbDone) {
								if (results.onDone) results.onDone();
								return iteration;
							}
						} else
							return {
								value: await transformToRecord.call(this, iteration.value),
							};
					}
					ordered = [];
					// an entry held over from the previous group starts the next batch
					if (enqueuedEntryForNextGroup) ordered.push(enqueuedEntryForNextGroup);
					// need to load all the entries into ordered
					do {
						iteration = await dbIterator.next();
						if (iteration.done) {
							dbDone = true;
							if (!ordered.length) {
								if (results.onDone) results.onDone();
								return iteration;
							} else break;
						} else {
							let entry = iteration.value;
							if (entry?.then) entry = await entry;
							// if the index has already provided the first order of sorting, we only need to sort
							// within each grouping
							if (dbOrderedAttribute) {
								const groupingValue = getAttributeValue(entry, dbOrderedAttribute, context);
								if (firstEntry) {
									firstEntry = false;
									lastGroupingValue = groupingValue;
								} else if (groupingValue !== lastGroupingValue) {
									// group boundary: hold this entry for the next batch and sort the current one
									lastGroupingValue = groupingValue;
									enqueuedEntryForNextGroup = entry;
									break;
								}
							}
							// we store the value we will sort on, for fast sorting, and the entry so the records can be GC'ed if necessary
							// before the sorting is completed
							ordered.push(entry);
						}
					} while (true);
					if (sort.isGrouped) {
						// TODO: Return grouped results
					}
					ordered.sort(comparator);
					sortedArrayIterator = ordered[Symbol.iterator]();
					iteration = sortedArrayIterator.next();
					if (!iteration.done)
						return {
							value: await transformToRecord.call(this, iteration.value),
						};
					if (results.onDone) results.onDone();
					return iteration;
				},
				// propagate early termination / errors and release resources via onDone
				return() {
					if (results.onDone) results.onDone();
					return dbIterator.return();
				},
				throw() {
					if (results.onDone) results.onDone();
					return dbIterator.throw();
				},
			};
		};
		// Pushes the remainder of a path-style sort (array attribute) down onto the
		// matching select column so nested results are sorted too.
		const applySortingOnSelect = (sort) => {
			if (typeof select === 'object' && Array.isArray(sort.attribute)) {
				for (let i = 0; i < select.length; i++) {
					const column = select[i];
					let columnSort;
					if (column.name === sort.attribute[0]) {
						// column is an object select: append to its existing sort chain
						columnSort = column.sort || (column.sort = {});
						while (columnSort.next) columnSort = columnSort.next;
						columnSort.attribute = sort.attribute.slice(1);
						columnSort.descending = sort.descending;
					} else if (column === sort.attribute[0]) {
						// column is a plain string: replace it with an object select carrying the sort
						select[i] = columnSort = {
							name: column,
							sort: {
								attribute: sort.attribute.slice(1),
								descending: sort.descending,
							},
						};
					}
				}
			}
			if (sort.next) applySortingOnSelect(sort.next);
		};
		applySortingOnSelect(sort);
	} else {
		// no sorting: stream entries straight through the record transform
		results.iterate = (entries[Symbol.asyncIterator] || entries[Symbol.iterator]).bind(entries);
		results = results.map(function (entry) {
			try {
				// because this is a part of a stream of results, we will often be continuing to iterate over the results when there are errors,
				// but to improve the legibility of the error, we attach the primary key to the error
				const result = transformToRecord.call(this, entry);
				// if it is a catchable thenable (promise)
				if (typeof result?.catch === 'function')
					return result.catch((error) => {
						error.partialObject = { [primaryKey]: entry.key };
						throw error;
					});
				return result;
			} catch (error) {
				error.partialObject = { [primaryKey]: entry.key };
				throw error;
			}
		});
	}
	return results;
}
2381
/**
 * This is responsible for select()ing the attributes/properties from returned entries;
 * returns a transform function that maps an entry to a (possibly partial) record.
 * @param select the requested attribute selection
 * @param context the request context
 * @param readTxn the read transaction used to (re)load records
 * @param filtered map of pre-filtered relationship values keyed by attribute name
 * @param ensure_loaded whether records must be fully loaded (e.g. from source) before selecting
 * @param canSkip whether missing/invalidated records may be skipped rather than returned
 * @returns the entry-to-record transform function
 */
2390
+ static transformEntryForSelect(select, context, readTxn, filtered, ensure_loaded?, canSkip?) {
2391
+ let checkLoaded;
2392
+ if (
2393
+ ensure_loaded &&
2394
+ hasSourceGet &&
2395
+ // determine if we need to fully loading the records ahead of time, this is why we would not need to load the full record:
2396
+ !(typeof select === 'string' ? [select] : select)?.every((attribute) => {
2397
+ let attribute_name;
2398
+ if (typeof attribute === 'object') {
2399
+ attribute_name = attribute.name;
2400
+ } else attribute_name = attribute;
2401
+ // TODO: Resolvers may not need a full record, either because they are not using the record, or because they are a redirected property
2402
+ return indices[attribute_name] || attribute_name === primaryKey;
2403
+ })
2404
+ ) {
2405
+ checkLoaded = true;
2406
+ }
2407
+ let transformCache;
2408
+ const source = this.source;
2409
+ // Transform an entry to a record. Note that *this* instance is intended to be the iterator.
2410
+ const transform = function (entry: Entry) {
2411
+ let record;
2412
+ if (context?.transaction?.stale) context.transaction.stale = false;
2413
+ if (entry != undefined) {
2414
+ record = entry.deref ? entry.deref() : entry.value;
2415
+ if (entry.metadataFlags & INVALIDATED && context.replicateFrom === false && canSkip && entry.residencyId) {
2416
+ return SKIP;
2417
+ }
2418
+ if (!record && (entry.key === undefined || entry.deref)) {
2419
+ // if the record is not loaded, either due to the entry actually be a key, or the entry's value
2420
+ // being GC'ed, we need to load it now
2421
+ entry = loadLocalRecord(
2422
+ entry.key ?? entry,
2423
+ context,
2424
+ {
2425
+ transaction: readTxn,
2426
+ lazy: select?.length < 4,
2427
+ ensureLoaded: ensure_loaded,
2428
+ },
2429
+ this?.isSync,
2430
+ (entry: Entry) => entry
2431
+ );
2432
+ if (entry?.then) return entry.then(transform.bind(this));
2433
+ record = entry?.value;
2434
+ }
2435
+ if (
2436
+ (checkLoaded && entry?.metadataFlags & (INVALIDATED | EVICTED)) || // invalidated or evicted should go to load from source
2437
+ (entry?.expiresAt != undefined && entry?.expiresAt < Date.now())
2438
+ ) {
2439
+ // should expiration really apply?
2440
+ if (context.onlyIfCached) {
2441
+ return {
2442
+ [primaryKey]: entry.key,
2443
+ message: 'This entry has expired',
2444
+ };
2445
+ }
2446
+ const loadingFromSource = ensureLoadedFromSource(source, entry.key ?? entry, entry, context);
2447
+ if (loadingFromSource?.then) {
2448
+ return loadingFromSource.then(transform);
2449
+ }
2450
+ }
2451
+ }
2452
+ if (record == null) return canSkip ? SKIP : record;
2453
+ if (select && !(select[0] === '*' && select.length === 1)) {
2454
+ let promises: Promise<any>[];
2455
+ const selectAttribute = (attribute, callback) => {
2456
+ let attribute_name;
2457
+ if (typeof attribute === 'object') {
2458
+ attribute_name = attribute.name;
2459
+ } else attribute_name = attribute;
2460
+ const resolver = propertyResolvers?.[attribute_name];
2461
+ let value;
2462
+ if (resolver) {
2463
+ const filterMap = filtered?.[attribute_name];
2464
+ if (filterMap) {
2465
+ if (filterMap.hasMappings) {
2466
+ const key = resolver.from ? record[resolver.from] : flattenKey(entry.key);
2467
+ value = filterMap.get(key);
2468
+ if (!value) value = [];
2469
+ } else {
2470
+ value = filterMap.fromRecord?.(record);
2471
+ }
2472
+ } else {
2473
+ value = resolver(record, context, entry, true);
2474
+ }
2475
+ const handleResolvedValue = (value: any) => {
2476
+ if (resolver.directReturn) return callback(value, attribute_name);
2477
+ if (value && typeof value === 'object') {
2478
+ const targetTable = resolver.definition?.tableClass || TableResource;
2479
+ if (!transformCache) transformCache = {};
2480
+ const transform =
2481
+ transformCache[attribute_name] ||
2482
+ (transformCache[attribute_name] = targetTable.transformEntryForSelect(
2483
+ // if it is a simple string, there is no select for the next level,
2484
+ // otherwise pass along the nested selected
2485
+ attribute_name === attribute
2486
+ ? null
2487
+ : attribute.select || (Array.isArray(attribute) ? attribute : null),
2488
+ context,
2489
+ readTxn,
2490
+ filterMap,
2491
+ ensure_loaded
2492
+ ));
2493
+ if (Array.isArray(value)) {
2494
+ const results = [];
2495
+ const iterator = targetTable
2496
+ .transformToOrderedSelect(
2497
+ value,
2498
+ attribute.select,
2499
+ typeof attribute.sort === 'object' && attribute.sort,
2500
+ context,
2501
+ readTxn,
2502
+ transform
2503
+ )
2504
+ [this.isSync ? Symbol.iterator : Symbol.asyncIterator]();
2505
+ const nextValue = (iteration: IteratorResult<any> & Promise<any>) => {
2506
+ while (!iteration.done) {
2507
+ if (iteration?.then) return iteration.then(nextValue);
2508
+ results.push(iteration.value);
2509
+ iteration = iterator.next();
2510
+ }
2511
+ callback(results, attribute_name);
2512
+ };
2513
+ const promised = nextValue(iterator.next());
2514
+ if (promised) {
2515
+ if (!promises) promises = [];
2516
+ promises.push(promised);
2517
+ }
2518
+ return;
2519
+ } else {
2520
+ value = transform.call(this, value);
2521
+ if (value?.then) {
2522
+ if (!promises) promises = [];
2523
+ promises.push(value.then((value: any) => callback(value, attribute_name)));
2524
+ return;
2525
+ }
2526
+ }
2527
+ }
2528
+ callback(value, attribute_name);
2529
+ };
2530
+ if (value?.then) {
2531
+ if (!promises) promises = [];
2532
+ promises.push(value.then(handleResolvedValue));
2533
+ } else handleResolvedValue(value);
2534
+ return;
2535
+ } else {
2536
+ value = record[attribute_name];
2537
+ if (value && typeof value === 'object' && attribute_name !== attribute) {
2538
+ value = TableResource.transformEntryForSelect(
2539
+ attribute.select || attribute,
2540
+ context,
2541
+ readTxn,
2542
+ null
2543
+ )({ value });
2544
+ }
2545
+ }
2546
+ callback(value, attribute_name);
2547
+ };
2548
+ let selected: any;
2549
+ if (typeof select === 'string') {
2550
+ selectAttribute(select, (value) => {
2551
+ selected = value;
2552
+ });
2553
+ } else if (Array.isArray(select)) {
2554
+ if (select.asArray) {
2555
+ selected = [];
2556
+ select.forEach((attribute, index) => {
2557
+ if (attribute === '*') select[index] = record;
2558
+ else selectAttribute(attribute, (value) => (selected[index] = value));
2559
+ });
2560
+ } else {
2561
+ selected = {};
2562
+ const forceNulls = select.forceNulls;
2563
+ for (const attribute of select) {
2564
+ if (attribute === '*')
2565
+ for (const key in record) {
2566
+ selected[key] = record[key];
2567
+ }
2568
+ else
2569
+ selectAttribute(attribute, (value, attribute_name) => {
2570
+ if (value === undefined && forceNulls) value = null;
2571
+ selected[attribute_name] = value;
2572
+ });
2573
+ }
2574
+ }
2575
+ } else throw new ClientError('Invalid select' + select);
2576
+ if (promises) {
2577
+ return Promise.all(promises).then(() => selected);
2578
+ }
2579
+ return selected;
2580
+ }
2581
+ return record;
2582
+ };
2583
+ return transform;
2584
+ }
2585
+
2586
/**
 * Subscribe to change/message events for this record, or — when the request
 * targets a collection — for the whole table / all descendant ids.
 * Requires an audit log (throws without one) and turns auditing on for the
 * table if it is not already enabled.
 * History can be replayed first, driven by `request.startTime` and/or
 * `request.previousCount`; real-time events arriving during replay are held
 * in `pendingRealTimeQueue` and delivered afterwards so ordering is preserved.
 * @param request subscription options (startTime, previousCount, rawEvents,
 *   omitCurrent, isCollection, onlyChildren, supportsTransactions, listener)
 * @returns the subscription object, an async iterable of events
 */
async subscribe(request: SubscriptionRequest): Promise<AsyncIterable<Record>> {
	if (!auditStore) throw new Error('Can not subscribe to a table without an audit log');
	if (!audit) {
		// auditing is required for subscriptions; enable it on this table
		table({ table: tableName, database: databaseName, schemaDefined, attributes, audit: true });
	}
	if (!request) request = {};
	const getFullRecord = !request.rawEvents;
	let pendingRealTimeQueue = []; // while we are servicing a loop for older messages, we have to queue up real-time messages and deliver them in order
	const thisId = requestTargetToId(request) ?? null; // treat undefined and null as the root
	// real-time listener: invoked for each new audit record relevant to thisId
	const subscription = addSubscription(
		TableResource,
		thisId,
		function (id: Id, auditRecord: any, localTime: number, beginTxn: boolean) {
			try {
				let type = auditRecord.type;
				let value;
				if (type === 'message' || request.rawEvents) {
					// we only send the full message; these are individual messages that can be sent out of order
					// TODO: Do we want to have a limit to how far out-of-order we are willing to send?
					value = auditRecord.getValue?.(primaryStore, getFullRecord);
				} else if (type !== 'end_txn') {
					// these are events that indicate that the primary record has changed. I believe we always want to simply
					// send the latest value. Note that it is fine to synchronously access these records, they should have just
					// been written, so are fresh in memory.
					const entry: Entry = primaryStore.getEntry(id);
					if (entry) {
						if (entry.version !== auditRecord.version) return; // out of order event, with old update, don't send anything
						value = entry.value;
						// derive the event type from the entry's current state
						type = entry.metadataFlags & INVALIDATED ? 'invalidate' : value ? 'put' : 'delete';
					} else {
						type = 'delete';
					}
				}
				const event = {
					id,
					localTime,
					value,
					version: auditRecord.version,
					type,
					beginTxn,
				};
				// while history replay below is still running, hold real-time events in order
				if (pendingRealTimeQueue) pendingRealTimeQueue.push(event);
				else {
					if (databaseName !== 'system') {
						recordAction(auditRecord.size ?? 1, 'db-message', tableName, null);
					}
					this.send(event);
				}
			} catch (error) {
				logger.error?.(error);
			}
		},
		request.startTime || 0,
		request
	);
	// replay any requested history asynchronously, then flush the queued real-time events
	const result = (async () => {
		const isCollection = request.isCollection ?? thisId == null;
		if (isCollection) {
			subscription.includeDescendants = true;
			if (request.onlyChildren) subscription.onlyChildren = true;
		}
		if (request.supportsTransactions) subscription.supportsTransactions = true;
		let count = request.previousCount;
		if (count > 1000) count = 1000; // don't allow too many, we have to hold these in memory
		let startTime = request.startTime;
		if (isCollection) {
			// a collection should retrieve all descendant ids
			if (startTime) {
				if (count)
					throw new ClientError('startTime and previousCount can not be combined for a table level subscription');
				// start time specified, get the audit history for this time range
				for (const auditRecord of auditStore.getRange({
					start: startTime,
					exclusiveStart: true,
					snapshot: false, // no need for a snapshot, audits don't change
				})) {
					if (auditRecord.tableId !== tableId) continue;
					const id = auditRecord.recordId;
					if (thisId == null || isDescendantId(thisId, id)) {
						const value = auditRecord.getValue(primaryStore, getFullRecord, auditRecord.localTime);
						send({
							id,
							localTime: auditRecord.localTime,
							value,
							version: auditRecord.version,
							type: auditRecord.type,
							size: auditRecord.size,
						});
						if (subscription.queue?.length > EVENT_HIGH_WATER_MARK) {
							// if we have too many messages, we need to pause and let the client catch up
							if ((await subscription.waitForDrain()) === false) return;
						}
					}
					// TODO: Would like to do this asynchronously, but would need to catch up on anything published during iteration
					//await rest(); // yield for fairness
					subscription.startTime = auditRecord.localTime; // update so we don't double send
				}
			} else if (count) {
				const history = [];
				// we are collecting the history in reverse order to get the right count, then reversing to send
				for (const auditRecord of auditStore.getRange({ start: 'z', end: false, reverse: true })) {
					try {
						if (auditRecord.tableId !== tableId) continue;
						const id = auditRecord.recordId;
						if (thisId == null || isDescendantId(thisId, id)) {
							const value = auditRecord.getValue(primaryStore, getFullRecord, auditRecord.localTime);
							history.push({
								id,
								localTime: auditRecord.localTime,
								value,
								version: auditRecord.version,
								type: auditRecord.type,
							});
							if (--count <= 0) break;
						}
					} catch (error) {
						logger.error?.('Error getting history entry', auditRecord.localTime, error);
					}
					// TODO: Would like to do this asynchronously, but would need to catch up on anything published during iteration
					//await rest(); // yield for fairness
				}
				// send oldest-first
				for (let i = history.length; i > 0; ) {
					send(history[--i]);
				}
				if (history[0]) subscription.startTime = history[0].localTime; // update so don't double send
			} else if (!request.omitCurrent) {
				// no history requested: send the current value of every (descendant) record
				for (const { key: id, value, version, localTime, size } of primaryStore.getRange({
					start: thisId ?? false,
					end: thisId == null ? undefined : [thisId, MAXIMUM_KEY],
					versions: true,
					snapshot: false, // no need for a snapshot, just want the latest data
				})) {
					if (!value) continue;
					send({ id, localTime, value, version, type: 'put', size });
					if (subscription.queue?.length > EVENT_HIGH_WATER_MARK) {
						// if we have too many messages, we need to pause and let the client catch up
						if ((await subscription.waitForDrain()) === false) return;
					}
				}
			}
		} else {
			// single-record subscription
			if (count && !startTime) startTime = 0;
			let entry = this.#entry;
			let localTime = entry?.localTime;
			if (!entry) {
				entry = primaryStore.getEntry(thisId);
				localTime = entry?.localTime;
			} else if (localTime === PENDING_LOCAL_TIME) {
				// we can't use the pending commit because it doesn't have the local audit time yet,
				// so try to retrieve the previous/committed record
				primaryStore.cache?.delete(thisId);
				entry = primaryStore.getEntry(thisId);
				logger.trace?.('re-retrieved record', localTime, this.#entry?.localTime);
				localTime = entry?.localTime;
			}
			logger.trace?.('Subscription from', startTime, 'from', thisId, localTime);
			if (startTime < localTime) {
				// start time specified, get the audit history for this record by walking
				// the previousVersion/previousNodeId chain backwards
				const history = [];
				let nextTime = localTime;
				let nodeId = entry?.nodeId;
				do {
					//TODO: Would like to do this asynchronously, but we will need to run catch after this to ensure we didn't miss anything
					//await auditStore.prefetch([key]); // do it asynchronously for better fairness/concurrency and avoid page faults
					const auditRecord = auditStore.getSync(nextTime, tableId, thisId, nodeId);
					if (auditRecord) {
						if (startTime < nextTime) {
							request.omitCurrent = true; // we are sending the current version from history, so don't double send
							const value = auditRecord.getValue(primaryStore, getFullRecord, nextTime);
							if (getFullRecord) auditRecord.type = 'put';
							history.push({
								id: thisId,
								value,
								localTime: nextTime,
								...auditRecord,
							});
						}
						nextTime = auditRecord.previousVersion;
						nodeId = auditRecord.previousNodeId;
					} else break;
					if (count) count--;
				} while (nextTime > startTime && count !== 0);
				// send oldest-first
				for (let i = history.length; i > 0; ) {
					send(history[--i]);
				}
				subscription.startTime = localTime; // make sure we don't re-broadcast the current version that we already sent
			}
			if (!request.omitCurrent && entry?.value) {
				// if retain and it exists, send the current value first
				send({
					id: thisId,
					...entry,
					type: 'put',
				});
			}
		}
		// now send any queued messages
		for (const event of pendingRealTimeQueue) {
			send(event);
		}
		// from here on the real-time listener sends directly
		pendingRealTimeQueue = null;
	})();
	result.catch((error) => {
		harperLogger.error?.('Error in real-time subscription:', error);
		subscription.send(error);
	});
	// shared send path: records metering for non-system databases, then forwards
	function send(event: any) {
		if (databaseName !== 'system') {
			recordAction(event.size ?? 1, 'db-message', tableName, null);
		}
		subscription.send(event);
	}
	if (request.listener) subscription.on('data', request.listener);
	return subscription;
}
2801
+
2802
+ /**
2803
+ * Subscribe on one thread unless this is a per-thread subscription
2804
+ * @param workerIndex
2805
+ * @param options
2806
+ */
2807
+ static subscribeOnThisThread(workerIndex, options) {
2808
+ return workerIndex === 0 || options?.crossThreads === false;
2809
+ }
2810
/**
 * Whether this resource currently exists: either a record is loaded,
 * or a save operation for it is in flight.
 */
doesExist() {
	if (this.#record) return true;
	return Boolean(this.#savingOperation);
}
2813
+
2814
/**
 * Publish a message to a record's topic. Publishing adds an (observable)
 * entry in the audit log, but does not change the record at all; the entry
 * is replicated and triggers subscription listeners.
 * When `target.checkPermission` is set, `allowCreate` is consulted first and
 * an AccessViolation is thrown if the user is not permitted.
 * @param target the record/topic to publish to
 * @param message the message payload
 * @param options optional write options (e.g. residencyId, nodeId)
 */
publish(target: RequestTarget, message: Record, options?: any) {
	if (message === undefined || message instanceof URLSearchParams) {
		// legacy arg format, shift the args
		this._writePublish(this.getId(), target, message);
		return;
	}
	const context = this.getContext();
	// requesting authorization verification only when asked for
	const allowed = target.checkPermission ? this.allowCreate(context.user, message, context) : true;
	return when(allowed, (allowed: boolean) => {
		if (!allowed) {
			throw new AccessViolation(context.user);
		}
		this._writePublish(requestTargetToId(target), message, options);
	});
}
2841
/**
 * Internal write path for publish(): registers a write on the current
 * transaction that appends a 'message' audit entry for `id` without
 * changing the record's value or version.
 * @param id target record id; null publishes to the root topic
 * @param message the message payload (blobs in it are pre-committed)
 * @param options optional residencyId/nodeId/viaNodeId routing info
 */
_writePublish(id: Id, message, options?: any) {
	const transaction = txnForContext(this.getContext());
	id ??= null;
	if (id !== null) checkValidId(id); // note that we allow the null id for publishing so that you can publish to the root topic
	const context = this.getContext();
	transaction.addWrite({
		key: id,
		store: primaryStore,
		entry: this.#entry,
		nodeName: context?.nodeName,
		// only validate locally-originated writes (skip replicated/sourced ones)
		validate: () => {
			if (!context?.source) {
				transaction.checkOverloaded();
				this.validate(message);
			}
		},
		// forward the publish to the upstream source first, when one exists
		before:
			this.constructor.source?.publish && !context?.source
				? this.constructor.source.publish.bind(this.constructor.source, id, message, context)
				: undefined,
		beforeIntermediate: preCommitBlobsForRecordBefore(
			message,
			undefined,
			true // because transaction log entries can be deleted at any point, we must save the blobs in the record, there is no cleanup of them
		),
		commit: (txnTime, existingEntry, _retry, transaction: any) => {
			// just need to rewrite the record so its metadata points to the latest audit record,
			// without changing the existing version
			// TODO: would be faster to use getBinaryFast here and not have the record loaded

			if (existingEntry === undefined && trackDeletes && !audit) {
				scheduleCleanup();
			}
			logger.trace?.(`Publishing message to id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`);
			// always audit this, but don't change existing version
			// TODO: Use direct writes in the future (copying binary data is hard because it invalidates the cache)
			updateRecord(
				id,
				existingEntry?.value ?? null, // keep the existing value (or null), message does not alter it
				existingEntry,
				txnTime,
				0,
				true,
				{
					user: context?.user,
					residencyId: options?.residencyId,
					expiresAt: context?.expiresAt,
					nodeId: options?.nodeId,
					viaNodeId: options?.viaNodeId,
					transaction,
					tableToTrack: tableName,
				},
				'message', // audit entry type
				false,
				message
			);
		},
	});
}
2900
/**
 * Validate a record (or patch) against this table's declared attributes.
 * Checks scalar types (with coercion for Date/BigInt/Bytes/Blob), recurses
 * into nested object types and array elements, rejects direct assignment of
 * computed/relationship properties, enforces `sealed` and `nullable: false`.
 * All problems are collected and thrown together as one ClientError.
 * Note: coercions mutate `record`/nested values in place.
 * @param record the record (or patch document) to validate
 * @param patch when true, only attributes present in the record are checked,
 *   and `{__op__, value}` patch-operation wrappers are unwrapped
 * @throws ClientError with all validation messages joined by '. '
 */
validate(record: any, patch?: boolean) {
	let validationErrors;
	// accumulate problems; the list is only allocated on first error
	const addError = (message) => {
		(validationErrors || (validationErrors = [])).push(message);
	};
	const validateValue = (value, attribute: Attribute, name) => {
		if (attribute.type && value != null) {
			if (patch && value.__op__) value = value.value; // unwrap patch operation
			if (attribute.properties) {
				// nested object type
				if (typeof value !== 'object') {
					addError(
						`Value ${stringify(value)} in property ${name} must be an object${
							attribute.type ? ' (' + attribute.type + ')' : ''
						}`
					);
				}
				const properties = attribute.properties;
				for (let i = 0, l = properties.length; i < l; i++) {
					const property = properties[i];
					if (property.relationship || property.computed) {
						// fix: check the nested object being validated, not the top-level
						// record, for a directly assigned computed/relationship value
						if (Object.hasOwn(value, property.name)) {
							addError(`Computed property ${name}.${property.name} may not be directly assigned a value`);
						}
						continue;
					}
					const updated = validateValue(value[property.name], property, name + '.' + property.name);
					if (updated) value[property.name] = updated;
				}
				if (attribute.sealed && value != null && typeof value === 'object') {
					// sealed objects may not carry undeclared keys
					for (const key in value) {
						if (!properties.find((property) => property.name === key)) {
							addError(`Property ${key} is not allowed within object in property ${name}`);
						}
					}
				}
			} else {
				switch (attribute.type) {
					case 'Int':
						// `>> 0` round-trips only for 32-bit integers
						if (typeof value !== 'number' || value >> 0 !== value)
							addError(
								`Value ${stringify(value)} in property ${name} must be an integer (from -2147483648 to 2147483647)`
							);
						break;
					case 'Long':
						// whole number within the safe double-precision integer range
						if (typeof value !== 'number' || !(Math.floor(value) === value && Math.abs(value) <= 9007199254740992))
							addError(
								`Value ${stringify(
									value
								)} in property ${name} must be an integer (from -9007199254740992 to 9007199254740992)`
							);
						break;
					case 'Float':
						if (typeof value !== 'number')
							addError(`Value ${stringify(value)} in property ${name} must be a number`);
						break;
					case 'ID':
						// a string, or a non-empty array of strings (composite id)
						if (
							!(
								typeof value === 'string' ||
								(value?.length > 0 && value.every?.((value) => typeof value === 'string'))
							)
						)
							addError(
								`Value ${stringify(value)} in property ${name} must be a string, or an array of strings`
							);
						break;
					case 'String':
						if (typeof value !== 'string')
							addError(`Value ${stringify(value)} in property ${name} must be a string`);
						break;
					case 'Boolean':
						if (typeof value !== 'boolean')
							addError(`Value ${stringify(value)} in property ${name} must be a boolean`);
						break;
					case 'Date':
						if (!(value instanceof Date)) {
							// coerce from string/epoch-number representations
							if (typeof value === 'string' || typeof value === 'number') return new Date(value);
							else addError(`Value ${stringify(value)} in property ${name} must be a Date`);
						}
						break;
					case 'BigInt':
						if (typeof value !== 'bigint') {
							// do coercion because otherwise it is rather difficult to get numbers to consistently be bigints
							if (typeof value === 'string' || typeof value === 'number') return BigInt(value);
							addError(`Value ${stringify(value)} in property ${name} must be a bigint`);
						}
						break;
					case 'Bytes':
						if (!(value instanceof Uint8Array)) {
							if (typeof value === 'string') return Buffer.from(value);
							addError(`Value ${stringify(value)} in property ${name} must be a Buffer or Uint8Array`);
						}
						break;
					case 'Blob':
						if (!(value instanceof Blob)) {
							// coerce strings/Buffers into a text Blob
							if (typeof value === 'string') value = Buffer.from(value);
							if (value instanceof Buffer) {
								return createBlob(value, { type: 'text/plain' });
							}
							addError(`Value ${stringify(value)} in property ${name} must be a Blob`);
						}
						break;
					case 'array':
						if (Array.isArray(value)) {
							if (attribute.elements) {
								// validate (and possibly coerce) each element
								for (let i = 0, l = value.length; i < l; i++) {
									const element = value[i];
									const updated = validateValue(element, attribute.elements, name + '[*]');
									if (updated) value[i] = updated;
								}
							}
						} else addError(`Value ${stringify(value)} in property ${name} must be an Array`);

						break;
				}
			}
		}
		if (attribute.nullable === false && value == null) {
			addError(`Property ${name} is required (and does not allow null values)`);
		}
	};
	for (let i = 0, l = attributes.length; i < l; i++) {
		const attribute = attributes[i];
		if (attribute.relationship || attribute.computed) {
			// computed/relationship values are derived, never stored directly
			if (Object.hasOwn(record, attribute.name)) {
				addError(`Computed property ${attribute.name} may not be directly assigned a value`);
			}
			continue;
		}
		// in patch mode, only validate attributes the patch actually touches
		if (!patch || attribute.name in record) {
			const updated = validateValue(record[attribute.name], attribute, attribute.name);
			if (updated !== undefined) record[attribute.name] = updated;
		}
	}
	if (sealed) {
		// sealed tables may not carry undeclared top-level properties
		for (const key in record) {
			if (!attributes.find((attribute) => attribute.name === key)) {
				addError(`Property ${key} is not allowed`);
			}
		}
	}

	if (validationErrors) {
		throw new ClientError(validationErrors.join('. '));
	}
}
3069
/**
 * Returns the record's version number, which is used as its updated time.
 */
getUpdatedTime() {
	return this.#version;
}
3072
+ static async addAttributes(attributesToAdd: Attribute[]) {
3073
+ const new_attributes = attributes.slice(0);
3074
+ for (const attribute of attributesToAdd) {
3075
+ if (!attribute.name) throw new ClientError('Attribute name is required');
3076
+ if (attribute.name.match(/[`/]/))
3077
+ throw new ClientError('Attribute names cannot include backticks or forward slashes');
3078
+ validateAttribute(attribute.name);
3079
+ new_attributes.push(attribute);
3080
+ }
3081
+ table({
3082
+ table: tableName,
3083
+ database: databaseName,
3084
+ schemaDefined,
3085
+ attributes: new_attributes,
3086
+ });
3087
+ return TableResource.indexingOperation;
3088
+ }
3089
+ static async removeAttributes(names: string[]) {
3090
+ const new_attributes = attributes.filter((attribute) => !names.includes(attribute.name));
3091
+ table({
3092
+ table: tableName,
3093
+ database: databaseName,
3094
+ schemaDefined,
3095
+ attributes: new_attributes,
3096
+ });
3097
+ return TableResource.indexingOperation;
3098
+ }
3099
+ /**
3100
+ * Get the size of the table in bytes (based on amount of pages stored in the database)
3101
+ * @param options
3102
+ */
3103
+ static getSize() {
3104
+ if (isRocksDB) {
3105
+ return primaryStore.getDBIntProperty('rocksdb.estimate-live-data-size') ?? 0;
3106
+ }
3107
+ const stats = primaryStore.getStats();
3108
+ return (stats.treeBranchPageCount + stats.treeLeafPageCount + stats.overflowPages) * stats.pageSize;
3109
+ }
3110
+ static getAuditSize(): number {
3111
+ const stats = auditStore?.getStats();
3112
+ return (
3113
+ stats &&
3114
+ (stats.totalSize ??
3115
+ (stats.treeBranchPageCount + stats.treeLeafPageCount + stats.overflowPages) * stats.pageSize)
3116
+ );
3117
+ }
3118
+ static getStorageStats() {
3119
+ const stats = fs.statfsSync(primaryStore.path);
3120
+ return {
3121
+ available: stats.bavail * stats.bsize,
3122
+ free: stats.bfree * stats.bsize,
3123
+ size: stats.blocks * stats.bsize,
3124
+ };
3125
+ }
3126
/**
 * Count the records in the table, skipping deletion markers (null values).
 * A full scan is attempted; if it is on pace to exceed the time limit and an
 * exact count was not requested, the scan stops and the count is estimated
 * from a sample of the first N and last N entries, returned together with a
 * ~95% confidence range.
 * @param options `exactCount: true` forces a full scan regardless of time
 * @returns `{ recordCount }`, plus `estimatedRange: [low, high]` when estimated
 */
static async getRecordCount(options?: any) {
	// iterate through the metadata entries to exclude their count and exclude the deletion counts
	const entryCount = primaryStore.getStats().entryCount;
	const TIME_LIMIT = 1000 / 2; // one second time limit, enforced by seeing if we are halfway through at 500ms
	const start = performance.now();
	const halfway = Math.floor(entryCount / 2);
	const exactCount = options?.exactCount;
	let recordCount = 0;
	let entriesScanned = 0;
	let limit: number; // set only when we bail out into estimation mode
	for (const { value } of primaryStore.getRange({ start: true, lazy: true, snapshot: false })) {
		if (value != null) recordCount++; // null values are deletion markers, not records
		entriesScanned++;
		await rest();
		if (!exactCount && entriesScanned < halfway && performance.now() - start > TIME_LIMIT) {
			// it is taking too long, so we will just take this sample and a sample from the end to estimate
			limit = entriesScanned;
			break;
		}
	}
	if (limit) {
		// in this case we are going to make an estimate of the table count using the first
		// `limit` entries and the last `limit` entries
		const firstRecordCount = recordCount;
		recordCount = 0;
		// second sample: scan backwards from the end of the key space
		for (const { value } of primaryStore.getRange({
			start: '\uffff',
			reverse: true,
			lazy: true,
			limit,
			snapshot: false,
		})) {
			if (value != null) recordCount++;
			await rest();
		}
		const sampleSize = limit * 2;
		const recordRate = (recordCount + firstRecordCount) / sampleSize;
		const variance =
			Math.pow((recordCount - firstRecordCount + 1) / limit / 2, 2) + // variance between samples
			(recordRate * (1 - recordRate)) / sampleSize; // binomial sampling variance
		const sd = Math.max(Math.sqrt(variance) * entryCount, 1);
		const estimatedRecordCount = Math.round(recordRate * entryCount);
		// TODO: This uses a normal/Wald interval, but a binomial confidence interval is probably better calculated using
		// Wilson score interval or Agresti-Coull interval (I think the latter is a little easier to calculate/implement).
		// clamp the interval to what we actually observed / to the total entry count
		const lowerCiLimit = Math.max(estimatedRecordCount - 1.96 * sd, recordCount + firstRecordCount);
		const upperCiLimit = Math.min(estimatedRecordCount + 1.96 * sd, entryCount);
		// round the estimate to a significance level implied by the standard deviation
		let significantUnit = Math.pow(10, Math.round(Math.log10(sd)));
		if (significantUnit > estimatedRecordCount) significantUnit = significantUnit / 10;
		recordCount = Math.round(estimatedRecordCount / significantUnit) * significantUnit;
		return {
			recordCount,
			estimatedRange: [Math.round(lowerCiLimit), Math.round(upperCiLimit)],
		};
	}
	return {
		recordCount,
	};
}
3184
+ /**
3185
+ * When attributes have been changed, we update the accessors that are assigned to this table
3186
+ */
3187
+ static updatedAttributes() {
3188
+ propertyResolvers = this.propertyResolvers = {
3189
+ $id: (object, context, entry) => ({ value: entry.key }),
3190
+ $updatedtime: (object, context, entry) => entry.version,
3191
+ $updatedTime: (object, context, entry) => entry.version,
3192
+ $expiresAt: (object, context, entry) => entry.expiresAt,
3193
+ $record: (object, context, entry) => (entry ? { value: object } : object),
3194
+ $distance: (object, context, entry) => {
3195
+ return entry && (entry.distance ?? context?.vectorDistances?.get(entry));
3196
+ },
3197
+ };
3198
+ for (const attribute of this.attributes) {
3199
+ if (attribute.isPrimaryKey) primaryKeyAttribute = attribute;
3200
+ attribute.resolve = null; // reset this
3201
+ const relationship = attribute.relationship;
3202
+ const computed = attribute.computed;
3203
+ if (relationship) {
3204
+ if (attribute.indexed) {
3205
+ console.error(
3206
+ `A relationship property can not be directly indexed, (but you may want to index the foreign key attribute)`
3207
+ );
3208
+ }
3209
+ if (computed) {
3210
+ console.error(
3211
+ `A relationship property is already computed and can not be combined with a computed function (the relationship will be given precedence)`
3212
+ );
3213
+ }
3214
+ hasRelationships = true;
3215
+ if (relationship.to) {
3216
+ if (attribute.elements?.definition) {
3217
+ propertyResolvers[attribute.name] = attribute.resolve = (object, context, entry, returnEntry?) => {
3218
+ // TODO: Get raw record/entry?
3219
+ const id = object[relationship.from ? relationship.from : primaryKey];
3220
+ const relatedTable = attribute.elements.definition.tableClass;
3221
+ if (returnEntry) {
3222
+ return searchByIndex(
3223
+ { attribute: relationship.to, value: id },
3224
+ txnForContext(context).getReadTxn(),
3225
+ false,
3226
+ relatedTable,
3227
+ false
3228
+ ).map((entry) => {
3229
+ if (entry && entry.key !== undefined) return entry;
3230
+ return relatedTable.primaryStore.getEntry(entry, {
3231
+ transaction: txnForContext(context).getReadTxn(),
3232
+ });
3233
+ }).asArray;
3234
+ }
3235
+ return relatedTable.search([{ attribute: relationship.to, value: id }], context).asArray;
3236
+ };
3237
+ attribute.set = () => {
3238
+ // ideally we want to throw an error here, but if the user had (accidently?) set a property into storage
3239
+ // conflicts with this attribute, we don't want to prevent loading
3240
+ // throw new Error('Setting a one-to-many relationship property is not supported');
3241
+ };
3242
+ attribute.resolve.definition = attribute.elements.definition;
3243
+ // preserve relationship information for searching
3244
+ attribute.resolve.to = relationship.to;
3245
+ if (relationship.from) attribute.resolve.from = relationship.from;
3246
+ } else
3247
+ console.error(
3248
+ `The one-to-many/many-to-many relationship property "${attribute.name}" in table "${tableName}" must have an array type referencing a table as the elements`
3249
+ );
3250
+ } else if (relationship.from) {
3251
+ const definition = attribute.definition || attribute.elements?.definition;
3252
+ if (definition) {
3253
+ propertyResolvers[attribute.name] = attribute.resolve = (object, context, entry, returnEntry?) => {
3254
+ const ids = object[relationship.from];
3255
+ if (ids === undefined) return undefined;
3256
+ if (attribute.elements) {
3257
+ let hasPromises;
3258
+ const results = ids?.map((id) => {
3259
+ const value = definition.tableClass.primaryStore[returnEntry ? 'getEntry' : 'get'](id, {
3260
+ transaction: txnForContext(context).getReadTxn(),
3261
+ });
3262
+ if (value?.then) hasPromises = true;
3263
+ // for now, we shouldn't be getting promises until rocksdb
3264
+ if (TableResource.loadAsInstance === false) Object.freeze(returnEntry ? value?.value : value);
3265
+ return value;
3266
+ });
3267
+ return relationship.filterMissing
3268
+ ? hasPromises
3269
+ ? Promise.all(results).then((results) => results.filter(exists))
3270
+ : results.filter(exists)
3271
+ : hasPromises
3272
+ ? Promise.all(results)
3273
+ : results;
3274
+ }
3275
+ const value = definition.tableClass.primaryStore[returnEntry ? 'getEntry' : 'getSync'](ids, {
3276
+ transaction: txnForContext(context).getReadTxn(),
3277
+ });
3278
+ // for now, we shouldn't be getting promises until rocksdb
3279
+ if (TableResource.loadAsInstance === false) Object.freeze(returnEntry ? value?.value : value);
3280
+ return value;
3281
+ };
3282
+ attribute.set = (object, related) => {
3283
+ if (Array.isArray(related)) {
3284
+ const targetIds = related.map(
3285
+ (related) => related.getId?.() || related[definition.tableClass.primaryKey]
3286
+ );
3287
+ object[relationship.from] = targetIds;
3288
+ } else {
3289
+ const targetId = related.getId?.() || related[definition.tableClass.primaryKey];
3290
+ object[relationship.from] = targetId;
3291
+ }
3292
+ };
3293
+ attribute.resolve.definition = attribute.definition || attribute.elements?.definition;
3294
+ attribute.resolve.from = relationship.from;
3295
+ } else {
3296
+ console.error(
3297
+ `The relationship property "${attribute.name}" in table "${tableName}" must be a type that references a table`
3298
+ );
3299
+ }
3300
+ } else {
3301
+ console.error(
3302
+ `The relationship directive on "${attribute.name}" in table "${tableName}" must use either "from" or "to" arguments`
3303
+ );
3304
+ }
3305
+ } else if (computed) {
3306
+ if (typeof computed.from === 'function') {
3307
+ this.setComputedAttribute(attribute.name, computed.from);
3308
+ }
3309
+ propertyResolvers[attribute.name] = attribute.resolve = (object, context, entry) => {
3310
+ const value = typeof computed.from === 'string' ? object[computed.from] : object;
3311
+ const userResolver = this.userResolvers[attribute.name];
3312
+ if (userResolver) return userResolver(value, context, entry);
3313
+ else {
3314
+ logger.warn?.(
3315
+ `Computed attribute "${attribute.name}" does not have a function assigned to it. Please use setComputedAttribute('${attribute.name}', resolver) to assign a resolver function.`
3316
+ );
3317
					// silence future warnings by just returning undefined
3318
+ this.userResolvers[attribute.name] = () => {};
3319
+ }
3320
+ };
3321
+ attribute.resolve.directReturn = true;
3322
+ } else if (indices[attribute.name]?.customIndex?.propertyResolver) {
3323
+ const customIndex = indices[attribute.name].customIndex;
3324
+ propertyResolvers[attribute.name] = (object, context, entry) => {
3325
+ const value = object[attribute.name];
3326
+ return customIndex.propertyResolver(value, context, entry);
3327
+ };
3328
+ propertyResolvers[attribute.name].directReturn = true;
3329
+ }
3330
+ }
3331
+ assignTrackedAccessors(this, this);
3332
+ assignTrackedAccessors(Updatable, this, true);
3333
+ for (const attribute of attributes) {
3334
+ const name = attribute.name;
3335
+ if (attribute.resolve) {
3336
+ Object.defineProperty(primaryStore.encoder.structPrototype, name, {
3337
+ get() {
3338
+ return attribute.resolve(this, contextStorage.getStore()); // it is only possible to get the context from ALS, we don't have a direct reference to the current context
3339
+ },
3340
+ set(related) {
3341
+ return attribute.set(this, related);
3342
+ },
3343
+ configurable: true,
3344
+ enumerable: attribute.enumerable,
3345
+ });
3346
+ if (attribute.enumerable && !primaryStore.encoder.structPrototype.toJSON) {
3347
+ Object.defineProperty(primaryStore.encoder.structPrototype, 'toJSON', {
3348
+ configurable: true,
3349
+ value() {
3350
+ const json = {};
3351
+ for (const key in this) {
3352
+ // copy all enumerable properties, including from prototype
3353
+ json[key] = this[key];
3354
+ }
3355
+ return json;
3356
+ },
3357
+ });
3358
+ }
3359
+ }
3360
+ }
3361
+ }
3362
+ static setComputedAttribute(attribute_name, resolver) {
3363
+ const attribute = findAttribute(attributes, attribute_name);
3364
+ if (!attribute) {
3365
+ console.error(`The attribute "${attribute_name}" does not exist in the table "${tableName}"`);
3366
+ return;
3367
+ }
3368
+ if (!attribute.computed) {
3369
+ console.error(`The attribute "${attribute_name}" is not defined as computed in the table "${tableName}"`);
3370
+ return;
3371
+ }
3372
+ this.userResolvers[attribute_name] = resolver;
3373
+ }
3374
+ static async deleteHistory(endTime = 0, cleanupDeletedRecords = false) {
3375
+ let completion: Promise<void>;
3376
+ for (const auditRecord of auditStore.getRange({
3377
+ start: 0,
3378
+ end: endTime,
3379
+ })) {
3380
+ await rest(); // yield to other async operations
3381
+ if (auditRecord.tableId !== tableId) continue;
3382
+ completion = removeAuditEntry(auditStore, auditRecord);
3383
+ }
3384
+ if (cleanupDeletedRecords) {
3385
+ // this is separate procedure we can do if the records are not being cleaned up by the audit log. This shouldn't
3386
+ // ever happen, but if there are cleanup failures for some reason, we can run this to clean up the records
3387
+ for (const entry of primaryStore.getRange({ start: 0, versions: true })) {
3388
+ const { value, localTime } = entry;
3389
+ await rest(); // yield to other async operations
3390
+ if (value === null && localTime < endTime) {
3391
+ completion = removeEntry(primaryStore, entry);
3392
+ }
3393
+ }
3394
+ }
3395
+ await completion;
3396
+ }
3397
+ static async *getHistory(startTime = 0, endTime = Infinity) {
3398
+ for (const auditRecord of auditStore.getRange({
3399
+ start: startTime || 1, // if startTime is 0, we actually want to shift to 1 because 0 is encoded as all zeros with audit store's special encoder, and will include symbols
3400
+ end: endTime,
3401
+ })) {
3402
+ await rest(); // yield to other async operations
3403
+ if (auditRecord.tableId !== tableId) continue;
3404
+ yield {
3405
+ id: auditRecord.recordId,
3406
+ localTime: auditRecord.version,
3407
+ version: auditRecord.version,
3408
+ type: auditRecord.type,
3409
+ value: auditRecord.getValue(primaryStore, true, auditRecord.version),
3410
+ user: auditRecord.user,
3411
+ operation: auditRecord.originatingOperation,
3412
+ };
3413
+ }
3414
+ }
3415
+ static async getHistoryOfRecord(id) {
3416
+ const history = [];
3417
+ if (id == undefined) throw new Error('An id is required');
3418
+ const entry = primaryStore.getEntry(id);
3419
+ if (!entry) return history;
3420
+ let nextVersion = entry.localTime;
3421
+ if (!nextVersion) throw new Error('The entry does not have a local audit time');
3422
+ const count = 0;
3423
+ const auditWindow = 100;
3424
+ do {
3425
+ await rest(); // yield to other async operations
3426
+ let insertionPoint = history.length;
3427
+ let highestPreviousVersion = 0;
3428
+ const start = nextVersion - auditWindow;
3429
+ for (const auditRecord of auditStore.getRange({ start, end: nextVersion + 0.001 })) {
3430
+ if (auditRecord.tableId === tableId && compareKeys(auditRecord.recordId, id) === 0) {
3431
+ history.splice(insertionPoint, 0, {
3432
+ id: auditRecord.recordId,
3433
+ localTime: nextVersion,
3434
+ version: auditRecord.version,
3435
+ type: auditRecord.type,
3436
+ value: auditRecord.getValue(primaryStore, true, nextVersion),
3437
+ user: auditRecord.user,
3438
+ operation: auditRecord.originatingOperation,
3439
+ });
3440
+ if (auditRecord.previousVersion > highestPreviousVersion && auditRecord.previousVersion < start) {
3441
+ highestPreviousVersion = auditRecord.previousVersion;
3442
+ }
3443
+ }
3444
+ }
3445
+ nextVersion = highestPreviousVersion;
3446
+ } while (count < 1000 && nextVersion);
3447
+ return history.reverse();
3448
+ }
3449
	// Removes all records from this table's primary store.
	static clear() {
		return primaryStore.clear();
	}
3452
	// Detaches the delete-callback handle registered for this table, if any.
	static cleanup() {
		deleteCallbackHandle?.remove();
	}
3455
	// Internal: returns the read transaction for the given context, claiming or
	// creating a transaction for this table's store as needed (see txnForContext).
	static _readTxnForContext(context) {
		return txnForContext(context).getReadTxn();
	}
3458
+ }
3459
	// Throttled wrapper around calling the external data source's get() to
	// resolve a cache record. The second argument to throttle is the
	// over-capacity handler: when the request queue limit is exceeded it
	// rejects with a 503.
	const throttledCallToSource = throttle(
		async (source, id, sourceContext, existingEntry) => {
			// call the data source if it exists and will fulfill our request for data
			if (source && source.get && (!source.get.reliesOnPrototype || source.prototype.get)) {
				// source.available() (when defined) can veto resolution for this entry
				if (source.available?.(existingEntry) !== false) {
					sourceContext.source = source;
					const resolvedData = await source.get(id, sourceContext);
					if (resolvedData) return resolvedData;
				}
			}
			// falls through to undefined when the source cannot provide data
		},
		() => {
			throw new ServerError('Service unavailable, exceeded request queue limit for resolving cache record', 503);
		}
	);
3474
+
3475
	TableResource.updatedAttributes(); // on creation, update accessors as well
	// expirationMs is milliseconds; setTTLExpiration takes seconds
	if (expirationMs) TableResource.setTTLExpiration(expirationMs / 1000);
	// when records carry their own expiration property, start the eviction sweep
	if (expiresAtProperty) runRecordExpirationEviction();
	return TableResource;
3479
	/**
	 * Synchronizes all secondary indices for a single record mutation: compares
	 * resolved index values of the existing record against the new record and
	 * applies only the necessary removals and additions (diff-based updates).
	 * Runs inside a write transaction, so it is highly performance sensitive.
	 * @returns truthy when any standard (non-custom) index was modified
	 */
	function updateIndices(id: any, existingRecord: any, record: any, options: any) {
		let hasChanges;
		// iterate the entries from the record
		// for-in is about 5x as fast as for-of Object.entries, and this is extremely time sensitive since it can be
		// inside a write transaction
		// TODO: Make an array version of indices that is faster
		for (const key in indices) {
			const index = indices[key];
			const isIndexing = index.isIndexing;
			const resolver = propertyResolvers[key];
			const value = record && (resolver ? resolver(record) : record[key]);
			const existingValue = existingRecord && (resolver ? resolver(existingRecord) : existingRecord[key]);
			// unchanged values need no index work unless the index is still being (re)built
			if (value === existingValue && !isIndexing) {
				continue;
			}
			if (index.customIndex) {
				// custom indices perform their own storage and diffing
				index.customIndex.index(id, value, existingValue, options);
				continue;
			}
			hasChanges = true;
			const indexNulls = index.indexNulls;
			// determine what index values need to be removed and added
			let valuesToAdd = getIndexedValues(value, indexNulls) as any[];
			let valuesToRemove = getIndexedValues(existingValue, indexNulls) as any[];
			if (valuesToRemove?.length > 0) {
				// put this in a conditional so we can do a faster version for new records
				// determine the changes/diff from new values and old values
				const setToRemove = new Set(valuesToRemove);
				valuesToAdd = valuesToAdd
					? valuesToAdd.filter((value) => {
							if (setToRemove.has(value)) {
								// if the value is retained, we don't need to remove or add it, so remove it from the set
								setToRemove.delete(value);
							} else {
								// keep in the list of values to add to index
								return true;
							}
						})
					: [];
				valuesToRemove = Array.from(setToRemove);
				if ((valuesToRemove.length > 0 || valuesToAdd.length > 0) && LMDB_PREFETCH_WRITES) {
					// prefetch any values that have been removed or added
					const valuesToPrefetch = valuesToRemove.concat(valuesToAdd).map((v) => ({ key: v, value: id }));
					index.prefetch?.(valuesToPrefetch, noop);
				}
				//if the update cleared out the attribute value we need to delete it from the index
				for (let i = 0, l = valuesToRemove.length; i < l; i++) {
					index.remove(valuesToRemove[i], id, options);
				}
			} else if (valuesToAdd?.length > 0 && LMDB_PREFETCH_WRITES) {
				// no old values, just new
				index.prefetch?.(
					valuesToAdd.map((v) => ({ key: v, value: id })),
					noop
				);
			}
			if (valuesToAdd) {
				for (let i = 0, l = valuesToAdd.length; i < l; i++) {
					index.put(valuesToAdd[i], id, options);
				}
			}
		}
		return hasChanges;
	}
3543
+ function checkValidId(id) {
3544
+ switch (typeof id) {
3545
+ case 'number':
3546
+ return true;
3547
+ case 'string':
3548
+ if (id.length < 659) return true; // max number of characters that can't expand our key size limit
3549
+ if (id.length > MAX_KEY_BYTES) {
3550
+ // we can quickly determine this is too big
3551
+ throw new Error('Primary key size is too large: ' + id.length);
3552
+ }
3553
+ // TODO: We could potentially have a faster test here, Buffer.byteLength is close, but we have to handle characters < 4 that are escaped in ordered-binary
3554
+ break; // otherwise we have to test it, in this range, unicode characters could put it over the limit
3555
+ case 'object':
3556
+ if (id === null) {
3557
+ throw new Error('Invalid primary key of null');
3558
+ }
3559
+ break; // otherwise we have to test it
3560
+ case 'bigint':
3561
+ if (id < 2n ** 64n && id > -(2n ** 64n)) return true;
3562
+ break; // otherwise we have to test it
3563
+ default:
3564
+ throw new Error('Invalid primary key type: ' + typeof id);
3565
+ }
3566
+ // otherwise it is difficult to determine if the key size is too large
3567
+ // without actually attempting to serialize it
3568
+ const length = writeKey(id, TEST_WRITE_KEY_BUFFER, 0);
3569
+ if (length > MAX_KEY_BYTES) throw new Error('Primary key size is too large: ' + id.length);
3570
+ return true;
3571
+ }
3572
+ function requestTargetToId(target: RequestTargetOrId): Id {
3573
+ return typeof target === 'object' && target ? target.id : (target as Id);
3574
+ }
3575
+ function isSearchTarget(target: RequestTargetOrId): target is RequestTarget {
3576
+ return typeof target === 'object' && target && (target as RequestTarget).isCollection;
3577
+ }
3578
	/**
	 * Loads a record's entry from the local primary store, with remote-residency
	 * redirection (for sharded records) and an adaptive prefetch heuristic that
	 * switches between direct gets and batched worker-thread prefetches.
	 * @param id primary key of the record
	 * @param context request context (may carry a transaction and freshness info)
	 * @param options store get options (transaction, lazy, type, ensureLoaded, ...)
	 * @param sync when true, bypass all prefetching and load synchronously
	 * @param withEntry continuation invoked with (entry, id) once the entry is available
	 */
	function loadLocalRecord(id, context, options, sync, withEntry) {
		if (TableResource.getResidencyById && options.ensureLoaded && context?.replicateFrom !== false) {
			// this is a special case for when the residency can be determined from the id alone (hash-based sharding),
			// allow for a fast path to load the record from the correct node
			const residency = residencyFromFunction(TableResource.getResidencyById(id));
			if (residency) {
				if (!residency.includes(server.hostname) && sourceLoad) {
					// this record is not on this node, so we shouldn't load it here
					return sourceLoad({ key: id, residency }).then(withEntry);
				}
			}
		}
		// TODO: determine if we use lazy access properties
		// actually performs the local get, once any prefetch has completed
		const whenPrefetched = () => {
			if (context?.transaction?.stale) context.transaction.stale = false;
			// if the transaction was closed, which can happen if we are iterating
			// through query results and the iterator ends (abruptly)
			if (options.transaction?.isDone) return withEntry(null, id);
			if (!sync && options) {
				options.async = true;
				return when(primaryStore.getEntry(id, options), withLocalEntry);
			} else {
				return withLocalEntry(primaryStore.getEntry(id, options));
			}
		};
		// post-processes the locally-loaded entry: analytics, freshness bookkeeping,
		// and invalidated-but-resident-elsewhere fallback loading
		function withLocalEntry(entry) {
			// skip recording reads for most system tables except hdb_analytics
			// we want to track analytics reads in licensing, etc.
			if (databaseName !== 'system' && (options.type === 'read' || !options.type)) {
				harperLogger.trace?.('Recording db-read action for', `${databaseName}.${tableName}`);
				recordAction(entry?.size ?? 1, 'db-read', tableName, null);
			}

			// we need to freeze entry records to ensure the integrity of the cache;
			// but we only do this when users have opted into loadAsInstance/freezeRecords to avoid back-compat
			// issues
			Object.freeze(entry?.value);
			if (
				entry?.residencyId &&
				entry.metadataFlags & INVALIDATED &&
				sourceLoad &&
				options.ensureLoaded &&
				context?.replicateFrom !== false
			) {
				// load from other node
				return sourceLoad(entry).then(
					(entry) => withEntry(entry, id),
					(error) => {
						logger.error?.('Error loading remote record', id, entry, options, error);
						return withEntry(null, id);
					}
				);
			}
			if (entry && context) {
				// propagate freshness information to the request context
				if (entry?.version > (context.lastModified || 0)) context.lastModified = entry.version;
				if (entry?.localTime && !context.lastRefreshed) context.lastRefreshed = entry.localTime;
			}
			return withEntry(entry, id);
		}
		// To prefetch or not to prefetch is one of the biggest questions Harper has to make.
		// Prefetching has important benefits as it allows any page fault to be executed asynchronously
		// in the work threads, and it provides event turn yielding, allowing other async functions
		// to execute. However, prefetching is expensive, and the cost of enqueuing a task with the
		// worker threads and enqueuing the callback on the JS thread and the downstream promise handling
		// is usually at least several times more expensive than skipping the prefetch and just directly
		// getting the entry.
		// Determining if we should prefetch is challenging. It is not possible to determine if a page
		// fault will happen, OSes intentionally hide that information. So here we use some heuristics
		// to evaluate if prefetching is a good idea.
		// First, the caller can tell us. If the record is in our local cache, we use that as indication
		// that we can get the value very quickly without a page fault.
		if (sync || isRocksDB) return whenPrefetched();
		// Next, we allow for non-prefetch mode where we can execute some gets without prefetching,
		// but we will limit the number before we do another prefetch
		if (untilNextPrefetch > 0) {
			untilNextPrefetch--;
			return whenPrefetched();
		}
		// Now, we are going to prefetch before loading, so need a promise:
		return new Promise((resolve, reject) => {
			if (untilNextPrefetch === 0) {
				// If we were in non-prefetch mode and used up our non-prefetch gets, we immediately trigger
				// a prefetch for the current id
				untilNextPrefetch--;
				primaryStore.prefetch([id], () => {
					prefetch();
					load();
				});
			} else {
				// If there is a prefetch in flight, we accumulate ids so we can attempt to batch prefetch
				// requests into a single or just a few async operations, reducing the cost of async queuing.
				prefetchIds.push(id);
				prefetchCallbacks.push(load);
				if (prefetchIds.length > MAX_PREFETCH_BUNDLE) {
					untilNextPrefetch--;
					prefetch();
				}
			}
			// flushes the accumulated prefetch batch and adjusts the prefetch/non-prefetch
			// feedback counters based on whether batching proved useful
			function prefetch() {
				if (prefetchIds.length > 0) {
					const callbacks = prefetchCallbacks;
					primaryStore.prefetch(prefetchIds, () => {
						if (untilNextPrefetch === -1) {
							prefetch();
						} else {
							// if there is another prefetch callback pending, we don't need to trigger another prefetch
							untilNextPrefetch++;
						}
						for (const callback of callbacks) callback();
					});
					prefetchIds = [];
					prefetchCallbacks = [];
					// Here is the where the feedback mechanism informs future execution. If we were able
					// to enqueue multiple prefetch requests, this is an indication that we have concurrency
					// and/or page fault/slow data retrieval, and the prefetches are valuable to us, so
					// we stay in prefetch mode.
					// We also reduce the number of non-prefetches we allow in next non-prefetch sequence
					if (nonPrefetchSequence > 2) nonPrefetchSequence--;
				} else {
					// If we have not enqueued any prefetch requests, this is a hint that prefetching may
					// not have been that advantageous, so we let it go back to the non-prefetch mode,
					// for the next few requests. We also increment the number of non-prefetches that
					// we allow so there is a "memory" of how well prefetch vs non-prefetch is going.
					untilNextPrefetch = nonPrefetchSequence;
					if (nonPrefetchSequence < MAX_PREFETCH_SEQUENCE) nonPrefetchSequence++;
				}
			}
			// performs the actual local load after prefetch, routing errors to reject
			function load() {
				try {
					resolve(whenPrefetched());
				} catch (error) {
					reject(error);
				}
			}
		});
	}
3714
+ function getTablePermissions(user: User, target?: RequestTarget) {
3715
+ let permission = target?.checkPermission; // first check to see the request target specifically provides the permissions to authorize
3716
+ if (typeof permission !== 'object') {
3717
+ if (!user?.role) return;
3718
+ permission = user.role.permission;
3719
+ }
3720
+ if (permission.super_user) return FULL_PERMISSIONS;
3721
+ const dbPermission = permission[databaseName];
3722
+ let table: any;
3723
+ const tables = dbPermission?.tables;
3724
+ if (tables) {
3725
+ return tables[tableName];
3726
+ } else if (databaseName === 'data' && (table = permission[tableName]) && !table.tables) {
3727
+ return table;
3728
+ }
3729
+ }
3730
+
3731
	/**
	 * Decides whether a cached entry must be (re)loaded from its data source
	 * and, if so, initiates that load. Returns a thenable when the caller should
	 * wait for (or observe) the source resolution, or undefined when the cached
	 * entry can be used as-is (including the stale-while-revalidate path).
	 * NOTE(review): `context.noCache` is read without optional chaining while
	 * later reads use `context?.` — assumes callers always pass a context; confirm.
	 */
	function ensureLoadedFromSource(source: typeof TableResource, id, entry, context, resource?) {
		if (hasSourceGet) {
			let needsSourceData = false;
			if (context.noCache) needsSourceData = true;
			else {
				if (entry) {
					if (
						!entry.value ||
						entry.metadataFlags & (INVALIDATED | EVICTED) || // invalidated or evicted should go to load from source
						(entry.expiresAt != undefined && entry.expiresAt < Date.now())
					)
						needsSourceData = true;
					// else needsSourceData is left falsy
					// TODO: Allow getEntryByVariation to find a sub-variation of this record and determine if
					// it still needs to be loaded from source
				} else needsSourceData = true;
				// a usable cached entry counts as a cache hit for analytics
				recordActionBinary(!needsSourceData, 'cache-hit', tableName);
			}
			if (needsSourceData) {
				const loadingFromSource = getFromSource(source, id, entry, context).then((entry) => {
					if (entry?.value && entry?.value.getRecord?.())
						logger.error?.('Can not assign a record that is already a resource');
					if (context) {
						// propagate freshness information to the request context
						if (entry?.version > (context.lastModified || 0)) context.lastModified = entry.version;
						context.lastRefreshed = Date.now(); // localTime is probably not available yet
					}
					return entry;
				});
				// if the resource defines a method for indicating if stale-while-revalidate is allowed for a record
				if (context?.onlyIfCached || (entry?.value && resource?.allowStaleWhileRevalidate?.(entry, id))) {
					// since we aren't waiting for it any errors won't propagate so we should at least log them
					loadingFromSource.catch((error) => logger.warn?.(error));
					if (context?.onlyIfCached && !resource.doesExist()) throw new ServerError('Entry is not cached', 504);
					return; // go ahead and return and let the current stale value be used while we re-validate
				} else return loadingFromSource; // return the promise for the resolved value
			}
		} else if (entry?.value) {
			// if we don't have a source, but we have an entry, we check the expiration
			if (entry.expiresAt != undefined && entry.expiresAt < Date.now()) {
				// if it has expired and there is no source, we evict it and then return null, using a fake promise to indicate that this is providing the response
				TableResource.evict(entry.key, entry.value, entry.version);
				entry.value = null;
				return {
					then(callback) {
						return callback(entry); // return undefined, no source to get data from
					},
				};
			}
		}
	}
3781
	/**
	 * Finds or creates the transaction for this table's store within a context.
	 * A context chains one transaction per database via `.next`; the matching
	 * transaction (by store path) is reused, otherwise a new one is appended to
	 * the chain. Without a context transaction, an immediate (auto-commit)
	 * transaction is created and attached to the context when one exists.
	 */
	function txnForContext(context: Context) {
		let transaction = context?.transaction;
		if (transaction) {
			if (!transaction.db && isRocksDB) {
				// this is an uninitialized DatabaseTransaction, we can claim it
				transaction.db = primaryStore;
				if (context?.timestamp) transaction.timestamp = context.timestamp;
				return transaction;
			}
			do {
				// See if this is a transaction for our database and if so, use it
				if (transaction.db?.path === primaryStore.path) return transaction;
				// try the next one:
				const nextTxn = transaction.next;
				if (!nextTxn) {
					// no next one, then add our database
					transaction = transaction.next = isRocksDB ? new DatabaseTransaction() : new LMDBTransaction();
					transaction.db = primaryStore;
					return transaction;
				}
				transaction = nextTxn;
			} while (true);
		} else {
			transaction = isRocksDB ? new ImmediateTransaction(primaryStore) : new ImmediateLMDBTransaction(primaryStore);
			if (context) {
				context.transaction = transaction;
				if (context.timestamp) transaction.timestamp = context.timestamp;
			}
			return transaction;
		}
	}
3812
	/**
	 * Resolves an attribute value from an entry, applying any property resolvers
	 * (relationships, computed attributes). `attribute_name` may be an array of
	 * names describing a nested attribute path across related tables.
	 */
	function getAttributeValue(entry, attribute_name, context) {
		if (!entry) {
			return;
		}
		// the entry may hold a weak reference; fall back to re-reading the store if collected
		const record = (entry.deref ? entry.deref() : entry.value) ?? primaryStore.getEntry(entry.key)?.value;
		if (typeof attribute_name === 'object') {
			// attribute_name is an array of attributes, pointing to nested attribute
			let resolvers = propertyResolvers;
			let value = record;
			for (let i = 0, l = attribute_name.length; i < l; i++) {
				const attribute = attribute_name[i];
				const resolver = resolvers?.[attribute];
				value = resolver && value ? resolver(value, context, entry) : value?.[attribute];
				entry = null; // can't use this in the nested object
				// descend into the related table's resolvers for the next path segment
				resolvers = resolver?.definition?.tableClass?.propertyResolvers;
			}
			return value;
		}
		const resolver = propertyResolvers[attribute_name];
		return resolver ? resolver(record, context, entry) : record[attribute_name];
	}
3833
	/**
	 * Transforms a lazy sequence of ids (or pre-loaded entries) into a sequence
	 * of entries, applying record-level filters and fast id-level filters where
	 * available. When no filters apply and the sequence already holds entries,
	 * it is returned unchanged.
	 */
	function transformToEntries(ids, select, context, readTxn, filters?) {
		// TODO: Test and ensure that we break out of these loops when a connection is lost
		const filtersLength = filters?.length;
		const loadOptions = {
			transaction: readTxn,
			// lazy loading pays off when we will filter or only project a few fields
			lazy: filtersLength > 0 || typeof select === 'string' || select?.length < 4,
			alwaysPrefetch: true,
		};
		let idFiltersApplied;
		// for filter operations, we intentionally use async and yield the event turn so that scanning queries
		// do not hog resources and give more processing opportunity for more efficient index-driven queries.
		// this also gives an opportunity to prefetch and ensure any page faults happen in a different thread
		function processEntry(entry: Entry, id?) {
			const record = entry?.value;
			if (!record) return SKIP;
			// apply the record-level filters
			for (let i = 0; i < filtersLength; i++) {
				if (idFiltersApplied?.includes(i)) continue; // already applied
				if (!filters[i](record, entry)) return SKIP; // didn't match filters
			}
			if (id !== undefined) entry.key = id;
			return entry;
		}
		if (filtersLength > 0 || !ids.hasEntries) {
			let results = ids.map((idOrEntry) => {
				idFiltersApplied = null;
				if (typeof idOrEntry === 'object' && idOrEntry?.key !== undefined)
					return filtersLength > 0 ? processEntry(idOrEntry) : idOrEntry; // already an entry
				if (idOrEntry == undefined) {
					return SKIP;
				}
				// it is an id, so we can try to use id any filters that are available (note that these can come into existence later, during the query)
				for (let i = 0; i < filtersLength; i++) {
					const filter = filters[i];
					const idFilter = filter.idFilter;
					if (idFilter) {
						if (!idFilter(idOrEntry)) return SKIP; // didn't match filters
						// record which filters were already satisfied by the id alone so
						// processEntry does not re-apply them against the loaded record
						if (!idFiltersApplied) idFiltersApplied = [];
						idFiltersApplied.push(i);
					}
				}
				return loadLocalRecord(idOrEntry, context, loadOptions, false, processEntry);
			});
			if (Array.isArray(ids)) results = results.filter((entry) => entry !== SKIP);
			results.hasEntries = true;
			return results;
		}
		return ids;
	}
3882
+
3883
	/**
	 * Compares a transaction timestamp against an existing entry's version.
	 * @returns 1 when the update wins (newer, or ties and its node name sorts
	 * greater), -1 when it precedes (loses to) the existing version, 0 for an
	 * exact tie (same version, same node).
	 */
	function precedesExistingVersion(txnTime: number, existingEntry: Entry, nodeId?: number): number {
		if (nodeId === undefined) {
			nodeId = server.replication?.getThisNodeId(auditStore);
		}

		// when existingEntry is undefined this comparison is false and we fall through to return 1
		if (txnTime <= existingEntry?.version) {
			if (existingEntry?.version === txnTime && nodeId !== undefined) {
				// if we have a timestamp tie, we break the tie by comparing the node name of the
				// existing entry to the node name of the update
				const nodeNameToId = server.replication?.exportIdMapping(auditStore);
				let existingNodeId = existingEntry.nodeId;
				if (nodeId === existingNodeId) {
					return 0; // early match for a tie
				}
				let updatedNodeName, existingNodeName;
				// reverse-lookup both node names from the id mapping
				for (const node_name in nodeNameToId) {
					if (nodeNameToId[node_name] === nodeId) updatedNodeName = node_name;
					if (nodeNameToId[node_name] === existingNodeId) existingNodeName = node_name;
				}
				if (updatedNodeName > existingNodeName)
					// if the updated node name is greater (alphabetically), it wins (it doesn't precede the existing version)
					return 1;
				if (updatedNodeName === existingNodeName) return 0; // a tie
			}
			// transaction time is older than existing version, so we treat that as an update that loses to the existing record version
			return -1;
		}
		return 1;
	}
3912
+
3913
	/**
	 * Retrieves a record from its external data source, recording the resolution.
	 */
3916
+ async function getFromSource(
3917
+ source: typeof TableResource,
3918
+ id: Id,
3919
+ existingEntry: Entry,
3920
+ context: Context
3921
+ ): Promise<Entry> {
3922
+ const metadataFlags = existingEntry?.metadataFlags;
3923
+
3924
+ const existingVersion = existingEntry?.version;
3925
+ let whenResolved, timer;
3926
+ // We start by locking the record so that there is only one resolution happening at once;
3927
+ // if there is already a resolution in process, we want to use the results of that resolution
3928
+ // tryLock() will return true if we got the lock, and the callback won't be called.
3929
+ // If another thread has the lock it returns false and then the callback is called once
3930
+ // the other thread releases the lock.
3931
+ const callback = () => {
3932
+ // This is called when another thread releases the lock on resolution. Hopefully
3933
+ // it should be resolved now and we can use the value it saved.
3934
+ clearTimeout(timer);
3935
+ const entry = primaryStore.getEntry(id);
3936
+ if (
3937
+ !entry ||
3938
+ !entry.value ||
3939
+ entry.metadataFlags & (INVALIDATED | EVICTED) ||
3940
+ (entry.expiresAt != undefined && entry.expiresAt < Date.now())
3941
+ )
3942
+ // try again
3943
+ whenResolved(getFromSource(source, id, primaryStore.getEntry(id), context));
3944
+ else whenResolved(entry);
3945
+ };
3946
+ const lockAcquired = primaryStore.tryLock(id, callback);
3947
+
3948
+ if (!lockAcquired) {
3949
+ return new Promise((resolve) => {
3950
+ whenResolved = resolve;
3951
+ timer = setTimeout(() => {
3952
+ primaryStore.unlock(id);
3953
+ }, LOCK_TIMEOUT);
3954
+ });
3955
+ }
3956
+
3957
+ const existingRecord = existingEntry?.value;
3958
+ // it is important to remember that this is _NOT_ part of the current transaction; nothing is changing
3959
+ // with the canonical data, we are simply fulfilling our local copy of the canonical data, but still don't
3960
+ // want a timestamp later than the current transaction
3961
+ // we create a new context for the source, we want to determine the timestamp and don't want to
3962
+ // attribute this to the current user
3963
+ const sourceContext = {
3964
+ requestContext: context,
3965
+ // provide access to previous data
3966
+ replacingRecord: existingRecord,
3967
+ replacingEntry: existingEntry,
3968
+ replacingVersion: existingVersion,
3969
+ noCacheStore: false,
3970
+ source: null,
3971
+ // use the same resource cache as a parent context so that if modifications are made to resources,
3972
+ // they are visible in the parent requesting context
3973
+ resourceCache: context?.resourceCache,
3974
+ transaction: undefined,
3975
+ expiresAt: undefined,
3976
+ lastModified: undefined,
3977
+ };
3978
+ const responseHeaders = context?.responseHeaders;
3979
+ return new Promise((resolve, reject) => {
3980
+ // we don't want to wait for the transaction because we want to return as fast as possible
3981
+ // and let the transaction commit in the background
3982
+ let resolved;
3983
+ when(
3984
+ transaction(sourceContext, async (_txn) => {
3985
+ const start = performance.now();
3986
+ let updatedRecord;
3987
+ let hasChanges, invalidated;
3988
+ try {
3989
+ updatedRecord = await throttledCallToSource(source, id, sourceContext, existingEntry);
3990
+ invalidated = metadataFlags & INVALIDATED;
3991
+ let version = sourceContext.lastModified || (invalidated && existingVersion);
3992
+ hasChanges = invalidated || version > existingVersion || !existingRecord;
3993
+ const resolveDuration = performance.now() - start;
3994
+ recordAction(resolveDuration, 'cache-resolution', tableName, null, 'success');
3995
+ if (responseHeaders)
3996
+ appendHeader(responseHeaders, 'Server-Timing', `cache-resolve;dur=${resolveDuration.toFixed(2)}`, true);
3997
+ if (expirationMs && sourceContext.expiresAt == undefined)
3998
+ sourceContext.expiresAt = Date.now() + expirationMs;
3999
+ if (updatedRecord) {
4000
+ if (typeof updatedRecord !== 'object') throw new Error('Only objects can be cached and stored in tables');
4001
+ if (updatedRecord.status > 0 && updatedRecord.headers) {
4002
+ // if the source has a status code and headers, treat it as a response
4003
+ if (updatedRecord.status >= 300) {
4004
+ if (updatedRecord.status === 304) {
4005
+ // revalidation of our current cached record
4006
+ updatedRecord = existingRecord;
4007
+ version = existingVersion;
4008
+ } else {
4009
+ // if the source has an error status, we need to throw an error
4010
+ throw new ServerError(updatedRecord.body || 'Error from source', updatedRecord.status);
4011
+ } // there are definitely more status codes to handle
4012
+ } else {
4013
+ let headers: any;
4014
+ const sourceHeaders = updatedRecord.headers;
4015
+ if (sourceHeaders[Symbol.iterator]) {
4016
+ headers = {};
4017
+ for (let [name, value] of sourceHeaders) {
4018
+ headers[name.toLowerCase()] = value;
4019
+ }
4020
+ } else {
4021
+ headers = sourceHeaders; // just a plain object
4022
+ }
4023
+ const contentType = sourceHeaders.get?.('Content-Type');
4024
+ let data: any;
4025
+ if (contentType === 'application/json' && updatedRecord.json) {
4026
+ // use native .json() if possible
4027
+ data = await updatedRecord.json();
4028
+ } else {
4029
+ const contentTypeHandler = contentType && contentTypes.get(contentType);
4030
+ if (contentTypeHandler?.deserialize) {
4031
+ data = contentTypeHandler.deserialize(
4032
+ await (contentType.startsWith('text/') ? updatedRecord.text() : updatedRecord.bytes())
4033
+ );
4034
+ }
4035
+ }
4036
+ if (data !== undefined) {
4037
+ // we have structured data that we have parsed
4038
+ delete headers['content-type']; // don't store the content type if we have already parsed it
4039
+ updatedRecord = {
4040
+ headers,
4041
+ data,
4042
+ };
4043
+ } else {
4044
+ updatedRecord = {
4045
+ headers,
4046
+ body: createBlob(updatedRecord.body),
4047
+ };
4048
+ }
4049
+ }
4050
+ }
4051
+ if (typeof updatedRecord.toJSON === 'function') updatedRecord = updatedRecord.toJSON();
4052
+ if (primaryKey && updatedRecord[primaryKey] !== id) updatedRecord[primaryKey] = id;
4053
+ }
4054
+ resolved = true;
4055
+ resolve({
4056
+ key: id,
4057
+ version,
4058
+ value: updatedRecord,
4059
+ });
4060
+ } catch (error) {
4061
+ error.message += ` while resolving record ${id} for ${tableName}`;
4062
+ if (
4063
+ existingRecord &&
4064
+ (((error.code === 'ECONNRESET' || error.code === 'ECONNREFUSED' || error.code === 'EAI_AGAIN') &&
4065
+ !context?.mustRevalidate) ||
4066
+ (context?.staleIfError &&
4067
+ (error.statusCode === 500 ||
4068
+ error.statusCode === 502 ||
4069
+ error.statusCode === 503 ||
4070
+ error.statusCode === 504)))
4071
+ ) {
4072
+ // these are conditions under which we can use stale data after an error
4073
+ resolve({
4074
+ key: id,
4075
+ version: existingVersion,
4076
+ value: existingRecord,
4077
+ });
4078
+ logger.trace?.(error.message, '(returned stale record)');
4079
+ } else reject(error);
4080
+ const resolveDuration = performance.now() - start;
4081
+ recordAction(resolveDuration, 'cache-resolution', tableName, null, 'fail');
4082
+ if (responseHeaders)
4083
+ appendHeader(responseHeaders, 'Server-Timing', `cache-resolve;dur=${resolveDuration.toFixed(2)}`, true);
4084
+ sourceContext.transaction.abort();
4085
+ return;
4086
+ }
4087
+ if (context?.noCacheStore || sourceContext.noCacheStore) {
4088
+ // abort before we write any change
4089
+ sourceContext.transaction.abort();
4090
+ return;
4091
+ }
4092
+ const dbTxn = txnForContext(sourceContext);
4093
+ dbTxn.addWrite({
4094
+ key: id,
4095
+ store: primaryStore,
4096
+ entry: existingEntry,
4097
+ nodeName: 'source',
4098
+ before: preCommitBlobsForRecordBefore(updatedRecord),
4099
+ commit: (txnTime, existingEntry, _retry, transaction: any) => {
4100
+ if (existingEntry?.version !== existingVersion) {
4101
+ // don't do anything if the version has changed
4102
+ return;
4103
+ }
4104
+ updateIndices(id, existingRecord, updatedRecord);
4105
+ if (updatedRecord) {
4106
+ if (existingEntry) {
4107
+ context.previousResidency = TableResource.getResidencyRecord(existingEntry.residencyId);
4108
+ }
4109
+ let auditRecord: any;
4110
+ let omitLocalRecord = false;
4111
+ let residencyId: number;
4112
+ const residency = residencyFromFunction(TableResource.getResidency(updatedRecord, context));
4113
+ if (residency) {
4114
+ if (!residency.includes(server.hostname)) {
4115
+ // if we aren't in the residency list, specify that our local record should be omitted or be partial
4116
+ auditRecord = updatedRecord;
4117
+ omitLocalRecord = true;
4118
+ if (TableResource.getResidencyById) {
4119
+ // complete omission of the record that doesn't belong here
4120
+ updatedRecord = undefined;
4121
+ } else {
4122
+ // store the partial record
4123
+ updatedRecord = null;
4124
+ for (const name in indices) {
4125
+ if (!updatedRecord) {
4126
+ updatedRecord = {};
4127
+ }
4128
+ // if there are any indices, we need to preserve a partial invalidated record to ensure we can still do searches
4129
+ updatedRecord[name] = auditRecord[name];
4130
+ }
4131
+ }
4132
+ }
4133
+ residencyId = getResidencyId(residency);
4134
+ }
4135
+ logger.trace?.(
4136
+ `Writing resolved record from source with id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`
4137
+ );
4138
+ // TODO: We are doing a double check for ifVersion that should probably be cleaned out
4139
+ updateRecord(
4140
+ id,
4141
+ updatedRecord,
4142
+ existingEntry,
4143
+ txnTime,
4144
+ omitLocalRecord ? INVALIDATED : 0,
4145
+ (audit && (hasChanges || omitLocalRecord)) || null,
4146
+ {
4147
+ user: sourceContext?.user,
4148
+ expiresAt: sourceContext.expiresAt,
4149
+ residencyId,
4150
+ transaction,
4151
+ tableToTrack: tableName,
4152
+ },
4153
+ 'put',
4154
+ Boolean(invalidated),
4155
+ auditRecord
4156
+ );
4157
+ } else if (existingEntry) {
4158
+ logger.trace?.(
4159
+ `Deleting resolved record from source with id: ${id}, timestamp: ${new Date(txnTime).toISOString()}`
4160
+ );
4161
+ if (audit || trackDeletes) {
4162
+ updateRecord(
4163
+ id,
4164
+ null,
4165
+ existingEntry,
4166
+ txnTime,
4167
+ 0,
4168
+ (audit && hasChanges) || null,
4169
+ { user: sourceContext?.user, transaction, tableToTrack: tableName },
4170
+ 'delete',
4171
+ Boolean(invalidated)
4172
+ );
4173
+ } else {
4174
+ removeEntry(primaryStore, existingEntry, existingVersion);
4175
+ }
4176
+ }
4177
+ },
4178
+ });
4179
+ }),
4180
+ () => {
4181
+ primaryStore.unlock(id);
4182
+ },
4183
+ (error) => {
4184
+ primaryStore.unlock(id);
4185
+ if (resolved) logger.error?.('Error committing cache update', error);
4186
+ // else the error was already propagated as part of the promise that we returned
4187
+ }
4188
+ );
4189
+ });
4190
+ }
4191
+
4192
+ /**
4193
+ * Verify that the context does not have any replication parameters that are not allowed
4194
+ * @param context
4195
+ */
4196
+ function checkContextPermissions(context: Context): boolean {
4197
+ if (!context) return true;
4198
+ if (context.user?.role?.permission?.super_user) return true;
4199
+ if (context.replicateTo)
4200
+ throw new ClientError('Can not specify replication parameters without super user permissions', 403);
4201
+ if (context.replicatedConfirmation)
4202
+ throw new ClientError('Can not specify replication confirmation without super user permissions', 403);
4203
+ return true;
4204
+ }
4205
/**
 * Schedule (or reschedule) the periodic cleanup scan for this table. The scan
 * removes old delete markers and evicts expired records.
 * @param priority optional urgency; a jump of more than 1 over the current
 *        priority triggers an immediate run, and a higher priority both shortens
 *        the scheduling interval and loosens the eviction heuristic
 * @returns on the worker thread that owns cleanup, a promise that resolves when
 *          the next scan completes; otherwise undefined
 */
function scheduleCleanup(priority?: number): Promise<void> | void {
	let runImmediately = false;
	if (priority) {
		// run immediately if there is a big increase in priority
		if (priority - cleanupPriority > 1) runImmediately = true;
		cleanupPriority = priority;
	}
	// Periodically evict expired records and deleted records searching for records who expiresAt timestamp is before now
	// No change in interval and no urgency: the existing timer is already correct.
	if (cleanupInterval === lastCleanupInterval && !runImmediately) return;
	lastCleanupInterval = cleanupInterval;
	if (getWorkerIndex() === getWorkerCount() - 1) {
		// run on the last thread so we aren't overloading lower-numbered threads
		if (cleanupTimer) clearTimeout(cleanupTimer);
		if (!cleanupInterval) return;
		return new Promise((resolve) => {
			// Align scheduling to fixed cycles counted from the start of the year so a
			// restart continues the same cadence instead of resetting it.
			const startOfYear = new Date();
			startOfYear.setMonth(0);
			startOfYear.setDate(1);
			startOfYear.setHours(0);
			startOfYear.setMinutes(0);
			startOfYear.setSeconds(0);
			// higher priority shrinks the effective interval
			const nextInterval = cleanupInterval / (1 + cleanupPriority);
			// find the next scheduled run based on regular cycles from the beginning of the year (if we restart, this enables a good continuation of scheduling)
			const nextScheduled = runImmediately
				? Date.now()
				: Math.ceil((Date.now() - startOfYear.getTime()) / nextInterval) * nextInterval + startOfYear.getTime();
			// NOTE: the parameter deliberately shadows the outer nextScheduled — each timer
			// computes the following run's time when it fires.
			const startNextTimer = (nextScheduled) => {
				logger.trace?.(`Scheduled next cleanup scan at ${new Date(nextScheduled)}`);
				// noinspection JSVoidFunctionReturnValueUsed
				cleanupTimer = setTimeout(
					() =>
						// chain onto lastEvictionCompletion so scans never overlap
						(lastEvictionCompletion = lastEvictionCompletion.then(async () => {
							// schedule the next run for when the next cleanup interval should occur (or now if it is in the past)
							startNextTimer(Math.max(nextScheduled + cleanupInterval, Date.now()));
							const rootStore = primaryStore.rootStore;
							if (rootStore.status !== 'open') {
								// the store was closed; stop rescheduling
								clearTimeout(cleanupTimer);
								return;
							}
							// bound how many removal/eviction operations may be in flight at once
							const MAX_CLEANUP_CONCURRENCY = 50;
							const outstandingCleanupOperations = new Array(MAX_CLEANUP_CONCURRENCY);
							let cleanupIndex = 0;
							const evictThreshold =
								Math.pow(cleanupPriority, 8) *
								(envMngr.get(CONFIG_PARAMS.STORAGE_RECLAMATION_EVICTIONFACTOR) ?? 100000);
							// higher priority shortens the grace period after expiration
							const adjustedEviction = evictionMs / Math.pow(Math.max(cleanupPriority, 1), 4);
							logger.debug?.(
								`Starting cleanup scan for ${tableName}, evict threshold ${evictThreshold}, adjusted eviction ${adjustedEviction}ms`
							);
							// Decide whether an entry should be evicted now: always once the adjusted
							// eviction time has passed; under priority pressure, earlier via a
							// size/age heuristic (larger, longer-expired records go first).
							function shouldEvict(expiresAt: number, version: number, metadataFlags: number, record: any) {
								const evictWhen = expiresAt + adjustedEviction - Date.now();
								if (evictWhen < 0) return true;
								else if (cleanupPriority) {
									let size = primaryStore.lastSize;
									if (metadataFlags & HAS_BLOBS) {
										// include blob sizes in the record's effective size
										findBlobsInObject(record, (blob) => {
											if (blob.size) size += blob.size;
										});
									}
									logger.trace?.(
										`shouldEvict adjusted ${evictWhen} ${size}, ${(evictWhen * (expiresAt - version)) / size} < ${evictThreshold}`
									);
									// heuristic to determine if we should perform early eviction based on priority
									return (evictWhen * (expiresAt - version)) / size < evictThreshold;
								}
								return false;
							}

							try {
								let count = 0;
								// deletion markers are cleaned here unless LMDB audit cleanup handles them
								let removeDeletedRecords = !audit || isRocksDB;
								// iterate through all entries to find expired records and deleted records
								for (const entry of primaryStore.getRange({
									start: false,
									snapshot: false, // we don't want to keep read transaction snapshots open
									versions: true,
									lazy: true, // only want to access metadata most of the time
								})) {
									const { key, value: record, version, expiresAt, metadataFlags } = entry;
									// if there is no auditing cleanup and we are tracking deletion, need to do cleanup of
									// these deletion entries (LMDB audit cleanup has its own scheduled job for this)
									let resolution: Promise<void>;
									if (record === null && removeDeletedRecords && version + auditRetention < Date.now()) {
										// make sure it is still deleted when we do the removal
										resolution = removeEntry(primaryStore, entry, version);
									} else if (expiresAt != undefined && shouldEvict(expiresAt, version, metadataFlags, record)) {
										// evict!
										resolution = TableResource.evict(key, record, version);
										count++;
									}
									if (resolution) {
										// wait for the operation that previously occupied this slot,
										// keeping at most MAX_CLEANUP_CONCURRENCY operations in flight
										await outstandingCleanupOperations[cleanupIndex];
										outstandingCleanupOperations[cleanupIndex] = resolution.catch((error) => {
											logger.error?.('Cleanup error', error);
										});
										if (++cleanupIndex >= MAX_CLEANUP_CONCURRENCY) cleanupIndex = 0;
									}
									// yield to the event loop between entries so the scan doesn't starve other work
									await rest();
								}
								logger.debug?.(`Finished cleanup scan for ${tableName}, evicted ${count} entries`);
							} catch (error) {
								logger.warn?.(`Error in cleanup scan for ${tableName}:`, error);
							}
							resolve(undefined);
							cleanupPriority = 0; // reset the priority
						})),
					Math.min(nextScheduled - Date.now(), 0x7fffffff) // make sure it can fit in 32-bit signed number
				).unref(); // don't let this prevent closing the thread
			};
			startNextTimer(nextScheduled);
		});
	}
}
4318
+ function addDeleteRemoval() {
4319
+ deleteCallbackHandle = auditStore?.addDeleteRemovalCallback(tableId, primaryStore, (id: Id, version: number) => {
4320
+ primaryStore.remove(id, version);
4321
+ });
4322
+ }
4323
+ function runRecordExpirationEviction() {
4324
+ // Periodically evict expired records, searching for records who expiresAt timestamp is before now
4325
+ if (getWorkerIndex() === 0) {
4326
+ // we want to run the pruning of expired records on only one thread so we don't have conflicts in evicting
4327
+ setInterval(async () => {
4328
+ // go through each database and table and then search for expired entries
4329
+ // find any entries that are set to expire before now
4330
+ if (runningRecordExpiration) return;
4331
+ runningRecordExpiration = true;
4332
+ try {
4333
+ const expiresAtName = expiresAtProperty.name;
4334
+ const index = indices[expiresAtName];
4335
+ if (!index) throw new Error(`expiresAt attribute ${expiresAtProperty} must be indexed`);
4336
+ for (const key of index.getRange({
4337
+ start: true,
4338
+ values: false,
4339
+ end: Date.now(),
4340
+ snapshot: false,
4341
+ })) {
4342
+ for (const id of index.getValues(key)) {
4343
+ const recordEntry = primaryStore.getEntry(id);
4344
+ if (!recordEntry?.value) {
4345
+ // cleanup the index if the record is gone
4346
+ primaryStore.ifVersion(id, recordEntry?.version, () => index.remove(key, id));
4347
+ } else if (recordEntry.value[expiresAtName] < Date.now()) {
4348
+ // make sure the record hasn't changed and won't change while removing
4349
+ TableResource.evict(id, recordEntry.value, recordEntry.version);
4350
+ }
4351
+ }
4352
+ await rest();
4353
+ }
4354
+ } catch (error) {
4355
+ logger.error?.('Error in evicting old records', error);
4356
+ } finally {
4357
+ runningRecordExpiration = false;
4358
+ }
4359
+ }, RECORD_PRUNING_INTERVAL).unref();
4360
+ }
4361
+ }
4362
+ function residencyFromFunction(shardOrResidencyList: ResidencyDefinition): string[] | void {
4363
+ if (shardOrResidencyList == undefined) return;
4364
+ if (Array.isArray(shardOrResidencyList)) return shardOrResidencyList;
4365
+ if (typeof shardOrResidencyList === 'number') {
4366
+ if (shardOrResidencyList >= 65536) throw new Error(`Shard id ${shardOrResidencyList} must be below 65536`);
4367
+ const residencyList = server.shards?.get?.(shardOrResidencyList);
4368
+ if (residencyList) {
4369
+ logger.trace?.(`Shard ${shardOrResidencyList} mapped to ${residencyList.map((node) => node.name).join(', ')}`);
4370
+ return residencyList.map((node) => node.name);
4371
+ }
4372
+ throw new Error(`Shard ${shardOrResidencyList} is not defined`);
4373
+ }
4374
+ throw new Error(
4375
+ `Shard or residency list ${shardOrResidencyList} is not a valid type, must be a shard number or residency list of node hostnames`
4376
+ );
4377
+ }
4378
+ function getResidencyId(ownerNodeNames) {
4379
+ if (ownerNodeNames) {
4380
+ const setKey = ownerNodeNames.join(',');
4381
+ let residencyId = dbisDb.get([Symbol.for('residency_by_set'), setKey]);
4382
+ if (residencyId) return residencyId;
4383
+ dbisDb.put(
4384
+ [Symbol.for('residency_by_set'), setKey],
4385
+ (residencyId = Math.floor(Math.random() * 0x7fff0000) + 0xffff)
4386
+ );
4387
+ dbisDb.put([Symbol.for('residency_by_id'), residencyId], ownerNodeNames);
4388
+ return residencyId;
4389
+ }
4390
+ }
4391
+ function preCommitBlobsForRecordBefore(
4392
+ record: any,
4393
+ before?: () => Promise<void>,
4394
+ saveInRecord?: boolean
4395
+ ): Promise<void> | void {
4396
+ const blobCompletion = startPreCommitBlobsForRecord(record, primaryStore.rootStore, saveInRecord);
4397
+ if (blobCompletion) {
4398
+ // if there are blobs that we have started saving, they need to be saved and completed before we commit, so we need to wait for
4399
+ // them to finish and we return a new callback for the before phase of the commit
4400
+ const callSources = before;
4401
+ return callSources
4402
+ ? async () => {
4403
+ // if we are calling the sources first and waiting for blobs, do those in order
4404
+ await callSources();
4405
+ await blobCompletion();
4406
+ }
4407
+ : () => blobCompletion();
4408
+ }
4409
+ return before;
4410
+ }
4411
+ }
4412
+
4413
+ function attributesAsObject(attribute_permissions, type) {
4414
+ const attrObject = attribute_permissions.attr_object || (attribute_permissions.attr_object = {});
4415
+ let attrsForType = attrObject[type];
4416
+ if (attrsForType) return attrsForType;
4417
+ attrsForType = attrObject[type] = Object.create(null);
4418
+ for (const permission of attribute_permissions) {
4419
+ attrsForType[permission.attribute_name] = permission[type];
4420
+ }
4421
+ return attrsForType;
4422
+ }
4423
function noop() {
	// intentionally empty — used as a no-op prefetch callback
}
4426
+
4427
+ const ENDS_WITH_TIMEZONE = /[+-][0-9]{2}:[0-9]{2}|[a-zA-Z]$/;
4428
+ /**
4429
+ * Coerce a string to the type defined by the attribute
4430
+ * @param value
4431
+ * @param attribute
4432
+ * @returns
4433
+ */
4434
+ export function coerceType(value: any, attribute: any): any {
4435
+ const type = attribute?.type;
4436
+ //if a type is String is it safe to execute a .toString() on the value and return? Does not work for Array/Object so we would need to detect if is either of those first
4437
+ if (value === null) {
4438
+ return value;
4439
+ } else if (value === '' && type && type !== 'String' && type !== 'Any') {
4440
+ return null;
4441
+ }
4442
+ try {
4443
+ switch (type) {
4444
+ case 'Int':
4445
+ case 'Long':
4446
+ // allow $ prefix as special syntax for more compact numeric representations and then use parseInt to force being an integer (might consider Math.floor, which is a little faster, but rounds in a different way with negative numbers).
4447
+ if (value[0] === '$') return rejectNaN(parseInt(value.slice(1), 36));
4448
+ if (value === 'null') return null;
4449
+ // strict check to make sure it is really an integer (there is also a sensible conversion from dates)
4450
+ if (!/^-?[0-9]+$/.test(value) && !(value instanceof Date)) throw new SyntaxError();
4451
+ return rejectNaN(+value); // numeric conversion is stricter than parseInt
4452
+ case 'Float':
4453
+ return value === 'null' ? null : rejectNaN(+value); // numeric conversion is stricter than parseFloat
4454
+ case 'BigInt':
4455
+ return value === 'null' ? null : BigInt(value);
4456
+ case 'Boolean':
4457
+ return autoCastBooleanStrict(value);
4458
+ case 'Date':
4459
+ if (isNaN(value)) {
4460
+ if (value === 'null') return null;
4461
+ //if the value is not an integer (to handle epoch values) and does not end in a timezone we suffiz with 'Z' tom make sure the Date is GMT timezone
4462
+ if (!ENDS_WITH_TIMEZONE.test(value)) {
4463
+ value += 'Z';
4464
+ }
4465
+ const date = new Date(value);
4466
+ rejectNaN(date.getTime());
4467
+ return date;
4468
+ }
4469
+ return new Date(+value); // epoch ms number
4470
+ case undefined:
4471
+ case 'Any':
4472
+ return autoCast(value);
4473
+ default:
4474
+ return value;
4475
+ }
4476
+ } catch (error) {
4477
+ error.message = `Invalid value for attribute ${attribute.name}: "${value}", expecting ${type}`;
4478
+ error.statusCode = 400;
4479
+ throw error;
4480
+ }
4481
+ }
4482
+ // This is a simple function to throw on NaNs that can come out of parseInt, parseFloat, etc.
4483
+ function rejectNaN(value: number) {
4484
+ if (isNaN(value)) throw new SyntaxError(); // will set the message in the catch block with more context
4485
+ return value;
4486
+ }
4487
+ function isDescendantId(ancestorId, descendantId): boolean {
4488
+ if (ancestorId == null) return true; // ancestor of all ids
4489
+ if (!Array.isArray(descendantId)) return ancestorId === descendantId || descendantId.startsWith?.(ancestorId);
4490
+ if (Array.isArray(ancestorId)) {
4491
+ let al = ancestorId.length;
4492
+ if (ancestorId[al - 1] === null) al--;
4493
+ if (descendantId.length >= al) {
4494
+ for (let i = 0; i < al; i++) {
4495
+ if (descendantId[i] !== ancestorId[i]) return false;
4496
+ }
4497
+ return true;
4498
+ }
4499
+ return false;
4500
+ } else if (descendantId[0] === ancestorId) return true;
4501
+ }
4502
+
4503
// wait for an event turn (via a promise); used to yield between iterations of
// long scans so other work on the thread can run — setImmediate resolves the
// promise on the next macrotask turn
const rest = () => new Promise(setImmediate);
4505
+
4506
+ // for filtering
4507
+ function exists(value) {
4508
+ return value != null;
4509
+ }
4510
+
4511
+ function stringify(value) {
4512
+ try {
4513
+ return JSON.stringify(value);
4514
+ } catch {
4515
+ return value;
4516
+ }
4517
+ }
4518
+ function hasOtherProcesses(store) {
4519
+ const pid = process.pid;
4520
+ return store.env
4521
+ .readerList?.()
4522
+ .slice(1)
4523
+ .some((line) => {
4524
+ // if the pid from the reader list is different than ours, must be another process accessing the database
4525
+ return +line.match(/\d+/)?.[0] != pid;
4526
+ });
4527
+ }