@harperfast/harper 5.0.0-alpha.10 → 5.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (444) hide show
  1. package/bin/BinObjects.js +17 -0
  2. package/bin/cliOperations.js +157 -0
  3. package/bin/copyDb.ts +280 -0
  4. package/bin/harper.js +156 -0
  5. package/bin/install.js +15 -0
  6. package/bin/lite.js +5 -0
  7. package/bin/restart.js +201 -0
  8. package/bin/run.js +409 -0
  9. package/bin/status.js +65 -0
  10. package/bin/stop.js +22 -0
  11. package/bin/upgrade.js +134 -0
  12. package/components/Application.ts +646 -0
  13. package/components/ApplicationScope.ts +49 -0
  14. package/components/Component.ts +53 -0
  15. package/components/ComponentV1.ts +342 -0
  16. package/components/DEFAULT_CONFIG.ts +18 -0
  17. package/components/EntryHandler.ts +227 -0
  18. package/components/Logger.ts +14 -0
  19. package/components/OptionsWatcher.ts +354 -0
  20. package/components/PluginModule.ts +6 -0
  21. package/components/Scope.ts +329 -0
  22. package/components/componentLoader.ts +529 -0
  23. package/components/deriveCommonPatternBase.ts +31 -0
  24. package/components/deriveGlobOptions.ts +44 -0
  25. package/components/deriveURLPath.ts +57 -0
  26. package/components/operations.js +658 -0
  27. package/components/operationsValidation.js +246 -0
  28. package/components/packageComponent.ts +39 -0
  29. package/components/requestRestart.ts +26 -0
  30. package/components/resolveBaseURLPath.ts +38 -0
  31. package/components/status/ComponentStatus.ts +110 -0
  32. package/components/status/ComponentStatusRegistry.ts +251 -0
  33. package/components/status/api.ts +153 -0
  34. package/components/status/crossThread.ts +405 -0
  35. package/components/status/errors.ts +152 -0
  36. package/components/status/index.ts +44 -0
  37. package/components/status/internal.ts +65 -0
  38. package/components/status/registry.ts +12 -0
  39. package/components/status/types.ts +96 -0
  40. package/config/RootConfigWatcher.ts +59 -0
  41. package/config/configHelpers.ts +11 -0
  42. package/config/configUtils.js +967 -0
  43. package/config/harperConfigEnvVars.ts +641 -0
  44. package/dataLayer/CreateAttributeObject.js +25 -0
  45. package/dataLayer/CreateTableObject.js +11 -0
  46. package/dataLayer/DataLayerObjects.js +43 -0
  47. package/dataLayer/DeleteBeforeObject.js +22 -0
  48. package/dataLayer/DeleteObject.js +25 -0
  49. package/dataLayer/DropAttributeObject.js +11 -0
  50. package/dataLayer/GetBackupObject.js +22 -0
  51. package/dataLayer/InsertObject.js +24 -0
  52. package/dataLayer/ReadAuditLogObject.js +24 -0
  53. package/dataLayer/SQLSearch.js +1335 -0
  54. package/dataLayer/SearchByConditionsObject.js +61 -0
  55. package/dataLayer/SearchByHashObject.js +21 -0
  56. package/dataLayer/SearchObject.js +45 -0
  57. package/dataLayer/SqlSearchObject.js +14 -0
  58. package/dataLayer/UpdateObject.js +23 -0
  59. package/dataLayer/UpsertObject.js +23 -0
  60. package/dataLayer/bulkLoad.js +813 -0
  61. package/dataLayer/dataObjects/BulkLoadObjects.js +27 -0
  62. package/dataLayer/dataObjects/UpsertObject.js +23 -0
  63. package/dataLayer/delete.js +164 -0
  64. package/dataLayer/export.js +381 -0
  65. package/dataLayer/getBackup.js +40 -0
  66. package/dataLayer/harperBridge/BridgeMethods.js +81 -0
  67. package/dataLayer/harperBridge/ResourceBridge.ts +633 -0
  68. package/dataLayer/harperBridge/bridgeUtility/insertUpdateReturnObj.js +28 -0
  69. package/dataLayer/harperBridge/bridgeUtility/insertUpdateValidate.js +88 -0
  70. package/dataLayer/harperBridge/harperBridge.js +21 -0
  71. package/dataLayer/harperBridge/lmdbBridge/LMDBBridge.js +119 -0
  72. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/DeleteAuditLogsBeforeResults.js +19 -0
  73. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateAttribute.js +112 -0
  74. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateRecords.js +67 -0
  75. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateSchema.js +31 -0
  76. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbCreateTable.js +94 -0
  77. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteAuditLogsBefore.js +98 -0
  78. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDeleteRecords.js +89 -0
  79. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropAttribute.js +109 -0
  80. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropSchema.js +107 -0
  81. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbDropTable.js +137 -0
  82. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbFlush.js +35 -0
  83. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetBackup.js +111 -0
  84. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByHash.js +28 -0
  85. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbGetDataByValue.js +29 -0
  86. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbReadAuditLog.js +207 -0
  87. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByConditions.js +156 -0
  88. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByHash.js +21 -0
  89. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbSearchByValue.js +30 -0
  90. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbTransaction.js +19 -0
  91. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpdateRecords.js +64 -0
  92. package/dataLayer/harperBridge/lmdbBridge/lmdbMethods/lmdbUpsertRecords.js +70 -0
  93. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBCreateAttributeObject.js +22 -0
  94. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBDeleteTransactionObject.js +23 -0
  95. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBInsertTransactionObject.js +22 -0
  96. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBTransactionObject.js +23 -0
  97. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpdateTransactionObject.js +24 -0
  98. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/LMDBUpsertTransactionObject.js +24 -0
  99. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/TableSizeObject.js +25 -0
  100. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializeHashSearch.js +21 -0
  101. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializePaths.js +157 -0
  102. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCheckForNewAttributes.js +94 -0
  103. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbCreateTransactionsAuditEnvironment.js +39 -0
  104. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbGetTableSize.js +34 -0
  105. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbProcessRows.js +100 -0
  106. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbSearch.js +371 -0
  107. package/dataLayer/harperBridge/lmdbBridge/lmdbUtility/lmdbWriteTransaction.js +109 -0
  108. package/dataLayer/hdbInfoController.js +254 -0
  109. package/dataLayer/insert.js +266 -0
  110. package/dataLayer/readAuditLog.js +59 -0
  111. package/dataLayer/schema.js +366 -0
  112. package/dataLayer/schemaDescribe.js +289 -0
  113. package/dataLayer/search.js +60 -0
  114. package/dataLayer/transaction.js +17 -0
  115. package/dataLayer/update.js +124 -0
  116. package/dist/components/Logger.d.ts +12 -0
  117. package/dist/components/Logger.js +3 -0
  118. package/dist/components/Logger.js.map +1 -0
  119. package/dist/components/Scope.d.ts +14 -4
  120. package/dist/components/Scope.js +18 -10
  121. package/dist/components/Scope.js.map +1 -1
  122. package/dist/components/componentLoader.js +16 -9
  123. package/dist/components/componentLoader.js.map +1 -1
  124. package/dist/components/operations.js +2 -2
  125. package/dist/components/operations.js.map +1 -1
  126. package/dist/config/configUtils.d.ts +1 -1
  127. package/dist/config/configUtils.js +1 -1
  128. package/dist/config/configUtils.js.map +1 -1
  129. package/dist/dataLayer/CreateTableObject.d.ts +2 -2
  130. package/dist/dataLayer/CreateTableObject.js +2 -2
  131. package/dist/dataLayer/CreateTableObject.js.map +1 -1
  132. package/dist/dataLayer/delete.d.ts +1 -1
  133. package/dist/dataLayer/schema.js +6 -5
  134. package/dist/dataLayer/schema.js.map +1 -1
  135. package/dist/dataLayer/schemaDescribe.js +1 -1
  136. package/dist/dataLayer/schemaDescribe.js.map +1 -1
  137. package/dist/index.d.ts +1 -1
  138. package/dist/index.js +2 -0
  139. package/dist/index.js.map +1 -1
  140. package/dist/resources/DatabaseTransaction.d.ts +1 -1
  141. package/dist/resources/IterableEventQueue.d.ts +1 -1
  142. package/dist/resources/LMDBTransaction.d.ts +5 -1
  143. package/dist/resources/Resource.d.ts +1 -1
  144. package/dist/resources/RocksIndexStore.d.ts +3 -3
  145. package/dist/resources/RocksTransactionLogStore.d.ts +6 -3
  146. package/dist/resources/Table.d.ts +15 -6
  147. package/dist/resources/Table.js +4 -1
  148. package/dist/resources/Table.js.map +1 -1
  149. package/dist/resources/analytics/read.js +32 -22
  150. package/dist/resources/analytics/read.js.map +1 -1
  151. package/dist/resources/analytics/write.js +3 -6
  152. package/dist/resources/analytics/write.js.map +1 -1
  153. package/dist/resources/auditStore.d.ts +3 -3
  154. package/dist/resources/blob.d.ts +25 -2
  155. package/dist/resources/databases.d.ts +12 -2
  156. package/dist/resources/databases.js +22 -19
  157. package/dist/resources/databases.js.map +1 -1
  158. package/dist/resources/search.js +11 -5
  159. package/dist/resources/search.js.map +1 -1
  160. package/dist/resources/transaction.d.ts +2 -1
  161. package/dist/security/auth.js +1 -1
  162. package/dist/security/auth.js.map +1 -1
  163. package/dist/security/cryptoHash.d.ts +2 -2
  164. package/dist/security/jsLoader.js +243 -66
  165. package/dist/security/jsLoader.js.map +1 -1
  166. package/dist/security/keys.js +4 -5
  167. package/dist/security/keys.js.map +1 -1
  168. package/dist/security/user.js +3 -3
  169. package/dist/security/user.js.map +1 -1
  170. package/dist/server/REST.js +16 -2
  171. package/dist/server/REST.js.map +1 -1
  172. package/dist/server/Server.d.ts +2 -1
  173. package/dist/server/Server.js.map +1 -1
  174. package/dist/server/fastifyRoutes/plugins/hdbCore.d.ts +6 -1
  175. package/dist/server/fastifyRoutes.js +2 -0
  176. package/dist/server/fastifyRoutes.js.map +1 -1
  177. package/dist/server/http.js +12 -6
  178. package/dist/server/http.js.map +1 -1
  179. package/dist/server/jobs/JobObject.d.ts +3 -3
  180. package/dist/server/loadRootComponents.js +1 -0
  181. package/dist/server/loadRootComponents.js.map +1 -1
  182. package/dist/server/operationsServer.js +3 -1
  183. package/dist/server/operationsServer.js.map +1 -1
  184. package/dist/server/serverHelpers/JSONStream.d.ts +3 -3
  185. package/dist/server/serverHelpers/Request.d.ts +5 -5
  186. package/dist/server/serverHelpers/requestTimePlugin.d.ts +1 -1
  187. package/dist/server/threads/manageThreads.d.ts +2 -2
  188. package/dist/server/threads/manageThreads.js +50 -35
  189. package/dist/server/threads/manageThreads.js.map +1 -1
  190. package/dist/server/threads/socketRouter.d.ts +1 -1
  191. package/dist/sqlTranslator/deleteTranslator.d.ts +1 -1
  192. package/dist/utility/AWS/AWSConnector.d.ts +3 -2
  193. package/dist/utility/common_utils.d.ts +3 -3
  194. package/dist/utility/environment/systemInformation.d.ts +1 -0
  195. package/dist/utility/functions/date/dateFunctions.d.ts +11 -11
  196. package/dist/utility/globalSchema.d.ts +1 -1
  197. package/dist/utility/hdbTerms.d.ts +3 -0
  198. package/dist/utility/hdbTerms.js +3 -0
  199. package/dist/utility/hdbTerms.js.map +1 -1
  200. package/dist/utility/installation.d.ts +2 -4
  201. package/dist/utility/installation.js.map +1 -1
  202. package/dist/utility/lmdb/commonUtility.d.ts +1 -0
  203. package/dist/utility/lmdb/deleteUtility.d.ts +1 -0
  204. package/dist/utility/lmdb/environmentUtility.d.ts +1 -0
  205. package/dist/utility/lmdb/searchUtility.d.ts +2 -1
  206. package/dist/utility/lmdb/writeUtility.d.ts +1 -0
  207. package/dist/utility/logging/harper_logger.d.ts +6 -6
  208. package/dist/utility/processManagement/processManagement.d.ts +1 -1
  209. package/dist/utility/processManagement/servicesConfig.d.ts +12 -6
  210. package/dist/validation/common_validators.d.ts +4 -3
  211. package/dist/validation/configValidator.d.ts +3 -2
  212. package/index.d.ts +56 -0
  213. package/index.js +41 -0
  214. package/json/systemSchema.json +373 -0
  215. package/launchServiceScripts/launchHarperDB.js +3 -0
  216. package/launchServiceScripts/utility/checkNodeVersion.js +15 -0
  217. package/package.json +21 -3
  218. package/resources/DatabaseTransaction.ts +378 -0
  219. package/resources/ErrorResource.ts +57 -0
  220. package/resources/IterableEventQueue.ts +94 -0
  221. package/resources/LMDBTransaction.ts +349 -0
  222. package/resources/RecordEncoder.ts +702 -0
  223. package/resources/RequestTarget.ts +134 -0
  224. package/resources/Resource.ts +789 -0
  225. package/resources/ResourceInterface.ts +221 -0
  226. package/resources/ResourceInterfaceV2.ts +53 -0
  227. package/resources/ResourceV2.ts +67 -0
  228. package/resources/Resources.ts +162 -0
  229. package/resources/RocksIndexStore.ts +70 -0
  230. package/resources/RocksTransactionLogStore.ts +352 -0
  231. package/resources/Table.ts +4527 -0
  232. package/resources/analytics/hostnames.ts +72 -0
  233. package/resources/analytics/metadata.ts +10 -0
  234. package/resources/analytics/read.ts +252 -0
  235. package/resources/analytics/write.ts +803 -0
  236. package/resources/auditStore.ts +556 -0
  237. package/resources/blob.ts +1268 -0
  238. package/resources/crdt.ts +125 -0
  239. package/resources/dataLoader.ts +527 -0
  240. package/resources/databases.ts +1290 -0
  241. package/resources/graphql.ts +221 -0
  242. package/resources/indexes/HierarchicalNavigableSmallWorld.ts +638 -0
  243. package/resources/indexes/customIndexes.ts +7 -0
  244. package/resources/indexes/vector.ts +38 -0
  245. package/resources/jsResource.ts +86 -0
  246. package/resources/loadEnv.ts +22 -0
  247. package/resources/login.ts +18 -0
  248. package/resources/openApi.ts +409 -0
  249. package/resources/registrationDeprecated.ts +8 -0
  250. package/resources/replayLogs.ts +136 -0
  251. package/resources/roles.ts +98 -0
  252. package/resources/search.ts +1301 -0
  253. package/resources/tracked.ts +584 -0
  254. package/resources/transaction.ts +89 -0
  255. package/resources/transactionBroadcast.ts +258 -0
  256. package/security/auth.ts +376 -0
  257. package/security/certificateVerification/certificateVerificationSource.ts +84 -0
  258. package/security/certificateVerification/configValidation.ts +107 -0
  259. package/security/certificateVerification/crlVerification.ts +623 -0
  260. package/security/certificateVerification/index.ts +121 -0
  261. package/security/certificateVerification/ocspVerification.ts +148 -0
  262. package/security/certificateVerification/pkijs-ed25519-patch.ts +188 -0
  263. package/security/certificateVerification/types.ts +128 -0
  264. package/security/certificateVerification/verificationConfig.ts +138 -0
  265. package/security/certificateVerification/verificationUtils.ts +447 -0
  266. package/security/cryptoHash.js +42 -0
  267. package/security/data_objects/PermissionAttributeResponseObject.js +15 -0
  268. package/security/data_objects/PermissionResponseObject.js +115 -0
  269. package/security/data_objects/PermissionTableResponseObject.js +20 -0
  270. package/security/fastifyAuth.js +169 -0
  271. package/security/impersonation.ts +160 -0
  272. package/security/jsLoader.ts +716 -0
  273. package/security/keys.js +948 -0
  274. package/security/permissionsTranslator.js +300 -0
  275. package/security/role.js +218 -0
  276. package/security/tokenAuthentication.ts +228 -0
  277. package/security/user.ts +449 -0
  278. package/server/DurableSubscriptionsSession.ts +503 -0
  279. package/server/REST.ts +407 -0
  280. package/server/Server.ts +89 -0
  281. package/server/fastifyRoutes/helpers/getCORSOptions.js +36 -0
  282. package/server/fastifyRoutes/helpers/getHeaderTimeoutConfig.js +15 -0
  283. package/server/fastifyRoutes/helpers/getServerOptions.js +33 -0
  284. package/server/fastifyRoutes/plugins/hdbCore.js +39 -0
  285. package/server/fastifyRoutes.ts +205 -0
  286. package/server/graphqlQuerying.ts +700 -0
  287. package/server/http.ts +640 -0
  288. package/server/itc/serverHandlers.js +161 -0
  289. package/server/itc/utility/ITCEventObject.js +10 -0
  290. package/server/jobs/JobObject.js +24 -0
  291. package/server/jobs/jobProcess.js +69 -0
  292. package/server/jobs/jobRunner.js +162 -0
  293. package/server/jobs/jobs.js +304 -0
  294. package/server/loadRootComponents.js +44 -0
  295. package/server/mqtt.ts +485 -0
  296. package/server/nodeName.ts +75 -0
  297. package/server/operationsServer.ts +313 -0
  298. package/server/serverHelpers/Headers.ts +108 -0
  299. package/server/serverHelpers/JSONStream.ts +269 -0
  300. package/server/serverHelpers/OperationFunctionObject.ts +13 -0
  301. package/server/serverHelpers/Request.ts +158 -0
  302. package/server/serverHelpers/contentTypes.ts +637 -0
  303. package/server/serverHelpers/requestTimePlugin.js +57 -0
  304. package/server/serverHelpers/serverHandlers.js +148 -0
  305. package/server/serverHelpers/serverUtilities.ts +473 -0
  306. package/server/serverRegistry.ts +8 -0
  307. package/server/static.ts +187 -0
  308. package/server/status/definitions.ts +37 -0
  309. package/server/status/index.ts +125 -0
  310. package/server/storageReclamation.ts +93 -0
  311. package/server/threads/itc.js +89 -0
  312. package/server/threads/manageThreads.js +594 -0
  313. package/server/threads/socketRouter.ts +360 -0
  314. package/server/threads/threadServer.js +279 -0
  315. package/server/throttle.ts +73 -0
  316. package/sqlTranslator/SelectValidator.js +330 -0
  317. package/sqlTranslator/alasqlFunctionImporter.js +62 -0
  318. package/sqlTranslator/deleteTranslator.js +67 -0
  319. package/sqlTranslator/index.js +242 -0
  320. package/sqlTranslator/sql_statement_bucket.js +472 -0
  321. package/static/defaultConfig.yaml +3 -0
  322. package/studio/web/HDBDogOnly.svg +78 -0
  323. package/studio/web/assets/PPRadioGrotesk-Bold-DDaUYG8E.woff +0 -0
  324. package/studio/web/assets/fa-brands-400-CEJbCg16.woff +0 -0
  325. package/studio/web/assets/fa-brands-400-CSYNqBb_.ttf +0 -0
  326. package/studio/web/assets/fa-brands-400-DnkPfk3o.eot +0 -0
  327. package/studio/web/assets/fa-brands-400-UxlILjvJ.woff2 +0 -0
  328. package/studio/web/assets/fa-brands-400-cH1MgKbP.svg +3717 -0
  329. package/studio/web/assets/fa-regular-400-BhTwtT8w.eot +0 -0
  330. package/studio/web/assets/fa-regular-400-D1vz6WBx.ttf +0 -0
  331. package/studio/web/assets/fa-regular-400-DFnMcJPd.woff +0 -0
  332. package/studio/web/assets/fa-regular-400-DGzu1beS.woff2 +0 -0
  333. package/studio/web/assets/fa-regular-400-gwj8Pxq-.svg +801 -0
  334. package/studio/web/assets/fa-solid-900-B4ZZ7kfP.svg +5034 -0
  335. package/studio/web/assets/fa-solid-900-B6Axprfb.eot +0 -0
  336. package/studio/web/assets/fa-solid-900-BUswJgRo.woff2 +0 -0
  337. package/studio/web/assets/fa-solid-900-DOXgCApm.woff +0 -0
  338. package/studio/web/assets/fa-solid-900-mxuxnBEa.ttf +0 -0
  339. package/studio/web/assets/index-BTgXJX9d.js +235 -0
  340. package/studio/web/assets/index-BTgXJX9d.js.map +1 -0
  341. package/studio/web/assets/index-C-GXfcup.js +37 -0
  342. package/studio/web/assets/index-C-GXfcup.js.map +1 -0
  343. package/studio/web/assets/index-PFlNdimM.js +2 -0
  344. package/studio/web/assets/index-PFlNdimM.js.map +1 -0
  345. package/studio/web/assets/index-Y2g_iFpU.css +1 -0
  346. package/studio/web/assets/index-jiPwkrsB.css +1 -0
  347. package/studio/web/assets/index.lazy-C3TJZJ4o.js +266 -0
  348. package/studio/web/assets/index.lazy-C3TJZJ4o.js.map +1 -0
  349. package/studio/web/assets/profiler-DotzgiCJ.js +2 -0
  350. package/studio/web/assets/profiler-DotzgiCJ.js.map +1 -0
  351. package/studio/web/assets/react-redux-VxUEx_mU.js +6 -0
  352. package/studio/web/assets/react-redux-VxUEx_mU.js.map +1 -0
  353. package/studio/web/assets/startRecording-B_9J9Csd.js +3 -0
  354. package/studio/web/assets/startRecording-B_9J9Csd.js.map +1 -0
  355. package/studio/web/fabric-signup-background.webp +0 -0
  356. package/studio/web/fabric-signup-text.png +0 -0
  357. package/studio/web/favicon_purple.png +0 -0
  358. package/studio/web/github-icon.svg +15 -0
  359. package/studio/web/harper-fabric_black.png +0 -0
  360. package/studio/web/harper-fabric_white.png +0 -0
  361. package/studio/web/harper-studio_white.png +0 -0
  362. package/studio/web/index.html +16 -0
  363. package/studio/web/running.css +148 -0
  364. package/studio/web/running.html +147 -0
  365. package/studio/web/running.js +111 -0
  366. package/upgrade/UpgradeObjects.js +13 -0
  367. package/upgrade/directives/directivesController.js +90 -0
  368. package/upgrade/directivesManager.js +139 -0
  369. package/upgrade/upgradePrompt.js +124 -0
  370. package/upgrade/upgradeUtilities.js +28 -0
  371. package/utility/AWS/AWSConnector.js +29 -0
  372. package/utility/OperationFunctionCaller.js +63 -0
  373. package/utility/assignCmdEnvVariables.js +62 -0
  374. package/utility/common_utils.js +867 -0
  375. package/utility/environment/environmentManager.js +208 -0
  376. package/utility/environment/systemInformation.js +355 -0
  377. package/utility/errors/commonErrors.js +267 -0
  378. package/utility/errors/hdbError.js +146 -0
  379. package/utility/functions/date/dateFunctions.js +65 -0
  380. package/utility/functions/geo.js +355 -0
  381. package/utility/functions/sql/alaSQLExtension.js +104 -0
  382. package/utility/globalSchema.js +35 -0
  383. package/utility/hdbTerms.ts +819 -0
  384. package/utility/install/checkJWTTokensExist.js +62 -0
  385. package/utility/install/harperdb.conf +15 -0
  386. package/utility/install/harperdb.service +14 -0
  387. package/utility/install/installer.js +635 -0
  388. package/utility/installation.ts +30 -0
  389. package/utility/lmdb/DBIDefinition.js +20 -0
  390. package/utility/lmdb/DeleteRecordsResponseObject.js +25 -0
  391. package/utility/lmdb/InsertRecordsResponseObject.js +22 -0
  392. package/utility/lmdb/OpenDBIObject.js +31 -0
  393. package/utility/lmdb/OpenEnvironmentObject.js +41 -0
  394. package/utility/lmdb/UpdateRecordsResponseObject.js +25 -0
  395. package/utility/lmdb/UpsertRecordsResponseObject.js +22 -0
  396. package/utility/lmdb/cleanLMDBMap.js +65 -0
  397. package/utility/lmdb/commonUtility.js +119 -0
  398. package/utility/lmdb/deleteUtility.js +128 -0
  399. package/utility/lmdb/environmentUtility.js +477 -0
  400. package/utility/lmdb/searchCursorFunctions.js +187 -0
  401. package/utility/lmdb/searchUtility.js +918 -0
  402. package/utility/lmdb/terms.js +57 -0
  403. package/utility/lmdb/writeUtility.js +407 -0
  404. package/utility/logging/harper_logger.js +876 -0
  405. package/utility/logging/logRotator.js +157 -0
  406. package/utility/logging/logger.ts +24 -0
  407. package/utility/logging/readLog.js +355 -0
  408. package/utility/logging/transactionLog.js +57 -0
  409. package/utility/mount_hdb.js +59 -0
  410. package/utility/npmUtilities.js +102 -0
  411. package/utility/operationPermissions.ts +112 -0
  412. package/utility/operation_authorization.js +836 -0
  413. package/utility/packageUtils.js +55 -0
  414. package/utility/password.ts +99 -0
  415. package/utility/processManagement/processManagement.js +187 -0
  416. package/utility/processManagement/servicesConfig.js +56 -0
  417. package/utility/scripts/restartHdb.js +24 -0
  418. package/utility/scripts/user_data.sh +13 -0
  419. package/utility/signalling.js +36 -0
  420. package/utility/terms/certificates.js +81 -0
  421. package/utility/when.ts +20 -0
  422. package/v1.d.ts +39 -0
  423. package/v1.js +41 -0
  424. package/v2.d.ts +39 -0
  425. package/v2.js +41 -0
  426. package/validation/bulkDeleteValidator.js +24 -0
  427. package/validation/check_permissions.js +19 -0
  428. package/validation/common_validators.js +95 -0
  429. package/validation/configValidator.js +331 -0
  430. package/validation/deleteValidator.js +15 -0
  431. package/validation/fileLoadValidator.js +153 -0
  432. package/validation/insertValidator.js +40 -0
  433. package/validation/installValidator.js +37 -0
  434. package/validation/readLogValidator.js +64 -0
  435. package/validation/role_validation.js +320 -0
  436. package/validation/schemaMetadataValidator.js +42 -0
  437. package/validation/searchValidator.js +166 -0
  438. package/validation/statusValidator.ts +66 -0
  439. package/validation/transactionLogValidator.js +33 -0
  440. package/validation/user_validation.js +55 -0
  441. package/validation/validationWrapper.js +105 -0
  442. package/dist/resources/analytics/profile.d.ts +0 -2
  443. package/dist/resources/analytics/profile.js +0 -144
  444. package/dist/resources/analytics/profile.js.map +0 -1
@@ -0,0 +1,1290 @@
1
+ import { EventEmitter } from 'node:events';
2
+ import { initSync, getHdbBasePath, get as envGet } from '../utility/environment/environmentManager.js';
3
+ import { INTERNAL_DBIS_NAME } from '../utility/lmdb/terms.js';
4
+ import { open, compareKeys, type Database, type RootDatabase } from 'lmdb';
5
+ import { join, extname, basename } from 'path';
6
+ import { existsSync, readdirSync, readFileSync, mkdirSync } from 'node:fs';
7
+ import { unlink } from 'node:fs/promises';
8
+ import {
9
+ getBaseSchemaPath,
10
+ getTransactionAuditStoreBasePath,
11
+ } from '../dataLayer/harperBridge/lmdbBridge/lmdbUtility/initializePaths.js';
12
+ import { makeTable } from './Table.ts';
13
+ import OpenEnvironmentObject from '../utility/lmdb/OpenEnvironmentObject.js';
14
+ import { CONFIG_PARAMS, LEGACY_DATABASES_DIR_NAME, DATABASES_DIR_NAME } from '../utility/hdbTerms.ts';
15
+ import { _assignPackageExport } from '../globals.js';
16
+ import { getIndexedValues } from '../utility/lmdb/commonUtility.js';
17
+ import * as signalling from '../utility/signalling.js';
18
+ import { SchemaEventMsg } from '../server/threads/itc.js';
19
+ import { workerData } from 'worker_threads';
20
+ import harperLogger from '../utility/logging/harper_logger.js';
21
+ const { forComponent } = harperLogger;
22
+ import * as manageThreads from '../server/threads/manageThreads.js';
23
+ import { openAuditStore, readAuditEntry, createAuditEntry, type AuditRecord } from './auditStore.ts';
24
+ import { handleLocalTimeForGets } from './RecordEncoder.ts';
25
+ import { deleteRootBlobPathsForDB } from './blob.ts';
26
+ import { CUSTOM_INDEXES } from './indexes/customIndexes.ts';
27
+ import { OpenDBIObject } from '../utility/lmdb/OpenDBIObject.js';
28
+ import { RocksDatabase, type RocksDatabaseOptions } from '@harperfast/rocksdb-js';
29
+ import { replayLogs } from './replayLogs.ts';
30
+ import { totalmem } from 'node:os';
31
+ import { RocksIndexStore } from './RocksIndexStore.ts';
32
+ import { when } from '../utility/when.ts';
33
+ import { isProcessRunning } from '../utility/processManagement/processManagement.js';
34
+
35
+ function createOpenDBIObject(dupSort = false, isPrimary = false) {
36
+ return new OpenDBIObject(dupSort, isPrimary);
37
+ }
38
+ const logger = forComponent('storage');
39
+
40
+ const DEFAULT_DATABASE_NAME = 'data';
41
+ const DEFINED_TABLES = Symbol('defined-tables');
42
+ const DEFAULT_COMPRESSION_THRESHOLD = (envGet(CONFIG_PARAMS.STORAGE_PAGESIZE) || 4096) - 60; // larger than this requires multiple pages
43
+ initSync();
44
+ // I don't know if this is the best place for this, but somewhere we need to specify which tables
45
+ // replicate by default:
46
+ export const NON_REPLICATING_SYSTEM_TABLES = [
47
+ 'hdb_temp',
48
+ 'hdb_certificate',
49
+ 'hdb_raw_analytics',
50
+ 'hdb_session_will',
51
+ 'hdb_job',
52
+ 'hdb_info',
53
+ ];
54
+
55
+ export type Table = ReturnType<typeof makeTable> & {
56
+ indexingOperation?: any;
57
+ origin?: string;
58
+ schemaVersion?: number;
59
+ };
60
+ export interface Tables {
61
+ [tableName: string]: Table;
62
+ [DEFINED_TABLES]?: Set<string>;
63
+ }
64
+ export interface Databases {
65
+ [databaseName: string]: Tables;
66
+ }
67
+
68
+ // note: technically `Database` is either a `LMDBStore` or a `CachingStore`
69
+ interface LMDBDatabase extends Database {
70
+ customIndex?: any;
71
+ isIndexing?: boolean;
72
+ indexNulls?: boolean;
73
+ }
74
+ interface LMDBRootDatabase extends RootDatabase {
75
+ auditStore?: LMDBRootDatabase;
76
+ databaseName?: string;
77
+ dbisDb?: LMDBDatabase;
78
+ isLegacy?: boolean;
79
+ needsDeletion?: boolean;
80
+ path?: string;
81
+ status?: 'open' | 'closed';
82
+ }
83
+
84
+ interface RocksDatabaseEx extends RocksDatabase {
85
+ customIndex?: any;
86
+ env: Record<string, any>;
87
+ isLegacy?: boolean;
88
+ isIndexing?: boolean;
89
+ indexNulls?: boolean;
90
+ getEntry?: (id: string | number | (string | number)[] | Buffer, options?: any) => { value: any };
91
+ }
92
+
93
+ interface RocksRootDatabase extends RocksDatabaseEx {
94
+ auditStore?: RocksDatabaseEx;
95
+ databaseName?: string;
96
+ dbisDb?: RocksDatabaseEx;
97
+ }
98
+
99
+ export type RootDatabaseKind = LMDBRootDatabase | RocksRootDatabase;
100
+
101
+ export type DatabaseWatcherEventMap = {
102
+ updateTable: [table: Table, originIsNotCluster?: boolean];
103
+ dropTable: [tableName: string, databaseName: string];
104
+ dropDatabase: [databaseName: string];
105
+ };
106
+
107
+ export const databaseEventsEmitter = new EventEmitter<DatabaseWatcherEventMap>();
108
+
109
+ export const tables: Tables = Object.create(null);
110
+ export const databases: Databases = Object.create(null);
111
+
112
+ const MEMORY_FOR_ROCKS_DB = Math.min(process.constrainedMemory?.() ?? Infinity, totalmem()) * 0.25; // 25% of available memory
113
+
114
+ function openRocksDatabase(path: string, options: RocksDatabaseOptions & { dupSort?: boolean }) {
115
+ options.disableWAL ??= true;
116
+ RocksDatabase.config({ blockCacheSize: MEMORY_FOR_ROCKS_DB });
117
+ if (!existsSync(path)) {
118
+ mkdirSync(path, { recursive: true });
119
+ }
120
+ let db: RocksRootDatabase;
121
+ if (options.dupSort) {
122
+ db = RocksDatabase.open(new RocksIndexStore(path, options)) as RocksDatabaseEx;
123
+ } else {
124
+ db = RocksDatabase.open(path, options) as RocksDatabaseEx;
125
+ db.encoder.name = options.name;
126
+ }
127
+ db.env = {};
128
+ return db;
129
+ }
130
+
131
+ const lmdbDatabaseEnvs = new Map<string, LMDBRootDatabase>();
132
+ const rocksdbDatabaseEnvs = new Map<string, RocksDatabaseEx>();
133
+
134
+ // set the following in both global and exports
135
+ _assignPackageExport('databases', databases);
136
+ _assignPackageExport('tables', tables);
137
+
138
+ const NEXT_TABLE_ID = Symbol.for('next-table-id');
139
+ let loadedDatabases; // indicates if we have loaded databases from the file system yet
140
+
141
+ // This is used to track all the databases that are found when iterating through the file system so that anything that is missing
142
+ // can be removed:
143
+ let definedDatabases: Map<string, Set<string>>;
144
+
145
+ /**
146
+ * This gets the set of tables from the default database ("data").
147
+ */
148
+ export function getTables(): Tables {
149
+ if (!loadedDatabases) {
150
+ getDatabases();
151
+ }
152
+ return tables || {};
153
+ }
154
+
155
+ /**
156
+ * This provides the main entry point for getting the set of all Harper tables (organized by schemas/databases).
157
+ * This proactively scans the known
158
+ * databases/schemas directories and finds any databases and opens them. This done proactively so that there is a fast
159
+ * object available to all consumers that doesn't require runtime checks for database open states.
160
+ * This also attaches the audit store associated with table. Note that legacy tables had a single audit table per db table
161
+ * but in newer multi-table databases, there is one consistent, integrated audit table for the database since transactions
162
+ * can span any tables in the database.
163
+ */
164
+ export function getDatabases(): Databases {
165
+ if (loadedDatabases) {
166
+ return databases;
167
+ }
168
+ loadedDatabases = true;
169
+
170
+ definedDatabases = new Map();
171
+ const hdbBasePath = getHdbBasePath();
172
+ let databasePath = hdbBasePath && join(hdbBasePath, DATABASES_DIR_NAME);
173
+ const schemaConfigs = envGet(CONFIG_PARAMS.DATABASES) || {};
174
+
175
+ // not sure why this doesn't work with the environmemt manager
176
+ if (process.env.SCHEMAS_DATA_PATH) schemaConfigs.data = { path: process.env.SCHEMAS_DATA_PATH };
177
+ databasePath =
178
+ process.env.STORAGE_PATH ||
179
+ envGet(CONFIG_PARAMS.STORAGE_PATH) ||
180
+ (databasePath && (existsSync(databasePath) ? databasePath : join(getHdbBasePath(), LEGACY_DATABASES_DIR_NAME)));
181
+ if (!databasePath) return;
182
+
183
+ if (existsSync(databasePath)) {
184
+ // First load all the databases from our main database folder
185
+ // TODO: Load any databases defined with explicit storage paths from the config
186
+ for (const databaseEntry of readdirSync(databasePath, { withFileTypes: true })) {
187
+ const dbName = basename(databaseEntry.name, '.mdb');
188
+ const dbPath = join(databasePath, databaseEntry.name);
189
+
190
+ if (
191
+ databaseEntry.isFile() &&
192
+ extname(databaseEntry.name).toLowerCase() === '.mdb' &&
193
+ !schemaConfigs[dbName]?.path
194
+ ) {
195
+ logger.trace(`loading lmdb database: ${dbPath}`);
196
+ readMetaDb(dbPath, null, dbName);
197
+ continue;
198
+ }
199
+ try {
200
+ const files = readdirSync(dbPath, { withFileTypes: true });
201
+ if (
202
+ files.find((file) => file.name === 'CURRENT')?.isFile() &&
203
+ files.some((file) => file.name.startsWith('MANIFEST-')) &&
204
+ !schemaConfigs[dbName]?.path
205
+ ) {
206
+ readRocksMetaDb(dbPath, null, dbName);
207
+ continue;
208
+ }
209
+ } catch (err) {
210
+ if (!('code' in err && (err.code === 'ENOENT' || err.code === 'ENOTDIR'))) {
211
+ throw err;
212
+ }
213
+ }
214
+ }
215
+ }
216
+
217
+ // now we load databases from the legacy "schema" directory folder structure
218
+ const baseSchemaPath = getBaseSchemaPath();
219
+ if (existsSync(baseSchemaPath)) {
220
+ for (const schemaEntry of readdirSync(baseSchemaPath, { withFileTypes: true })) {
221
+ if (!schemaEntry.isFile()) {
222
+ const schemaPath = join(baseSchemaPath, schemaEntry.name);
223
+ const schemaAuditPath = join(getTransactionAuditStoreBasePath(), schemaEntry.name);
224
+ for (const tableEntry of readdirSync(schemaPath, { withFileTypes: true })) {
225
+ if (tableEntry.isFile() && extname(tableEntry.name).toLowerCase() === '.mdb') {
226
+ const auditPath = join(schemaAuditPath, tableEntry.name);
227
+ readMetaDb(
228
+ join(schemaPath, tableEntry.name),
229
+ basename(tableEntry.name, '.mdb'),
230
+ schemaEntry.name,
231
+ auditPath,
232
+ true
233
+ );
234
+ }
235
+ }
236
+ }
237
+ }
238
+ }
239
+
240
+ if (schemaConfigs) {
241
+ for (const dbName in schemaConfigs) {
242
+ const schemaConfig = schemaConfigs[dbName];
243
+ const databasePath = schemaConfig.path;
244
+ if (existsSync(databasePath)) {
245
+ for (const databaseEntry of readdirSync(databasePath, { withFileTypes: true })) {
246
+ if (databaseEntry.isFile() && extname(databaseEntry.name).toLowerCase() === '.mdb') {
247
+ readMetaDb(join(databasePath, databaseEntry.name), basename(databaseEntry.name, '.mdb'), dbName);
248
+ } else {
249
+ try {
250
+ const dbPath = join(databasePath, databaseEntry.name);
251
+ const files = readdirSync(dbPath, { withFileTypes: true });
252
+ if (
253
+ files.find((file) => file.name === 'CURRENT')?.isFile() &&
254
+ files.some((file) => file.name.startsWith('MANIFEST-'))
255
+ ) {
256
+ readRocksMetaDb(dbPath, null, dbName);
257
+ continue;
258
+ }
259
+ } catch (err) {
260
+ if (!('code' in err && (err.code === 'ENOENT' || err.code === 'ENOTDIR'))) {
261
+ throw err;
262
+ }
263
+ }
264
+ }
265
+ }
266
+ }
267
+ const tableConfigs = schemaConfig.tables;
268
+ if (tableConfigs) {
269
+ for (const tableName in tableConfigs) {
270
+ const tableConfig = tableConfigs[tableName];
271
+ const tablePath = join(tableConfig.path, basename(tableName + '.mdb'));
272
+ if (existsSync(tablePath)) {
273
+ readMetaDb(tablePath, tableName, dbName, null, true);
274
+ }
275
+ }
276
+ }
277
+ //TODO: Iterate configured table paths
278
+ }
279
+ }
280
+ // now remove any databases or tables that have been removed
281
+ for (const dbName in databases) {
282
+ const definedTables = definedDatabases.get(dbName);
283
+ if (definedTables) {
284
+ const tables = databases[dbName];
285
+ if (dbName.includes('delete')) logger.trace(`defined tables ${Array.from(definedTables.keys())}`);
286
+
287
+ for (const tableName in tables) {
288
+ if (!definedTables.has(tableName)) {
289
+ logger.trace(`delete table class ${tableName}`);
290
+ delete tables[tableName];
291
+ }
292
+ }
293
+ } else {
294
+ delete databases[dbName];
295
+ if (dbName === 'data') {
296
+ for (const tableName in tables) {
297
+ delete tables[tableName];
298
+ }
299
+ delete tables[DEFINED_TABLES];
300
+ }
301
+ }
302
+ }
303
+ if (envGet(CONFIG_PARAMS.ANALYTICS_REPLICATE) === false) {
304
+ if (!NON_REPLICATING_SYSTEM_TABLES.includes('hdb_analytics')) NON_REPLICATING_SYSTEM_TABLES.push('hdb_analytics');
305
+ } else {
306
+ // auditing must be enabled for replication
307
+ databases.system?.hdb_analytics?.enableAuditing();
308
+ databases.system?.hdb_analytics_hostname?.enableAuditing();
309
+ }
310
+ if (databases.system) {
311
+ for (const tableName of NON_REPLICATING_SYSTEM_TABLES) {
312
+ if (databases.system[tableName]) {
313
+ databases.system[tableName].replicate = false;
314
+ }
315
+ }
316
+ }
317
+ return databases;
318
+ }
319
+
320
/**
 * This is responsible for reading the internal dbi of a single database file to get a list of all the tables and
 * their indexed or registered attributes.
 * @param path path of the .mdb file to open
 * @param defaultTable table name to assume for entries without a table prefix (legacy single-table databases)
 * @param databaseName name the database is registered under (defaults to the default database)
 * @param auditPath optional path of a legacy per-table audit store
 * @param isLegacy true when loading from the legacy per-table "schema" directory layout
 */
export function readMetaDb(
	path: string,
	defaultTable?: string,
	databaseName: string = DEFAULT_DATABASE_NAME,
	auditPath?: string,
	isLegacy?: boolean
) {
	const envInit = new OpenEnvironmentObject(path, false);
	try {
		// reuse an already-open environment for this path when we have one cached
		let rootStore = lmdbDatabaseEnvs.get(path);
		if (rootStore) {
			// the database is still present on disk, so clear any pending-deletion
			// mark set by resetDatabases
			rootStore.needsDeletion = false;
		} else {
			rootStore = open(envInit);
			lmdbDatabaseEnvs.set(path, rootStore);
		}

		return initStores(path, rootStore, databaseName, defaultTable, auditPath, isLegacy);
	} catch (error) {
		// attach the offending path before propagating for easier diagnosis
		error.message += ` opening database ${path}`;
		throw error;
	}
}
350
+
351
/**
 * Reads the table/attribute metadata of a single RocksDB database directory and
 * initializes its stores. On first open it also replays logs for the database
 * (via replayLogs) after the stores are initialized.
 * @param path directory path of the RocksDB database
 * @param defaultTable table name to assume for entries without a table prefix
 * @param databaseName name the database is registered under (defaults to the default database)
 */
function readRocksMetaDb(path: string, defaultTable?: string, databaseName: string = DEFAULT_DATABASE_NAME) {
	try {
		logger.trace(`loading rocksdb database: ${path}`);

		// NOTE(review): this only logs when the parent process is still alive;
		// presumably a diagnostic for overlapping open attempts — confirm intent
		if (process.env.HARPER_PARENT_PROCESS_PID) {
			const parentProcessPid = parseInt(process.env.HARPER_PARENT_PROCESS_PID);
			if (isProcessRunning(parentProcessPid)) {
				logger.info(`Parent process ${parentProcessPid} is still running!`);
			}
		}

		// reuse an already-open store for this path when cached; logs are only
		// replayed on the first open
		let rootStore: RocksDatabaseEx | undefined = rocksdbDatabaseEnvs.get(path);
		if (rootStore) {
			initStores(path, rootStore, databaseName, defaultTable);
		} else {
			rootStore = openRocksDatabase(path, { disableWAL: false }) as RocksDatabaseEx;
			rocksdbDatabaseEnvs.set(path, rootStore);
			initStores(path, rootStore, databaseName, defaultTable);
			replayLogs(rootStore, databases[databaseName]);
		}
		return rootStore;
	} catch (error) {
		// attach the offending path before propagating for easier diagnosis
		error.message += ` opening database ${path}`;
		throw error;
	}
}
377
+
378
+ function initStores(
379
+ path: string,
380
+ rootStore: RootDatabaseKind,
381
+ databaseName: string,
382
+ defaultTable?: string,
383
+ auditPath?: string,
384
+ isLegacy?: boolean
385
+ ) {
386
+ const envInit = new OpenEnvironmentObject(path, false);
387
+ const internalDbiInit = createOpenDBIObject(false);
388
+ let dbisStore = rootStore.dbisDb;
389
+ if (!dbisStore) {
390
+ if (rootStore instanceof RocksDatabase) {
391
+ dbisStore = openRocksDatabase(rootStore.path, {
392
+ ...internalDbiInit,
393
+ disableWAL: false,
394
+ name: INTERNAL_DBIS_NAME,
395
+ }) as RocksDatabaseEx;
396
+ } else {
397
+ dbisStore = rootStore.openDB(INTERNAL_DBIS_NAME, internalDbiInit);
398
+ }
399
+ rootStore.dbisDb = dbisStore;
400
+ }
401
+
402
+ let auditStore = rootStore.auditStore;
403
+ if (!auditStore) {
404
+ if (auditPath) {
405
+ if (existsSync(auditPath)) {
406
+ envInit.path = auditPath;
407
+ if (rootStore instanceof RocksDatabase) {
408
+ auditStore = openAuditStore(rootStore);
409
+ } else {
410
+ auditStore = open({
411
+ ...envInit,
412
+ encoder: {
413
+ encode: (auditRecord: AuditRecord) => createAuditEntry(auditRecord),
414
+ decode: (encoding: Buffer) => readAuditEntry(encoding),
415
+ },
416
+ });
417
+ }
418
+ auditStore.isLegacy = true;
419
+ }
420
+ } else {
421
+ auditStore = openAuditStore(rootStore);
422
+ }
423
+ }
424
+
425
+ const tables = ensureDB(databaseName);
426
+ const definedTables = tables[DEFINED_TABLES];
427
+ definedTables.rootStore = rootStore;
428
+ const tablesToLoad = new Map<string, any>();
429
+
430
+ for (const result of dbisStore.getRange({ start: false })) {
431
+ const { key, value } = result as { key: string; value: any };
432
+ let [tableName, attribute_name] = key.toString().split('/');
433
+ if (attribute_name === '') {
434
+ // primary key
435
+ attribute_name = value.name;
436
+ } else if (!attribute_name) {
437
+ attribute_name = tableName;
438
+ tableName = defaultTable;
439
+ if (!value.name) {
440
+ // legacy attribute
441
+ value.name = attribute_name;
442
+ value.indexed = !value.isPrimaryKey;
443
+ }
444
+ }
445
+ definedTables?.add(tableName);
446
+ let tableDef = tablesToLoad.get(tableName);
447
+ if (!tableDef) tablesToLoad.set(tableName, (tableDef = { attributes: [] }));
448
+ if (attribute_name == null || value.isPrimaryKey) tableDef.primary = value;
449
+ if (attribute_name != null) tableDef.attributes.push(value);
450
+ Object.defineProperty(value, 'key', { value: key, configurable: true });
451
+ }
452
+
453
+ for (const [tableName, tableDef] of tablesToLoad) {
454
+ let { attributes, primary: primaryAttribute } = tableDef;
455
+ if (!primaryAttribute) {
456
+ // this isn't defined, find it in the attributes
457
+ for (const attribute of attributes) {
458
+ if (attribute.isPrimaryKey) {
459
+ primaryAttribute = attribute;
460
+ break;
461
+ }
462
+ }
463
+ if (!primaryAttribute) {
464
+ logger.warn(
465
+ `Unable to find a primary key attribute on table ${tableName}, with attributes: ${JSON.stringify(attributes)}`
466
+ );
467
+ continue;
468
+ }
469
+ }
470
+ // if the table has already been defined, use that class, don't create a new one
471
+ let table = tables[tableName];
472
+ let indices = {},
473
+ existingAttributes = [];
474
+ let tableId;
475
+ let primaryStore;
476
+ const audit =
477
+ typeof primaryAttribute.audit === 'boolean' ? primaryAttribute.audit : envGet(CONFIG_PARAMS.LOGGING_AUDITLOG);
478
+ const trackDeletes = primaryAttribute.trackDeletes;
479
+ const expiration = primaryAttribute.expiration;
480
+ const eviction = primaryAttribute.eviction;
481
+ const sealed = primaryAttribute.sealed;
482
+ const splitSegments = primaryAttribute.splitSegments;
483
+ const replicate = primaryAttribute.replicate;
484
+ if (table) {
485
+ indices = table.indices;
486
+ existingAttributes = table.attributes;
487
+ table.schemaVersion++;
488
+ } else {
489
+ tableId = primaryAttribute.tableId;
490
+ if (tableId) {
491
+ if (tableId >= (dbisStore.getSync(NEXT_TABLE_ID) || 0)) {
492
+ dbisStore.putSync(NEXT_TABLE_ID, tableId + 1);
493
+ logger.info(`Updating next table id (it was out of sync) to ${tableId + 1} for ${tableName}`);
494
+ }
495
+ } else {
496
+ primaryAttribute.tableId = tableId = dbisStore.getSync(NEXT_TABLE_ID);
497
+ if (!tableId) tableId = 1;
498
+ logger.debug(`Table {tableName} missing an id, assigning {tableId}`);
499
+ dbisStore.putSync(NEXT_TABLE_ID, tableId + 1);
500
+ dbisStore.putSync(primaryAttribute.key, primaryAttribute);
501
+ }
502
+ const dbiInit = createOpenDBIObject(!primaryAttribute.isPrimaryKey, primaryAttribute.isPrimaryKey);
503
+ dbiInit.compression = primaryAttribute.compression;
504
+ if (dbiInit.compression) {
505
+ const compressionThreshold =
506
+ envGet(CONFIG_PARAMS.STORAGE_COMPRESSION_THRESHOLD) || DEFAULT_COMPRESSION_THRESHOLD; // this is the only thing that can change;
507
+ dbiInit.compression.threshold = compressionThreshold;
508
+ }
509
+ if (rootStore instanceof RocksDatabase) {
510
+ primaryStore = handleLocalTimeForGets(
511
+ openRocksDatabase(rootStore.path, { ...dbiInit, name: primaryAttribute.key }),
512
+ rootStore
513
+ );
514
+ } else {
515
+ primaryStore = handleLocalTimeForGets(rootStore.openDB(primaryAttribute.key, dbiInit), rootStore);
516
+ }
517
+ rootStore.databaseName = databaseName;
518
+ primaryStore.tableId = tableId;
519
+ }
520
+ let attributesUpdated: boolean;
521
+ for (const attribute of attributes) {
522
+ attribute.attribute = attribute.name;
523
+ try {
524
+ // now load the non-primary keys, opening the dbs as necessary for indices
525
+ if (!attribute.isPrimaryKey && (attribute.indexed || (attribute.attribute && !attribute.name))) {
526
+ if (!indices[attribute.name]) {
527
+ const dbi = openIndex(attribute.key, rootStore, attribute);
528
+ indices[attribute.name] = dbi;
529
+ indices[attribute.name].indexNulls = attribute.indexNulls;
530
+ }
531
+ const existingAttribute = existingAttributes.find(
532
+ (existingAttribute) => existingAttribute.name === attribute.name
533
+ );
534
+ if (existingAttribute) existingAttributes.splice(existingAttributes.indexOf(existingAttribute), 1, attribute);
535
+ else existingAttributes.push(attribute);
536
+ attributesUpdated = true;
537
+ }
538
+ } catch (error) {
539
+ logger.error(`Error trying to update attribute`, attribute, existingAttributes, indices, error);
540
+ }
541
+ }
542
+ for (const existingAttribute of existingAttributes) {
543
+ const attribute = attributes.find((attribute) => attribute.name === existingAttribute.name);
544
+ if (!attribute) {
545
+ if (existingAttribute.isPrimaryKey) {
546
+ logger.error('Unable to remove existing primary key attribute', existingAttribute);
547
+ continue;
548
+ }
549
+ if (existingAttribute.indexed) {
550
+ // we only remove attributes if they were indexed, in order to support dropAttribute that removes dynamic indexed attributes
551
+ existingAttributes.splice(existingAttributes.indexOf(existingAttribute), 1);
552
+ attributesUpdated = true;
553
+ }
554
+ }
555
+ }
556
+ if (table) {
557
+ if (attributesUpdated) {
558
+ table.schemaVersion++;
559
+ table.updatedAttributes();
560
+ }
561
+ } else {
562
+ table = setTable(
563
+ tables,
564
+ tableName,
565
+ makeTable({
566
+ primaryStore,
567
+ auditStore,
568
+ audit,
569
+ sealed,
570
+ splitSegments,
571
+ replicate,
572
+ expirationMS: expiration && expiration * 1000,
573
+ evictionMS: eviction && eviction * 1000,
574
+ trackDeletes,
575
+ tableName,
576
+ tableId,
577
+ primaryKey: primaryAttribute.name,
578
+ databasePath: isLegacy ? `${databaseName}/${tableName}` : databaseName,
579
+ databaseName,
580
+ indices,
581
+ attributes,
582
+ schemaDefined: primaryAttribute.schemaDefined,
583
+ dbisDB: dbisStore,
584
+ })
585
+ );
586
+ table.schemaVersion = 1;
587
+ databaseEventsEmitter.emit('updateTable', table);
588
+ }
589
+ }
590
+ return rootStore;
591
+ }
592
+
593
+ export function resetDatabases() {
594
+ loadedDatabases = false;
595
+ for (const store of Object.values(lmdbDatabaseEnvs)) {
596
+ store.needsDeletion = true;
597
+ }
598
+ getDatabases();
599
+ for (const [path, store] of lmdbDatabaseEnvs) {
600
+ if (store.needsDeletion && !path.endsWith('system.mdb')) {
601
+ store.close();
602
+ lmdbDatabaseEnvs.delete(path);
603
+ const db = databases[store.databaseName];
604
+ for (const tableName in db) {
605
+ const table = db[tableName];
606
+ if (table.primaryStore.path === path) {
607
+ delete databases[store.databaseName];
608
+ databaseEventsEmitter.emit('dropDatabase', store.databaseName);
609
+ break;
610
+ }
611
+ }
612
+ }
613
+ }
614
+ return databases;
615
+ }
616
+
617
/** Options accepted by table() when ensuring that a table exists. */
interface TableDefinition {
	table: string; // table name
	database?: string; // database name; defaults to the default database
	path?: string; // custom storage path for the table
	expiration?: number; // record expiration in seconds (converted to ms internally)
	eviction?: number; // record eviction in seconds (converted to ms internally)
	scanInterval?: number; // presumably the interval for expiration/eviction scans — confirm against makeTable
	audit?: boolean; // enable the audit log for this table (defaults to the logging.auditLog config)
	sealed?: boolean;
	splitSegments?: boolean; // defaults to false for newly created tables
	replicate?: boolean; // whether writes to this table are replicated
	trackDeletes?: boolean;
	attributes: any[]; // attribute definitions, including the primary key attribute
	schemaDefined?: boolean; // true when the attribute set comes from an explicit schema (default true)
	origin?: string; // origin that defined the table; accumulated on the primary key attribute
}
633
+ /**
634
+ * Ensure that we have this database object (that holds a set of tables) set up
635
+ * @param databaseName
636
+ * @returns
637
+ */
638
+ function ensureDB(databaseName) {
639
+ let dbTables = databases[databaseName];
640
+ if (!dbTables) {
641
+ if (databaseName === 'data')
642
+ // preserve the data tables objet
643
+ dbTables = databases[databaseName] = tables;
644
+ else if (databaseName === 'system')
645
+ // make system non-enumerable
646
+ Object.defineProperty(databases, 'system', {
647
+ value: (dbTables = Object.create(null)),
648
+ configurable: true, // no enum
649
+ });
650
+ else {
651
+ dbTables = databases[databaseName] = Object.create(null);
652
+ }
653
+ }
654
+ if (definedDatabases && !definedDatabases.has(databaseName)) {
655
+ const definedTables = new Set<string>(); // we create this so we can determine what was found in a reset and remove any removed dbs/tables
656
+ dbTables[DEFINED_TABLES] = definedTables;
657
+ definedDatabases.set(databaseName, definedTables);
658
+ }
659
+ return dbTables;
660
+ }
661
+ /**
662
+ * Set the table class into the database's tables object
663
+ * @param tables
664
+ * @param tableName
665
+ * @param Table
666
+ * @returns
667
+ */
668
+ function setTable(tables, tableName, Table) {
669
+ tables[tableName] = Table;
670
+ return Table;
671
+ }
672
/**
 * Get root store for a database, opening (or creating) it if needed, and cache
 * it on the database's defined-tables entry.
 * @param options.database database name; defaults to the default database
 * @param options.table optional table name, used when the table has its own configured path
 * @returns the root LMDB or RocksDB store for the database
 */
export function database({ database: databaseName, table: tableName }) {
	if (!databaseName) databaseName = DEFAULT_DATABASE_NAME;
	getDatabases();
	ensureDB(databaseName);
	// a previously-resolved root store is cached on the defined-tables set
	const definedDatabase = definedDatabases.get(databaseName);
	if (definedDatabase?.rootStore) {
		return definedDatabase.rootStore;
	}
	const databaseConfig = envGet(CONFIG_PARAMS.DATABASES) || {};
	if (process.env.SCHEMAS_DATA_PATH) {
		databaseConfig.data = { path: process.env.SCHEMAS_DATA_PATH };
	}

	// a table-specific configured path takes precedence over the database path
	const tablePath = tableName && databaseConfig[databaseName]?.tables?.[tableName]?.path;

	// fallback chain: table path, configured db path, STORAGE_PATH env var,
	// configured storage path, then the standard (or legacy) databases directory
	const hdbBasePath = getHdbBasePath();
	const databasePath =
		tablePath ||
		databaseConfig[databaseName]?.path ||
		process.env.STORAGE_PATH ||
		envGet(CONFIG_PARAMS.STORAGE_PATH) ||
		(existsSync(join(hdbBasePath, DATABASES_DIR_NAME))
			? join(hdbBasePath, DATABASES_DIR_NAME)
			: join(hdbBasePath, LEGACY_DATABASES_DIR_NAME));

	let rootStore: RootDatabaseKind;
	// rocksdb is the default engine; anything other than an explicit 'lmdb' uses it
	const useRocksdb = (process.env.HARPER_STORAGE_ENGINE || envGet(CONFIG_PARAMS.STORAGE_ENGINE)) !== 'lmdb';
	if (useRocksdb) {
		const path = join(databasePath, tablePath ? tableName : databaseName);
		rootStore = rocksdbDatabaseEnvs.get(path);
		if (!rootStore || rootStore.status === 'closed') {
			rootStore = openRocksDatabase(path, {
				disableWAL: false,
			});
			rocksdbDatabaseEnvs.set(path, rootStore);
		}
	} else {
		const path = join(databasePath, `${tablePath ? tableName : databaseName}.mdb`);
		rootStore = lmdbDatabaseEnvs.get(path);
		if (!rootStore || rootStore.status === 'closed') {
			// TODO: validate database name
			const envInit = new OpenEnvironmentObject(path, false);
			rootStore = open(envInit);
			lmdbDatabaseEnvs.set(path, rootStore);
		}
	}
	if (!rootStore.auditStore) {
		rootStore.auditStore = openAuditStore(rootStore);
	}
	// cache the resolution for subsequent calls
	if (definedDatabase) definedDatabase.rootStore = rootStore;
	return rootStore;
}
729
/**
 * Delete the database: close and remove every underlying store, prune the
 * in-memory registries, and emit dropTable/dropDatabase events.
 * @param databaseName name of the database to delete
 * @throws Error when the database is not registered
 */
export async function dropDatabase(databaseName) {
	if (!databases[databaseName]) throw new Error('Database does not exist');
	const dbTables = databases[databaseName];
	let rootStore;
	for (const tableName in dbTables) {
		const table = dbTables[tableName];
		rootStore = table.primaryStore.rootStore;

		// drop any cached environment handle for this path (only one map will match)
		lmdbDatabaseEnvs.delete(rootStore.path);
		rocksdbDatabaseEnvs.delete(rootStore.path);

		if (rootStore.status === 'open') {
			if (rootStore instanceof RocksDatabase) {
				rootStore.close();
				rootStore.destroy();
			} else if (rootStore.status === 'open') {
				// LMDB: close the environment, then remove the backing file
				await rootStore.close();
				await unlink(rootStore.path);
			}
		}
		databaseEventsEmitter.emit('dropTable', tableName, databaseName);
	}
	if (!rootStore) {
		// database had no tables; resolve its root store from config/paths so the
		// backing files can still be removed
		rootStore = database({ database: databaseName, table: null });
		if (rootStore instanceof RocksDatabase) {
			rootStore.destroy();
		} else if (rootStore.status === 'open') {
			await rootStore.close();
			await unlink(rootStore.path);
		}
	}
	if (databaseName === 'data') {
		// the default database shares the module-level `tables` object; empty it in place
		for (const tableName in tables) {
			delete tables[tableName];
		}
		delete tables[DEFINED_TABLES];
	}
	delete databases[databaseName];
	databaseEventsEmitter.emit('dropDatabase', databaseName);
	await deleteRootBlobPathsForDB(rootStore);
}
774
+ // opens an index, consulting with custom indexes that may use alternate store configuration
775
+ function openIndex(dbiKey: string, rootStore: RootDatabaseKind, attribute: any) {
776
+ const objectStorage =
777
+ attribute.isPrimaryKey || (attribute.indexed.type && CUSTOM_INDEXES[attribute.indexed.type]?.useObjectStore);
778
+ const dbiInit = createOpenDBIObject(!objectStorage, objectStorage);
779
+ let dbi:
780
+ | LMDBDatabase
781
+ | (RocksDatabase & {
782
+ customIndex?: any;
783
+ isIndexing?: boolean;
784
+ indexNulls?: boolean;
785
+ rootStore?: RocksRootDatabase;
786
+ });
787
+ if (rootStore instanceof RocksDatabase) {
788
+ dbi = openRocksDatabase(rootStore.path, { ...dbiInit, name: dbiKey });
789
+ dbi.rootStore = rootStore;
790
+ } else {
791
+ dbi = rootStore.openDB(dbiKey, dbiInit);
792
+ }
793
+ if (attribute.indexed.type) {
794
+ const CustomIndex = CUSTOM_INDEXES[attribute.indexed.type];
795
+ if (CustomIndex) {
796
+ dbi.customIndex = new CustomIndex(dbi, attribute.indexed);
797
+ } else {
798
+ logger.error(`The indexing type '${attribute.indexed.type}' is unknown`);
799
+ }
800
+ }
801
+ return dbi;
802
+ }
803
+
804
+ /**
805
+ * This can be called to ensure that the specified table exists and if it does not exist, it should be created.
806
+ * @param tableName
807
+ * @param databaseName
808
+ * @param customPath
809
+ * @param expiration
810
+ * @param eviction
811
+ * @param scanInterval
812
+ * @param attributes
813
+ * @param audit
814
+ * @param sealed
815
+ * @param splitSegments
816
+ * @param replicate
817
+ */
818
+ export function table<TableResourceType>(tableDefinition: TableDefinition): TableResourceType {
819
+ let {
820
+ table: tableName,
821
+ database: databaseName,
822
+ expiration,
823
+ eviction,
824
+ scanInterval,
825
+ attributes,
826
+ audit,
827
+ sealed,
828
+ splitSegments,
829
+ replicate,
830
+ trackDeletes,
831
+ schemaDefined,
832
+ origin,
833
+ } = tableDefinition;
834
+ if (!databaseName) databaseName = DEFAULT_DATABASE_NAME;
835
+ const rootStore = database({ database: databaseName, table: tableName });
836
+ const tables = databases[databaseName];
837
+ logger.trace(`Defining ${tableName} in ${databaseName}`);
838
+ let Table = tables?.[tableName];
839
+ if (rootStore.status === 'closed') {
840
+ throw new Error(`Can not use a closed data store for ${tableName}`);
841
+ }
842
+ let primaryKey;
843
+ let primaryKeyAttribute;
844
+ let attributesDbi;
845
+ if (schemaDefined == undefined) schemaDefined = true;
846
+ const internalDbiInit = createOpenDBIObject(false);
847
+
848
+ for (const attribute of attributes) {
849
+ if (attribute.attribute && !attribute.name) {
850
+ // there is some legacy code that calls the attribute's name the attribute's attribute
851
+ attribute.name = attribute.attribute;
852
+ attribute.indexed = true;
853
+ } else attribute.attribute = attribute.name;
854
+ if (attribute.expiresAt) attribute.indexed = true;
855
+ }
856
+ let hasChanges;
857
+ let releaseExclusiveLock: () => void;
858
+ if (Table) {
859
+ primaryKey = Table.primaryKey;
860
+ if (Table.primaryStore.rootStore.status === 'closed') {
861
+ throw new Error(`Can not use a closed data store from ${tableName} class`);
862
+ }
863
+ // it table already exists, get the split segments setting
864
+ if (splitSegments == undefined) splitSegments = Table.splitSegments;
865
+ Table.attributes.splice(0, Table.attributes.length, ...attributes);
866
+ } else {
867
+ const auditStore = rootStore.auditStore;
868
+ primaryKeyAttribute = attributes.find((attribute) => attribute.isPrimaryKey) || {};
869
+ primaryKey = primaryKeyAttribute.name;
870
+ primaryKeyAttribute.isPrimaryKey = true;
871
+ primaryKeyAttribute.schemaDefined = schemaDefined;
872
+ // can't change compression after the fact (except threshold), so save only when we create the table
873
+ primaryKeyAttribute.compression = getDefaultCompression();
874
+ if (trackDeletes) primaryKeyAttribute.trackDeletes = true;
875
+ audit = primaryKeyAttribute.audit = typeof audit === 'boolean' ? audit : envGet(CONFIG_PARAMS.LOGGING_AUDITLOG);
876
+ if (expiration) primaryKeyAttribute.expiration = expiration;
877
+ if (eviction) primaryKeyAttribute.eviction = eviction;
878
+ splitSegments ??= false;
879
+ primaryKeyAttribute.splitSegments = splitSegments; // always default to not splitting segments going forward
880
+ if (typeof sealed === 'boolean') primaryKeyAttribute.sealed = sealed;
881
+ if (typeof replicate === 'boolean') primaryKeyAttribute.replicate = replicate;
882
+ if (origin) {
883
+ if (!primaryKeyAttribute.origins) primaryKeyAttribute.origins = [origin];
884
+ else if (!primaryKeyAttribute.origins.includes(origin)) primaryKeyAttribute.origins.push(origin);
885
+ }
886
+ logger.trace(`${tableName} table loading, opening primary store`);
887
+ const dbiInit = createOpenDBIObject(false, true);
888
+ dbiInit.compression = primaryKeyAttribute.compression;
889
+ const dbiName = tableName + '/';
890
+
891
+ if (rootStore instanceof RocksDatabase) {
892
+ attributesDbi = rootStore.dbisDb = openRocksDatabase(rootStore.path, {
893
+ ...internalDbiInit,
894
+ disableWAL: false,
895
+ name: INTERNAL_DBIS_NAME,
896
+ });
897
+ } else {
898
+ attributesDbi = rootStore.dbisDb = rootStore.openDB(INTERNAL_DBIS_NAME, internalDbiInit);
899
+ }
900
+
901
+ exclusiveLock(); // get an exclusive lock on the database so we can verify that we are the only thread creating the table (and assigning the table id)
902
+ if (attributesDbi.getSync(dbiName)) {
903
+ // table was created while we were setting up
904
+ if (releaseExclusiveLock) releaseExclusiveLock();
905
+ resetDatabases();
906
+ return table(tableDefinition);
907
+ }
908
+
909
+ let primaryStore;
910
+ if (rootStore instanceof RocksDatabase) {
911
+ primaryStore = openRocksDatabase(rootStore.path, { ...dbiInit, name: dbiName });
912
+ } else {
913
+ primaryStore = rootStore.openDB(dbiName, dbiInit);
914
+ }
915
+ primaryStore = handleLocalTimeForGets(primaryStore, rootStore);
916
+ rootStore.databaseName = databaseName;
917
+ primaryStore.tableId = attributesDbi.getSync(NEXT_TABLE_ID);
918
+ logger.trace(`Assigning new table id ${primaryStore.tableId} for ${tableName}`);
919
+ if (!primaryStore.tableId) primaryStore.tableId = 1;
920
+ attributesDbi.put(NEXT_TABLE_ID, primaryStore.tableId + 1);
921
+
922
+ primaryKeyAttribute.tableId = primaryStore.tableId;
923
+ Table = setTable(
924
+ tables,
925
+ tableName,
926
+ makeTable({
927
+ primaryStore,
928
+ auditStore,
929
+ audit,
930
+ sealed,
931
+ splitSegments,
932
+ replicate,
933
+ trackDeletes,
934
+ expirationMS: expiration && expiration * 1000,
935
+ evictionMS: eviction && eviction * 1000,
936
+ primaryKey,
937
+ tableName,
938
+ tableId: primaryStore.tableId,
939
+ databasePath: databaseName,
940
+ databaseName,
941
+ indices: {},
942
+ attributes,
943
+ schemaDefined,
944
+ dbisDB: attributesDbi,
945
+ })
946
+ );
947
+ Table.schemaVersion = 1;
948
+ hasChanges = true;
949
+
950
+ attributesDbi.put(dbiName, primaryKeyAttribute);
951
+ }
952
+ const indices = Table.indices;
953
+ if (!attributesDbi) {
954
+ if (rootStore instanceof RocksDatabase) {
955
+ rootStore.dbisDb = openRocksDatabase(rootStore.path, {
956
+ ...internalDbiInit,
957
+ disableWAL: false,
958
+ name: INTERNAL_DBIS_NAME,
959
+ });
960
+ } else {
961
+ rootStore.dbisDb = rootStore.openDB(INTERNAL_DBIS_NAME, internalDbiInit);
962
+ }
963
+ attributesDbi = rootStore.dbisDb;
964
+ }
965
+ Table.dbisDB = attributesDbi;
966
+ const indicesToRemove = [];
967
+ for (const { key, value } of attributesDbi.getRange({ start: true })) {
968
+ let [attributeTableName, attribute_name] = key.toString().split('/');
969
+ if (attribute_name === '') attribute_name = value.name; // primary key
970
+ if (attribute_name) {
971
+ if (attributeTableName !== tableName) continue;
972
+ } else {
973
+ // table attribute for a table with no primary key, we don't want to remove this, so continue on
974
+ continue;
975
+ }
976
+ const attribute = attributes.find((attribute) => attribute.name === attribute_name);
977
+ const removeIndex = !attribute?.indexed && value.indexed && !value.isPrimaryKey;
978
+ if (!attribute || removeIndex) {
979
+ exclusiveLock();
980
+ hasChanges = true;
981
+ if (!attribute) attributesDbi.remove(key);
982
+ if (removeIndex) {
983
+ const indexDbi = Table.indices[attributeTableName];
984
+ if (indexDbi) indicesToRemove.push(indexDbi);
985
+ }
986
+ }
987
+ }
988
+ const attributesToIndex = [];
989
+ try {
990
+ // TODO: If we have attributes and the schemaDefined flag is not set, turn it on
991
+ // iterate through the attributes to ensure that we have all the dbis created and indexed
992
+ for (const attribute of attributes || []) {
993
+ if (attribute.relationship || attribute.computed) {
994
+ hasChanges = true; // need to update the table so the computed properties are translated to property resolvers
995
+ if (attribute.relationship) continue;
996
+ }
997
+ let dbiKey = tableName + '/' + (attribute.name || '');
998
+ Object.defineProperty(attribute, 'key', { value: dbiKey, configurable: true });
999
+ let attributeDescriptor = attributesDbi.getSync(dbiKey);
1000
+ if (attribute.isPrimaryKey) {
1001
+ attributeDescriptor = attributeDescriptor || attributesDbi.getSync((dbiKey = tableName + '/')) || {};
1002
+ // primary key can't change indexing, but settings can change
1003
+ if (
1004
+ (audit !== undefined && audit !== Table.audit) ||
1005
+ (sealed !== undefined && sealed !== Table.sealed) ||
1006
+ (replicate !== undefined && replicate !== Table.replicate) ||
1007
+ (+expiration || undefined) !== (+attributeDescriptor.expiration || undefined) ||
1008
+ (+eviction || undefined) !== (+attributeDescriptor.eviction || undefined) ||
1009
+ attribute.type !== attributeDescriptor.type
1010
+ ) {
1011
+ const updatedPrimaryAttribute = { ...attributeDescriptor };
1012
+ if (typeof audit === 'boolean') {
1013
+ if (audit) Table.enableAuditing(audit);
1014
+ updatedPrimaryAttribute.audit = audit;
1015
+ }
1016
+ if (expiration) updatedPrimaryAttribute.expiration = +expiration;
1017
+ if (eviction) updatedPrimaryAttribute.eviction = +eviction;
1018
+ if (sealed !== undefined) updatedPrimaryAttribute.sealed = sealed;
1019
+ if (replicate !== undefined) updatedPrimaryAttribute.replicate = replicate;
1020
+ if (attribute.type) updatedPrimaryAttribute.type = attribute.type;
1021
+ hasChanges = true; // send out notification of the change
1022
+ exclusiveLock();
1023
+ attributesDbi.put(dbiKey, updatedPrimaryAttribute);
1024
+ }
1025
+
1026
+ continue;
1027
+ }
1028
+
1029
+ // note that non-indexed attributes do not need a dbi
1030
+ if (attributeDescriptor?.attribute && !attributeDescriptor.name) attributeDescriptor.indexed = true; // legacy descriptor
1031
+ const changed =
1032
+ !attributeDescriptor ||
1033
+ attributeDescriptor.type !== attribute.type ||
1034
+ JSON.stringify(attributeDescriptor.indexed) !== JSON.stringify(attribute.indexed) ||
1035
+ attributeDescriptor.nullable !== attribute.nullable ||
1036
+ attributeDescriptor.version !== attribute.version ||
1037
+ attributeDescriptor.enumerable !== attribute.enumerable ||
1038
+ JSON.stringify(attributeDescriptor.properties) !== JSON.stringify(attribute.properties) ||
1039
+ JSON.stringify(attributeDescriptor.elements) !== JSON.stringify(attribute.elements);
1040
+ if (attribute.indexed) {
1041
+ const dbi = openIndex(dbiKey, rootStore, attribute);
1042
+ if (
1043
+ changed ||
1044
+ (attributeDescriptor.indexingPID && attributeDescriptor.indexingPID !== process.pid) ||
1045
+ attributeDescriptor.restartNumber < workerData?.restartNumber
1046
+ ) {
1047
+ hasChanges = true;
1048
+ exclusiveLock();
1049
+ attributeDescriptor = attributesDbi.getSync(dbiKey);
1050
+ if (
1051
+ changed ||
1052
+ (attributeDescriptor.indexingPID && attributeDescriptor.indexingPID !== process.pid) ||
1053
+ attributeDescriptor.restartNumber < workerData?.restartNumber
1054
+ ) {
1055
+ hasChanges = true;
1056
+ if (attribute.indexNulls === undefined) attribute.indexNulls = true;
1057
+ let hasExistingData = false;
1058
+ for (let _entry of Table.primaryStore.getRange({ start: true })) {
1059
+ hasExistingData = true;
1060
+ break;
1061
+ }
1062
+ if (hasExistingData) {
1063
+ attribute.lastIndexedKey = attributeDescriptor?.lastIndexedKey ?? undefined;
1064
+ attribute.indexingPID = process.pid;
1065
+ dbi.isIndexing = true;
1066
+ Object.defineProperty(attribute, 'dbi', { value: dbi });
1067
+ // we only set indexing nulls to true if new or reindexing, we can't have partial indexing of null
1068
+ attributesToIndex.push(attribute);
1069
+ }
1070
+ }
1071
+ attributesDbi.put(dbiKey, attribute);
1072
+ }
1073
+ if (attributeDescriptor?.indexNulls && attribute.indexNulls === undefined) attribute.indexNulls = true;
1074
+ dbi.indexNulls = attribute.indexNulls;
1075
+ indices[attribute.name] = dbi;
1076
+ } else if (changed) {
1077
+ hasChanges = true;
1078
+ exclusiveLock();
1079
+ attributesDbi.put(dbiKey, attribute);
1080
+ }
1081
+ }
1082
+ } finally {
1083
+ if (releaseExclusiveLock) releaseExclusiveLock();
1084
+ }
1085
+ if (hasChanges) {
1086
+ Table.schemaVersion++;
1087
+ Table.updatedAttributes();
1088
+ }
1089
+ logger.trace(`${tableName} table loading, running index`);
1090
+ if (attributesToIndex.length > 0 || indicesToRemove.length > 0) {
1091
+ Table.indexingOperation = runIndexing(Table, attributesToIndex, indicesToRemove);
1092
+ } else if (hasChanges)
1093
+ signalling.signalSchemaChange(
1094
+ new SchemaEventMsg(process.pid, 'schema-change', Table.databaseName, Table.tableName)
1095
+ );
1096
+
1097
+ Table.origin = origin;
1098
+ if (hasChanges) {
1099
+ databaseEventsEmitter.emit('updateTable', Table, origin !== 'cluster');
1100
+ }
1101
+ if (expiration || eviction || scanInterval)
1102
+ Table.setTTLExpiration({
1103
+ expiration,
1104
+ eviction,
1105
+ scanInterval,
1106
+ });
1107
+ logger.trace(`${tableName} table loaded`);
1108
+
1109
+ return Table as TableResourceType;
1110
	// Acquire an exclusive lock for attribute updates.
	// Lazily acquires a cross-process exclusive lock the first time it is called;
	// subsequent calls are no-ops while the lock is held. The enclosing code is
	// responsible for calling releaseExclusiveLock (in its finally block).
	function exclusiveLock() {
		if (releaseExclusiveLock) return; // already holding the lock
		if (rootStore instanceof RocksDatabase) {
			while (!rootStore.tryLock('update-attributes')) {} // use a spin lock, we really need a synchronous exclusive lock here
			releaseExclusiveLock = () => {
				rootStore.unlock('update-attributes');
			};
		} else {
			// we only need an exclusive transaction lock in lmdb
			// transactionSync treats a returned thenable as pending work inside the
			// transaction: it keeps the write transaction open until the callback
			// (the transaction finalizer) is invoked. We capture that callback as
			// releaseExclusiveLock so the caller decides when the txn (and lock) ends.
			rootStore.transactionSync(() => {
				return {
					then(callback) {
						releaseExclusiveLock = callback;
					},
				};
			});
		}
	}
1129
+ }
1130
// Backpressure bounds for the indexing loop: above MAX we block awaiting the
// last write; above MIN we merely yield the event loop between records.
const MAX_OUTSTANDING_INDEXING = 1000;
const MIN_OUTSTANDING_INDEXING = 10;
/**
 * Background (re)indexing job for a table: drops obsolete index stores, then
 * scans the primary store and writes index entries for each attribute to index.
 * Progress is checkpointed (attribute.lastIndexedKey) every 100 records so an
 * interrupted run can resume, and schema-change signals notify other threads
 * at start and on completion. Errors are logged, never thrown to the caller.
 * @param Table the table resource being indexed
 * @param attributes attribute descriptors to (re)index; each carries its `dbi` index store
 * @param indicesToRemove index stores to drop entirely
 */
async function runIndexing(Table, attributes, indicesToRemove) {
	try {
		logger.info(`Indexing ${Table.tableName} attributes`, attributes);
		await signalling.signalSchemaChange(
			new SchemaEventMsg(process.pid, 'schema-change', Table.databaseName, Table.tableName)
		);
		let lastResolution;
		for (const index of indicesToRemove) {
			lastResolution = index.drop();
		}
		let interrupted;
		const attributeErrorReported = {};
		let indexed = 0;
		const attributesLength = attributes.length;
		await new Promise((resolve) => setImmediate(resolve)); // yield event turn, indexing should consistently take at least one event turn
		if (attributesLength > 0) {
			let start: any;
			for (const attribute of attributes) {
				// if we are resuming, we need to start from the last key we indexed by all attributes
				// (the minimum of all checkpoints, so no attribute misses records)
				if (compareKeys(attribute.lastIndexedKey, start) < 0) start = attribute.lastIndexedKey;
				if (attribute.lastIndexedKey == undefined) {
					// if we are starting from the beginning, clear out any previous index entries since we are rewriting
					if (attribute.dbi.clearAsync) {
						// LMDB, note that we don't need to wait for this to complete, just gets enqueued in front of the other writes
						attribute.dbi.clearAsync();
					} else {
						await attribute.dbi.clear();
					}
				}
			}
			let outstanding = 0;
			// this means that a new attribute has been introduced that needs to be indexed
			for (const { key, value: record } of Table.primaryStore.getRange({
				start,
				lazy: attributesLength < 4,
				versions: true,
				snapshot: false, // don't hold a read transaction this whole time
			})) {
				if (!record) continue; // deletion entry
				// TODO: Do we ever need to interrupt due to a schema change that was not a restart?
				//if (Table.schemaVersion !== schemaVersion) return; // break out if there are any schema changes and let someone else pick it up
				outstanding++;
				// every index operation needs to be guarded by the version still being the same. If it has already changed before
				// we index, that's fine because indexing is idempotent, we can just put the same values again. If it changes
				// during the indexing, the indexing here will fail. This is also fine because it means the other thread will have
				// performed indexing and we don't need to do anything further
				for (let i = 0; i < attributesLength; i++) {
					const attribute = attributes[i];
					const property = attribute.name;
					const index = attribute.dbi;
					try {
						const resolver = attribute.resolve;
						const value = record && (resolver ? resolver(record) : record[property]);
						if (index.customIndex) {
							// custom index implementations handle their own value extraction/storage
							index.customIndex.index(key, value);
							continue;
						}
						// a single attribute value may expand to multiple index entries
						const values = getIndexedValues(value, index.indexNulls);
						if (values) {
							for (let i = 0, l = values.length; i < l; i++) {
								lastResolution = index.put(values[i], key);
							}
						}
					} catch (error) {
						if (!attributeErrorReported[property]) {
							// just report an indexing error once per attribute so we don't spam the logs
							attributeErrorReported[property] = true;
							logger.error(`Error indexing attribute ${property}`, error);
						}
					}
				}
				// decrement the outstanding-write counter once the latest enqueued write settles
				when(
					lastResolution,
					() => outstanding--,
					(error) => {
						outstanding--;
						logger.error(error);
					}
				);
				// a restart-number mismatch means the worker is being restarted; bail out after checkpointing
				if (workerData && workerData.restartNumber !== manageThreads.restartNumber) {
					interrupted = true;
				}
				if (++indexed % 100 === 0 || interrupted) {
					// occasionally update our progress so if we crash, we can resume
					for (const attribute of attributes) {
						attribute.lastIndexedKey = key;
						Table.dbisDB.put(attribute.key, attribute);
					}
					if (interrupted) return;
				}
				// backpressure: block on too many queued writes, otherwise just yield occasionally
				if (outstanding > MAX_OUTSTANDING_INDEXING) await lastResolution;
				else if (outstanding > MIN_OUTSTANDING_INDEXING) await new Promise((resolve) => setImmediate(resolve)); // yield event turn, don't want to use all computation
			}
			// update the attributes to indicate that we are finished
			for (const attribute of attributes) {
				delete attribute.lastIndexedKey;
				delete attribute.indexingPID;
				attribute.dbi.isIndexing = false;
				lastResolution = Table.dbisDB.put(attribute.key, attribute);
			}
		}
		await lastResolution;
		// now notify all the threads that we are done and the index is ready to use
		await signalling.signalSchemaChange(
			new SchemaEventMsg(process.pid, 'indexing-finished', Table.databaseName, Table.tableName)
		);
		logger.info(`Finished indexing ${Table.tableName} attributes`, attributes);
	} catch (error) {
		logger.error('Error in indexing', error);
	}
}
1243
+
1244
+ export function dropTableMeta({ table: tableName, database: databaseName }) {
1245
+ const rootStore = database({ database: databaseName, table: tableName });
1246
+ const removals = [];
1247
+ const dbisDb = rootStore.dbisDb;
1248
+ for (const key of dbisDb.getKeys({ start: tableName + '/', end: tableName + '0' })) {
1249
+ removals.push(dbisDb.remove(key));
1250
+ }
1251
+ databaseEventsEmitter.emit('dropTable', tableName, databaseName);
1252
+ return Promise.all(removals);
1253
+ }
1254
+
1255
+ export function onUpdatedTable(listener: (table: Table) => void) {
1256
+ databaseEventsEmitter.on('updateTable', listener);
1257
+ return {
1258
+ remove() {
1259
+ databaseEventsEmitter.off('updateTable', listener);
1260
+ },
1261
+ };
1262
+ }
1263
+ export function onRemovedTable(listener: (tableName: string, databaseName: string) => void) {
1264
+ databaseEventsEmitter.on('dropTable', listener);
1265
+ return {
1266
+ remove() {
1267
+ databaseEventsEmitter.off('dropTable', listener);
1268
+ },
1269
+ };
1270
+ }
1271
+ export function onRemovedDB(listener: (databaseName: string) => void) {
1272
+ databaseEventsEmitter.on('dropDatabase', listener);
1273
+ return {
1274
+ remove() {
1275
+ databaseEventsEmitter.off('dropDatabase', listener);
1276
+ },
1277
+ };
1278
+ }
1279
+
1280
+ export function getDefaultCompression() {
1281
+ const LMDB_COMPRESSION = envGet(CONFIG_PARAMS.STORAGE_COMPRESSION);
1282
+ const STORAGE_COMPRESSION_DICTIONARY = envGet(CONFIG_PARAMS.STORAGE_COMPRESSION_DICTIONARY);
1283
+ const STORAGE_COMPRESSION_THRESHOLD =
1284
+ envGet(CONFIG_PARAMS.STORAGE_COMPRESSION_THRESHOLD) || DEFAULT_COMPRESSION_THRESHOLD;
1285
+ const LMDB_COMPRESSION_OPTS = { startingOffset: 32 };
1286
+ if (STORAGE_COMPRESSION_DICTIONARY)
1287
+ LMDB_COMPRESSION_OPTS['dictionary'] = readFileSync(STORAGE_COMPRESSION_DICTIONARY);
1288
+ if (STORAGE_COMPRESSION_THRESHOLD) LMDB_COMPRESSION_OPTS['threshold'] = STORAGE_COMPRESSION_THRESHOLD;
1289
+ return LMDB_COMPRESSION && LMDB_COMPRESSION_OPTS;
1290
+ }