@rocicorp/zero 1.4.0-canary.1 → 1.4.0-canary.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (175)
  1. package/out/analyze-query/src/analyze-cli.d.ts +0 -1
  2. package/out/analyze-query/src/analyze-cli.d.ts.map +1 -1
  3. package/out/analyze-query/src/analyze-cli.js +0 -1
  4. package/out/analyze-query/src/analyze-cli.js.map +1 -1
  5. package/out/analyze-query/src/bin-analyze.js +11 -10
  6. package/out/analyze-query/src/bin-analyze.js.map +1 -1
  7. package/out/analyze-query/src/bin-transform.js +1 -1
  8. package/out/analyze-query/src/bin-transform.js.map +1 -1
  9. package/out/replicache/src/btree/node.d.ts +1 -1
  10. package/out/replicache/src/btree/node.d.ts.map +1 -1
  11. package/out/replicache/src/btree/node.js +34 -21
  12. package/out/replicache/src/btree/node.js.map +1 -1
  13. package/out/replicache/src/btree/write.js +1 -2
  14. package/out/replicache/src/btree/write.js.map +1 -1
  15. package/out/replicache/src/kv/sqlite-store.d.ts.map +1 -1
  16. package/out/replicache/src/kv/sqlite-store.js +7 -1
  17. package/out/replicache/src/kv/sqlite-store.js.map +1 -1
  18. package/out/replicache/src/with-transactions.d.ts.map +1 -1
  19. package/out/replicache/src/with-transactions.js +16 -2
  20. package/out/replicache/src/with-transactions.js.map +1 -1
  21. package/out/shared/src/btree-set.d.ts +6 -0
  22. package/out/shared/src/btree-set.d.ts.map +1 -1
  23. package/out/shared/src/btree-set.js +34 -0
  24. package/out/shared/src/btree-set.js.map +1 -1
  25. package/out/zero/package.js +8 -2
  26. package/out/zero/package.js.map +1 -1
  27. package/out/zero/src/adapters/kysely.d.ts +2 -0
  28. package/out/zero/src/adapters/kysely.d.ts.map +1 -0
  29. package/out/zero/src/adapters/kysely.js +2 -0
  30. package/out/zero/src/zero.js +2 -1
  31. package/out/zero-cache/src/auth/write-authorizer.d.ts.map +1 -1
  32. package/out/zero-cache/src/auth/write-authorizer.js +14 -1
  33. package/out/zero-cache/src/auth/write-authorizer.js.map +1 -1
  34. package/out/zero-cache/src/config/zero-config.d.ts +18 -0
  35. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  36. package/out/zero-cache/src/config/zero-config.js +35 -3
  37. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  38. package/out/zero-cache/src/db/migration-lite.js +8 -1
  39. package/out/zero-cache/src/db/migration-lite.js.map +1 -1
  40. package/out/zero-cache/src/db/pg-to-lite.d.ts +1 -1
  41. package/out/zero-cache/src/db/pg-to-lite.d.ts.map +1 -1
  42. package/out/zero-cache/src/db/pg-to-lite.js +13 -13
  43. package/out/zero-cache/src/db/pg-to-lite.js.map +1 -1
  44. package/out/zero-cache/src/observability/metrics.d.ts +36 -6
  45. package/out/zero-cache/src/observability/metrics.d.ts.map +1 -1
  46. package/out/zero-cache/src/observability/metrics.js +55 -10
  47. package/out/zero-cache/src/observability/metrics.js.map +1 -1
  48. package/out/zero-cache/src/scripts/decommission.d.ts.map +1 -1
  49. package/out/zero-cache/src/scripts/decommission.js +3 -3
  50. package/out/zero-cache/src/scripts/decommission.js.map +1 -1
  51. package/out/zero-cache/src/scripts/deploy-permissions.js +1 -1
  52. package/out/zero-cache/src/scripts/deploy-permissions.js.map +1 -1
  53. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  54. package/out/zero-cache/src/server/change-streamer.js +4 -5
  55. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  56. package/out/zero-cache/src/server/main.d.ts.map +1 -1
  57. package/out/zero-cache/src/server/main.js +6 -1
  58. package/out/zero-cache/src/server/main.js.map +1 -1
  59. package/out/zero-cache/src/server/reaper.d.ts.map +1 -1
  60. package/out/zero-cache/src/server/reaper.js +1 -4
  61. package/out/zero-cache/src/server/reaper.js.map +1 -1
  62. package/out/zero-cache/src/server/shadow-syncer.js +35 -0
  63. package/out/zero-cache/src/server/shadow-syncer.js.map +1 -0
  64. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  65. package/out/zero-cache/src/server/syncer.js +2 -8
  66. package/out/zero-cache/src/server/syncer.js.map +1 -1
  67. package/out/zero-cache/src/server/worker-urls.d.ts +1 -0
  68. package/out/zero-cache/src/server/worker-urls.d.ts.map +1 -1
  69. package/out/zero-cache/src/server/worker-urls.js +2 -1
  70. package/out/zero-cache/src/server/worker-urls.js.map +1 -1
  71. package/out/zero-cache/src/services/analyze.d.ts.map +1 -1
  72. package/out/zero-cache/src/services/analyze.js +1 -1
  73. package/out/zero-cache/src/services/analyze.js.map +1 -1
  74. package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts +8 -1
  75. package/out/zero-cache/src/services/change-source/pg/backfill-stream.d.ts.map +1 -1
  76. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js +31 -18
  77. package/out/zero-cache/src/services/change-source/pg/backfill-stream.js.map +1 -1
  78. package/out/zero-cache/src/services/change-source/pg/change-source.d.ts.map +1 -1
  79. package/out/zero-cache/src/services/change-source/pg/change-source.js +48 -47
  80. package/out/zero-cache/src/services/change-source/pg/change-source.js.map +1 -1
  81. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts +6 -1
  82. package/out/zero-cache/src/services/change-source/pg/initial-sync.d.ts.map +1 -1
  83. package/out/zero-cache/src/services/change-source/pg/initial-sync.js +64 -22
  84. package/out/zero-cache/src/services/change-source/pg/initial-sync.js.map +1 -1
  85. package/out/zero-cache/src/services/change-streamer/change-streamer-http.d.ts.map +1 -1
  86. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js +2 -3
  87. package/out/zero-cache/src/services/change-streamer/change-streamer-http.js.map +1 -1
  88. package/out/zero-cache/src/services/change-streamer/schema/tables.js +1 -1
  89. package/out/zero-cache/src/services/replicator/change-processor.d.ts.map +1 -1
  90. package/out/zero-cache/src/services/replicator/change-processor.js +10 -3
  91. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  92. package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js +49 -0
  93. package/out/zero-cache/src/services/shadow-sync/shadow-sync-service.js.map +1 -0
  94. package/out/zero-cache/src/services/statz.js +3 -3
  95. package/out/zero-cache/src/services/statz.js.map +1 -1
  96. package/out/zero-cache/src/services/view-syncer/client-handler.js +3 -6
  97. package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
  98. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts +1 -0
  99. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
  100. package/out/zero-cache/src/services/view-syncer/cvr-store.js +34 -11
  101. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  102. package/out/zero-cache/src/services/view-syncer/cvr.d.ts +16 -1
  103. package/out/zero-cache/src/services/view-syncer/cvr.d.ts.map +1 -1
  104. package/out/zero-cache/src/services/view-syncer/cvr.js +19 -1
  105. package/out/zero-cache/src/services/view-syncer/cvr.js.map +1 -1
  106. package/out/zero-cache/src/services/view-syncer/inspect-handler.js +1 -1
  107. package/out/zero-cache/src/services/view-syncer/inspect-handler.js.map +1 -1
  108. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +8 -2
  109. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  110. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +50 -10
  111. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  112. package/out/zero-cache/src/services/view-syncer/row-record-cache.js +4 -7
  113. package/out/zero-cache/src/services/view-syncer/row-record-cache.js.map +1 -1
  114. package/out/zero-cache/src/services/view-syncer/row-set-signature.d.ts +17 -0
  115. package/out/zero-cache/src/services/view-syncer/row-set-signature.d.ts.map +1 -0
  116. package/out/zero-cache/src/services/view-syncer/row-set-signature.js +29 -0
  117. package/out/zero-cache/src/services/view-syncer/row-set-signature.js.map +1 -0
  118. package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts +1 -0
  119. package/out/zero-cache/src/services/view-syncer/schema/cvr.d.ts.map +1 -1
  120. package/out/zero-cache/src/services/view-syncer/schema/cvr.js +1 -0
  121. package/out/zero-cache/src/services/view-syncer/schema/cvr.js.map +1 -1
  122. package/out/zero-cache/src/services/view-syncer/schema/init.d.ts.map +1 -1
  123. package/out/zero-cache/src/services/view-syncer/schema/init.js +5 -1
  124. package/out/zero-cache/src/services/view-syncer/schema/init.js.map +1 -1
  125. package/out/zero-cache/src/services/view-syncer/schema/types.d.ts +105 -0
  126. package/out/zero-cache/src/services/view-syncer/schema/types.d.ts.map +1 -1
  127. package/out/zero-cache/src/services/view-syncer/schema/types.js +8 -4
  128. package/out/zero-cache/src/services/view-syncer/schema/types.js.map +1 -1
  129. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  130. package/out/zero-cache/src/services/view-syncer/view-syncer.js +18 -28
  131. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  132. package/out/zero-cache/src/types/pg.d.ts +1 -1
  133. package/out/zero-cache/src/types/pg.d.ts.map +1 -1
  134. package/out/zero-cache/src/types/pg.js +8 -2
  135. package/out/zero-cache/src/types/pg.js.map +1 -1
  136. package/out/zero-cache/src/types/timeout.d.ts +11 -0
  137. package/out/zero-cache/src/types/timeout.d.ts.map +1 -0
  138. package/out/zero-cache/src/types/timeout.js +26 -0
  139. package/out/zero-cache/src/types/timeout.js.map +1 -0
  140. package/out/zero-cache/src/workers/connection.js +3 -3
  141. package/out/zero-cache/src/workers/connection.js.map +1 -1
  142. package/out/zero-client/src/client/version.js +1 -1
  143. package/out/zero-client/src/mod.d.ts +1 -0
  144. package/out/zero-client/src/mod.d.ts.map +1 -1
  145. package/out/zero-client/src/mod.js +1 -0
  146. package/out/zero-react/src/zero.js +1 -0
  147. package/out/zero-server/src/adapters/kysely.d.ts +69 -0
  148. package/out/zero-server/src/adapters/kysely.d.ts.map +1 -0
  149. package/out/zero-server/src/adapters/kysely.js +82 -0
  150. package/out/zero-server/src/adapters/kysely.js.map +1 -0
  151. package/out/zero-server/src/adapters/postgresjs.d.ts.map +1 -1
  152. package/out/zero-server/src/adapters/postgresjs.js +1 -1
  153. package/out/zero-server/src/adapters/postgresjs.js.map +1 -1
  154. package/out/zero-solid/src/zero.js +1 -0
  155. package/out/zql/src/ivm/memory-source.d.ts.map +1 -1
  156. package/out/zql/src/ivm/memory-source.js +3 -3
  157. package/out/zql/src/ivm/memory-source.js.map +1 -1
  158. package/out/zql/src/query/query-internals.d.ts.map +1 -1
  159. package/out/zql/src/query/query-internals.js +1 -1
  160. package/out/zql/src/query/query-internals.js.map +1 -1
  161. package/out/zql/src/query/validate-input.d.ts +8 -0
  162. package/out/zql/src/query/validate-input.d.ts.map +1 -1
  163. package/out/zql/src/query/validate-input.js +15 -2
  164. package/out/zql/src/query/validate-input.js.map +1 -1
  165. package/out/zqlite/src/query-builder.js +19 -7
  166. package/out/zqlite/src/query-builder.js.map +1 -1
  167. package/package.json +10 -2
  168. package/out/analyze-query/src/explain-queries.d.ts +0 -4
  169. package/out/analyze-query/src/explain-queries.d.ts.map +0 -1
  170. package/out/analyze-query/src/explain-queries.js +0 -13
  171. package/out/analyze-query/src/explain-queries.js.map +0 -1
  172. package/out/otel/src/test-log-config.d.ts +0 -8
  173. package/out/otel/src/test-log-config.d.ts.map +0 -1
  174. package/out/otel/src/test-log-config.js +0 -12
  175. package/out/otel/src/test-log-config.js.map +0 -1
package/out/zero-cache/src/db/pg-to-lite.js
@@ -30,19 +30,19 @@ function zeroVersionColumnSpec(defaultVersion) {
  function warnIfDataTypeSupported(lc, liteTypeString, table, column) {
  if (liteTypeToZqlValueType(liteTypeString) === void 0) lc.warn?.(`\n\nWARNING: zero does not yet support the "${upstreamDataType(liteTypeString)}" data type.\nThe "${table}"."${column}" column will not be synced to clients.\n\n`);
  }
- var SIMPLE_TOKEN_EXPRESSION_REGEX = /^[^'()]+$/;
- var UNSUPPORTED_TOKENS = /\b(current_time|current_date|current_timestamp)\b/i;
- var STRING_EXPRESSION_REGEX = /^('.*')::[^']+$/;
- function mapPostgresToLiteDefault(table, column, dataType, defaultExpression) {
+ var NUMERIC_LITERAL_REGEX = /^-?\d+(\.\d+)?$/;
+ var BOOLEAN_LITERAL_REGEX = /^(true|false)$/;
+ var QUOTED_STRING_WITH_CAST_REGEX = /^('.*')::(\w+)$/;
+ var EMPTY_ARRAY_CONSTRUCTOR_REGEX = /^ARRAY\s*\[\s*\]::\w+\[\]$/i;
+ var EMPTY_ARRAY_LITERAL_REGEX = /^'\{\}'::\w+\[\]$/;
+ function mapPostgresToLiteDefault(table, column, defaultExpression) {
  if (!defaultExpression) return null;
- if (UNSUPPORTED_TOKENS.test(defaultExpression)) throw new UnsupportedColumnDefaultError(`Cannot ADD a column with CURRENT_TIME, CURRENT_DATE, or CURRENT_TIMESTAMP`);
- if (SIMPLE_TOKEN_EXPRESSION_REGEX.test(defaultExpression)) {
- if (liteTypeToZqlValueType(dataType) === "boolean") return defaultExpression === "true" ? "1" : "0";
- return defaultExpression;
- }
- const match = STRING_EXPRESSION_REGEX.exec(defaultExpression);
- if (!match) throw new UnsupportedColumnDefaultError(`Unsupported default value for ${table}.${column}: ${defaultExpression}`);
- return match[1];
+ if (NUMERIC_LITERAL_REGEX.test(defaultExpression)) return defaultExpression;
+ if (BOOLEAN_LITERAL_REGEX.test(defaultExpression)) return defaultExpression === "true" ? "1" : "0";
+ const match = QUOTED_STRING_WITH_CAST_REGEX.exec(defaultExpression);
+ if (match) return match[1];
+ if (EMPTY_ARRAY_CONSTRUCTOR_REGEX.test(defaultExpression) || EMPTY_ARRAY_LITERAL_REGEX.test(defaultExpression)) return "'[]'";
+ throw new UnsupportedColumnDefaultError(`Unsupported default value for ${table}.${column}: ${defaultExpression}`);
  }
  function mapPostgresToLiteColumn(table, column, ignoreDefault) {
  const { pos, dataType, notNull, dflt, elemPgTypeClass = null } = column.spec;
@@ -51,7 +51,7 @@ function mapPostgresToLiteColumn(table, column, ignoreDefault) {
  dataType: liteTypeString(dataType, notNull, isEnumColumn(column.spec), isArrayColumn(column.spec)),
  characterMaximumLength: null,
  notNull: false,
- dflt: ignoreDefault === "ignore-default" ? null : mapPostgresToLiteDefault(table, column.name, dataType, dflt),
+ dflt: ignoreDefault === "ignore-default" ? null : mapPostgresToLiteDefault(table, column.name, dflt),
  elemPgTypeClass
  };
  }
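
The hunk above replaces the old blocklist approach (reject CURRENT_TIME/CURRENT_DATE/CURRENT_TIMESTAMP, pass anything else "simple") with a conservative allowlist of default expressions. A rough sketch of the new behavior — table/column names here are made up, and the expected results are read off the regexes in this hunk rather than taken from the package's tests:

```ts
// mapPostgresToLiteDefault is exported "for testing" per the source map below.
import {mapPostgresToLiteDefault} from './pg-to-lite.ts';

mapPostgresToLiteDefault('issue', 'count', '42');          // → '42'    (numeric literal passes through)
mapPostgresToLiteDefault('issue', 'open', 'true');         // → '1'     (boolean literal maps to SQLite 1/0)
mapPostgresToLiteDefault('issue', 'title', "'foo'::text"); // → "'foo'" (type cast stripped)
mapPostgresToLiteDefault('issue', 'tags', "'{}'::text[]"); // → "'[]'"  (empty array becomes JSON '[]')
mapPostgresToLiteDefault('issue', 'ts', 'now()');          // throws UnsupportedColumnDefaultError
```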
package/out/zero-cache/src/db/pg-to-lite.js.map
@@ -1 +1 @@
- {"version":3,"file":"pg-to-lite.js","names":[],"sources":["../../../../../zero-cache/src/db/pg-to-lite.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {ZERO_VERSION_COLUMN_NAME} from '../services/replicator/schema/constants.ts';\nimport {\n liteTypeString,\n liteTypeToZqlValueType,\n upstreamDataType,\n type LiteTypeString,\n} from '../types/lite.ts';\nimport {liteTableName} from '../types/names.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\nimport {\n type ColumnSpec,\n type IndexSpec,\n type LiteIndexSpec,\n type LiteTableSpec,\n type TableSpec,\n} from './specs.ts';\n\n/**\n * Determines if a PostgreSQL column is an enum type.\n * This checks both the element type class (for arrays of enums) and the main type class.\n */\nexport function isEnumColumn(\n spec: Pick<ColumnSpec, 'pgTypeClass' | 'elemPgTypeClass'>,\n): boolean {\n return (spec.elemPgTypeClass ?? spec.pgTypeClass) === PostgresTypeClass.Enum;\n}\n\n/**\n * Determines if a PostgreSQL column is an array type.\n * In PostgreSQL's system, array columns have a non-null elemPgTypeClass.\n */\nexport function isArrayColumn(\n spec: Pick<ColumnSpec, 'elemPgTypeClass'>,\n): boolean {\n return spec.elemPgTypeClass !== null && spec.elemPgTypeClass !== undefined;\n}\n\nfunction zeroVersionColumnSpec(defaultVersion: string | undefined): ColumnSpec {\n return {\n pos: Number.MAX_SAFE_INTEGER, // i.e. last\n characterMaximumLength: null,\n dataType: 'text',\n notNull: false,\n dflt: !defaultVersion ? null : `'${defaultVersion}'`,\n elemPgTypeClass: null,\n };\n}\n\nexport function warnIfDataTypeSupported(\n lc: LogContext,\n liteTypeString: LiteTypeString,\n table: string,\n column: string,\n) {\n if (liteTypeToZqlValueType(liteTypeString) === undefined) {\n lc.warn?.(\n `\\n\\nWARNING: zero does not yet support the \"${upstreamDataType(\n liteTypeString,\n )}\" data type.\\n` +\n `The \"${table}\".\"${column}\" column will not be synced to clients.\\n\\n`,\n );\n }\n}\n\n// As per https://www.sqlite.org/lang_altertable.html#altertabaddcol,\n// expressions with parentheses are disallowed ...\nconst SIMPLE_TOKEN_EXPRESSION_REGEX = /^[^'()]+$/; // e.g. true, false, 1234, 1234.5678\n\n// as well as current_time, current_date, and current_timestamp ...\nconst UNSUPPORTED_TOKENS = /\\b(current_time|current_date|current_timestamp)\\b/i;\n\n// For strings and certain incarnations of primitives (e.g. integers greater\n// than 2^31-1, Postgres' nodeToString() represents the values as type-casted\n// 'string' values, e.g. `'2147483648'::bigint`, `'foo'::text`.\n//\n// These type-qualifiers must be removed, as SQLite doesn't understand or\n// care about them.\nconst STRING_EXPRESSION_REGEX = /^('.*')::[^']+$/;\n\n// Exported for testing.\nexport function mapPostgresToLiteDefault(\n table: string,\n column: string,\n dataType: string,\n defaultExpression: string | null | undefined,\n) {\n if (!defaultExpression) {\n return null;\n }\n if (UNSUPPORTED_TOKENS.test(defaultExpression)) {\n throw new UnsupportedColumnDefaultError(\n `Cannot ADD a column with CURRENT_TIME, CURRENT_DATE, or CURRENT_TIMESTAMP`,\n );\n }\n if (SIMPLE_TOKEN_EXPRESSION_REGEX.test(defaultExpression)) {\n if (liteTypeToZqlValueType(dataType) === 'boolean') {\n return defaultExpression === 'true' ? 
'1' : '0';\n }\n return defaultExpression;\n }\n const match = STRING_EXPRESSION_REGEX.exec(defaultExpression);\n if (!match) {\n throw new UnsupportedColumnDefaultError(\n `Unsupported default value for ${table}.${column}: ${defaultExpression}`,\n );\n }\n return match[1];\n}\n\nexport function mapPostgresToLiteColumn(\n table: string,\n column: {name: string; spec: ColumnSpec},\n ignoreDefault?: 'ignore-default',\n): ColumnSpec {\n const {pos, dataType, notNull, dflt, elemPgTypeClass = null} = column.spec;\n\n // PostgreSQL includes [] in dataType for array types (e.g., 'int4[]',\n // 'int4[][]'). liteTypeString() appends attributes:\n // \"varchar[]|NOT_NULL|TEXT_ARRAY\", \"my_enum[][]|TEXT_ENUM|TEXT_ARRAY\"\n const liteType = liteTypeString(\n dataType,\n notNull,\n isEnumColumn(column.spec),\n isArrayColumn(column.spec),\n );\n\n return {\n pos,\n dataType: liteType,\n characterMaximumLength: null,\n // Note: NOT NULL constraints are always ignored for SQLite (replica) tables.\n // 1. They are enforced by the replication stream.\n // 2. We need nullability for columns with defaults to support\n // write permissions on the \"proposed mutation\" state. Proposed\n // mutations are written to SQLite in a `BEGIN CONCURRENT` transaction in mutagen.\n // Permission policies are run against that state (to get their ruling) then the\n // transaction is rolled back.\n notNull: false,\n // Note: DEFAULT constraints are ignored when creating new tables, but are\n // necessary for adding columns to tables with existing rows.\n dflt:\n ignoreDefault === 'ignore-default'\n ? null\n : mapPostgresToLiteDefault(table, column.name, dataType, dflt),\n elemPgTypeClass,\n };\n}\n\nexport function mapPostgresToLite(\n t: TableSpec,\n defaultVersion?: string,\n): LiteTableSpec {\n // PRIMARY KEYS are not written to the replica. 
Instead, we rely\n // UNIQUE indexes, including those created for upstream PRIMARY KEYs.\n const {schema: _, primaryKey: _dropped, ...liteSpec} = t;\n const name = liteTableName(t);\n return {\n ...liteSpec,\n name,\n columns: {\n ...Object.fromEntries(\n Object.entries(t.columns).map(([col, spec]) => [\n col,\n // `ignore-default` for create table statements because\n // there are no rows to set the default for.\n mapPostgresToLiteColumn(name, {name: col, spec}, 'ignore-default'),\n ]),\n ),\n [ZERO_VERSION_COLUMN_NAME]: zeroVersionColumnSpec(defaultVersion),\n },\n };\n}\n\nexport function mapPostgresToLiteIndex(index: IndexSpec): LiteIndexSpec {\n const {schema, tableName, name, ...liteIndex} = index;\n return {\n tableName: liteTableName({schema, name: tableName}),\n name: liteTableName({schema, name}),\n ...liteIndex,\n };\n}\n\nexport class UnsupportedColumnDefaultError extends Error {\n readonly name = 'UnsupportedColumnDefaultError';\n}\n"],"mappings":";;;;;;;;;AAsBA,SAAgB,aACd,MACS;AACT,SAAQ,KAAK,mBAAmB,KAAK,iBAAiB;;;;;;AAOxD,SAAgB,cACd,MACS;AACT,QAAO,KAAK,oBAAoB,QAAQ,KAAK,oBAAoB,KAAA;;AAGnE,SAAS,sBAAsB,gBAAgD;AAC7E,QAAO;EACL,KAAK,OAAO;EACZ,wBAAwB;EACxB,UAAU;EACV,SAAS;EACT,MAAM,CAAC,iBAAiB,OAAO,IAAI,eAAe;EAClD,iBAAiB;EAClB;;AAGH,SAAgB,wBACd,IACA,gBACA,OACA,QACA;AACA,KAAI,uBAAuB,eAAe,KAAK,KAAA,EAC7C,IAAG,OACD,+CAA+C,iBAC7C,eACD,CAAC,qBACQ,MAAM,KAAK,OAAO,6CAC7B;;AAML,IAAM,gCAAgC;AAGtC,IAAM,qBAAqB;AAQ3B,IAAM,0BAA0B;AAGhC,SAAgB,yBACd,OACA,QACA,UACA,mBACA;AACA,KAAI,CAAC,kBACH,QAAO;AAET,KAAI,mBAAmB,KAAK,kBAAkB,CAC5C,OAAM,IAAI,8BACR,4EACD;AAEH,KAAI,8BAA8B,KAAK,kBAAkB,EAAE;AACzD,MAAI,uBAAuB,SAAS,KAAK,UACvC,QAAO,sBAAsB,SAAS,MAAM;AAE9C,SAAO;;CAET,MAAM,QAAQ,wBAAwB,KAAK,kBAAkB;AAC7D,KAAI,CAAC,MACH,OAAM,IAAI,8BACR,iCAAiC,MAAM,GAAG,OAAO,IAAI,oBACtD;AAEH,QAAO,MAAM;;AAGf,SAAgB,wBACd,OACA,QACA,eACY;CACZ,MAAM,EAAC,KAAK,UAAU,SAAS,MAAM,kBAAkB,SAAQ,OAAO;AAYtE,QAAO;EACL;EACA,UATe,eACf,UACA,SACA,aAAa,OAAO,KAAK,EACzB,cAAc,OAAO,KAAK,CAC3B;EAKC,wBAAwB;EAQxB,SAAS;EAGT,MACE,kBAAkB,mBACd,OACA,yBAAyB,OAAO,OAAO,MAAM,UAAU,KAAK;EAClE;EACD;;AAGH,SAAgB,kBACd,GACA,gBACe;CAGf,MAAM,EAAC,QAAQ,GAAG,YAAY,UAAU,GAAG,aAAY;CACvD,MAAM,OAAO,cAAc,EAAE;AAC7B,QAAO;EACL,GAAG;EACH;EACA,SAAS;GACP,GAAG,OAAO,YACR,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,CAAC,KAAK,UAAU,CAC7C,KAGA,wBAAwB,MAAM;IAAC,MAAM;IAAK;IAAK,EAAE,iBAAiB,CACnE,CAAC,CACH;IACA,2BAA2B,sBAAsB,eAAe;GAClE;EACF;;AAGH,SAAgB,uBAAuB,OAAiC;CACtE,MAAM,EAAC,QAAQ,WAAW,MAAM,GAAG,cAAa;AAChD,QAAO;EACL,WAAW,cAAc;GAAC;GAAQ,MAAM;GAAU,CAAC;EACnD,MAAM,cAAc;GAAC;GAAQ;GAAK,CAAC;EACnC,GAAG;EACJ;;AAGH,IAAa,gCAAb,cAAmD,MAAM;CACvD,OAAgB"}
+ {"version":3,"file":"pg-to-lite.js","names":[],"sources":["../../../../../zero-cache/src/db/pg-to-lite.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {ZERO_VERSION_COLUMN_NAME} from '../services/replicator/schema/constants.ts';\nimport {\n liteTypeString,\n liteTypeToZqlValueType,\n upstreamDataType,\n type LiteTypeString,\n} from '../types/lite.ts';\nimport {liteTableName} from '../types/names.ts';\nimport * as PostgresTypeClass from './postgres-type-class-enum.ts';\nimport {\n type ColumnSpec,\n type IndexSpec,\n type LiteIndexSpec,\n type LiteTableSpec,\n type TableSpec,\n} from './specs.ts';\n\n/**\n * Determines if a PostgreSQL column is an enum type.\n * This checks both the element type class (for arrays of enums) and the main type class.\n */\nexport function isEnumColumn(\n spec: Pick<ColumnSpec, 'pgTypeClass' | 'elemPgTypeClass'>,\n): boolean {\n return (spec.elemPgTypeClass ?? spec.pgTypeClass) === PostgresTypeClass.Enum;\n}\n\n/**\n * Determines if a PostgreSQL column is an array type.\n * In PostgreSQL's system, array columns have a non-null elemPgTypeClass.\n */\nexport function isArrayColumn(\n spec: Pick<ColumnSpec, 'elemPgTypeClass'>,\n): boolean {\n return spec.elemPgTypeClass !== null && spec.elemPgTypeClass !== undefined;\n}\n\nfunction zeroVersionColumnSpec(defaultVersion: string | undefined): ColumnSpec {\n return {\n pos: Number.MAX_SAFE_INTEGER, // i.e. last\n characterMaximumLength: null,\n dataType: 'text',\n notNull: false,\n dflt: !defaultVersion ? null : `'${defaultVersion}'`,\n elemPgTypeClass: null,\n };\n}\n\nexport function warnIfDataTypeSupported(\n lc: LogContext,\n liteTypeString: LiteTypeString,\n table: string,\n column: string,\n) {\n if (liteTypeToZqlValueType(liteTypeString) === undefined) {\n lc.warn?.(\n `\\n\\nWARNING: zero does not yet support the \"${upstreamDataType(\n liteTypeString,\n )}\" data type.\\n` +\n `The \"${table}\".\"${column}\" column will not be synced to clients.\\n\\n`,\n );\n }\n}\n\n// Numeric literals: integers and decimals, optionally negative\nconst NUMERIC_LITERAL_REGEX = /^-?\\d+(\\.\\d+)?$/;\n\n// Boolean literals (PG emits lowercase)\nconst BOOLEAN_LITERAL_REGEX = /^(true|false)$/;\n\n// Quoted string with type cast to a simple scalar type: 'value'::typename\n// For strings and certain incarnations of primitives (e.g. integers greater\n// than 2^31-1, Postgres' nodeToString() represents the values as type-casted\n// 'string' values, e.g. `'2147483648'::bigint`, `'foo'::text`.\n// Only matches simple type names (word characters) - array types like\n// `::text[]` won't match and will trigger backfill.\nconst QUOTED_STRING_WITH_CAST_REGEX = /^('.*')::(\\w+)$/;\n\n// Empty array constructor syntax: ARRAY[]::text[], ARRAY[]::integer[], etc.\n// Maps to '[]' (JSON empty array) in SQLite.\nconst EMPTY_ARRAY_CONSTRUCTOR_REGEX = /^ARRAY\\s*\\[\\s*\\]::\\w+\\[\\]$/i;\n\n// Empty array literal syntax: '{}'::text[], '{}'::integer[], etc.\n// Maps to '[]' (JSON empty array) in SQLite.\nconst EMPTY_ARRAY_LITERAL_REGEX = /^'\\{\\}'::\\w+\\[\\]$/;\n\n// Conservative allowlist approach for SQLite ADD COLUMN defaults.\n// We only allow patterns we know are safe. Everything else triggers\n// backfill from PostgreSQL, which correctly handles complex defaults.\n//\n// Note: We don't validate that the default value matches the column type\n// (e.g., that a numeric literal is used with a numeric column). 
PostgreSQL\n// already enforces this at schema definition time - you can't define\n// `ALTER TABLE foo ADD bar TEXT DEFAULT 123` in PG. So we trust that any\n// default we receive from the replication stream is type-compatible with\n// whatever we map the type to in SQLite.\n//\n// Example: `true`/`false` literals can only appear as defaults for boolean\n// columns in PG, so we don't need to check the column type before converting\n// to 1/0.\n//\n// See: https://www.sqlite.org/lang_altertable.html#altertabaddcol\n//\n// Exported for testing.\nexport function mapPostgresToLiteDefault(\n table: string,\n column: string,\n defaultExpression: string | null | undefined,\n): string | null {\n if (!defaultExpression) {\n return null;\n }\n\n // Numeric literals pass through unchanged\n if (NUMERIC_LITERAL_REGEX.test(defaultExpression)) {\n return defaultExpression;\n }\n\n // Boolean literals convert to SQLite's 1/0\n if (BOOLEAN_LITERAL_REGEX.test(defaultExpression)) {\n return defaultExpression === 'true' ? '1' : '0';\n }\n\n // Quoted strings with type casts: extract just the quoted part\n const match = QUOTED_STRING_WITH_CAST_REGEX.exec(defaultExpression);\n if (match) {\n return match[1];\n }\n\n // Empty arrays: ARRAY[]::type[] or '{}'::type[] → '[]'\n if (\n EMPTY_ARRAY_CONSTRUCTOR_REGEX.test(defaultExpression) ||\n EMPTY_ARRAY_LITERAL_REGEX.test(defaultExpression)\n ) {\n return \"'[]'\";\n }\n\n // Everything else triggers backfill\n throw new UnsupportedColumnDefaultError(\n `Unsupported default value for ${table}.${column}: ${defaultExpression}`,\n );\n}\n\nexport function mapPostgresToLiteColumn(\n table: string,\n column: {name: string; spec: ColumnSpec},\n ignoreDefault?: 'ignore-default',\n): ColumnSpec {\n const {pos, dataType, notNull, dflt, elemPgTypeClass = null} = column.spec;\n\n // PostgreSQL includes [] in dataType for array types (e.g., 'int4[]',\n // 'int4[][]'). liteTypeString() appends attributes:\n // \"varchar[]|NOT_NULL|TEXT_ARRAY\", \"my_enum[][]|TEXT_ENUM|TEXT_ARRAY\"\n const liteType = liteTypeString(\n dataType,\n notNull,\n isEnumColumn(column.spec),\n isArrayColumn(column.spec),\n );\n\n return {\n pos,\n dataType: liteType,\n characterMaximumLength: null,\n // Note: NOT NULL constraints are always ignored for SQLite (replica) tables.\n // 1. They are enforced by the replication stream.\n // 2. We need nullability for columns with defaults to support\n // write permissions on the \"proposed mutation\" state. Proposed\n // mutations are written to SQLite in a `BEGIN CONCURRENT` transaction in mutagen.\n // Permission policies are run against that state (to get their ruling) then the\n // transaction is rolled back.\n notNull: false,\n // Note: DEFAULT constraints are ignored when creating new tables, but are\n // necessary for adding columns to tables with existing rows.\n dflt:\n ignoreDefault === 'ignore-default'\n ? null\n : mapPostgresToLiteDefault(table, column.name, dflt),\n elemPgTypeClass,\n };\n}\n\nexport function mapPostgresToLite(\n t: TableSpec,\n defaultVersion?: string,\n): LiteTableSpec {\n // PRIMARY KEYS are not written to the replica. 
Instead, we rely\n // UNIQUE indexes, including those created for upstream PRIMARY KEYs.\n const {schema: _, primaryKey: _dropped, ...liteSpec} = t;\n const name = liteTableName(t);\n return {\n ...liteSpec,\n name,\n columns: {\n ...Object.fromEntries(\n Object.entries(t.columns).map(([col, spec]) => [\n col,\n // `ignore-default` for create table statements because\n // there are no rows to set the default for.\n mapPostgresToLiteColumn(name, {name: col, spec}, 'ignore-default'),\n ]),\n ),\n [ZERO_VERSION_COLUMN_NAME]: zeroVersionColumnSpec(defaultVersion),\n },\n };\n}\n\nexport function mapPostgresToLiteIndex(index: IndexSpec): LiteIndexSpec {\n const {schema, tableName, name, ...liteIndex} = index;\n return {\n tableName: liteTableName({schema, name: tableName}),\n name: liteTableName({schema, name}),\n ...liteIndex,\n };\n}\n\nexport class UnsupportedColumnDefaultError extends Error {\n readonly name = 'UnsupportedColumnDefaultError';\n}\n"],"mappings":";;;;;;;;;AAsBA,SAAgB,aACd,MACS;AACT,SAAQ,KAAK,mBAAmB,KAAK,iBAAiB;;;;;;AAOxD,SAAgB,cACd,MACS;AACT,QAAO,KAAK,oBAAoB,QAAQ,KAAK,oBAAoB,KAAA;;AAGnE,SAAS,sBAAsB,gBAAgD;AAC7E,QAAO;EACL,KAAK,OAAO;EACZ,wBAAwB;EACxB,UAAU;EACV,SAAS;EACT,MAAM,CAAC,iBAAiB,OAAO,IAAI,eAAe;EAClD,iBAAiB;EAClB;;AAGH,SAAgB,wBACd,IACA,gBACA,OACA,QACA;AACA,KAAI,uBAAuB,eAAe,KAAK,KAAA,EAC7C,IAAG,OACD,+CAA+C,iBAC7C,eACD,CAAC,qBACQ,MAAM,KAAK,OAAO,6CAC7B;;AAKL,IAAM,wBAAwB;AAG9B,IAAM,wBAAwB;AAQ9B,IAAM,gCAAgC;AAItC,IAAM,gCAAgC;AAItC,IAAM,4BAA4B;AAoBlC,SAAgB,yBACd,OACA,QACA,mBACe;AACf,KAAI,CAAC,kBACH,QAAO;AAIT,KAAI,sBAAsB,KAAK,kBAAkB,CAC/C,QAAO;AAIT,KAAI,sBAAsB,KAAK,kBAAkB,CAC/C,QAAO,sBAAsB,SAAS,MAAM;CAI9C,MAAM,QAAQ,8BAA8B,KAAK,kBAAkB;AACnE,KAAI,MACF,QAAO,MAAM;AAIf,KACE,8BAA8B,KAAK,kBAAkB,IACrD,0BAA0B,KAAK,kBAAkB,CAEjD,QAAO;AAIT,OAAM,IAAI,8BACR,iCAAiC,MAAM,GAAG,OAAO,IAAI,oBACtD;;AAGH,SAAgB,wBACd,OACA,QACA,eACY;CACZ,MAAM,EAAC,KAAK,UAAU,SAAS,MAAM,kBAAkB,SAAQ,OAAO;AAYtE,QAAO;EACL;EACA,UATe,eACf,UACA,SACA,aAAa,OAAO,KAAK,EACzB,cAAc,OAAO,KAAK,CAC3B;EAKC,wBAAwB;EAQxB,SAAS;EAGT,MACE,kBAAkB,mBACd,OACA,yBAAyB,OAAO,OAAO,MAAM,KAAK;EACxD;EACD;;AAGH,SAAgB,kBACd,GACA,gBACe;CAGf,MAAM,EAAC,QAAQ,GAAG,YAAY,UAAU,GAAG,aAAY;CACvD,MAAM,OAAO,cAAc,EAAE;AAC7B,QAAO;EACL,GAAG;EACH;EACA,SAAS;GACP,GAAG,OAAO,YACR,OAAO,QAAQ,EAAE,QAAQ,CAAC,KAAK,CAAC,KAAK,UAAU,CAC7C,KAGA,wBAAwB,MAAM;IAAC,MAAM;IAAK;IAAK,EAAE,iBAAiB,CACnE,CAAC,CACH;IACA,2BAA2B,sBAAsB,eAAe;GAClE;EACF;;AAGH,SAAgB,uBAAuB,OAAiC;CACtE,MAAM,EAAC,QAAQ,WAAW,MAAM,GAAG,cAAa;AAChD,QAAO;EACL,WAAW,cAAc;GAAC;GAAQ,MAAM;GAAU,CAAC;EACnD,MAAM,cAAc;GAAC;GAAQ;GAAK,CAAC;EACnC,GAAG;EACJ;;AAGH,IAAa,gCAAb,cAAmD,MAAM;CACvD,OAAgB"}
package/out/zero-cache/src/observability/metrics.d.ts
@@ -3,14 +3,44 @@ export type Category = 'replication' | 'replica' | 'sync' | 'mutation' | 'server
  type Options = MetricOptions & {
  description: string;
  };
- type OptionsWithUnit = MetricOptions & {
- description: string;
- unit: string;
- };
  export declare function getOrCreateUpDownCounter(category: Category, name: string, description: string): UpDownCounter;
  export declare function getOrCreateUpDownCounter(category: Category, name: string, opts: Options): UpDownCounter;
- export declare function getOrCreateHistogram(category: Category, name: string, description: string): Histogram;
- export declare function getOrCreateHistogram(category: Category, name: string, options: OptionsWithUnit): Histogram;
+ /**
+ * A latency histogram whose {@link recordMs} method accepts raw millisecond
+ * durations and converts them to seconds internally.
+ *
+ * Use {@link getOrCreateLatencyHistogram} to create one — the unit (`'s'`),
+ * bucket boundaries, and ms→s conversion are all baked in
+ */
+ export type LatencyHistogram = {
+ /**
+ * Record a duration. Pass the raw elapsed milliseconds — the conversion to
+ * seconds (required by the `unit: 's'` OTel histogram) is handled internally.
+ *
+ * @param durationMs Elapsed time in **milliseconds** (do NOT pre-divide).
+ * @param attributes Optional OTel attributes to attach to the observation.
+ */
+ recordMs(durationMs: number, attributes?: Parameters<Histogram['record']>[1]): void;
+ };
+ /**
+ * Creates (or retrieves) a latency histogram for the given metric.
+ *
+ * - `unit` is always `'s'` (seconds), matching the OTel convention.
+ * - Bucket boundaries are pre-set for zero's typical operation range
+ * (1 ms – 5 s); see {@link LATENCY_HISTOGRAM_BOUNDARIES_S}.
+ * - The returned {@link LatencyHistogram} accepts **milliseconds** via
+ * `recordMs()`, so callers never need to divide by 1000.
+ *
+ * @example
+ * ```ts
+ * readonly #hydrationTime = getOrCreateLatencyHistogram(
+ * 'sync', 'hydration-time', 'Time to hydrate a query.',
+ * );
+ * // ...
+ * this.#hydrationTime.recordMs(performance.now() - start);
+ * ```
+ */
+ export declare function getOrCreateLatencyHistogram(category: Category, name: string, description: string): LatencyHistogram;
  export declare function getOrCreateCounter(category: Category, name: string, description: string): Counter;
  export declare function getOrCreateCounter(category: Category, name: string, opts: Options): Counter;
  export declare function getOrCreateGauge(category: Category, name: string, description: string): ObservableGauge;
package/out/zero-cache/src/observability/metrics.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"metrics.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/observability/metrics.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACV,OAAO,EACP,SAAS,EAET,aAAa,EACb,eAAe,EACf,aAAa,EACd,MAAM,oBAAoB,CAAC;AAK5B,MAAM,MAAM,QAAQ,GAChB,aAAa,GACb,SAAS,GACT,MAAM,GACN,UAAU,GACV,QAAQ,CAAC;AAIb,KAAK,OAAO,GAAG,aAAa,GAAG;IAAC,WAAW,EAAE,MAAM,CAAA;CAAC,CAAC;AACrD,KAAK,eAAe,GAAG,aAAa,GAAG;IAAC,WAAW,EAAE,MAAM,CAAC;IAAC,IAAI,EAAE,MAAM,CAAA;CAAC,CAAC;AA4B3E,wBAAgB,wBAAwB,CACtC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,aAAa,CAAC;AACjB,wBAAgB,wBAAwB,CACtC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,OAAO,GACZ,aAAa,CAAC;AAgBjB,wBAAgB,oBAAoB,CAClC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,SAAS,CAAC;AACb,wBAAgB,oBAAoB,CAClC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,OAAO,EAAE,eAAe,GACvB,SAAS,CAAC;AAqBb,wBAAgB,kBAAkB,CAChC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,OAAO,CAAC;AACX,wBAAgB,kBAAkB,CAChC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,OAAO,GACZ,OAAO,CAAC;AAgBX,wBAAgB,gBAAgB,CAC9B,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,eAAe,CAAC;AACnB,wBAAgB,gBAAgB,CAC9B,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,OAAO,GACZ,eAAe,CAAC"}
+ {"version":3,"file":"metrics.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/observability/metrics.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EACV,OAAO,EACP,SAAS,EAET,aAAa,EACb,eAAe,EACf,aAAa,EACd,MAAM,oBAAoB,CAAC;AAK5B,MAAM,MAAM,QAAQ,GAChB,aAAa,GACb,SAAS,GACT,MAAM,GACN,UAAU,GACV,QAAQ,CAAC;AAIb,KAAK,OAAO,GAAG,aAAa,GAAG;IAAC,WAAW,EAAE,MAAM,CAAA;CAAC,CAAC;AA4BrD,wBAAgB,wBAAwB,CACtC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,aAAa,CAAC;AACjB,wBAAgB,wBAAwB,CACtC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,OAAO,GACZ,aAAa,CAAC;AAcjB;;;;;;GAMG;AACH,MAAM,MAAM,gBAAgB,GAAG;IAC7B;;;;;;OAMG;IACH,QAAQ,CACN,UAAU,EAAE,MAAM,EAClB,UAAU,CAAC,EAAE,UAAU,CAAC,SAAS,CAAC,QAAQ,CAAC,CAAC,CAAC,CAAC,CAAC,GAC9C,IAAI,CAAC;CACT,CAAC;AAoBF;;;;;;;;;;;;;;;;;GAiBG;AACH,wBAAgB,2BAA2B,CACzC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,gBAAgB,CAclB;AAID,wBAAgB,kBAAkB,CAChC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,OAAO,CAAC;AACX,wBAAgB,kBAAkB,CAChC,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,OAAO,GACZ,OAAO,CAAC;AAgBX,wBAAgB,gBAAgB,CAC9B,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,WAAW,EAAE,MAAM,GAClB,eAAe,CAAC;AACnB,wBAAgB,gBAAgB,CAC9B,QAAQ,EAAE,QAAQ,EAClB,IAAI,EAAE,MAAM,EACZ,IAAI,EAAE,OAAO,GACZ,eAAe,CAAC"}
package/out/zero-cache/src/observability/metrics.js
@@ -19,15 +19,60 @@ var upDownCounters = cache();
  function getOrCreateUpDownCounter(category, name, opts) {
  return upDownCounters(name, (name) => getMeter().createUpDownCounter(`zero.${category}.${name}`, typeof opts === "string" ? { description: opts } : opts));
  }
- var histograms = cache();
- function getOrCreateHistogram(category, name, opts) {
- return histograms(name, (name) => {
- const options = typeof opts === "string" ? {
- description: opts,
- unit: "milliseconds"
- } : opts;
- return getMeter().createHistogram(`zero.${category}.${name}`, options);
- });
+ /**
+ * Bucket boundaries (in seconds) for zero's latency histograms.
+ *
+ * The operational range is 1 ms – 5,000 ms (including customers actively
+ * tuning queries). ~2× logarithmic steps give proportionally consistent
+ * `histogram_quantile` accuracy regardless of where values cluster within
+ * that range. 10,000 ms and 30,000 ms are overflow catchers for truly broken
+ * states.
+ *
+ * 1 ms, 2 ms, 5 ms, 10 ms, 20 ms, 50 ms, 100 ms, 200 ms, 500 ms,
+ * 1 s, 2 s, 5 s, 10 s, 30 s
+ */
+ var LATENCY_HISTOGRAM_BOUNDARIES_S = [
+ .001,
+ .002,
+ .005,
+ .01,
+ .02,
+ .05,
+ .1,
+ .2,
+ .5,
+ 1,
+ 2,
+ 5,
+ 10,
+ 30
+ ];
+ var latencyHistograms = cache();
+ /**
+ * Creates (or retrieves) a latency histogram for the given metric.
+ *
+ * - `unit` is always `'s'` (seconds), matching the OTel convention.
+ * - Bucket boundaries are pre-set for zero's typical operation range
+ * (1 ms – 5 s); see {@link LATENCY_HISTOGRAM_BOUNDARIES_S}.
+ * - The returned {@link LatencyHistogram} accepts **milliseconds** via
+ * `recordMs()`, so callers never need to divide by 1000.
+ *
+ * @example
+ * ```ts
+ * readonly #hydrationTime = getOrCreateLatencyHistogram(
+ * 'sync', 'hydration-time', 'Time to hydrate a query.',
+ * );
+ * // ...
+ * this.#hydrationTime.recordMs(performance.now() - start);
+ * ```
+ */
+ function getOrCreateLatencyHistogram(category, name, description) {
+ const h = latencyHistograms(name, (name) => getMeter().createHistogram(`zero.${category}.${name}`, {
+ description,
+ unit: "s",
+ advice: { explicitBucketBoundaries: LATENCY_HISTOGRAM_BOUNDARIES_S }
+ }));
+ return { recordMs: (durationMs, attributes) => h.record(durationMs / 1e3, attributes) };
  }
  var counters = cache();
  function getOrCreateCounter(category, name, opts) {
@@ -38,6 +83,6 @@ function getOrCreateGauge(category, name, opts) {
  return gauges(name, (name) => getMeter().createObservableGauge(`zero.${category}.${name}`, typeof opts === "string" ? { description: opts } : opts));
  }
  //#endregion
- export { getOrCreateCounter, getOrCreateGauge, getOrCreateHistogram, getOrCreateUpDownCounter };
+ export { getOrCreateCounter, getOrCreateGauge, getOrCreateLatencyHistogram, getOrCreateUpDownCounter };

  //# sourceMappingURL=metrics.js.map
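
Callers of the removed getOrCreateHistogram migrate to the new wrapper. A minimal sketch of the before/after — the metric name and timing variable are made up, and the import path is assumed to match the rest of zero-cache:

```ts
import {getOrCreateLatencyHistogram} from './observability/metrics.ts';

// Before (removed in this release): a raw OTel Histogram, typically with
// unit 'milliseconds', recorded directly:
//   const txTime = getOrCreateHistogram('mutation', 'tx-time', 'Transaction time.');
//   txTime.record(elapsedMs);

// After: unit is seconds and bucket boundaries are baked in; callers pass
// raw milliseconds to recordMs() and the wrapper divides by 1000.
const txTime = getOrCreateLatencyHistogram('mutation', 'tx-time', 'Transaction time.');
const start = performance.now();
// ... timed work ...
txTime.recordMs(performance.now() - start);
```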
package/out/zero-cache/src/observability/metrics.js.map
@@ -1 +1 @@
- {"version":3,"file":"metrics.js","names":[],"sources":["../../../../../zero-cache/src/observability/metrics.ts"],"sourcesContent":["import type {\n Counter,\n Histogram,\n Meter,\n MetricOptions,\n ObservableGauge,\n UpDownCounter,\n} from '@opentelemetry/api';\nimport {metrics} from '@opentelemetry/api';\n\n// intentional lazy initialization so it is not started before the SDK is started.\n\nexport type Category =\n | 'replication' // postgres to replica\n | 'replica' // health of replica and litestream backup\n | 'sync' // replica to client\n | 'mutation'\n | 'server';\n\nlet meter: Meter | undefined;\n\ntype Options = MetricOptions & {description: string};\ntype OptionsWithUnit = MetricOptions & {description: string; unit: string};\n\nfunction getMeter() {\n if (!meter) {\n meter = metrics.getMeter('zero');\n }\n return meter;\n}\n\nfunction cache<TRet>(): (\n name: string,\n creator: (name: string) => TRet,\n) => TRet {\n const instruments = new Map<string, TRet>();\n return (name: string, creator: (name: string) => TRet) => {\n const existing = instruments.get(name);\n if (existing) {\n return existing;\n }\n\n const ret = creator(name);\n instruments.set(name, ret);\n return ret;\n };\n}\n\nconst upDownCounters = cache<UpDownCounter>();\n\nexport function getOrCreateUpDownCounter(\n category: Category,\n name: string,\n description: string,\n): UpDownCounter;\nexport function getOrCreateUpDownCounter(\n category: Category,\n name: string,\n opts: Options,\n): UpDownCounter;\nexport function getOrCreateUpDownCounter(\n category: Category,\n name: string,\n opts: string | Options,\n): UpDownCounter {\n return upDownCounters(name, name =>\n getMeter().createUpDownCounter(\n `zero.${category}.${name}`,\n typeof opts === 'string' ? {description: opts} : opts,\n ),\n );\n}\n\nconst histograms = cache<Histogram>();\n\nexport function getOrCreateHistogram(\n category: Category,\n name: string,\n description: string,\n): Histogram;\nexport function getOrCreateHistogram(\n category: Category,\n name: string,\n options: OptionsWithUnit,\n): Histogram;\nexport function getOrCreateHistogram(\n category: Category,\n name: string,\n opts: string | OptionsWithUnit,\n): Histogram {\n return histograms(name, name => {\n const options: {description: string; unit: string; boundaries?: number[]} =\n typeof opts === 'string'\n ? {\n description: opts,\n unit: 'milliseconds',\n }\n : opts;\n\n return getMeter().createHistogram(`zero.${category}.${name}`, options);\n });\n}\n\nconst counters = cache<Counter>();\n\nexport function getOrCreateCounter(\n category: Category,\n name: string,\n description: string,\n): Counter;\nexport function getOrCreateCounter(\n category: Category,\n name: string,\n opts: Options,\n): Counter;\nexport function getOrCreateCounter(\n category: Category,\n name: string,\n opts: string | Options,\n): Counter {\n return counters(name, name =>\n getMeter().createCounter(\n `zero.${category}.${name}`,\n typeof opts === 'string' ? {description: opts} : opts,\n ),\n );\n}\n\nconst gauges = cache<ObservableGauge>();\n\nexport function getOrCreateGauge(\n category: Category,\n name: string,\n description: string,\n): ObservableGauge;\nexport function getOrCreateGauge(\n category: Category,\n name: string,\n opts: Options,\n): ObservableGauge;\nexport function getOrCreateGauge(\n category: Category,\n name: string,\n opts: string | Options,\n): ObservableGauge {\n return gauges(name, name =>\n getMeter().createObservableGauge(\n `zero.${category}.${name}`,\n typeof opts === 'string' ? 
{description: opts} : opts,\n ),\n );\n}\n"],"mappings":";;AAmBA,IAAI;AAKJ,SAAS,WAAW;AAClB,KAAI,CAAC,MACH,SAAQ,QAAQ,SAAS,OAAO;AAElC,QAAO;;AAGT,SAAS,QAGC;CACR,MAAM,8BAAc,IAAI,KAAmB;AAC3C,SAAQ,MAAc,YAAoC;EACxD,MAAM,WAAW,YAAY,IAAI,KAAK;AACtC,MAAI,SACF,QAAO;EAGT,MAAM,MAAM,QAAQ,KAAK;AACzB,cAAY,IAAI,MAAM,IAAI;AAC1B,SAAO;;;AAIX,IAAM,iBAAiB,OAAsB;AAY7C,SAAgB,yBACd,UACA,MACA,MACe;AACf,QAAO,eAAe,OAAM,SAC1B,UAAU,CAAC,oBACT,QAAQ,SAAS,GAAG,QACpB,OAAO,SAAS,WAAW,EAAC,aAAa,MAAK,GAAG,KAClD,CACF;;AAGH,IAAM,aAAa,OAAkB;AAYrC,SAAgB,qBACd,UACA,MACA,MACW;AACX,QAAO,WAAW,OAAM,SAAQ;EAC9B,MAAM,UACJ,OAAO,SAAS,WACZ;GACE,aAAa;GACb,MAAM;GACP,GACD;AAEN,SAAO,UAAU,CAAC,gBAAgB,QAAQ,SAAS,GAAG,QAAQ,QAAQ;GACtE;;AAGJ,IAAM,WAAW,OAAgB;AAYjC,SAAgB,mBACd,UACA,MACA,MACS;AACT,QAAO,SAAS,OAAM,SACpB,UAAU,CAAC,cACT,QAAQ,SAAS,GAAG,QACpB,OAAO,SAAS,WAAW,EAAC,aAAa,MAAK,GAAG,KAClD,CACF;;AAGH,IAAM,SAAS,OAAwB;AAYvC,SAAgB,iBACd,UACA,MACA,MACiB;AACjB,QAAO,OAAO,OAAM,SAClB,UAAU,CAAC,sBACT,QAAQ,SAAS,GAAG,QACpB,OAAO,SAAS,WAAW,EAAC,aAAa,MAAK,GAAG,KAClD,CACF"}
+ {"version":3,"file":"metrics.js","names":[],"sources":["../../../../../zero-cache/src/observability/metrics.ts"],"sourcesContent":["import type {\n Counter,\n Histogram,\n Meter,\n MetricOptions,\n ObservableGauge,\n UpDownCounter,\n} from '@opentelemetry/api';\nimport {metrics} from '@opentelemetry/api';\n\n// intentional lazy initialization so it is not started before the SDK is started.\n\nexport type Category =\n | 'replication' // postgres to replica\n | 'replica' // health of replica and litestream backup\n | 'sync' // replica to client\n | 'mutation'\n | 'server';\n\nlet meter: Meter | undefined;\n\ntype Options = MetricOptions & {description: string};\n\nfunction getMeter() {\n if (!meter) {\n meter = metrics.getMeter('zero');\n }\n return meter;\n}\n\nfunction cache<TRet>(): (\n name: string,\n creator: (name: string) => TRet,\n) => TRet {\n const instruments = new Map<string, TRet>();\n return (name: string, creator: (name: string) => TRet) => {\n const existing = instruments.get(name);\n if (existing) {\n return existing;\n }\n\n const ret = creator(name);\n instruments.set(name, ret);\n return ret;\n };\n}\n\nconst upDownCounters = cache<UpDownCounter>();\n\nexport function getOrCreateUpDownCounter(\n category: Category,\n name: string,\n description: string,\n): UpDownCounter;\nexport function getOrCreateUpDownCounter(\n category: Category,\n name: string,\n opts: Options,\n): UpDownCounter;\nexport function getOrCreateUpDownCounter(\n category: Category,\n name: string,\n opts: string | Options,\n): UpDownCounter {\n return upDownCounters(name, name =>\n getMeter().createUpDownCounter(\n `zero.${category}.${name}`,\n typeof opts === 'string' ? {description: opts} : opts,\n ),\n );\n}\n\n/**\n * A latency histogram whose {@link recordMs} method accepts raw millisecond\n * durations and converts them to seconds internally.\n *\n * Use {@link getOrCreateLatencyHistogram} to create one — the unit (`'s'`),\n * bucket boundaries, and ms→s conversion are all baked in\n */\nexport type LatencyHistogram = {\n /**\n * Record a duration. Pass the raw elapsed milliseconds — the conversion to\n * seconds (required by the `unit: 's'` OTel histogram) is handled internally.\n *\n * @param durationMs Elapsed time in **milliseconds** (do NOT pre-divide).\n * @param attributes Optional OTel attributes to attach to the observation.\n */\n recordMs(\n durationMs: number,\n attributes?: Parameters<Histogram['record']>[1],\n ): void;\n};\n\n/**\n * Bucket boundaries (in seconds) for zero's latency histograms.\n *\n * The operational range is 1 ms – 5,000 ms (including customers actively\n * tuning queries). ~2× logarithmic steps give proportionally consistent\n * `histogram_quantile` accuracy regardless of where values cluster within\n * that range. 
10,000 ms and 30,000 ms are overflow catchers for truly broken\n * states.\n *\n * 1 ms, 2 ms, 5 ms, 10 ms, 20 ms, 50 ms, 100 ms, 200 ms, 500 ms,\n * 1 s, 2 s, 5 s, 10 s, 30 s\n */\nconst LATENCY_HISTOGRAM_BOUNDARIES_S = [\n 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 30,\n];\n\nconst latencyHistograms = cache<Histogram>();\n\n/**\n * Creates (or retrieves) a latency histogram for the given metric.\n *\n * - `unit` is always `'s'` (seconds), matching the OTel convention.\n * - Bucket boundaries are pre-set for zero's typical operation range\n * (1 ms – 5 s); see {@link LATENCY_HISTOGRAM_BOUNDARIES_S}.\n * - The returned {@link LatencyHistogram} accepts **milliseconds** via\n * `recordMs()`, so callers never need to divide by 1000.\n *\n * @example\n * ```ts\n * readonly #hydrationTime = getOrCreateLatencyHistogram(\n * 'sync', 'hydration-time', 'Time to hydrate a query.',\n * );\n * // ...\n * this.#hydrationTime.recordMs(performance.now() - start);\n * ```\n */\nexport function getOrCreateLatencyHistogram(\n category: Category,\n name: string,\n description: string,\n): LatencyHistogram {\n const h = latencyHistograms(name, name =>\n getMeter().createHistogram(`zero.${category}.${name}`, {\n description,\n unit: 's',\n advice: {\n explicitBucketBoundaries: LATENCY_HISTOGRAM_BOUNDARIES_S,\n },\n }),\n );\n return {\n recordMs: (durationMs, attributes) =>\n h.record(durationMs / 1000, attributes),\n };\n}\n\nconst counters = cache<Counter>();\n\nexport function getOrCreateCounter(\n category: Category,\n name: string,\n description: string,\n): Counter;\nexport function getOrCreateCounter(\n category: Category,\n name: string,\n opts: Options,\n): Counter;\nexport function getOrCreateCounter(\n category: Category,\n name: string,\n opts: string | Options,\n): Counter {\n return counters(name, name =>\n getMeter().createCounter(\n `zero.${category}.${name}`,\n typeof opts === 'string' ? {description: opts} : opts,\n ),\n );\n}\n\nconst gauges = cache<ObservableGauge>();\n\nexport function getOrCreateGauge(\n category: Category,\n name: string,\n description: string,\n): ObservableGauge;\nexport function getOrCreateGauge(\n category: Category,\n name: string,\n opts: Options,\n): ObservableGauge;\nexport function getOrCreateGauge(\n category: Category,\n name: string,\n opts: string | Options,\n): ObservableGauge {\n return gauges(name, name =>\n getMeter().createObservableGauge(\n `zero.${category}.${name}`,\n typeof opts === 'string' ? 
{description: opts} : opts,\n ),\n );\n}\n"],"mappings":";;AAmBA,IAAI;AAIJ,SAAS,WAAW;AAClB,KAAI,CAAC,MACH,SAAQ,QAAQ,SAAS,OAAO;AAElC,QAAO;;AAGT,SAAS,QAGC;CACR,MAAM,8BAAc,IAAI,KAAmB;AAC3C,SAAQ,MAAc,YAAoC;EACxD,MAAM,WAAW,YAAY,IAAI,KAAK;AACtC,MAAI,SACF,QAAO;EAGT,MAAM,MAAM,QAAQ,KAAK;AACzB,cAAY,IAAI,MAAM,IAAI;AAC1B,SAAO;;;AAIX,IAAM,iBAAiB,OAAsB;AAY7C,SAAgB,yBACd,UACA,MACA,MACe;AACf,QAAO,eAAe,OAAM,SAC1B,UAAU,CAAC,oBACT,QAAQ,SAAS,GAAG,QACpB,OAAO,SAAS,WAAW,EAAC,aAAa,MAAK,GAAG,KAClD,CACF;;;;;;;;;;;;;;AAoCH,IAAM,iCAAiC;CACrC;CAAO;CAAO;CAAO;CAAM;CAAM;CAAM;CAAK;CAAK;CAAK;CAAG;CAAG;CAAG;CAAI;CACpE;AAED,IAAM,oBAAoB,OAAkB;;;;;;;;;;;;;;;;;;;AAoB5C,SAAgB,4BACd,UACA,MACA,aACkB;CAClB,MAAM,IAAI,kBAAkB,OAAM,SAChC,UAAU,CAAC,gBAAgB,QAAQ,SAAS,GAAG,QAAQ;EACrD;EACA,MAAM;EACN,QAAQ,EACN,0BAA0B,gCAC3B;EACF,CAAC,CACH;AACD,QAAO,EACL,WAAW,YAAY,eACrB,EAAE,OAAO,aAAa,KAAM,WAAW,EAC1C;;AAGH,IAAM,WAAW,OAAgB;AAYjC,SAAgB,mBACd,UACA,MACA,MACS;AACT,QAAO,SAAS,OAAM,SACpB,UAAU,CAAC,cACT,QAAQ,SAAS,GAAG,QACpB,OAAO,SAAS,WAAW,EAAC,aAAa,MAAK,GAAG,KAClD,CACF;;AAGH,IAAM,SAAS,OAAwB;AAYvC,SAAgB,iBACd,UACA,MACA,MACiB;AACjB,QAAO,OAAO,OAAM,SAClB,UAAU,CAAC,sBACT,QAAQ,SAAS,GAAG,QACpB,OAAO,SAAS,WAAW,EAAC,aAAa,MAAK,GAAG,KAClD,CACF"}
package/out/zero-cache/src/scripts/decommission.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"decommission.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/scripts/decommission.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,gCAAgC,CAAC;AAO3D,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAuB/B,CAAC;AAEF,MAAM,MAAM,kBAAkB,GAAG,MAAM,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEpE,wBAAsB,gBAAgB,CACpC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,kBAAkB,iBA0BxB"}
+ {"version":3,"file":"decommission.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/scripts/decommission.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAEjD,OAAO,KAAK,EAAC,MAAM,EAAC,MAAM,gCAAgC,CAAC;AAO3D,eAAO,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAuB/B,CAAC;AAEF,MAAM,MAAM,kBAAkB,GAAG,MAAM,CAAC,OAAO,mBAAmB,CAAC,CAAC;AAEpE,wBAAsB,gBAAgB,CACpC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,kBAAkB,iBA8BxB"}
package/out/zero-cache/src/scripts/decommission.js
@@ -24,17 +24,17 @@ async function decommissionZero(lc, cfg) {
  const shardID = getShardID(cfg);
  lc.info?.(`Decommissioning app "${app.id}"`);
  if (cfg.upstream.type === "pg") {
- const upstream = pgClient(lc, cfg.upstream.db);
+ const upstream = pgClient(lc, cfg.upstream.db, "decommission-upstream");
  await decommissionShard(lc, upstream, app.id, shard.num);
  lc.debug?.(`Cleaning up upstream metadata from ${hostPort(upstream)}`);
  await upstream.unsafe(`DROP SCHEMA IF EXISTS ${id(app.id)} CASCADE`);
  await upstream.end();
  }
- const cvr = pgClient(lc, cfg.cvr.db ?? cfg.upstream.db);
+ const cvr = pgClient(lc, cfg.cvr.db ?? cfg.upstream.db, "decommission-cvr");
  lc.debug?.(`Cleaning up cvc data from ${hostPort(cvr)}`);
  await cvr.unsafe(`DROP SCHEMA IF EXISTS ${id(cvrSchema(shardID))} CASCADE`);
  await cvr.end();
- const cdc = pgClient(lc, cfg.change.db ?? cfg.upstream.db);
+ const cdc = pgClient(lc, cfg.change.db ?? cfg.upstream.db, "decommission-cdc");
  lc.debug?.(`Cleaning up cdc data from ${hostPort(cdc)}`);
  await cdc.unsafe(`DROP SCHEMA IF EXISTS ${id(cdcSchema(shardID))} CASCADE`);
  await cdc.end();
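
Each pgClient call in this script now passes a purpose label as a third argument. A self-contained sketch of the same calling convention — the helper name and the naive schema quoting are hypothetical (the real script uses id() from types/sql.ts), and what zero does with the label (logging, metrics attribution) isn't shown in this diff:

```ts
import type {LogContext} from '@rocicorp/logger';
import {pgClient} from '../types/pg.ts'; // same import used by decommission.ts

// Hypothetical helper mirroring the CVR cleanup step above.
async function dropCvrSchema(lc: LogContext, cvrDb: string, schema: string) {
  // New in this release: a string labeling the connection's purpose.
  const cvr = pgClient(lc, cvrDb, 'decommission-cvr');
  try {
    await cvr.unsafe(`DROP SCHEMA IF EXISTS "${schema}" CASCADE`);
  } finally {
    await cvr.end();
  }
}
```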
package/out/zero-cache/src/scripts/decommission.js.map
@@ -1 +1 @@
- {"version":3,"file":"decommission.js","names":[],"sources":["../../../../../zero-cache/src/scripts/decommission.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {logOptions} from '../../../otel/src/log-options.ts';\nimport type {Config} from '../../../shared/src/options.ts';\nimport {appOptions, shardOptions, zeroOptions} from '../config/zero-config.ts';\nimport {decommissionShard} from '../services/change-source/pg/decommission.ts';\nimport {pgClient, type PostgresDB} from '../types/pg.ts';\nimport {cdcSchema, cvrSchema, getShardID} from '../types/shards.ts';\nimport {id} from '../types/sql.ts';\n\nexport const decommissionOptions = {\n app: {\n id: appOptions.id,\n },\n\n shard: {\n num: shardOptions.num,\n },\n\n upstream: {\n db: zeroOptions.upstream.db,\n type: zeroOptions.upstream.type,\n },\n\n cvr: {\n db: zeroOptions.cvr.db,\n },\n\n change: {\n db: zeroOptions.change.db,\n },\n\n log: {level: logOptions.level, format: logOptions.format},\n};\n\nexport type DecommissionConfig = Config<typeof decommissionOptions>;\n\nexport async function decommissionZero(\n lc: LogContext,\n cfg: DecommissionConfig,\n) {\n const {app, shard} = cfg;\n const shardID = getShardID(cfg);\n lc.info?.(`Decommissioning app \"${app.id}\"`);\n\n if (cfg.upstream.type === 'pg') {\n const upstream = pgClient(lc, cfg.upstream.db);\n await decommissionShard(lc, upstream, app.id, shard.num);\n\n lc.debug?.(`Cleaning up upstream metadata from ${hostPort(upstream)}`);\n await upstream.unsafe(`DROP SCHEMA IF EXISTS ${id(app.id)} CASCADE`);\n await upstream.end();\n }\n\n const cvr = pgClient(lc, cfg.cvr.db ?? cfg.upstream.db);\n lc.debug?.(`Cleaning up cvc data from ${hostPort(cvr)}`);\n await cvr.unsafe(`DROP SCHEMA IF EXISTS ${id(cvrSchema(shardID))} CASCADE`);\n await cvr.end();\n\n const cdc = pgClient(lc, cfg.change.db ?? cfg.upstream.db);\n lc.debug?.(`Cleaning up cdc data from ${hostPort(cdc)}`);\n await cdc.unsafe(`DROP SCHEMA IF EXISTS ${id(cdcSchema(shardID))} CASCADE`);\n await cdc.end();\n\n lc.info?.(`App \"${app.id}\" decommissioned`);\n}\n\nfunction hostPort(db: PostgresDB) {\n const {host, port} = db.options;\n return `${host.join(',')}:${port?.at(0) ?? 5432}`;\n}\n"],"mappings":";;;;;;;AASA,IAAa,sBAAsB;CACjC,KAAK,EACH,IAAI,WAAW,IAChB;CAED,OAAO,EACL,KAAK,aAAa,KACnB;CAED,UAAU;EACR,IAAI,YAAY,SAAS;EACzB,MAAM,YAAY,SAAS;EAC5B;CAED,KAAK,EACH,IAAI,YAAY,IAAI,IACrB;CAED,QAAQ,EACN,IAAI,YAAY,OAAO,IACxB;CAED,KAAK;EAAC,OAAO,WAAW;EAAO,QAAQ,WAAW;EAAO;CAC1D;AAID,eAAsB,iBACpB,IACA,KACA;CACA,MAAM,EAAC,KAAK,UAAS;CACrB,MAAM,UAAU,WAAW,IAAI;AAC/B,IAAG,OAAO,wBAAwB,IAAI,GAAG,GAAG;AAE5C,KAAI,IAAI,SAAS,SAAS,MAAM;EAC9B,MAAM,WAAW,SAAS,IAAI,IAAI,SAAS,GAAG;AAC9C,QAAM,kBAAkB,IAAI,UAAU,IAAI,IAAI,MAAM,IAAI;AAExD,KAAG,QAAQ,sCAAsC,SAAS,SAAS,GAAG;AACtE,QAAM,SAAS,OAAO,yBAAyB,GAAG,IAAI,GAAG,CAAC,UAAU;AACpE,QAAM,SAAS,KAAK;;CAGtB,MAAM,MAAM,SAAS,IAAI,IAAI,IAAI,MAAM,IAAI,SAAS,GAAG;AACvD,IAAG,QAAQ,6BAA6B,SAAS,IAAI,GAAG;AACxD,OAAM,IAAI,OAAO,yBAAyB,GAAG,UAAU,QAAQ,CAAC,CAAC,UAAU;AAC3E,OAAM,IAAI,KAAK;CAEf,MAAM,MAAM,SAAS,IAAI,IAAI,OAAO,MAAM,IAAI,SAAS,GAAG;AAC1D,IAAG,QAAQ,6BAA6B,SAAS,IAAI,GAAG;AACxD,OAAM,IAAI,OAAO,yBAAyB,GAAG,UAAU,QAAQ,CAAC,CAAC,UAAU;AAC3E,OAAM,IAAI,KAAK;AAEf,IAAG,OAAO,QAAQ,IAAI,GAAG,kBAAkB;;AAG7C,SAAS,SAAS,IAAgB;CAChC,MAAM,EAAC,MAAM,SAAQ,GAAG;AACxB,QAAO,GAAG,KAAK,KAAK,IAAI,CAAC,GAAG,MAAM,GAAG,EAAE,IAAI"}
+ {"version":3,"file":"decommission.js","names":[],"sources":["../../../../../zero-cache/src/scripts/decommission.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {logOptions} from '../../../otel/src/log-options.ts';\nimport type {Config} from '../../../shared/src/options.ts';\nimport {appOptions, shardOptions, zeroOptions} from '../config/zero-config.ts';\nimport {decommissionShard} from '../services/change-source/pg/decommission.ts';\nimport {pgClient, type PostgresDB} from '../types/pg.ts';\nimport {cdcSchema, cvrSchema, getShardID} from '../types/shards.ts';\nimport {id} from '../types/sql.ts';\n\nexport const decommissionOptions = {\n app: {\n id: appOptions.id,\n },\n\n shard: {\n num: shardOptions.num,\n },\n\n upstream: {\n db: zeroOptions.upstream.db,\n type: zeroOptions.upstream.type,\n },\n\n cvr: {\n db: zeroOptions.cvr.db,\n },\n\n change: {\n db: zeroOptions.change.db,\n },\n\n log: {level: logOptions.level, format: logOptions.format},\n};\n\nexport type DecommissionConfig = Config<typeof decommissionOptions>;\n\nexport async function decommissionZero(\n lc: LogContext,\n cfg: DecommissionConfig,\n) {\n const {app, shard} = cfg;\n const shardID = getShardID(cfg);\n lc.info?.(`Decommissioning app \"${app.id}\"`);\n\n if (cfg.upstream.type === 'pg') {\n const upstream = pgClient(lc, cfg.upstream.db, 'decommission-upstream');\n await decommissionShard(lc, upstream, app.id, shard.num);\n\n lc.debug?.(`Cleaning up upstream metadata from ${hostPort(upstream)}`);\n await upstream.unsafe(`DROP SCHEMA IF EXISTS ${id(app.id)} CASCADE`);\n await upstream.end();\n }\n\n const cvr = pgClient(lc, cfg.cvr.db ?? cfg.upstream.db, 'decommission-cvr');\n lc.debug?.(`Cleaning up cvc data from ${hostPort(cvr)}`);\n await cvr.unsafe(`DROP SCHEMA IF EXISTS ${id(cvrSchema(shardID))} CASCADE`);\n await cvr.end();\n\n const cdc = pgClient(\n lc,\n cfg.change.db ?? cfg.upstream.db,\n 'decommission-cdc',\n );\n lc.debug?.(`Cleaning up cdc data from ${hostPort(cdc)}`);\n await cdc.unsafe(`DROP SCHEMA IF EXISTS ${id(cdcSchema(shardID))} CASCADE`);\n await cdc.end();\n\n lc.info?.(`App \"${app.id}\" decommissioned`);\n}\n\nfunction hostPort(db: PostgresDB) {\n const {host, port} = db.options;\n return `${host.join(',')}:${port?.at(0) ?? 5432}`;\n}\n"],"mappings":";;;;;;;AASA,IAAa,sBAAsB;CACjC,KAAK,EACH,IAAI,WAAW,IAChB;CAED,OAAO,EACL,KAAK,aAAa,KACnB;CAED,UAAU;EACR,IAAI,YAAY,SAAS;EACzB,MAAM,YAAY,SAAS;EAC5B;CAED,KAAK,EACH,IAAI,YAAY,IAAI,IACrB;CAED,QAAQ,EACN,IAAI,YAAY,OAAO,IACxB;CAED,KAAK;EAAC,OAAO,WAAW;EAAO,QAAQ,WAAW;EAAO;CAC1D;AAID,eAAsB,iBACpB,IACA,KACA;CACA,MAAM,EAAC,KAAK,UAAS;CACrB,MAAM,UAAU,WAAW,IAAI;AAC/B,IAAG,OAAO,wBAAwB,IAAI,GAAG,GAAG;AAE5C,KAAI,IAAI,SAAS,SAAS,MAAM;EAC9B,MAAM,WAAW,SAAS,IAAI,IAAI,SAAS,IAAI,wBAAwB;AACvE,QAAM,kBAAkB,IAAI,UAAU,IAAI,IAAI,MAAM,IAAI;AAExD,KAAG,QAAQ,sCAAsC,SAAS,SAAS,GAAG;AACtE,QAAM,SAAS,OAAO,yBAAyB,GAAG,IAAI,GAAG,CAAC,UAAU;AACpE,QAAM,SAAS,KAAK;;CAGtB,MAAM,MAAM,SAAS,IAAI,IAAI,IAAI,MAAM,IAAI,SAAS,IAAI,mBAAmB;AAC3E,IAAG,QAAQ,6BAA6B,SAAS,IAAI,GAAG;AACxD,OAAM,IAAI,OAAO,yBAAyB,GAAG,UAAU,QAAQ,CAAC,CAAC,UAAU;AAC3E,OAAM,IAAI,KAAK;CAEf,MAAM,MAAM,SACV,IACA,IAAI,OAAO,MAAM,IAAI,SAAS,IAC9B,mBACD;AACD,IAAG,QAAQ,6BAA6B,SAAS,IAAI,GAAG;AACxD,OAAM,IAAI,OAAO,yBAAyB,GAAG,UAAU,QAAQ,CAAC,CAAC,UAAU;AAC3E,OAAM,IAAI,KAAK;AAEf,IAAG,OAAO,QAAQ,IAAI,GAAG,kBAAkB;;AAG7C,SAAS,SAAS,IAAgB;CAChC,MAAM,EAAC,MAAM,SAAQ,GAAG;AACxB,QAAO,GAAG,KAAK,KAAK,IAAI,CAAC,GAAG,MAAM,GAAG,EAAE,IAAI"}
@@ -74,7 +74,7 @@ function failWithMessage(msg) {
  process.exit(-1);
  }
  async function deployPermissions(upstreamURI, permissions, force) {
- const db = pgClient(lc, upstreamURI);
+ const db = pgClient(lc, upstreamURI, "deploy-permissions");
  const { host, port } = db.options;
  colorConsole.debug(`Connecting to upstream@${host}:${port}`);
  try {
@@ -1 +1 @@
- {"version":3,"file":"deploy-permissions.js","names":[],"sources":["../../../../../zero-cache/src/scripts/deploy-permissions.ts"],"sourcesContent":["import '../../../shared/src/dotenv.ts';\n\nimport {writeFile} from 'node:fs/promises';\nimport {ident as id, literal} from 'pg-format';\nimport {colorConsole, createLogContext} from '../../../shared/src/logging.ts';\nimport {parseOptions} from '../../../shared/src/options.ts';\nimport {difference} from '../../../shared/src/set-utils.ts';\nimport {mapCondition} from '../../../zero-protocol/src/ast.ts';\nimport {\n type AssetPermissions,\n type PermissionsConfig,\n type Rule,\n} from '../../../zero-schema/src/compiled-permissions.ts';\nimport {validator} from '../../../zero-schema/src/name-mapper.ts';\nimport {ZERO_ENV_VAR_PREFIX} from '../config/zero-config.ts';\nimport {runTx} from '../db/run-transaction.ts';\nimport {getPublicationInfo} from '../services/change-source/pg/schema/published.ts';\nimport {\n ensureGlobalTables,\n SHARD_CONFIG_TABLE,\n} from '../services/change-source/pg/schema/shard.ts';\nimport {liteTableName} from '../types/names.ts';\nimport {pgClient, type PostgresDB} from '../types/pg.ts';\nimport {appSchema, getShardID, upstreamSchema} from '../types/shards.ts';\nimport {\n deployPermissionsOptions,\n loadSchemaAndPermissions,\n} from './permissions.ts';\n\nconst config = parseOptions(deployPermissionsOptions, {\n argv: process.argv.slice(2),\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n});\n\nconst shard = getShardID(config);\nconst app = appSchema(shard);\n\nconst lc = createLogContext(config);\n\nasync function validatePermissions(\n db: PostgresDB,\n permissions: PermissionsConfig,\n) {\n const schema = upstreamSchema(shard);\n\n // Check if the shardConfig table has been initialized.\n const result = await db`\n SELECT relname FROM pg_class\n JOIN pg_namespace ON relnamespace = pg_namespace.oid\n WHERE nspname = ${schema} AND relname = ${SHARD_CONFIG_TABLE}`;\n if (result.length === 0) {\n colorConsole.warn(\n `zero-cache has not yet initialized the upstream database.\\n` +\n `Deploying ${app} permissions without validating against published tables/columns.`,\n );\n return;\n }\n\n // Get the publications for the shard\n const config = await db<{publications: string[]}[]>`\n SELECT publications FROM ${db(schema + '.' + SHARD_CONFIG_TABLE)}\n `;\n if (config.length === 0) {\n colorConsole.warn(\n `zero-cache has not yet initialized the upstream database.\\n` +\n `Deploying ${app} permissions without validating against published tables/columns.`,\n );\n return;\n }\n colorConsole.info(\n `Validating permissions against tables and columns published for \"${app}\".`,\n );\n\n const [{publications: shardPublications}] = config;\n const {tables, publications} = await getPublicationInfo(\n db,\n shardPublications,\n );\n const pubnames = publications.map(p => p.pubname);\n const missing = difference(new Set(shardPublications), new Set(pubnames));\n if (missing.size) {\n colorConsole.warn(\n `Upstream is missing expected publications \"${[...missing]}\".\\n` +\n `You may need to re-initialize your replica.\\n` +\n `Deploying ${app} permissions without validating against published tables/columns.`,\n );\n return;\n }\n const tablesToColumns = new Map(\n tables.map(t => [liteTableName(t), Object.keys(t.columns)]),\n );\n const validate = validator(tablesToColumns);\n try {\n for (const [table, perms] of Object.entries(permissions?.tables ?? 
{})) {\n const validateRule = ([_, cond]: Rule) => {\n mapCondition(cond, table, validate);\n };\n const validateAsset = (asset: AssetPermissions | undefined) => {\n asset?.select?.forEach(validateRule);\n asset?.delete?.forEach(validateRule);\n asset?.insert?.forEach(validateRule);\n asset?.update?.preMutation?.forEach(validateRule);\n asset?.update?.postMutation?.forEach(validateRule);\n };\n validateAsset(perms.row);\n if (perms.cell) {\n Object.values(perms.cell).forEach(validateAsset);\n }\n }\n } catch (e) {\n failWithMessage(String(e));\n }\n}\n\nfunction failWithMessage(msg: string) {\n colorConsole.error(msg);\n colorConsole.info('\\nUse --force to deploy at your own risk.\\n');\n process.exit(-1);\n}\n\nasync function deployPermissions(\n upstreamURI: string,\n permissions: PermissionsConfig,\n force: boolean,\n) {\n const db = pgClient(lc, upstreamURI);\n const {host, port} = db.options;\n colorConsole.debug(`Connecting to upstream@${host}:${port}`);\n try {\n await ensureGlobalTables(db, shard);\n\n const {hash, changed} = await runTx(db, async tx => {\n if (force) {\n colorConsole.warn(`--force specified. Skipping validation.`);\n } else {\n await validatePermissions(tx, permissions);\n }\n\n const {appID} = shard;\n colorConsole.info(\n `Deploying permissions for --app-id \"${appID}\" to upstream@${db.options.host}`,\n );\n const [{hash: beforeHash}] = await tx<{hash: string}[]>`\n SELECT hash from ${tx(app)}.permissions`;\n const [{hash}] = await tx<{hash: string}[]>`\n UPDATE ${tx(app)}.permissions SET ${db({permissions})} RETURNING hash`;\n\n return {hash: hash.substring(0, 7), changed: beforeHash !== hash};\n });\n if (changed) {\n colorConsole.info(`Deployed new permissions (hash=${hash})`);\n } else {\n colorConsole.info(`Permissions unchanged (hash=${hash})`);\n }\n } finally {\n await db.end();\n }\n}\n\nasync function writePermissionsFile(\n perms: PermissionsConfig,\n file: string,\n format: 'sql' | 'json' | 'pretty',\n) {\n const contents =\n format === 'sql'\n ? `UPDATE ${id(app)}.permissions SET permissions = ${literal(\n JSON.stringify(perms),\n )};`\n : JSON.stringify(perms, null, format === 'pretty' ? 2 : 0);\n await writeFile(file, contents);\n colorConsole.info(`Wrote ${format} permissions to ${config.output.file}`);\n}\n\nconst ret = await loadSchemaAndPermissions(config.schema.path, true);\nif (!ret || Object.keys(ret?.permissions ?? {}).length === 0) {\n colorConsole.warn(\n `No permissions found at ${config.schema.path}, so could not deploy ` +\n `permissions. Replicating data, but no tables will be syncable. ` +\n `Create a schema file with permissions to be able to sync data.`,\n );\n} else {\n colorConsole.warn(\n `Permissions are deprecated and will be removed in an upcoming release. 
See: https://zero.rocicorp.dev/docs/auth.`,\n );\n\n const {permissions} = ret;\n if (config.output.file) {\n await writePermissionsFile(\n permissions,\n config.output.file,\n config.output.format,\n );\n } else if (config.upstream.type !== 'pg') {\n colorConsole.warn(\n `Permissions deployment is not supported for ${config.upstream.type} upstreams`,\n );\n process.exit(-1);\n } else if (config.upstream.db) {\n await deployPermissions(config.upstream.db, permissions, config.force);\n } else {\n colorConsole.error(`No --output-file or --upstream-db specified`);\n // Shows the usage text.\n parseOptions(deployPermissionsOptions, {\n argv: ['--help'],\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n });\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AA6BA,IAAM,SAAS,aAAa,0BAA0B;CACpD,MAAM,QAAQ,KAAK,MAAM,EAAE;CAC3B,eAAe;CAChB,CAAC;AAEF,IAAM,QAAQ,WAAW,OAAO;AAChC,IAAM,MAAM,UAAU,MAAM;AAE5B,IAAM,KAAK,iBAAiB,OAAO;AAEnC,eAAe,oBACb,IACA,aACA;CACA,MAAM,SAAS,eAAe,MAAM;AAOpC,MAJe,MAAM,EAAE;;;wBAGD,OAAO,iBAAA,iBAClB,WAAW,GAAG;AACvB,eAAa,KACX,wEACe,IAAI,mEACpB;AACD;;CAIF,MAAM,SAAS,MAAM,EAA8B;+BACtB,GAAG,SAAS,MAAM,mBAAmB,CAAC;;AAEnE,KAAI,OAAO,WAAW,GAAG;AACvB,eAAa,KACX,wEACe,IAAI,mEACpB;AACD;;AAEF,cAAa,KACX,oEAAoE,IAAI,IACzE;CAED,MAAM,CAAC,EAAC,cAAc,uBAAsB;CAC5C,MAAM,EAAC,QAAQ,iBAAgB,MAAM,mBACnC,IACA,kBACD;CACD,MAAM,WAAW,aAAa,KAAI,MAAK,EAAE,QAAQ;CACjD,MAAM,UAAU,WAAW,IAAI,IAAI,kBAAkB,EAAE,IAAI,IAAI,SAAS,CAAC;AACzE,KAAI,QAAQ,MAAM;AAChB,eAAa,KACX,8CAA8C,CAAC,GAAG,QAAQ,CAAC,6DAE5C,IAAI,mEACpB;AACD;;CAKF,MAAM,WAAW,UAHO,IAAI,IAC1B,OAAO,KAAI,MAAK,CAAC,cAAc,EAAE,EAAE,OAAO,KAAK,EAAE,QAAQ,CAAC,CAAC,CAC5D,CAC0C;AAC3C,KAAI;AACF,OAAK,MAAM,CAAC,OAAO,UAAU,OAAO,QAAQ,aAAa,UAAU,EAAE,CAAC,EAAE;GACtE,MAAM,gBAAgB,CAAC,GAAG,UAAgB;AACxC,iBAAa,MAAM,OAAO,SAAS;;GAErC,MAAM,iBAAiB,UAAwC;AAC7D,WAAO,QAAQ,QAAQ,aAAa;AACpC,WAAO,QAAQ,QAAQ,aAAa;AACpC,WAAO,QAAQ,QAAQ,aAAa;AACpC,WAAO,QAAQ,aAAa,QAAQ,aAAa;AACjD,WAAO,QAAQ,cAAc,QAAQ,aAAa;;AAEpD,iBAAc,MAAM,IAAI;AACxB,OAAI,MAAM,KACR,QAAO,OAAO,MAAM,KAAK,CAAC,QAAQ,cAAc;;UAG7C,GAAG;AACV,kBAAgB,OAAO,EAAE,CAAC;;;AAI9B,SAAS,gBAAgB,KAAa;AACpC,cAAa,MAAM,IAAI;AACvB,cAAa,KAAK,8CAA8C;AAChE,SAAQ,KAAK,GAAG;;AAGlB,eAAe,kBACb,aACA,aACA,OACA;CACA,MAAM,KAAK,SAAS,IAAI,YAAY;CACpC,MAAM,EAAC,MAAM,SAAQ,GAAG;AACxB,cAAa,MAAM,0BAA0B,KAAK,GAAG,OAAO;AAC5D,KAAI;AACF,QAAM,mBAAmB,IAAI,MAAM;EAEnC,MAAM,EAAC,MAAM,YAAW,MAAM,MAAM,IAAI,OAAM,OAAM;AAClD,OAAI,MACF,cAAa,KAAK,0CAA0C;OAE5D,OAAM,oBAAoB,IAAI,YAAY;GAG5C,MAAM,EAAC,UAAS;AAChB,gBAAa,KACX,uCAAuC,MAAM,gBAAgB,GAAG,QAAQ,OACzE;GACD,MAAM,CAAC,EAAC,MAAM,gBAAe,MAAM,EAAoB;2BAClC,GAAG,IAAI,CAAC;GAC7B,MAAM,CAAC,EAAC,UAAS,MAAM,EAAoB;iBAChC,GAAG,IAAI,CAAC,mBAAmB,GAAG,EAAC,aAAY,CAAC,CAAC;AAExD,UAAO;IAAC,MAAM,KAAK,UAAU,GAAG,EAAE;IAAE,SAAS,eAAe;IAAK;IACjE;AACF,MAAI,QACF,cAAa,KAAK,kCAAkC,KAAK,GAAG;MAE5D,cAAa,KAAK,+BAA+B,KAAK,GAAG;WAEnD;AACR,QAAM,GAAG,KAAK;;;AAIlB,eAAe,qBACb,OACA,MACA,QACA;AAOA,OAAM,UAAU,MALd,WAAW,QACP,UAAU,MAAG,IAAI,CAAC,iCAAiC,QACjD,KAAK,UAAU,MAAM,CACtB,CAAC,KACF,KAAK,UAAU,OAAO,MAAM,WAAW,WAAW,IAAI,EAAE,CAC/B;AAC/B,cAAa,KAAK,SAAS,OAAO,kBAAkB,OAAO,OAAO,OAAO;;AAG3E,IAAM,MAAM,MAAM,yBAAyB,OAAO,OAAO,MAAM,KAAK;AACpE,IAAI,CAAC,OAAO,OAAO,KAAK,KAAK,eAAe,EAAE,CAAC,CAAC,WAAW,EACzD,cAAa,KACX,2BAA2B,OAAO,OAAO,KAAK,qJAG/C;KACI;AACL,cAAa,KACX,mHACD;CAED,MAAM,EAAC,gBAAe;AACtB,KAAI,OAAO,OAAO,KAChB,OAAM,qBACJ,aACA,OAAO,OAAO,MACd,OAAO,OAAO,OACf;UACQ,OAAO,SAAS,SAAS,MAAM;AACxC,eAAa,KACX,+CAA+C,OAAO,SAAS,KAAK,YACrE;AACD,UAAQ,KAAK,GAAG;YACP,OAAO,SAAS,GACzB,OAAM,kBAAkB,OAAO,SAAS,IAAI,aAAa,OAAO,MAAM;MACjE;AACL,eAAa,MAAM,8CAA8C;AAEjE,eAAa,0BAA0B;GACrC,MAAM,CAAC,SAAS;GAChB,eAAe;GAChB,CAAC"}
+ {"version":3,"file":"deploy-permissions.js","names":[],"sources":["../../../../../zero-cache/src/scripts/deploy-permissions.ts"],"sourcesContent":["import '../../../shared/src/dotenv.ts';\n\nimport {writeFile} from 'node:fs/promises';\nimport {ident as id, literal} from 'pg-format';\nimport {colorConsole, createLogContext} from '../../../shared/src/logging.ts';\nimport {parseOptions} from '../../../shared/src/options.ts';\nimport {difference} from '../../../shared/src/set-utils.ts';\nimport {mapCondition} from '../../../zero-protocol/src/ast.ts';\nimport {\n type AssetPermissions,\n type PermissionsConfig,\n type Rule,\n} from '../../../zero-schema/src/compiled-permissions.ts';\nimport {validator} from '../../../zero-schema/src/name-mapper.ts';\nimport {ZERO_ENV_VAR_PREFIX} from '../config/zero-config.ts';\nimport {runTx} from '../db/run-transaction.ts';\nimport {getPublicationInfo} from '../services/change-source/pg/schema/published.ts';\nimport {\n ensureGlobalTables,\n SHARD_CONFIG_TABLE,\n} from '../services/change-source/pg/schema/shard.ts';\nimport {liteTableName} from '../types/names.ts';\nimport {pgClient, type PostgresDB} from '../types/pg.ts';\nimport {appSchema, getShardID, upstreamSchema} from '../types/shards.ts';\nimport {\n deployPermissionsOptions,\n loadSchemaAndPermissions,\n} from './permissions.ts';\n\nconst config = parseOptions(deployPermissionsOptions, {\n argv: process.argv.slice(2),\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n});\n\nconst shard = getShardID(config);\nconst app = appSchema(shard);\n\nconst lc = createLogContext(config);\n\nasync function validatePermissions(\n db: PostgresDB,\n permissions: PermissionsConfig,\n) {\n const schema = upstreamSchema(shard);\n\n // Check if the shardConfig table has been initialized.\n const result = await db`\n SELECT relname FROM pg_class\n JOIN pg_namespace ON relnamespace = pg_namespace.oid\n WHERE nspname = ${schema} AND relname = ${SHARD_CONFIG_TABLE}`;\n if (result.length === 0) {\n colorConsole.warn(\n `zero-cache has not yet initialized the upstream database.\\n` +\n `Deploying ${app} permissions without validating against published tables/columns.`,\n );\n return;\n }\n\n // Get the publications for the shard\n const config = await db<{publications: string[]}[]>`\n SELECT publications FROM ${db(schema + '.' + SHARD_CONFIG_TABLE)}\n `;\n if (config.length === 0) {\n colorConsole.warn(\n `zero-cache has not yet initialized the upstream database.\\n` +\n `Deploying ${app} permissions without validating against published tables/columns.`,\n );\n return;\n }\n colorConsole.info(\n `Validating permissions against tables and columns published for \"${app}\".`,\n );\n\n const [{publications: shardPublications}] = config;\n const {tables, publications} = await getPublicationInfo(\n db,\n shardPublications,\n );\n const pubnames = publications.map(p => p.pubname);\n const missing = difference(new Set(shardPublications), new Set(pubnames));\n if (missing.size) {\n colorConsole.warn(\n `Upstream is missing expected publications \"${[...missing]}\".\\n` +\n `You may need to re-initialize your replica.\\n` +\n `Deploying ${app} permissions without validating against published tables/columns.`,\n );\n return;\n }\n const tablesToColumns = new Map(\n tables.map(t => [liteTableName(t), Object.keys(t.columns)]),\n );\n const validate = validator(tablesToColumns);\n try {\n for (const [table, perms] of Object.entries(permissions?.tables ?? 
{})) {\n const validateRule = ([_, cond]: Rule) => {\n mapCondition(cond, table, validate);\n };\n const validateAsset = (asset: AssetPermissions | undefined) => {\n asset?.select?.forEach(validateRule);\n asset?.delete?.forEach(validateRule);\n asset?.insert?.forEach(validateRule);\n asset?.update?.preMutation?.forEach(validateRule);\n asset?.update?.postMutation?.forEach(validateRule);\n };\n validateAsset(perms.row);\n if (perms.cell) {\n Object.values(perms.cell).forEach(validateAsset);\n }\n }\n } catch (e) {\n failWithMessage(String(e));\n }\n}\n\nfunction failWithMessage(msg: string) {\n colorConsole.error(msg);\n colorConsole.info('\\nUse --force to deploy at your own risk.\\n');\n process.exit(-1);\n}\n\nasync function deployPermissions(\n upstreamURI: string,\n permissions: PermissionsConfig,\n force: boolean,\n) {\n const db = pgClient(lc, upstreamURI, 'deploy-permissions');\n const {host, port} = db.options;\n colorConsole.debug(`Connecting to upstream@${host}:${port}`);\n try {\n await ensureGlobalTables(db, shard);\n\n const {hash, changed} = await runTx(db, async tx => {\n if (force) {\n colorConsole.warn(`--force specified. Skipping validation.`);\n } else {\n await validatePermissions(tx, permissions);\n }\n\n const {appID} = shard;\n colorConsole.info(\n `Deploying permissions for --app-id \"${appID}\" to upstream@${db.options.host}`,\n );\n const [{hash: beforeHash}] = await tx<{hash: string}[]>`\n SELECT hash from ${tx(app)}.permissions`;\n const [{hash}] = await tx<{hash: string}[]>`\n UPDATE ${tx(app)}.permissions SET ${db({permissions})} RETURNING hash`;\n\n return {hash: hash.substring(0, 7), changed: beforeHash !== hash};\n });\n if (changed) {\n colorConsole.info(`Deployed new permissions (hash=${hash})`);\n } else {\n colorConsole.info(`Permissions unchanged (hash=${hash})`);\n }\n } finally {\n await db.end();\n }\n}\n\nasync function writePermissionsFile(\n perms: PermissionsConfig,\n file: string,\n format: 'sql' | 'json' | 'pretty',\n) {\n const contents =\n format === 'sql'\n ? `UPDATE ${id(app)}.permissions SET permissions = ${literal(\n JSON.stringify(perms),\n )};`\n : JSON.stringify(perms, null, format === 'pretty' ? 2 : 0);\n await writeFile(file, contents);\n colorConsole.info(`Wrote ${format} permissions to ${config.output.file}`);\n}\n\nconst ret = await loadSchemaAndPermissions(config.schema.path, true);\nif (!ret || Object.keys(ret?.permissions ?? {}).length === 0) {\n colorConsole.warn(\n `No permissions found at ${config.schema.path}, so could not deploy ` +\n `permissions. Replicating data, but no tables will be syncable. ` +\n `Create a schema file with permissions to be able to sync data.`,\n );\n} else {\n colorConsole.warn(\n `Permissions are deprecated and will be removed in an upcoming release. 
See: https://zero.rocicorp.dev/docs/auth.`,\n );\n\n const {permissions} = ret;\n if (config.output.file) {\n await writePermissionsFile(\n permissions,\n config.output.file,\n config.output.format,\n );\n } else if (config.upstream.type !== 'pg') {\n colorConsole.warn(\n `Permissions deployment is not supported for ${config.upstream.type} upstreams`,\n );\n process.exit(-1);\n } else if (config.upstream.db) {\n await deployPermissions(config.upstream.db, permissions, config.force);\n } else {\n colorConsole.error(`No --output-file or --upstream-db specified`);\n // Shows the usage text.\n parseOptions(deployPermissionsOptions, {\n argv: ['--help'],\n envNamePrefix: ZERO_ENV_VAR_PREFIX,\n });\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AA6BA,IAAM,SAAS,aAAa,0BAA0B;CACpD,MAAM,QAAQ,KAAK,MAAM,EAAE;CAC3B,eAAe;CAChB,CAAC;AAEF,IAAM,QAAQ,WAAW,OAAO;AAChC,IAAM,MAAM,UAAU,MAAM;AAE5B,IAAM,KAAK,iBAAiB,OAAO;AAEnC,eAAe,oBACb,IACA,aACA;CACA,MAAM,SAAS,eAAe,MAAM;AAOpC,MAJe,MAAM,EAAE;;;wBAGD,OAAO,iBAAA,iBAClB,WAAW,GAAG;AACvB,eAAa,KACX,wEACe,IAAI,mEACpB;AACD;;CAIF,MAAM,SAAS,MAAM,EAA8B;+BACtB,GAAG,SAAS,MAAM,mBAAmB,CAAC;;AAEnE,KAAI,OAAO,WAAW,GAAG;AACvB,eAAa,KACX,wEACe,IAAI,mEACpB;AACD;;AAEF,cAAa,KACX,oEAAoE,IAAI,IACzE;CAED,MAAM,CAAC,EAAC,cAAc,uBAAsB;CAC5C,MAAM,EAAC,QAAQ,iBAAgB,MAAM,mBACnC,IACA,kBACD;CACD,MAAM,WAAW,aAAa,KAAI,MAAK,EAAE,QAAQ;CACjD,MAAM,UAAU,WAAW,IAAI,IAAI,kBAAkB,EAAE,IAAI,IAAI,SAAS,CAAC;AACzE,KAAI,QAAQ,MAAM;AAChB,eAAa,KACX,8CAA8C,CAAC,GAAG,QAAQ,CAAC,6DAE5C,IAAI,mEACpB;AACD;;CAKF,MAAM,WAAW,UAHO,IAAI,IAC1B,OAAO,KAAI,MAAK,CAAC,cAAc,EAAE,EAAE,OAAO,KAAK,EAAE,QAAQ,CAAC,CAAC,CAC5D,CAC0C;AAC3C,KAAI;AACF,OAAK,MAAM,CAAC,OAAO,UAAU,OAAO,QAAQ,aAAa,UAAU,EAAE,CAAC,EAAE;GACtE,MAAM,gBAAgB,CAAC,GAAG,UAAgB;AACxC,iBAAa,MAAM,OAAO,SAAS;;GAErC,MAAM,iBAAiB,UAAwC;AAC7D,WAAO,QAAQ,QAAQ,aAAa;AACpC,WAAO,QAAQ,QAAQ,aAAa;AACpC,WAAO,QAAQ,QAAQ,aAAa;AACpC,WAAO,QAAQ,aAAa,QAAQ,aAAa;AACjD,WAAO,QAAQ,cAAc,QAAQ,aAAa;;AAEpD,iBAAc,MAAM,IAAI;AACxB,OAAI,MAAM,KACR,QAAO,OAAO,MAAM,KAAK,CAAC,QAAQ,cAAc;;UAG7C,GAAG;AACV,kBAAgB,OAAO,EAAE,CAAC;;;AAI9B,SAAS,gBAAgB,KAAa;AACpC,cAAa,MAAM,IAAI;AACvB,cAAa,KAAK,8CAA8C;AAChE,SAAQ,KAAK,GAAG;;AAGlB,eAAe,kBACb,aACA,aACA,OACA;CACA,MAAM,KAAK,SAAS,IAAI,aAAa,qBAAqB;CAC1D,MAAM,EAAC,MAAM,SAAQ,GAAG;AACxB,cAAa,MAAM,0BAA0B,KAAK,GAAG,OAAO;AAC5D,KAAI;AACF,QAAM,mBAAmB,IAAI,MAAM;EAEnC,MAAM,EAAC,MAAM,YAAW,MAAM,MAAM,IAAI,OAAM,OAAM;AAClD,OAAI,MACF,cAAa,KAAK,0CAA0C;OAE5D,OAAM,oBAAoB,IAAI,YAAY;GAG5C,MAAM,EAAC,UAAS;AAChB,gBAAa,KACX,uCAAuC,MAAM,gBAAgB,GAAG,QAAQ,OACzE;GACD,MAAM,CAAC,EAAC,MAAM,gBAAe,MAAM,EAAoB;2BAClC,GAAG,IAAI,CAAC;GAC7B,MAAM,CAAC,EAAC,UAAS,MAAM,EAAoB;iBAChC,GAAG,IAAI,CAAC,mBAAmB,GAAG,EAAC,aAAY,CAAC,CAAC;AAExD,UAAO;IAAC,MAAM,KAAK,UAAU,GAAG,EAAE;IAAE,SAAS,eAAe;IAAK;IACjE;AACF,MAAI,QACF,cAAa,KAAK,kCAAkC,KAAK,GAAG;MAE5D,cAAa,KAAK,+BAA+B,KAAK,GAAG;WAEnD;AACR,QAAM,GAAG,KAAK;;;AAIlB,eAAe,qBACb,OACA,MACA,QACA;AAOA,OAAM,UAAU,MALd,WAAW,QACP,UAAU,MAAG,IAAI,CAAC,iCAAiC,QACjD,KAAK,UAAU,MAAM,CACtB,CAAC,KACF,KAAK,UAAU,OAAO,MAAM,WAAW,WAAW,IAAI,EAAE,CAC/B;AAC/B,cAAa,KAAK,SAAS,OAAO,kBAAkB,OAAO,OAAO,OAAO;;AAG3E,IAAM,MAAM,MAAM,yBAAyB,OAAO,OAAO,MAAM,KAAK;AACpE,IAAI,CAAC,OAAO,OAAO,KAAK,KAAK,eAAe,EAAE,CAAC,CAAC,WAAW,EACzD,cAAa,KACX,2BAA2B,OAAO,OAAO,KAAK,qJAG/C;KACI;AACL,cAAa,KACX,mHACD;CAED,MAAM,EAAC,gBAAe;AACtB,KAAI,OAAO,OAAO,KAChB,OAAM,qBACJ,aACA,OAAO,OAAO,MACd,OAAO,OAAO,OACf;UACQ,OAAO,SAAS,SAAS,MAAM;AACxC,eAAa,KACX,+CAA+C,OAAO,SAAS,KAAK,YACrE;AACD,UAAQ,KAAK,GAAG;YACP,OAAO,SAAS,GACzB,OAAM,kBAAkB,OAAO,SAAS,IAAI,aAAa,OAAO,MAAM;MACjE;AACL,eAAa,MAAM,8CAA8C;AAEjE,eAAa,0BAA0B;GACrC,MAAM,CAAC,SAAS;GAChB,eAAe;GAChB,CAAC"}
@@ -1 +1 @@
- {"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"names":[],"mappings":"AAgCA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAK/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CA0Lf"}
+ {"version":3,"file":"change-streamer.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"names":[],"mappings":"AA6BA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAK/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAuMf"}
@@ -11,7 +11,7 @@ import { getServerContext } from "../config/server-context.js";
  import { deleteLiteDB } from "../db/delete-lite-db.js";
  import { warmupConnections } from "../db/warmup.js";
  import { initEventSink, publishCriticalEvent } from "../observability/events.js";
- import { AutoResetSignal, CHANGE_STREAMER_APP_NAME } from "../services/change-streamer/schema/tables.js";
+ import { AutoResetSignal } from "../services/change-streamer/schema/tables.js";
  import { upgradeReplica } from "../services/change-source/common/replica-schema.js";
  import { ReplicationStatusPublisher, replicationStatusError } from "../services/replicator/replication-status.js";
  import { initializeCustomChangeSource } from "../services/change-source/custom/change-source.js";
@@ -35,10 +35,7 @@ async function runWorker(parent, env, ...argv) {
  startOtelAuto(createLogContext(config, "change-streamer", 0, false), "change-streamer", 0);
  const lc = createLogContext(config, "change-streamer");
  initEventSink(lc, config);
- const changeDB = pgClient(lc, change.db, {
- max: change.maxConns,
- connection: { ["application_name"]: CHANGE_STREAMER_APP_NAME }
- }, { sendStringAsJson: true });
+ const changeDB = pgClient(lc, change.db, "change-streamer", { max: change.maxConns }, { sendStringAsJson: true });
  warmupConnections(lc, changeDB, "change").catch(() => {});
  const { autoReset, replicationLag } = config;
  const shard = getShardConfig(config);
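The removed block passed the Postgres application_name explicitly via the connection options (CHANGE_STREAMER_APP_NAME); the new positional 'change-streamer' argument presumably carries that label inside pgClient instead, though this diff shows only the call site. Assuming that holds, the label can be checked on the server side with the tagged-template query style this codebase already uses:

  // Assumes the name passed to pgClient ends up as the connection's
  // application_name (inferred from the removed connection option above).
  const rows = await changeDB`
    SELECT application_name, count(*) AS conns
      FROM pg_stat_activity
     GROUP BY application_name`;
  lc.debug?.(`connections by application_name: ${JSON.stringify(rows)}`);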
@@ -68,6 +65,8 @@ async function runWorker(parent, env, ...argv) {
  if (first && e instanceof AutoResetSignal) {
  lc.warn?.(`resetting replica ${replica.file}`, e);
  deleteLiteDB(replica.file);
+ await purgeLock?.release();
+ purgeLock = null;
  continue;
  }
  await publishCriticalEvent(lc, replicationStatusError(lc, "Initializing", e));
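The two added lines release the purge lock before retrying after an AutoResetSignal. The rationale, spelled out in the updated source comments below, is that the lock holds an open transaction to preserve change-log entries so a restarted change-streamer can resume from the backup; once a reset forces a fresh initial sync those entries are no longer needed, and holding the lock can self-deadlock when the change DB and the upstream DB are the same database, because CREATE_REPLICATION_SLOT waits for all older transactions, including the lock's, to finish. A condensed sketch of the resulting control flow (simplified from change-streamer.ts as shown in this diff; not a drop-in replacement):

  // Simplified retry loop: release the purge lock before the second attempt.
  let purgeLock = await new PurgeLocker(lc, shard, changeDB).acquire();
  for (const first of [true, false]) {
    try {
      changeStreamer = await initializeStreamer(/* ... as in the real code */);
      break;
    } catch (e) {
      if (first && e instanceof AutoResetSignal) {
        deleteLiteDB(replica.file);
        await purgeLock?.release(); // drop the lock's open transaction
        purgeLock = null;
        continue; // retry with a fresh initial-sync and a new replication slot
      }
      throw e;
    }
  }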
@@ -1 +1 @@
- {"version":3,"file":"change-streamer.js","names":[],"sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {DatabaseInitError} from '../../../zqlite/src/db.ts';\nimport {getServerContext} from '../config/server-context.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink, publishCriticalEvent} from '../observability/events.ts';\nimport {upgradeReplica} from '../services/change-source/common/replica-schema.ts';\nimport {initializeCustomChangeSource} from '../services/change-source/custom/change-source.ts';\nimport {initializePostgresChangeSource} from '../services/change-source/pg/change-source.ts';\nimport {BackupMonitor} from '../services/change-streamer/backup-monitor.ts';\nimport {ChangeStreamerHttpServer} from '../services/change-streamer/change-streamer-http.ts';\nimport {initializeStreamer} from '../services/change-streamer/change-streamer-service.ts';\nimport type {ChangeStreamerService} from '../services/change-streamer/change-streamer.ts';\nimport {ReplicaMonitor} from '../services/change-streamer/replica-monitor.ts';\nimport {initChangeStreamerSchema} from '../services/change-streamer/schema/init.ts';\nimport {\n AutoResetSignal,\n CHANGE_STREAMER_APP_NAME,\n} from '../services/change-streamer/schema/tables.ts';\nimport {PurgeLocker} from '../services/change-streamer/storer.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {\n BackupNotFoundException,\n restoreReplica,\n} from '../services/litestream/commands.ts';\nimport {\n replicationStatusError,\n ReplicationStatusPublisher,\n} from '../services/replicator/replication-status.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const workerStartTime = Date.now();\n const config = getNormalizedZeroConfig({env, argv});\n const {\n taskID,\n changeStreamer: {\n port,\n address,\n protocol,\n startupDelayMs,\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n },\n upstream,\n change,\n replica,\n initialSync,\n litestream,\n } = config;\n\n startOtelAuto(\n createLogContext(config, 'change-streamer', 0, false),\n 'change-streamer',\n 0,\n );\n const lc = createLogContext(config, 'change-streamer');\n initEventSink(lc, config);\n\n // Kick off DB connection warmup in the background.\n const changeDB = pgClient(\n lc,\n change.db,\n {\n max: change.maxConns,\n connection: {['application_name']: CHANGE_STREAMER_APP_NAME},\n },\n {sendStringAsJson: true},\n );\n void warmupConnections(lc, changeDB, 'change').catch(() => {});\n\n const {autoReset, replicationLag} = config;\n const shard = getShardConfig(config);\n\n // Ensure the change DB schema is initialized/up-to-date, then acquire\n // a lock to prevent change-lock purges. 
This ensures that (this)\n // change-streamer will be able to resume from the backup.\n await initChangeStreamerSchema(lc, changeDB, shard);\n let purgeLock = await new PurgeLocker(lc, shard, changeDB).acquire();\n\n // Restore from litestream if the change-log has entries.\n if (purgeLock) {\n try {\n await restoreReplica(lc, config, purgeLock);\n } catch (e) {\n // If the restore failed, e.g. due to a corrupt or missing backup, the\n // replication-manager recovers by re-syncing.\n const log = e instanceof BackupNotFoundException ? 'warn' : 'error';\n lc[log]?.(\n `error restoring backup. resyncing the replica: ${String(e)}`,\n e,\n );\n\n // The purgeLock must be released if the backup could not be restored,\n // or it will otherwise prevent the change-db update after the resync\n // completes.\n await purgeLock.release();\n purgeLock = null;\n }\n }\n\n let changeStreamer: ChangeStreamerService | undefined;\n\n const context = getServerContext(config);\n\n for (const first of [true, false]) {\n try {\n // Note: This performs initial sync of the replica if necessary.\n const {changeSource, subscriptionState} =\n upstream.type === 'pg'\n ? await initializePostgresChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n {\n ...initialSync,\n replicationSlotFailover: upstream.pgReplicationSlotFailover,\n },\n context,\n replicationLag.reportIntervalMs,\n )\n : await initializeCustomChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n context,\n );\n\n const replicationStatusPublisher =\n ReplicationStatusPublisher.forReplicaFile(replica.file);\n\n changeStreamer = await initializeStreamer(\n lc,\n shard,\n taskID,\n address,\n protocol,\n changeDB,\n changeSource,\n replicationStatusPublisher,\n subscriptionState,\n purgeLock,\n autoReset ?? false,\n {\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n statementTimeoutMs: change.statementTimeoutMs,\n },\n setTimeout,\n );\n break;\n } catch (e) {\n if (first && e instanceof AutoResetSignal) {\n lc.warn?.(`resetting replica ${replica.file}`, e);\n // TODO: Make deleteLiteDB work with litestream. It will probably have to be\n // a semantic wipe instead of a file delete.\n deleteLiteDB(replica.file);\n continue; // execute again with a fresh initial-sync\n }\n await publishCriticalEvent(\n lc,\n replicationStatusError(lc, 'Initializing', e),\n );\n if (e instanceof DatabaseInitError) {\n throw new Error(\n `Cannot open ZERO_REPLICA_FILE at \"${replica.file}\". Please check that the path is valid.`,\n {cause: e},\n );\n }\n throw e;\n }\n }\n // impossible: upstream must have advanced in order for replication to be stuck.\n assert(changeStreamer, `resetting replica did not advance replicaVersion`);\n\n // Perform any upgrades to the replica in case it was restored from an\n // earlier version. Note that this upgrade is done by the replicator worker\n // as well (in both the replication-manager and the view-syncer), but the\n // change-streamer independently reads the replica, and it is fine run the\n // upgrade logic redundantly since it is idempotent.\n await upgradeReplica(lc, 'change-streamer-init', replica.file);\n\n const {backupURL, port: metricsPort} = litestream;\n const monitor = backupURL\n ? 
new BackupMonitor(\n lc,\n replica.file,\n backupURL,\n `http://localhost:${metricsPort}/metrics`,\n changeStreamer,\n // The time between when the zero-cache was started to when the\n // change-streamer is ready to start serves as the initial delay for\n // watermark cleanup (as it either includes a similar replica\n // restoration/preparation step, or an initial-sync, which\n // generally takes longer).\n //\n // Consider: Also account for permanent volumes?\n Date.now() - workerStartTime,\n )\n : new ReplicaMonitor(lc, replica.file, changeStreamer);\n\n const changeStreamerWebServer = new ChangeStreamerHttpServer(\n lc,\n config,\n {port, startupDelayMs},\n parent,\n changeStreamer,\n monitor instanceof BackupMonitor ? monitor : null,\n );\n\n parent.send(['ready', {ready: true}]);\n\n // Note: The changeStreamer itself is not started here; it is started by the\n // changeStreamerWebServer.\n return runUntilKilled(lc, parent, changeStreamerWebServer, monitor);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;AAyCA,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,kBAAkB,KAAK,KAAK;CAClC,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;CACnD,MAAM,EACJ,QACA,gBAAgB,EACd,MACA,SACA,UACA,gBACA,iCACA,sCAEF,UACA,QACA,SACA,aACA,eACE;AAEJ,eACE,iBAAiB,QAAQ,mBAAmB,GAAG,MAAM,EACrD,mBACA,EACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,kBAAkB;AACtD,eAAc,IAAI,OAAO;CAGzB,MAAM,WAAW,SACf,IACA,OAAO,IACP;EACE,KAAK,OAAO;EACZ,YAAY,GAAE,qBAAqB,0BAAyB;EAC7D,EACD,EAAC,kBAAkB,MAAK,CACzB;AACI,mBAAkB,IAAI,UAAU,SAAS,CAAC,YAAY,GAAG;CAE9D,MAAM,EAAC,WAAW,mBAAkB;CACpC,MAAM,QAAQ,eAAe,OAAO;AAKpC,OAAM,yBAAyB,IAAI,UAAU,MAAM;CACnD,IAAI,YAAY,MAAM,IAAI,YAAY,IAAI,OAAO,SAAS,CAAC,SAAS;AAGpE,KAAI,UACF,KAAI;AACF,QAAM,eAAe,IAAI,QAAQ,UAAU;UACpC,GAAG;AAIV,KADY,aAAa,0BAA0B,SAAS,WAE1D,kDAAkD,OAAO,EAAE,IAC3D,EACD;AAKD,QAAM,UAAU,SAAS;AACzB,cAAY;;CAIhB,IAAI;CAEJ,MAAM,UAAU,iBAAiB,OAAO;AAExC,MAAK,MAAM,SAAS,CAAC,MAAM,MAAM,CAC/B,KAAI;EAEF,MAAM,EAAC,cAAc,sBACnB,SAAS,SAAS,OACd,MAAM,+BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR;GACE,GAAG;GACH,yBAAyB,SAAS;GACnC,EACD,SACA,eAAe,iBAChB,GACD,MAAM,6BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR,QACD;AAKP,mBAAiB,MAAM,mBACrB,IACA,OACA,QACA,SACA,UACA,UACA,cATA,2BAA2B,eAAe,QAAQ,KAAK,EAWvD,mBACA,WACA,aAAa,OACb;GACE;GACA;GACA,oBAAoB,OAAO;GAC5B,EACD,WACD;AACD;UACO,GAAG;AACV,MAAI,SAAS,aAAa,iBAAiB;AACzC,MAAG,OAAO,qBAAqB,QAAQ,QAAQ,EAAE;AAGjD,gBAAa,QAAQ,KAAK;AAC1B;;AAEF,QAAM,qBACJ,IACA,uBAAuB,IAAI,gBAAgB,EAAE,CAC9C;AACD,MAAI,aAAa,kBACf,OAAM,IAAI,MACR,qCAAqC,QAAQ,KAAK,0CAClD,EAAC,OAAO,GAAE,CACX;AAEH,QAAM;;AAIV,QAAO,gBAAgB,mDAAmD;AAO1E,OAAM,eAAe,IAAI,wBAAwB,QAAQ,KAAK;CAE9D,MAAM,EAAC,WAAW,MAAM,gBAAe;CACvC,MAAM,UAAU,YACZ,IAAI,cACF,IACA,QAAQ,MACR,WACA,oBAAoB,YAAY,WAChC,gBAQA,KAAK,KAAK,GAAG,gBACd,GACD,IAAI,eAAe,IAAI,QAAQ,MAAM,eAAe;CAExD,MAAM,0BAA0B,IAAI,yBAClC,IACA,QACA;EAAC;EAAM;EAAe,EACtB,QACA,gBACA,mBAAmB,gBAAgB,UAAU,KAC9C;AAED,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAIrC,QAAO,eAAe,IAAI,QAAQ,yBAAyB,QAAQ;;AAIrE,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
+ {"version":3,"file":"change-streamer.js","names":[],"sources":["../../../../../zero-cache/src/server/change-streamer.ts"],"sourcesContent":["import {assert} from '../../../shared/src/asserts.ts';\nimport {must} from '../../../shared/src/must.ts';\nimport {DatabaseInitError} from '../../../zqlite/src/db.ts';\nimport {getServerContext} from '../config/server-context.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {deleteLiteDB} from '../db/delete-lite-db.ts';\nimport {warmupConnections} from '../db/warmup.ts';\nimport {initEventSink, publishCriticalEvent} from '../observability/events.ts';\nimport {upgradeReplica} from '../services/change-source/common/replica-schema.ts';\nimport {initializeCustomChangeSource} from '../services/change-source/custom/change-source.ts';\nimport {initializePostgresChangeSource} from '../services/change-source/pg/change-source.ts';\nimport {BackupMonitor} from '../services/change-streamer/backup-monitor.ts';\nimport {ChangeStreamerHttpServer} from '../services/change-streamer/change-streamer-http.ts';\nimport {initializeStreamer} from '../services/change-streamer/change-streamer-service.ts';\nimport type {ChangeStreamerService} from '../services/change-streamer/change-streamer.ts';\nimport {ReplicaMonitor} from '../services/change-streamer/replica-monitor.ts';\nimport {initChangeStreamerSchema} from '../services/change-streamer/schema/init.ts';\nimport {AutoResetSignal} from '../services/change-streamer/schema/tables.ts';\nimport {PurgeLocker} from '../services/change-streamer/storer.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {\n BackupNotFoundException,\n restoreReplica,\n} from '../services/litestream/commands.ts';\nimport {\n replicationStatusError,\n ReplicationStatusPublisher,\n} from '../services/replicator/replication-status.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardConfig} from '../types/shards.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const workerStartTime = Date.now();\n const config = getNormalizedZeroConfig({env, argv});\n const {\n taskID,\n changeStreamer: {\n port,\n address,\n protocol,\n startupDelayMs,\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n },\n upstream,\n change,\n replica,\n initialSync,\n litestream,\n } = config;\n\n startOtelAuto(\n createLogContext(config, 'change-streamer', 0, false),\n 'change-streamer',\n 0,\n );\n const lc = createLogContext(config, 'change-streamer');\n initEventSink(lc, config);\n\n // Kick off DB connection warmup in the background.\n const changeDB = pgClient(\n lc,\n change.db,\n 'change-streamer',\n {\n max: change.maxConns,\n },\n {sendStringAsJson: true},\n );\n void warmupConnections(lc, changeDB, 'change').catch(() => {});\n\n const {autoReset, replicationLag} = config;\n const shard = getShardConfig(config);\n\n // Ensure the change DB schema is initialized/up-to-date, then acquire\n // a lock to prevent change-lock purges. 
This ensures that (this)\n // change-streamer will be able to resume from the backup.\n await initChangeStreamerSchema(lc, changeDB, shard);\n let purgeLock = await new PurgeLocker(lc, shard, changeDB).acquire();\n\n // Restore from litestream if the change-log has entries.\n if (purgeLock) {\n try {\n await restoreReplica(lc, config, purgeLock);\n } catch (e) {\n // If the restore failed, e.g. due to a corrupt or missing backup, the\n // replication-manager recovers by re-syncing.\n const log = e instanceof BackupNotFoundException ? 'warn' : 'error';\n lc[log]?.(\n `error restoring backup. resyncing the replica: ${String(e)}`,\n e,\n );\n\n // The purgeLock must be released if the backup could not be restored,\n // or it will otherwise prevent the change-db update after the resync\n // completes.\n await purgeLock.release();\n purgeLock = null;\n }\n }\n\n let changeStreamer: ChangeStreamerService | undefined;\n\n const context = getServerContext(config);\n\n for (const first of [true, false]) {\n try {\n // Note: This performs initial sync of the replica if necessary.\n const {changeSource, subscriptionState} =\n upstream.type === 'pg'\n ? await initializePostgresChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n {\n ...initialSync,\n replicationSlotFailover: upstream.pgReplicationSlotFailover,\n },\n context,\n replicationLag.reportIntervalMs,\n )\n : await initializeCustomChangeSource(\n lc,\n upstream.db,\n shard,\n replica.file,\n context,\n );\n\n const replicationStatusPublisher =\n ReplicationStatusPublisher.forReplicaFile(replica.file);\n\n changeStreamer = await initializeStreamer(\n lc,\n shard,\n taskID,\n address,\n protocol,\n changeDB,\n changeSource,\n replicationStatusPublisher,\n subscriptionState,\n purgeLock,\n autoReset ?? false,\n {\n backPressureLimitHeapProportion,\n flowControlConsensusPaddingSeconds,\n statementTimeoutMs: change.statementTimeoutMs,\n },\n setTimeout,\n );\n break;\n } catch (e) {\n if (first && e instanceof AutoResetSignal) {\n lc.warn?.(`resetting replica ${replica.file}`, e);\n // TODO: Make deleteLiteDB work with litestream. It will probably have to be\n // a semantic wipe instead of a file delete.\n deleteLiteDB(replica.file);\n // Release the purge lock before retrying. This is safe because the\n // purge lock exists to preserve change-log entries so the new\n // change-streamer can resume from the backup replica's watermark.\n // An AutoResetSignal means we cant resume from the backup replica\n // (e.g. its replication slot is gone), so the change-log entries the lock\n // was protecting are no longer needed. The retry performs a fresh\n // initial sync with a new replication slot, independent of the old\n // change-log. Releasing is also necessary to avoid a\n // self-deadlock when CHANGE_DB == UPSTREAM_DB:\n // CREATE_REPLICATION_SLOT waits for all older transactions to\n // finish, including this lock's open transaction.\n await purgeLock?.release();\n purgeLock = null;\n continue; // execute again with a fresh initial-sync\n }\n await publishCriticalEvent(\n lc,\n replicationStatusError(lc, 'Initializing', e),\n );\n if (e instanceof DatabaseInitError) {\n throw new Error(\n `Cannot open ZERO_REPLICA_FILE at \"${replica.file}\". 
Please check that the path is valid.`,\n {cause: e},\n );\n }\n throw e;\n }\n }\n // impossible: upstream must have advanced in order for replication to be stuck.\n assert(changeStreamer, `resetting replica did not advance replicaVersion`);\n\n // Perform any upgrades to the replica in case it was restored from an\n // earlier version. Note that this upgrade is done by the replicator worker\n // as well (in both the replication-manager and the view-syncer), but the\n // change-streamer independently reads the replica, and it is fine run the\n // upgrade logic redundantly since it is idempotent.\n await upgradeReplica(lc, 'change-streamer-init', replica.file);\n\n const {backupURL, port: metricsPort} = litestream;\n const monitor = backupURL\n ? new BackupMonitor(\n lc,\n replica.file,\n backupURL,\n `http://localhost:${metricsPort}/metrics`,\n changeStreamer,\n // The time between when the zero-cache was started to when the\n // change-streamer is ready to start serves as the initial delay for\n // watermark cleanup (as it either includes a similar replica\n // restoration/preparation step, or an initial-sync, which\n // generally takes longer).\n //\n // Consider: Also account for permanent volumes?\n Date.now() - workerStartTime,\n )\n : new ReplicaMonitor(lc, replica.file, changeStreamer);\n\n const changeStreamerWebServer = new ChangeStreamerHttpServer(\n lc,\n config,\n {port, startupDelayMs},\n parent,\n changeStreamer,\n monitor instanceof BackupMonitor ? monitor : null,\n );\n\n parent.send(['ready', {ready: true}]);\n\n // Note: The changeStreamer itself is not started here; it is started by the\n // changeStreamerWebServer.\n return runUntilKilled(lc, parent, changeStreamerWebServer, monitor);\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n 
);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;AAsCA,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,kBAAkB,KAAK,KAAK;CAClC,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;CACnD,MAAM,EACJ,QACA,gBAAgB,EACd,MACA,SACA,UACA,gBACA,iCACA,sCAEF,UACA,QACA,SACA,aACA,eACE;AAEJ,eACE,iBAAiB,QAAQ,mBAAmB,GAAG,MAAM,EACrD,mBACA,EACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,kBAAkB;AACtD,eAAc,IAAI,OAAO;CAGzB,MAAM,WAAW,SACf,IACA,OAAO,IACP,mBACA,EACE,KAAK,OAAO,UACb,EACD,EAAC,kBAAkB,MAAK,CACzB;AACI,mBAAkB,IAAI,UAAU,SAAS,CAAC,YAAY,GAAG;CAE9D,MAAM,EAAC,WAAW,mBAAkB;CACpC,MAAM,QAAQ,eAAe,OAAO;AAKpC,OAAM,yBAAyB,IAAI,UAAU,MAAM;CACnD,IAAI,YAAY,MAAM,IAAI,YAAY,IAAI,OAAO,SAAS,CAAC,SAAS;AAGpE,KAAI,UACF,KAAI;AACF,QAAM,eAAe,IAAI,QAAQ,UAAU;UACpC,GAAG;AAIV,KADY,aAAa,0BAA0B,SAAS,WAE1D,kDAAkD,OAAO,EAAE,IAC3D,EACD;AAKD,QAAM,UAAU,SAAS;AACzB,cAAY;;CAIhB,IAAI;CAEJ,MAAM,UAAU,iBAAiB,OAAO;AAExC,MAAK,MAAM,SAAS,CAAC,MAAM,MAAM,CAC/B,KAAI;EAEF,MAAM,EAAC,cAAc,sBACnB,SAAS,SAAS,OACd,MAAM,+BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR;GACE,GAAG;GACH,yBAAyB,SAAS;GACnC,EACD,SACA,eAAe,iBAChB,GACD,MAAM,6BACJ,IACA,SAAS,IACT,OACA,QAAQ,MACR,QACD;AAKP,mBAAiB,MAAM,mBACrB,IACA,OACA,QACA,SACA,UACA,UACA,cATA,2BAA2B,eAAe,QAAQ,KAAK,EAWvD,mBACA,WACA,aAAa,OACb;GACE;GACA;GACA,oBAAoB,OAAO;GAC5B,EACD,WACD;AACD;UACO,GAAG;AACV,MAAI,SAAS,aAAa,iBAAiB;AACzC,MAAG,OAAO,qBAAqB,QAAQ,QAAQ,EAAE;AAGjD,gBAAa,QAAQ,KAAK;AAY1B,SAAM,WAAW,SAAS;AAC1B,eAAY;AACZ;;AAEF,QAAM,qBACJ,IACA,uBAAuB,IAAI,gBAAgB,EAAE,CAC9C;AACD,MAAI,aAAa,kBACf,OAAM,IAAI,MACR,qCAAqC,QAAQ,KAAK,0CAClD,EAAC,OAAO,GAAE,CACX;AAEH,QAAM;;AAIV,QAAO,gBAAgB,mDAAmD;AAO1E,OAAM,eAAe,IAAI,wBAAwB,QAAQ,KAAK;CAE9D,MAAM,EAAC,WAAW,MAAM,gBAAe;CACvC,MAAM,UAAU,YACZ,IAAI,cACF,IACA,QAAQ,MACR,WACA,oBAAoB,YAAY,WAChC,gBAQA,KAAK,KAAK,GAAG,gBACd,GACD,IAAI,eAAe,IAAI,QAAQ,MAAM,eAAe;CAExD,MAAM,0BAA0B,IAAI,yBAClC,IACA,QACA;EAAC;EAAM;EAAe,EACtB,QACA,gBACA,mBAAmB,gBAAgB,UAAU,KAC9C;AAED,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAIrC,QAAO,eAAe,IAAI,QAAQ,yBAAyB,QAAQ;;AAIrE,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
@@ -1 +1 @@
- {"version":3,"file":"main.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/main.ts"],"names":[],"mappings":"AAeA,OAAO,EAIL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAoB/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,GACrB,OAAO,CAAC,IAAI,CAAC,CAoKf"}
+ {"version":3,"file":"main.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/main.ts"],"names":[],"mappings":"AAeA,OAAO,EAIL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAqB/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,GACrB,OAAO,CAAC,IAAI,CAAC,CA6Kf"}
@@ -3,7 +3,7 @@ import { childWorker, parentWorker, singleProcessMode } from "../types/processes
  import { getNormalizedZeroConfig } from "../config/zero-config.js";
  import { ProcessManager, exitAfter, runUntilKilled } from "../services/life-cycle.js";
  import { createLogContext } from "./logging.js";
- import { CHANGE_STREAMER_URL, REAPER_URL, REPLICATOR_URL, SYNCER_URL } from "./worker-urls.js";
+ import { CHANGE_STREAMER_URL, REAPER_URL, REPLICATOR_URL, SHADOW_SYNCER_URL, SYNCER_URL } from "./worker-urls.js";
  import { initEventSink } from "../observability/events.js";
  import { restoreReplica, startReplicaBackupProcess } from "../services/litestream/commands.js";
  import { startOtelAuto } from "./otel-start.js";
@@ -58,6 +58,11 @@ async function runWorker(parent, env) {
  loadWorker(REAPER_URL, "supporting").once("message", reaperStarted);
  await reaperReady;
  }
+ if (config.shadowSync.enabled && runChangeStreamer) {
+ const { promise: shadowReady, resolve: shadowStarted } = resolver();
+ loadWorker(SHADOW_SYNCER_URL, "supporting").once("message", shadowStarted);
+ await shadowReady;
+ }
  const syncers = [];
  if (numSyncers) {
  const mode = runChangeStreamer && litestream.backupURL ? "serving-copy" : "serving";
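The new block above starts the shadow-sync worker only when shadow sync is enabled and this node runs the change-streamer; per the comment added in the new source, the shadow-sync canary is meant to run on the replication-manager (or in single-node mode) rather than on every view-syncer, which would otherwise hit upstream with N redundant canaries. It reuses the same start-and-wait idiom as the other supporting workers in main.ts:

  // Same start-and-wait idiom used above for the reaper and change-streamer workers.
  if (config.shadowSync.enabled && runChangeStreamer) {
    const {promise: shadowReady, resolve: shadowStarted} = resolver();
    loadWorker(SHADOW_SYNCER_URL, 'supporting').once('message', shadowStarted);
    await shadowReady; // the worker's first message signals it is ready
  }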
@@ -1 +1 @@
- {"version":3,"file":"main.js","names":[],"sources":["../../../../../zero-cache/src/server/main.ts"],"sourcesContent":["import path from 'node:path';\nimport {resolver} from '@rocicorp/resolver';\nimport {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {\n exitAfter,\n ProcessManager,\n runUntilKilled,\n type WorkerType,\n} from '../services/life-cycle.ts';\nimport {\n restoreReplica,\n startReplicaBackupProcess,\n} from '../services/litestream/commands.ts';\nimport {\n childWorker,\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {\n createNotifierFrom,\n handleSubscriptionsFrom,\n type ReplicaFileMode,\n subscribeTo,\n} from '../workers/replicator.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {WorkerDispatcher} from './worker-dispatcher.ts';\nimport {\n CHANGE_STREAMER_URL,\n MUTATOR_URL,\n REAPER_URL,\n REPLICATOR_URL,\n SYNCER_URL,\n} from './worker-urls.ts';\n\nconst clientConnectionBifurcated = false;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n const startMs = Date.now();\n const config = getNormalizedZeroConfig({env});\n\n startOtelAuto(\n createLogContext(config, 'dispatcher', 0, false),\n 'dispatcher',\n 0,\n );\n const lc = createLogContext(config, 'dispatcher');\n initEventSink(lc, config);\n\n const processes = new ProcessManager(lc, parent);\n\n const {numSyncWorkers: numSyncers} = config;\n if (config.enableCrudMutations && config.upstream.maxConns < numSyncers) {\n throw new Error(\n `Insufficient upstream connections (${config.upstream.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_UPSTREAM_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n if (config.cvr.maxConns < numSyncers) {\n throw new Error(\n `Insufficient cvr connections (${config.cvr.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_CVR_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n\n const internalFlags: string[] =\n numSyncers === 0\n ? []\n : [\n '--upstream-max-conns-per-worker',\n String(Math.floor(config.upstream.maxConns / numSyncers)),\n '--cvr-max-conns-per-worker',\n String(Math.floor(config.cvr.maxConns / numSyncers)),\n ];\n\n function loadWorker(\n moduleUrl: URL,\n type: WorkerType,\n id?: string | number,\n ...args: string[]\n ): Worker {\n const worker = childWorker(moduleUrl, env, ...args, ...internalFlags);\n const name = path.basename(moduleUrl.pathname) + (id ? ` (${id})` : '');\n return processes.addWorker(worker, type, name);\n }\n\n const {\n taskID,\n changeStreamer: {mode: changeStreamerMode, uri: changeStreamerURI},\n litestream,\n } = config;\n const runChangeStreamer =\n changeStreamerMode === 'dedicated' && changeStreamerURI === undefined;\n\n let changeStreamer: Worker | undefined;\n\n if (!runChangeStreamer) {\n changeStreamer = undefined;\n if (litestream.executable) {\n // For view-syncers, the backup is restored here. 
For the replication-manager,\n // the backup is restored in the change-streamer worker.\n await restoreReplica(lc, config, null);\n }\n } else {\n const {promise: changeStreamerReady, resolve: changeStreamerStarted} =\n resolver();\n changeStreamer = loadWorker(CHANGE_STREAMER_URL, 'supporting').once(\n 'message',\n changeStreamerStarted,\n );\n\n // Wait for the change-streamer to be ready to guarantee that a replica\n // file is present.\n await changeStreamerReady;\n\n if (litestream.backupURL) {\n // Start a backup replicator and corresponding litestream backup process.\n const {promise: backupReady, resolve} = resolver();\n const mode: ReplicaFileMode = 'backup';\n loadWorker(REPLICATOR_URL, 'supporting', mode, mode).once(\n // Wait for the Replicator's first message (i.e. \"ready\") before starting\n // litestream backup in order to avoid contending on the lock when the\n // replicator first prepares the db file.\n 'message',\n () => {\n processes.addSubprocess(\n startReplicaBackupProcess(lc, config),\n 'supporting',\n 'litestream',\n );\n resolve();\n },\n );\n await backupReady;\n }\n }\n\n if (numSyncers > 0) {\n const {promise: reaperReady, resolve: reaperStarted} = resolver();\n loadWorker(REAPER_URL, 'supporting').once('message', reaperStarted);\n // Before starting the view-syncers, ensure that the reaper has started\n // up, indicating that any CVR db migrations have been performed.\n await reaperReady;\n }\n\n const syncers: Worker[] = [];\n if (numSyncers) {\n const mode: ReplicaFileMode =\n runChangeStreamer && litestream.backupURL ? 'serving-copy' : 'serving';\n const {promise: replicaReady, resolve} = resolver();\n const replicator = loadWorker(\n REPLICATOR_URL,\n 'supporting',\n mode,\n mode,\n ).once('message', () => {\n subscribeTo(lc, replicator);\n resolve();\n });\n await replicaReady;\n\n const notifier = createNotifierFrom(lc, replicator);\n for (let i = 0; i < numSyncers; i++) {\n syncers.push(loadWorker(SYNCER_URL, 'user-facing', i, mode, String(i)));\n }\n syncers.forEach(syncer => handleSubscriptionsFrom(lc, syncer, notifier));\n }\n let mutator: Worker | undefined;\n if (clientConnectionBifurcated) {\n mutator = loadWorker(MUTATOR_URL, 'supporting', 'mutator');\n }\n\n lc.info?.('waiting for workers to be ready ...');\n const logWaiting = setInterval(\n () => lc.info?.(`still waiting for ${processes.initializing().join(', ')}`),\n 10_000,\n );\n await processes.allWorkersReady();\n clearInterval(logWaiting);\n lc.info?.(`all workers ready (${Date.now() - startMs} ms)`);\n\n parent.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent,\n new WorkerDispatcher(\n lc,\n taskID,\n parent,\n syncers,\n mutator,\n changeStreamer,\n ),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'dispatcher');\n }\n\n await processes.done();\n}\n\nif (!singleProcessMode()) {\n void exitAfter(() => runWorker(must(parentWorker), 
process.env));\n}\n"],"mappings":";;;;;;;;;;;;;;AAwCA,eAA8B,UAC5B,QACA,KACe;CACf,MAAM,UAAU,KAAK,KAAK;CAC1B,MAAM,SAAS,wBAAwB,EAAC,KAAI,CAAC;AAE7C,eACE,iBAAiB,QAAQ,cAAc,GAAG,MAAM,EAChD,cACA,EACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,aAAa;AACjD,eAAc,IAAI,OAAO;CAEzB,MAAM,YAAY,IAAI,eAAe,IAAI,OAAO;CAEhD,MAAM,EAAC,gBAAgB,eAAc;AACrC,KAAI,OAAO,uBAAuB,OAAO,SAAS,WAAW,WAC3D,OAAM,IAAI,MACR,sCAAsC,OAAO,SAAS,SAAS,QAAQ,WAAW,kHAEnF;AAEH,KAAI,OAAO,IAAI,WAAW,WACxB,OAAM,IAAI,MACR,iCAAiC,OAAO,IAAI,SAAS,QAAQ,WAAW,6GAEzE;CAGH,MAAM,gBACJ,eAAe,IACX,EAAE,GACF;EACE;EACA,OAAO,KAAK,MAAM,OAAO,SAAS,WAAW,WAAW,CAAC;EACzD;EACA,OAAO,KAAK,MAAM,OAAO,IAAI,WAAW,WAAW,CAAC;EACrD;CAEP,SAAS,WACP,WACA,MACA,IACA,GAAG,MACK;EACR,MAAM,SAAS,YAAY,WAAW,KAAK,GAAG,MAAM,GAAG,cAAc;EACrE,MAAM,OAAO,KAAK,SAAS,UAAU,SAAS,IAAI,KAAK,KAAK,GAAG,KAAK;AACpE,SAAO,UAAU,UAAU,QAAQ,MAAM,KAAK;;CAGhD,MAAM,EACJ,QACA,gBAAgB,EAAC,MAAM,oBAAoB,KAAK,qBAChD,eACE;CACJ,MAAM,oBACJ,uBAAuB,eAAe,sBAAsB,KAAA;CAE9D,IAAI;AAEJ,KAAI,CAAC,mBAAmB;AACtB,mBAAiB,KAAA;AACjB,MAAI,WAAW,WAGb,OAAM,eAAe,IAAI,QAAQ,KAAK;QAEnC;EACL,MAAM,EAAC,SAAS,qBAAqB,SAAS,0BAC5C,UAAU;AACZ,mBAAiB,WAAW,qBAAqB,aAAa,CAAC,KAC7D,WACA,sBACD;AAID,QAAM;AAEN,MAAI,WAAW,WAAW;GAExB,MAAM,EAAC,SAAS,aAAa,YAAW,UAAU;GAClD,MAAM,OAAwB;AAC9B,cAAW,gBAAgB,cAAc,MAAM,KAAK,CAAC,KAInD,iBACM;AACJ,cAAU,cACR,0BAA0B,IAAI,OAAO,EACrC,cACA,aACD;AACD,aAAS;KAEZ;AACD,SAAM;;;AAIV,KAAI,aAAa,GAAG;EAClB,MAAM,EAAC,SAAS,aAAa,SAAS,kBAAiB,UAAU;AACjE,aAAW,YAAY,aAAa,CAAC,KAAK,WAAW,cAAc;AAGnE,QAAM;;CAGR,MAAM,UAAoB,EAAE;AAC5B,KAAI,YAAY;EACd,MAAM,OACJ,qBAAqB,WAAW,YAAY,iBAAiB;EAC/D,MAAM,EAAC,SAAS,cAAc,YAAW,UAAU;EACnD,MAAM,aAAa,WACjB,gBACA,cACA,MACA,KACD,CAAC,KAAK,iBAAiB;AACtB,eAAY,IAAI,WAAW;AAC3B,YAAS;IACT;AACF,QAAM;EAEN,MAAM,WAAW,mBAAmB,IAAI,WAAW;AACnD,OAAK,IAAI,IAAI,GAAG,IAAI,YAAY,IAC9B,SAAQ,KAAK,WAAW,YAAY,eAAe,GAAG,MAAM,OAAO,EAAE,CAAC,CAAC;AAEzE,UAAQ,SAAQ,WAAU,wBAAwB,IAAI,QAAQ,SAAS,CAAC;;CAE1E,IAAI;AAKJ,IAAG,OAAO,sCAAsC;CAChD,MAAM,aAAa,kBACX,GAAG,OAAO,qBAAqB,UAAU,cAAc,CAAC,KAAK,KAAK,GAAG,EAC3E,IACD;AACD,OAAM,UAAU,iBAAiB;AACjC,eAAc,WAAW;AACzB,IAAG,OAAO,sBAAsB,KAAK,KAAK,GAAG,QAAQ,MAAM;AAE3D,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAErC,KAAI;AACF,QAAM,eACJ,IACA,QACA,IAAI,iBACF,IACA,QACA,QACA,SACA,SACA,eACD,CACF;UACM,KAAK;AACZ,YAAU,gBAAgB,KAAK,aAAa;;AAG9C,OAAM,UAAU,MAAM;;AAGxB,IAAI,CAAC,mBAAmB,CACjB,iBAAgB,UAAU,KAAK,aAAa,EAAE,QAAQ,IAAI,CAAC"}
+ {"version":3,"file":"main.js","names":[],"sources":["../../../../../zero-cache/src/server/main.ts"],"sourcesContent":["import path from 'node:path';\nimport {resolver} from '@rocicorp/resolver';\nimport {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {\n exitAfter,\n ProcessManager,\n runUntilKilled,\n type WorkerType,\n} from '../services/life-cycle.ts';\nimport {\n restoreReplica,\n startReplicaBackupProcess,\n} from '../services/litestream/commands.ts';\nimport {\n childWorker,\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {\n createNotifierFrom,\n handleSubscriptionsFrom,\n type ReplicaFileMode,\n subscribeTo,\n} from '../workers/replicator.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\nimport {WorkerDispatcher} from './worker-dispatcher.ts';\nimport {\n CHANGE_STREAMER_URL,\n MUTATOR_URL,\n REAPER_URL,\n REPLICATOR_URL,\n SHADOW_SYNCER_URL,\n SYNCER_URL,\n} from './worker-urls.ts';\n\nconst clientConnectionBifurcated = false;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n): Promise<void> {\n const startMs = Date.now();\n const config = getNormalizedZeroConfig({env});\n\n startOtelAuto(\n createLogContext(config, 'dispatcher', 0, false),\n 'dispatcher',\n 0,\n );\n const lc = createLogContext(config, 'dispatcher');\n initEventSink(lc, config);\n\n const processes = new ProcessManager(lc, parent);\n\n const {numSyncWorkers: numSyncers} = config;\n if (config.enableCrudMutations && config.upstream.maxConns < numSyncers) {\n throw new Error(\n `Insufficient upstream connections (${config.upstream.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_UPSTREAM_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n if (config.cvr.maxConns < numSyncers) {\n throw new Error(\n `Insufficient cvr connections (${config.cvr.maxConns}) for ${numSyncers} syncers.` +\n `Increase ZERO_CVR_MAX_CONNS or decrease ZERO_NUM_SYNC_WORKERS (which defaults to available cores).`,\n );\n }\n\n const internalFlags: string[] =\n numSyncers === 0\n ? []\n : [\n '--upstream-max-conns-per-worker',\n String(Math.floor(config.upstream.maxConns / numSyncers)),\n '--cvr-max-conns-per-worker',\n String(Math.floor(config.cvr.maxConns / numSyncers)),\n ];\n\n function loadWorker(\n moduleUrl: URL,\n type: WorkerType,\n id?: string | number,\n ...args: string[]\n ): Worker {\n const worker = childWorker(moduleUrl, env, ...args, ...internalFlags);\n const name = path.basename(moduleUrl.pathname) + (id ? ` (${id})` : '');\n return processes.addWorker(worker, type, name);\n }\n\n const {\n taskID,\n changeStreamer: {mode: changeStreamerMode, uri: changeStreamerURI},\n litestream,\n } = config;\n const runChangeStreamer =\n changeStreamerMode === 'dedicated' && changeStreamerURI === undefined;\n\n let changeStreamer: Worker | undefined;\n\n if (!runChangeStreamer) {\n changeStreamer = undefined;\n if (litestream.executable) {\n // For view-syncers, the backup is restored here. 
For the replication-manager,\n // the backup is restored in the change-streamer worker.\n await restoreReplica(lc, config, null);\n }\n } else {\n const {promise: changeStreamerReady, resolve: changeStreamerStarted} =\n resolver();\n changeStreamer = loadWorker(CHANGE_STREAMER_URL, 'supporting').once(\n 'message',\n changeStreamerStarted,\n );\n\n // Wait for the change-streamer to be ready to guarantee that a replica\n // file is present.\n await changeStreamerReady;\n\n if (litestream.backupURL) {\n // Start a backup replicator and corresponding litestream backup process.\n const {promise: backupReady, resolve} = resolver();\n const mode: ReplicaFileMode = 'backup';\n loadWorker(REPLICATOR_URL, 'supporting', mode, mode).once(\n // Wait for the Replicator's first message (i.e. \"ready\") before starting\n // litestream backup in order to avoid contending on the lock when the\n // replicator first prepares the db file.\n 'message',\n () => {\n processes.addSubprocess(\n startReplicaBackupProcess(lc, config),\n 'supporting',\n 'litestream',\n );\n resolve();\n },\n );\n await backupReady;\n }\n }\n\n if (numSyncers > 0) {\n const {promise: reaperReady, resolve: reaperStarted} = resolver();\n loadWorker(REAPER_URL, 'supporting').once('message', reaperStarted);\n // Before starting the view-syncers, ensure that the reaper has started\n // up, indicating that any CVR db migrations have been performed.\n await reaperReady;\n }\n\n // Only run the shadow-sync canary on the replication-manager (or in\n // single-node mode, where it also owns upstream). Running on every\n // view-syncer would hammer the upstream with N redundant canaries.\n if (config.shadowSync.enabled && runChangeStreamer) {\n const {promise: shadowReady, resolve: shadowStarted} = resolver();\n loadWorker(SHADOW_SYNCER_URL, 'supporting').once('message', shadowStarted);\n await shadowReady;\n }\n\n const syncers: Worker[] = [];\n if (numSyncers) {\n const mode: ReplicaFileMode =\n runChangeStreamer && litestream.backupURL ? 
'serving-copy' : 'serving';\n const {promise: replicaReady, resolve} = resolver();\n const replicator = loadWorker(\n REPLICATOR_URL,\n 'supporting',\n mode,\n mode,\n ).once('message', () => {\n subscribeTo(lc, replicator);\n resolve();\n });\n await replicaReady;\n\n const notifier = createNotifierFrom(lc, replicator);\n for (let i = 0; i < numSyncers; i++) {\n syncers.push(loadWorker(SYNCER_URL, 'user-facing', i, mode, String(i)));\n }\n syncers.forEach(syncer => handleSubscriptionsFrom(lc, syncer, notifier));\n }\n let mutator: Worker | undefined;\n if (clientConnectionBifurcated) {\n mutator = loadWorker(MUTATOR_URL, 'supporting', 'mutator');\n }\n\n lc.info?.('waiting for workers to be ready ...');\n const logWaiting = setInterval(\n () => lc.info?.(`still waiting for ${processes.initializing().join(', ')}`),\n 10_000,\n );\n await processes.allWorkersReady();\n clearInterval(logWaiting);\n lc.info?.(`all workers ready (${Date.now() - startMs} ms)`);\n\n parent.send(['ready', {ready: true}]);\n\n try {\n await runUntilKilled(\n lc,\n parent,\n new WorkerDispatcher(\n lc,\n taskID,\n parent,\n syncers,\n mutator,\n changeStreamer,\n ),\n );\n } catch (err) {\n processes.logErrorAndExit(err, 'dispatcher');\n }\n\n await processes.done();\n}\n\nif (!singleProcessMode()) {\n void exitAfter(() => runWorker(must(parentWorker), process.env));\n}\n"],"mappings":";;;;;;;;;;;;;;AAyCA,eAA8B,UAC5B,QACA,KACe;CACf,MAAM,UAAU,KAAK,KAAK;CAC1B,MAAM,SAAS,wBAAwB,EAAC,KAAI,CAAC;AAE7C,eACE,iBAAiB,QAAQ,cAAc,GAAG,MAAM,EAChD,cACA,EACD;CACD,MAAM,KAAK,iBAAiB,QAAQ,aAAa;AACjD,eAAc,IAAI,OAAO;CAEzB,MAAM,YAAY,IAAI,eAAe,IAAI,OAAO;CAEhD,MAAM,EAAC,gBAAgB,eAAc;AACrC,KAAI,OAAO,uBAAuB,OAAO,SAAS,WAAW,WAC3D,OAAM,IAAI,MACR,sCAAsC,OAAO,SAAS,SAAS,QAAQ,WAAW,kHAEnF;AAEH,KAAI,OAAO,IAAI,WAAW,WACxB,OAAM,IAAI,MACR,iCAAiC,OAAO,IAAI,SAAS,QAAQ,WAAW,6GAEzE;CAGH,MAAM,gBACJ,eAAe,IACX,EAAE,GACF;EACE;EACA,OAAO,KAAK,MAAM,OAAO,SAAS,WAAW,WAAW,CAAC;EACzD;EACA,OAAO,KAAK,MAAM,OAAO,IAAI,WAAW,WAAW,CAAC;EACrD;CAEP,SAAS,WACP,WACA,MACA,IACA,GAAG,MACK;EACR,MAAM,SAAS,YAAY,WAAW,KAAK,GAAG,MAAM,GAAG,cAAc;EACrE,MAAM,OAAO,KAAK,SAAS,UAAU,SAAS,IAAI,KAAK,KAAK,GAAG,KAAK;AACpE,SAAO,UAAU,UAAU,QAAQ,MAAM,KAAK;;CAGhD,MAAM,EACJ,QACA,gBAAgB,EAAC,MAAM,oBAAoB,KAAK,qBAChD,eACE;CACJ,MAAM,oBACJ,uBAAuB,eAAe,sBAAsB,KAAA;CAE9D,IAAI;AAEJ,KAAI,CAAC,mBAAmB;AACtB,mBAAiB,KAAA;AACjB,MAAI,WAAW,WAGb,OAAM,eAAe,IAAI,QAAQ,KAAK;QAEnC;EACL,MAAM,EAAC,SAAS,qBAAqB,SAAS,0BAC5C,UAAU;AACZ,mBAAiB,WAAW,qBAAqB,aAAa,CAAC,KAC7D,WACA,sBACD;AAID,QAAM;AAEN,MAAI,WAAW,WAAW;GAExB,MAAM,EAAC,SAAS,aAAa,YAAW,UAAU;GAClD,MAAM,OAAwB;AAC9B,cAAW,gBAAgB,cAAc,MAAM,KAAK,CAAC,KAInD,iBACM;AACJ,cAAU,cACR,0BAA0B,IAAI,OAAO,EACrC,cACA,aACD;AACD,aAAS;KAEZ;AACD,SAAM;;;AAIV,KAAI,aAAa,GAAG;EAClB,MAAM,EAAC,SAAS,aAAa,SAAS,kBAAiB,UAAU;AACjE,aAAW,YAAY,aAAa,CAAC,KAAK,WAAW,cAAc;AAGnE,QAAM;;AAMR,KAAI,OAAO,WAAW,WAAW,mBAAmB;EAClD,MAAM,EAAC,SAAS,aAAa,SAAS,kBAAiB,UAAU;AACjE,aAAW,mBAAmB,aAAa,CAAC,KAAK,WAAW,cAAc;AAC1E,QAAM;;CAGR,MAAM,UAAoB,EAAE;AAC5B,KAAI,YAAY;EACd,MAAM,OACJ,qBAAqB,WAAW,YAAY,iBAAiB;EAC/D,MAAM,EAAC,SAAS,cAAc,YAAW,UAAU;EACnD,MAAM,aAAa,WACjB,gBACA,cACA,MACA,KACD,CAAC,KAAK,iBAAiB;AACtB,eAAY,IAAI,WAAW;AAC3B,YAAS;IACT;AACF,QAAM;EAEN,MAAM,WAAW,mBAAmB,IAAI,WAAW;AACnD,OAAK,IAAI,IAAI,GAAG,IAAI,YAAY,IAC9B,SAAQ,KAAK,WAAW,YAAY,eAAe,GAAG,MAAM,OAAO,EAAE,CAAC,CAAC;AAEzE,UAAQ,SAAQ,WAAU,wBAAwB,IAAI,QAAQ,SAAS,CAAC;;CAE1E,IAAI;AAKJ,IAAG,OAAO,sCAAsC;CAChD,MAAM,aAAa,kBACX,GAAG,OAAO,qBAAqB,UAAU,cAAc,CAAC,KAAK,KAAK,GAAG,EAC3E,IACD;AACD,OAAM,UAAU,iBAAiB;AACjC,eAAc,WAAW;AACzB,IAAG,OAAO,sBAAsB,KAAK,KAAK,GAAG,QAAQ,MAAM;AAE3D,QAAO,KAAK,CAAC,SAAS,EAAC,O
AAO,MAAK,CAAC,CAAC;AAErC,KAAI;AACF,QAAM,eACJ,IACA,QACA,IAAI,iBACF,IACA,QACA,QACA,SACA,SACA,eACD,CACF;UACM,KAAK;AACZ,YAAU,gBAAgB,KAAK,aAAa;;AAG9C,OAAM,UAAU,MAAM;;AAGxB,IAAI,CAAC,mBAAmB,CACjB,iBAAgB,UAAU,KAAK,aAAa,EAAE,QAAQ,IAAI,CAAC"}
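Among the differences visible in the updated main.ts source embedded above is a gate around the shadow-sync canary worker: it is started only when shadow sync is enabled and this node runs the dedicated change-streamer, so that every view-syncer does not spawn a redundant canary against upstream. The excerpt below is pulled out of the escaped sourcesContent purely for readability; identifiers such as loadWorker, SHADOW_SYNCER_URL, and runChangeStreamer are locals of main.ts, not public API, and the surrounding worker wiring is omitted.

  // Only run the shadow-sync canary on the replication-manager (or in
  // single-node mode, where it also owns upstream). Running it on every
  // view-syncer would hammer the upstream with N redundant canaries.
  if (config.shadowSync.enabled && runChangeStreamer) {
    const {promise: shadowReady, resolve: shadowStarted} = resolver();
    loadWorker(SHADOW_SYNCER_URL, 'supporting').once('message', shadowStarted);
    await shadowReady;
  }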
@@ -1 +1 @@
- {"version":3,"file":"reaper.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/reaper.ts"],"names":[],"mappings":"AAQA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAQ/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CAgCf"}
+ {"version":3,"file":"reaper.d.ts","sourceRoot":"","sources":["../../../../../zero-cache/src/server/reaper.ts"],"names":[],"mappings":"AAQA,OAAO,EAGL,KAAK,MAAM,EACZ,MAAM,uBAAuB,CAAC;AAQ/B,wBAA8B,SAAS,CACrC,MAAM,EAAE,MAAM,EACd,GAAG,EAAE,MAAM,CAAC,UAAU,EACtB,GAAG,IAAI,EAAE,MAAM,EAAE,GAChB,OAAO,CAAC,IAAI,CAAC,CA+Bf"}
@@ -24,10 +24,7 @@ async function runWorker(parent, env, ...argv) {
  startAnonymousTelemetry(lc, config);
  const { cvr } = config;
  const shard = getShardID(config);
- const cvrDB = pgClient(lc, cvr.db, {
-   max: 1,
-   connection: { ["application_name"]: `zero-sync-cvr-purger` }
- });
+ const cvrDB = pgClient(lc, cvr.db, `sync-cvr-purger`, { max: 1 });
  await initViewSyncerSchema(lc, cvrDB, shard);
  parent.send(["ready", { ready: true }]);
  return runUntilKilled(lc, parent, new CVRPurger(lc, cvrDB, shard, {
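The hunk above (and the reaper.js sourcemap that follows) shows the CVR purger's pgClient call changing shape: in 1.4.0-canary.1 the application name was supplied through the postgres.js connection options, while in 1.4.0-canary.3 it is passed as a positional argument and the explicit zero- prefix is dropped from the literal, which suggests (though this diff alone does not prove it) that pgClient now derives the full application_name itself. pgClient is an internal zero-cache helper, so the sketch below only restates the two call sites from this hunk for side-by-side reading; the Old/New variable names are introduced here purely to show both forms together.

  // 1.4.0-canary.1: application_name buried in the options object
  const cvrDBOld = pgClient(lc, cvr.db, {
    max: 1,
    connection: {['application_name']: `zero-sync-cvr-purger`},
  });

  // 1.4.0-canary.3: name passed positionally; only pool sizing stays in options
  const cvrDBNew = pgClient(lc, cvr.db, `sync-cvr-purger`, {max: 1});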
@@ -1 +1 @@
- {"version":3,"file":"reaper.js","names":[],"sources":["../../../../../zero-cache/src/server/reaper.ts"],"sourcesContent":["import {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {ActiveUsersGauge} from '../services/view-syncer/active-users-gauge.ts';\nimport {CVRPurger} from '../services/view-syncer/cvr-purger.ts';\nimport {initViewSyncerSchema} from '../services/view-syncer/schema/init.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nconst MS_PER_HOUR = 1000 * 60 * 60;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const config = getNormalizedZeroConfig({env, argv});\n\n startOtelAuto(createLogContext(config, 'reaper', 0, false), 'reaper', 0);\n const lc = createLogContext(config, 'reaper');\n initEventSink(lc, config);\n startAnonymousTelemetry(lc, config);\n\n const {cvr} = config;\n const shard = getShardID(config);\n const cvrDB = pgClient(lc, cvr.db, {\n max: 1,\n connection: {['application_name']: `zero-sync-cvr-purger`},\n });\n await initViewSyncerSchema(lc, cvrDB, shard);\n parent.send(['ready', {ready: true}]);\n\n return runUntilKilled(\n lc,\n parent,\n new CVRPurger(lc, cvrDB, shard, {\n inactivityThresholdMs:\n cvr.garbageCollectionInactivityThresholdHours * MS_PER_HOUR,\n initialBatchSize: cvr.garbageCollectionInitialBatchSize,\n initialIntervalMs: cvr.garbageCollectionInitialIntervalSeconds * 1000,\n }),\n // Periodically computes and exports active users gauge to anonymous telemetry\n new ActiveUsersGauge(lc, cvrDB, shard, {\n // Default 10minutes refresh; can be made configurable later if needed\n updateIntervalMs: 10 * 60 * 1000,\n }),\n );\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;AAkBA,IAAM,cAAc,MAAO,KAAK;AAEhC,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;AAEnD,eAAc,iBAAiB,QAAQ,UAAU,GAAG,MAAM,EAAE,UAAU,EAAE;CACxE,MAAM,KAAK,iBAAiB,QAAQ,SAAS;AAC7C,eAAc,IAAI,OAAO;AACzB,yBAAwB,IAAI,OAAO;CAEnC,MAAM,EAAC,QAAO;CACd,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI;EACjC,KAAK;EACL,YAAY,GAAE,qBAAqB,wBAAuB;EAC3D,CAAC;AACF,OAAM,qBAAqB,IAAI,OAAO,MAAM;AAC5C,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAErC,QAAO,eACL,IACA,QACA,IAAI,UAAU,IAAI,OAAO,OAAO;EAC9B,uBACE,IAAI,4CAA4C;EAClD,kBAAkB,IAAI;EACtB,mBAAmB,IAAI,0CAA0C;EAClE,CAAC,EAEF,IAAI,iBAAiB,IAAI,OAAO,OAAO,EAErC,kBAAkB,MAAU,KAC7B,CAAC,CACH;;AAIH,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}
+ {"version":3,"file":"reaper.js","names":[],"sources":["../../../../../zero-cache/src/server/reaper.ts"],"sourcesContent":["import {must} from '../../../shared/src/must.ts';\nimport {getNormalizedZeroConfig} from '../config/zero-config.ts';\nimport {initEventSink} from '../observability/events.ts';\nimport {exitAfter, runUntilKilled} from '../services/life-cycle.ts';\nimport {ActiveUsersGauge} from '../services/view-syncer/active-users-gauge.ts';\nimport {CVRPurger} from '../services/view-syncer/cvr-purger.ts';\nimport {initViewSyncerSchema} from '../services/view-syncer/schema/init.ts';\nimport {pgClient} from '../types/pg.ts';\nimport {\n parentWorker,\n singleProcessMode,\n type Worker,\n} from '../types/processes.ts';\nimport {getShardID} from '../types/shards.ts';\nimport {startAnonymousTelemetry} from './anonymous-otel-start.ts';\nimport {createLogContext} from './logging.ts';\nimport {startOtelAuto} from './otel-start.ts';\n\nconst MS_PER_HOUR = 1000 * 60 * 60;\n\nexport default async function runWorker(\n parent: Worker,\n env: NodeJS.ProcessEnv,\n ...argv: string[]\n): Promise<void> {\n const config = getNormalizedZeroConfig({env, argv});\n\n startOtelAuto(createLogContext(config, 'reaper', 0, false), 'reaper', 0);\n const lc = createLogContext(config, 'reaper');\n initEventSink(lc, config);\n startAnonymousTelemetry(lc, config);\n\n const {cvr} = config;\n const shard = getShardID(config);\n const cvrDB = pgClient(lc, cvr.db, `sync-cvr-purger`, {\n max: 1,\n });\n await initViewSyncerSchema(lc, cvrDB, shard);\n parent.send(['ready', {ready: true}]);\n\n return runUntilKilled(\n lc,\n parent,\n new CVRPurger(lc, cvrDB, shard, {\n inactivityThresholdMs:\n cvr.garbageCollectionInactivityThresholdHours * MS_PER_HOUR,\n initialBatchSize: cvr.garbageCollectionInitialBatchSize,\n initialIntervalMs: cvr.garbageCollectionInitialIntervalSeconds * 1000,\n }),\n // Periodically computes and exports active users gauge to anonymous telemetry\n new ActiveUsersGauge(lc, cvrDB, shard, {\n // Default 10minutes refresh; can be made configurable later if needed\n updateIntervalMs: 10 * 60 * 1000,\n }),\n );\n}\n\n// fork()\nif (!singleProcessMode()) {\n void exitAfter(() =>\n runWorker(must(parentWorker), process.env, ...process.argv.slice(2)),\n );\n}\n"],"mappings":";;;;;;;;;;;;;;AAkBA,IAAM,cAAc,MAAO,KAAK;AAEhC,eAA8B,UAC5B,QACA,KACA,GAAG,MACY;CACf,MAAM,SAAS,wBAAwB;EAAC;EAAK;EAAK,CAAC;AAEnD,eAAc,iBAAiB,QAAQ,UAAU,GAAG,MAAM,EAAE,UAAU,EAAE;CACxE,MAAM,KAAK,iBAAiB,QAAQ,SAAS;AAC7C,eAAc,IAAI,OAAO;AACzB,yBAAwB,IAAI,OAAO;CAEnC,MAAM,EAAC,QAAO;CACd,MAAM,QAAQ,WAAW,OAAO;CAChC,MAAM,QAAQ,SAAS,IAAI,IAAI,IAAI,mBAAmB,EACpD,KAAK,GACN,CAAC;AACF,OAAM,qBAAqB,IAAI,OAAO,MAAM;AAC5C,QAAO,KAAK,CAAC,SAAS,EAAC,OAAO,MAAK,CAAC,CAAC;AAErC,QAAO,eACL,IACA,QACA,IAAI,UAAU,IAAI,OAAO,OAAO;EAC9B,uBACE,IAAI,4CAA4C;EAClD,kBAAkB,IAAI;EACtB,mBAAmB,IAAI,0CAA0C;EAClE,CAAC,EAEF,IAAI,iBAAiB,IAAI,OAAO,OAAO,EAErC,kBAAkB,MAAU,KAC7B,CAAC,CACH;;AAIH,IAAI,CAAC,mBAAmB,CACjB,iBACH,UAAU,KAAK,aAAa,EAAE,QAAQ,KAAK,GAAG,QAAQ,KAAK,MAAM,EAAE,CAAC,CACrE"}