@rocicorp/zero 0.26.0-canary.0 → 0.26.0-canary.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (138)
  1. package/out/shared/src/custom-key-map.d.ts +4 -4
  2. package/out/shared/src/custom-key-map.d.ts.map +1 -1
  3. package/out/shared/src/custom-key-map.js.map +1 -1
  4. package/out/shared/src/iterables.d.ts +6 -8
  5. package/out/shared/src/iterables.d.ts.map +1 -1
  6. package/out/shared/src/iterables.js +13 -7
  7. package/out/shared/src/iterables.js.map +1 -1
  8. package/out/shared/src/options.d.ts +1 -0
  9. package/out/shared/src/options.d.ts.map +1 -1
  10. package/out/shared/src/options.js +5 -1
  11. package/out/shared/src/options.js.map +1 -1
  12. package/out/zero/package.json.js +1 -1
  13. package/out/zero/src/zero-cache-dev.js +7 -3
  14. package/out/zero/src/zero-cache-dev.js.map +1 -1
  15. package/out/zero-cache/src/config/zero-config.d.ts +10 -1
  16. package/out/zero-cache/src/config/zero-config.d.ts.map +1 -1
  17. package/out/zero-cache/src/config/zero-config.js +27 -7
  18. package/out/zero-cache/src/config/zero-config.js.map +1 -1
  19. package/out/zero-cache/src/observability/events.d.ts.map +1 -1
  20. package/out/zero-cache/src/observability/events.js +15 -5
  21. package/out/zero-cache/src/observability/events.js.map +1 -1
  22. package/out/zero-cache/src/server/change-streamer.d.ts.map +1 -1
  23. package/out/zero-cache/src/server/change-streamer.js +10 -2
  24. package/out/zero-cache/src/server/change-streamer.js.map +1 -1
  25. package/out/zero-cache/src/server/syncer.d.ts +1 -0
  26. package/out/zero-cache/src/server/syncer.d.ts.map +1 -1
  27. package/out/zero-cache/src/server/syncer.js +22 -4
  28. package/out/zero-cache/src/server/syncer.js.map +1 -1
  29. package/out/zero-cache/src/services/change-source/custom/change-source.js +0 -4
  30. package/out/zero-cache/src/services/change-source/custom/change-source.js.map +1 -1
  31. package/out/zero-cache/src/services/change-source/pg/schema/ddl.d.ts.map +1 -1
  32. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js +1 -10
  33. package/out/zero-cache/src/services/change-source/pg/schema/ddl.js.map +1 -1
  34. package/out/zero-cache/src/services/change-source/pg/schema/init.d.ts.map +1 -1
  35. package/out/zero-cache/src/services/change-source/pg/schema/init.js +8 -2
  36. package/out/zero-cache/src/services/change-source/pg/schema/init.js.map +1 -1
  37. package/out/zero-cache/src/services/change-source/pg/schema/shard.d.ts.map +1 -1
  38. package/out/zero-cache/src/services/change-source/pg/schema/shard.js +1 -14
  39. package/out/zero-cache/src/services/change-source/pg/schema/shard.js.map +1 -1
  40. package/out/zero-cache/src/services/change-source/replica-schema.d.ts.map +1 -1
  41. package/out/zero-cache/src/services/change-source/replica-schema.js +8 -1
  42. package/out/zero-cache/src/services/change-source/replica-schema.js.map +1 -1
  43. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts +1 -1
  44. package/out/zero-cache/src/services/change-streamer/change-streamer-service.d.ts.map +1 -1
  45. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js +5 -3
  46. package/out/zero-cache/src/services/change-streamer/change-streamer-service.js.map +1 -1
  47. package/out/zero-cache/src/services/change-streamer/storer.d.ts +1 -1
  48. package/out/zero-cache/src/services/change-streamer/storer.d.ts.map +1 -1
  49. package/out/zero-cache/src/services/change-streamer/storer.js +16 -5
  50. package/out/zero-cache/src/services/change-streamer/storer.js.map +1 -1
  51. package/out/zero-cache/src/services/life-cycle.d.ts +1 -1
  52. package/out/zero-cache/src/services/life-cycle.d.ts.map +1 -1
  53. package/out/zero-cache/src/services/life-cycle.js.map +1 -1
  54. package/out/zero-cache/src/services/litestream/commands.d.ts.map +1 -1
  55. package/out/zero-cache/src/services/litestream/commands.js +3 -1
  56. package/out/zero-cache/src/services/litestream/commands.js.map +1 -1
  57. package/out/zero-cache/src/services/litestream/config.yml +1 -0
  58. package/out/zero-cache/src/services/mutagen/mutagen.d.ts +4 -4
  59. package/out/zero-cache/src/services/mutagen/mutagen.d.ts.map +1 -1
  60. package/out/zero-cache/src/services/mutagen/mutagen.js +9 -24
  61. package/out/zero-cache/src/services/mutagen/mutagen.js.map +1 -1
  62. package/out/zero-cache/src/services/mutagen/pusher.d.ts +1 -2
  63. package/out/zero-cache/src/services/mutagen/pusher.d.ts.map +1 -1
  64. package/out/zero-cache/src/services/mutagen/pusher.js +51 -12
  65. package/out/zero-cache/src/services/mutagen/pusher.js.map +1 -1
  66. package/out/zero-cache/src/services/replicator/change-processor.js +4 -3
  67. package/out/zero-cache/src/services/replicator/change-processor.js.map +1 -1
  68. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts +3 -2
  69. package/out/zero-cache/src/services/replicator/schema/change-log.d.ts.map +1 -1
  70. package/out/zero-cache/src/services/replicator/schema/change-log.js +36 -31
  71. package/out/zero-cache/src/services/replicator/schema/change-log.js.map +1 -1
  72. package/out/zero-cache/src/services/view-syncer/client-handler.d.ts +5 -6
  73. package/out/zero-cache/src/services/view-syncer/client-handler.d.ts.map +1 -1
  74. package/out/zero-cache/src/services/view-syncer/client-handler.js +5 -23
  75. package/out/zero-cache/src/services/view-syncer/client-handler.js.map +1 -1
  76. package/out/zero-cache/src/services/view-syncer/cvr-store.d.ts.map +1 -1
  77. package/out/zero-cache/src/services/view-syncer/cvr-store.js +6 -4
  78. package/out/zero-cache/src/services/view-syncer/cvr-store.js.map +1 -1
  79. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts +1 -8
  80. package/out/zero-cache/src/services/view-syncer/pipeline-driver.d.ts.map +1 -1
  81. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js +2 -11
  82. package/out/zero-cache/src/services/view-syncer/pipeline-driver.js.map +1 -1
  83. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts +0 -2
  84. package/out/zero-cache/src/services/view-syncer/snapshotter.d.ts.map +1 -1
  85. package/out/zero-cache/src/services/view-syncer/snapshotter.js +2 -10
  86. package/out/zero-cache/src/services/view-syncer/snapshotter.js.map +1 -1
  87. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts +1 -2
  88. package/out/zero-cache/src/services/view-syncer/view-syncer.d.ts.map +1 -1
  89. package/out/zero-cache/src/services/view-syncer/view-syncer.js +40 -42
  90. package/out/zero-cache/src/services/view-syncer/view-syncer.js.map +1 -1
  91. package/out/zero-cache/src/workers/connect-params.d.ts +0 -1
  92. package/out/zero-cache/src/workers/connect-params.d.ts.map +1 -1
  93. package/out/zero-cache/src/workers/connect-params.js +0 -2
  94. package/out/zero-cache/src/workers/connect-params.js.map +1 -1
  95. package/out/zero-cache/src/workers/replicator.d.ts.map +1 -1
  96. package/out/zero-cache/src/workers/replicator.js +2 -5
  97. package/out/zero-cache/src/workers/replicator.js.map +1 -1
  98. package/out/zero-cache/src/workers/syncer-ws-message-handler.d.ts.map +1 -1
  99. package/out/zero-cache/src/workers/syncer-ws-message-handler.js +1 -4
  100. package/out/zero-cache/src/workers/syncer-ws-message-handler.js.map +1 -1
  101. package/out/zero-client/src/client/context.js +1 -0
  102. package/out/zero-client/src/client/context.js.map +1 -1
  103. package/out/zero-client/src/client/options.d.ts +1 -1
  104. package/out/zero-client/src/client/options.js.map +1 -1
  105. package/out/zero-client/src/client/version.js +1 -1
  106. package/out/zero-client/src/client/zero.d.ts +2 -4
  107. package/out/zero-client/src/client/zero.d.ts.map +1 -1
  108. package/out/zero-client/src/client/zero.js +1 -1
  109. package/out/zero-client/src/client/zero.js.map +1 -1
  110. package/out/zero-protocol/src/push.d.ts +7 -0
  111. package/out/zero-protocol/src/push.d.ts.map +1 -1
  112. package/out/zero-protocol/src/push.js +9 -1
  113. package/out/zero-protocol/src/push.js.map +1 -1
  114. package/out/zero-server/src/process-mutations.d.ts +1 -0
  115. package/out/zero-server/src/process-mutations.d.ts.map +1 -1
  116. package/out/zero-server/src/process-mutations.js +41 -2
  117. package/out/zero-server/src/process-mutations.js.map +1 -1
  118. package/out/zero-server/src/zql-database.d.ts.map +1 -1
  119. package/out/zero-server/src/zql-database.js +9 -0
  120. package/out/zero-server/src/zql-database.js.map +1 -1
  121. package/out/zero-solid/src/solid-view.js +1 -0
  122. package/out/zero-solid/src/solid-view.js.map +1 -1
  123. package/out/zero-solid/src/use-query.js +1 -0
  124. package/out/zero-solid/src/use-query.js.map +1 -1
  125. package/out/zql/src/ivm/stream.d.ts.map +1 -1
  126. package/out/zql/src/ivm/stream.js +1 -1
  127. package/out/zql/src/ivm/stream.js.map +1 -1
  128. package/out/zql/src/mutate/mutator.js +4 -4
  129. package/out/zql/src/mutate/mutator.js.map +1 -1
  130. package/out/zql/src/query/create-builder.js +3 -5
  131. package/out/zql/src/query/create-builder.js.map +1 -1
  132. package/out/zql/src/query/query-registry.js +4 -4
  133. package/out/zql/src/query/query-registry.js.map +1 -1
  134. package/package.json +3 -3
  135. package/out/zero-cache/src/types/schema-versions.d.ts +0 -12
  136. package/out/zero-cache/src/types/schema-versions.d.ts.map +0 -1
  137. package/out/zero-cache/src/types/schema-versions.js +0 -28
  138. package/out/zero-cache/src/types/schema-versions.js.map +0 -1
@@ -98,7 +98,6 @@ DECLARE
   publications TEXT[];
   cmd RECORD;
   relevant RECORD;
-  deprecated RECORD;
   schema_specs TEXT;
   message TEXT;
   event TEXT;
@@ -180,15 +179,7 @@ BEGIN
   END IF;

   -- Construct and emit the DdlUpdateEvent message.
-
-  -- TODO: Remove backwards-compatibility fields after a few releases.
-  SELECT 'deprecated' as "schema", 'deprecated' as "name" INTO deprecated;
-
-  SELECT json_build_object(
-    'tag', tag,
-    'table', deprecated,
-    'index', deprecated
-  ) INTO event;
+  SELECT json_build_object('tag', tag) INTO event;

   SELECT ${schema}.schema_specs() INTO schema_specs;

@@ -1 +1 @@
- {"version":3,"file":"ddl.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"sourcesContent":["import {literal as lit} from 'pg-format';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {\n indexDefinitionsQuery,\n publishedSchema,\n publishedTableQuery,\n} from './published.ts';\n\n// Sent in the 'version' tag of \"ddlStart\" and \"ddlUpdate\" event messages.\n// This is used to ensure that the message constructed in the upstream\n// Trigger function is compatible with the code processing it in the zero-cache.\n//\n// Increment this when changing the format of the contents of the \"ddl\" events.\n// This will allow old / incompatible code to detect the change and abort.\nexport const PROTOCOL_VERSION = 1;\n\nconst triggerEvent = v.object({\n context: v.object({query: v.string()}).rest(v.string()),\n});\n\n// All DDL events contain a snapshot of the current tables and indexes that\n// are published / relevant to the shard.\nexport const ddlEventSchema = triggerEvent.extend({\n version: v.literal(PROTOCOL_VERSION),\n schema: publishedSchema,\n});\n\n// The `ddlStart` message is computed before every DDL event, regardless of\n// whether the subsequent event affects the shard. Downstream processing should\n// capture the contained schema information in order to determine the schema\n// changes necessary to apply a subsequent `ddlUpdate` message. Note that a\n// `ddlUpdate` message may not follow, as updates determined to be irrelevant\n// to the shard will not result in a message. However, all `ddlUpdate` messages\n// are guaranteed to be preceded by a `ddlStart` message.\nexport const ddlStartEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlStart'),\n});\n\nexport type DdlStartEvent = v.Infer<typeof ddlStartEventSchema>;\n\n/**\n * The {@link DdlUpdateEvent} contains an updated schema resulting from\n * a particular ddl event. The event type provides information\n * (i.e. constraints) on the difference from the schema of the preceding\n * {@link DdlStartEvent}.\n *\n * Note that in almost all cases (the exception being `CREATE` events),\n * it is possible that there is no relevant difference between the\n * ddl-start schema and the ddl-update schema, as many aspects of the\n * schema (e.g. 
column constraints) are not relevant to downstream\n * replication.\n */\nexport const ddlUpdateEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlUpdate'),\n event: v.object({tag: v.string()}),\n});\n\nexport type DdlUpdateEvent = v.Infer<typeof ddlUpdateEventSchema>;\n\nexport const replicationEventSchema = v.union(\n ddlStartEventSchema,\n ddlUpdateEventSchema,\n);\n\nexport type ReplicationEvent = v.Infer<typeof replicationEventSchema>;\n\n// Creates a function that appends `_{shard-num}` to the input and\n// quotes the result to be a valid identifier.\nfunction append(shardNum: number) {\n return (name: string) => id(name + '_' + String(shardNum));\n}\n\n/**\n * Event trigger functions contain the core logic that are invoked by triggers.\n *\n * Note that although many of these functions can theoretically be parameterized and\n * shared across shards, it is advantageous to keep the functions in each shard\n * isolated from each other in order to avoid the complexity of shared-function\n * versioning.\n *\n * In a sense, shards (and their triggers and functions) should be thought of as\n * execution environments that can be updated at different schedules. If per-shard\n * triggers called into shared functions, we would have to consider versioning the\n * functions when changing their behavior, backwards compatibility, removal of\n * unused versions, etc. (not unlike versioning of npm packages).\n *\n * Instead, we opt for the simplicity and isolation of having each shard\n * completely own (and maintain) the entirety of its trigger/function stack.\n */\nfunction createEventFunctionStatements(shard: ShardConfig) {\n const {appID, shardNum, publications} = shard;\n const schema = id(upstreamSchema(shard)); // e.g. \"{APP_ID}_{SHARD_ID}\"\n return /*sql*/ `\nCREATE SCHEMA IF NOT EXISTS ${schema};\n\nCREATE OR REPLACE FUNCTION ${schema}.get_trigger_context()\nRETURNS record AS $$\nDECLARE\n result record;\nBEGIN\n SELECT current_query() AS \"query\" into result;\n RETURN result;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.notice_ignore(object_id TEXT)\nRETURNS void AS $$\nBEGIN\n RAISE NOTICE 'zero(%) ignoring %', ${lit(shardNum)}, object_id;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.schema_specs()\nRETURNS TEXT AS $$\nDECLARE\n tables record;\n indexes record;\nBEGIN\n ${publishedTableQuery(publications)} INTO tables;\n ${indexDefinitionsQuery(publications)} INTO indexes;\n RETURN json_build_object(\n 'tables', tables.tables,\n 'indexes', indexes.indexes\n );\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_start()\nRETURNS event_trigger AS $$\nDECLARE\n schema_specs TEXT;\n message TEXT;\nBEGIN\n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlStart',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_end(tag TEXT)\nRETURNS void AS $$\nDECLARE\n publications TEXT[];\n cmd RECORD;\n relevant RECORD;\n deprecated RECORD;\n schema_specs TEXT;\n message TEXT;\n event TEXT;\nBEGIN\n publications := ARRAY[${lit(publications)}];\n\n SELECT objid, object_type, object_identity \n FROM pg_event_trigger_ddl_commands() \n WHERE object_type IN (\n 'table',\n 'table column',\n 'index',\n 'publication relation',\n 
'publication namespace',\n 'schema')\n LIMIT 1 INTO cmd;\n\n -- Filter DDL updates that are not relevant to the shard (i.e. publications) when possible.\n\n IF cmd.object_type = 'table' OR cmd.object_type = 'table column' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n cmd.object_type := 'table'; -- normalize the 'table column' target to 'table'\n\n ELSIF cmd.object_type = 'index' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_indexes as ind ON ind.schemaname = ns.nspname AND ind.indexname = c.relname\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = ind.tablename\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication relation' THEN\n SELECT pb.pubname FROM pg_publication_rel AS rel\n JOIN pg_publication AS pb ON pb.oid = rel.prpubid\n WHERE rel.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication namespace' THEN\n SELECT pb.pubname FROM pg_publication_namespace AS ns\n JOIN pg_publication AS pb ON pb.oid = ns.pnpubid\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'schema' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF tag LIKE 'CREATE %' THEN\n PERFORM ${schema}.notice_ignore('noop ' || tag);\n RETURN;\n END IF;\n\n -- Construct and emit the DdlUpdateEvent message.\n\n -- TODO: Remove backwards-compatibility fields after a few releases.\n SELECT 'deprecated' as \"schema\", 'deprecated' as \"name\" INTO deprecated;\n\n SELECT json_build_object(\n 'tag', tag,\n 'table', deprecated,\n 'index', deprecated\n ) INTO event;\n \n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlUpdate',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'event', event::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n`;\n}\n\n// Exported for testing.\nexport const TAGS = [\n 'CREATE TABLE',\n 'ALTER TABLE',\n 'CREATE INDEX',\n 'DROP TABLE',\n 'DROP INDEX',\n 'ALTER PUBLICATION',\n 'ALTER SCHEMA',\n] as const;\n\nexport function createEventTriggerStatements(shard: ShardConfig) {\n // Better to assert here than get a cryptic syntax error from Postgres.\n 
assert(shard.publications.length, `shard publications must be non-empty`);\n\n // Unlike functions, which are namespaced in shard-specific schemas,\n // EVENT TRIGGER names are in the global namespace and thus must include\n // the appID and shardNum.\n const {appID, shardNum} = shard;\n const sharded = append(shardNum);\n const schema = id(upstreamSchema(shard));\n\n const triggers = [\n dropEventTriggerStatements(shard.appID, shard.shardNum),\n createEventFunctionStatements(shard),\n ];\n\n // A single ddl_command_start trigger covering all relevant tags.\n triggers.push(/*sql*/ `\nCREATE EVENT TRIGGER ${sharded(`${appID}_ddl_start`)}\n ON ddl_command_start\n WHEN TAG IN (${lit(TAGS)})\n EXECUTE PROCEDURE ${schema}.emit_ddl_start();\n`);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n triggers.push(/*sql*/ `\nCREATE OR REPLACE FUNCTION ${schema}.emit_${tagID}() \nRETURNS event_trigger AS $$\nBEGIN\n PERFORM ${schema}.emit_ddl_end(${lit(tag)});\nEND\n$$ LANGUAGE plpgsql;\n\nCREATE EVENT TRIGGER ${sharded(`${appID}_${tagID}`)}\n ON ddl_command_end\n WHEN TAG IN (${lit(tag)})\n EXECUTE PROCEDURE ${schema}.emit_${tagID}();\n`);\n }\n return triggers.join('');\n}\n\n// Exported for testing.\nexport function dropEventTriggerStatements(\n appID: string,\n shardID: string | number,\n) {\n const stmts: string[] = [];\n // A single ddl_command_start trigger covering all relevant tags.\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_ddl_start_${shardID}`)};\n `);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_${tagID}_${shardID}`)};\n `);\n }\n return 
stmts.join('');\n}\n"],"names":["v.object","v.string","v.literal","v.union","lit"],"mappings":";;;;;;;AAiBO,MAAM,mBAAmB;AAEhC,MAAM,eAAeA,OAAS;AAAA,EAC5B,SAASA,OAAS,EAAC,OAAOC,OAAE,GAAS,EAAE,KAAKA,QAAU;AACxD,CAAC;AAIM,MAAM,iBAAiB,aAAa,OAAO;AAAA,EAChD,SAASC,QAAU,gBAAgB;AAAA,EACnC,QAAQ;AACV,CAAC;AASM,MAAM,sBAAsB,eAAe,OAAO;AAAA,EACvD,MAAMA,QAAU,UAAU;AAC5B,CAAC;AAgBM,MAAM,uBAAuB,eAAe,OAAO;AAAA,EACxD,MAAMA,QAAU,WAAW;AAAA,EAC3B,OAAOF,OAAS,EAAC,KAAKC,OAAE,GAAS;AACnC,CAAC;AAIM,MAAM,yBAAyBE;AAAAA,EACpC;AAAA,EACA;AACF;AAMA,SAAS,OAAO,UAAkB;AAChC,SAAO,CAAC,SAAiB,GAAG,OAAO,MAAM,OAAO,QAAQ,CAAC;AAC3D;AAmBA,SAAS,8BAA8B,OAAoB;AACzD,QAAM,EAAC,OAAO,UAAU,aAAA,IAAgB;AACxC,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC;AAAA;AAAA,IAAe;AAAA,8BACa,MAAM;AAAA;AAAA,6BAEP,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BAWN,MAAM;AAAA;AAAA;AAAA,uCAGIC,UAAI,QAAQ,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,6BAKvB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAM/B,oBAAoB,YAAY,CAAC;AAAA,IACjC,sBAAsB,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BASV,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAMxB,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA,iBAEhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAAA,6BAK0B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAWTA,UAAI,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAsB3B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAcN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAWN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA,cAKR,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAeT,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA;AAAA,iBAGhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAIH;AAGO,MAAM,OAAO;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,SAAS,6BAA6B,OAAoB;AAE/D,SAAO,MAAM,aAAa,QAAQ,sCAAsC;AAKxE,QAAM,EAAC,OAAO,SAAA,IAAY;AAC1B,QAAM,UAAU,OAAO,QAAQ;AAC/B,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AAEvC,QAAM,WAAW;AAAA,IACf,2BAA2B,MAAM,OAAO,MAAM,QAAQ;AAAA,IACtD,8BAA8B,KAAK;AAAA,EAAA;AAIrC,WAAS;AAAA;AAAA,IAAa;AAAA,uBACD,QAAQ,GAAG,KAAK,YAAY,CAAC;AAAA;AAAA,iBAEnCA,UAAI,IAAI,CAAC;AAAA,sBACJ,MAAM;AAAA;AAAA,EAAA;AAI1B,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,aAAS;AAAA;AAAA,MAAa;AAAA,6BACG,MAAM,SAAS,KAAK;AAAA;AAAA;AAAA,YAGrC,MAAM,iBAAiBA,UAAI,GAAG,CAAC;AAAA;AAAA;AAAA;AAAA,uBAIpB,QAAQ,GAAG,KAAK,IAAI,KAAK,EAAE,CAAC;AAAA;AAAA,iBAElCA,UAAI,GAAG,CAAC;AAAA,sBACH,MAAM,SAAS,KAAK;AAAA;AAAA,IAAA;AAAA,EAExC;AACA,SAAO,SAAS,KAAK,EAAE;AACzB;AAGO,SAAS,2BACd,OACA,SACA;AACA,QAAM,QAAkB,CAAA;AAExB,QAAM;AAAA;AAAA,IAAa;AAAA,mCACc,GAAG,GAAG,KAAK,cAAc,OAAO,EAAE,CAAC;AAAA;AAAA,EAAA;AAIpE,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,UAAM;AAAA;AAAA,MAAa;AAAA,qCACc,GAAG,GAAG,KAAK,IAAI,KAAK,IAAI,OAAO,EAAE,CAAC;AAAA;AAAA,IAAA;AAAA,EAErE;AACA,SAAO,MAAM,KAAK,EAAE;AACtB;"}
+ {"version":3,"file":"ddl.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/ddl.ts"],"sourcesContent":["import {literal as lit} from 'pg-format';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {\n indexDefinitionsQuery,\n publishedSchema,\n publishedTableQuery,\n} from './published.ts';\n\n// Sent in the 'version' tag of \"ddlStart\" and \"ddlUpdate\" event messages.\n// This is used to ensure that the message constructed in the upstream\n// Trigger function is compatible with the code processing it in the zero-cache.\n//\n// Increment this when changing the format of the contents of the \"ddl\" events.\n// This will allow old / incompatible code to detect the change and abort.\nexport const PROTOCOL_VERSION = 1;\n\nconst triggerEvent = v.object({\n context: v.object({query: v.string()}).rest(v.string()),\n});\n\n// All DDL events contain a snapshot of the current tables and indexes that\n// are published / relevant to the shard.\nexport const ddlEventSchema = triggerEvent.extend({\n version: v.literal(PROTOCOL_VERSION),\n schema: publishedSchema,\n});\n\n// The `ddlStart` message is computed before every DDL event, regardless of\n// whether the subsequent event affects the shard. Downstream processing should\n// capture the contained schema information in order to determine the schema\n// changes necessary to apply a subsequent `ddlUpdate` message. Note that a\n// `ddlUpdate` message may not follow, as updates determined to be irrelevant\n// to the shard will not result in a message. However, all `ddlUpdate` messages\n// are guaranteed to be preceded by a `ddlStart` message.\nexport const ddlStartEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlStart'),\n});\n\nexport type DdlStartEvent = v.Infer<typeof ddlStartEventSchema>;\n\n/**\n * The {@link DdlUpdateEvent} contains an updated schema resulting from\n * a particular ddl event. The event type provides information\n * (i.e. constraints) on the difference from the schema of the preceding\n * {@link DdlStartEvent}.\n *\n * Note that in almost all cases (the exception being `CREATE` events),\n * it is possible that there is no relevant difference between the\n * ddl-start schema and the ddl-update schema, as many aspects of the\n * schema (e.g. 
column constraints) are not relevant to downstream\n * replication.\n */\nexport const ddlUpdateEventSchema = ddlEventSchema.extend({\n type: v.literal('ddlUpdate'),\n event: v.object({tag: v.string()}),\n});\n\nexport type DdlUpdateEvent = v.Infer<typeof ddlUpdateEventSchema>;\n\nexport const replicationEventSchema = v.union(\n ddlStartEventSchema,\n ddlUpdateEventSchema,\n);\n\nexport type ReplicationEvent = v.Infer<typeof replicationEventSchema>;\n\n// Creates a function that appends `_{shard-num}` to the input and\n// quotes the result to be a valid identifier.\nfunction append(shardNum: number) {\n return (name: string) => id(name + '_' + String(shardNum));\n}\n\n/**\n * Event trigger functions contain the core logic that are invoked by triggers.\n *\n * Note that although many of these functions can theoretically be parameterized and\n * shared across shards, it is advantageous to keep the functions in each shard\n * isolated from each other in order to avoid the complexity of shared-function\n * versioning.\n *\n * In a sense, shards (and their triggers and functions) should be thought of as\n * execution environments that can be updated at different schedules. If per-shard\n * triggers called into shared functions, we would have to consider versioning the\n * functions when changing their behavior, backwards compatibility, removal of\n * unused versions, etc. (not unlike versioning of npm packages).\n *\n * Instead, we opt for the simplicity and isolation of having each shard\n * completely own (and maintain) the entirety of its trigger/function stack.\n */\nfunction createEventFunctionStatements(shard: ShardConfig) {\n const {appID, shardNum, publications} = shard;\n const schema = id(upstreamSchema(shard)); // e.g. \"{APP_ID}_{SHARD_ID}\"\n return /*sql*/ `\nCREATE SCHEMA IF NOT EXISTS ${schema};\n\nCREATE OR REPLACE FUNCTION ${schema}.get_trigger_context()\nRETURNS record AS $$\nDECLARE\n result record;\nBEGIN\n SELECT current_query() AS \"query\" into result;\n RETURN result;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.notice_ignore(object_id TEXT)\nRETURNS void AS $$\nBEGIN\n RAISE NOTICE 'zero(%) ignoring %', ${lit(shardNum)}, object_id;\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.schema_specs()\nRETURNS TEXT AS $$\nDECLARE\n tables record;\n indexes record;\nBEGIN\n ${publishedTableQuery(publications)} INTO tables;\n ${indexDefinitionsQuery(publications)} INTO indexes;\n RETURN json_build_object(\n 'tables', tables.tables,\n 'indexes', indexes.indexes\n );\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_start()\nRETURNS event_trigger AS $$\nDECLARE\n schema_specs TEXT;\n message TEXT;\nBEGIN\n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlStart',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n\n\nCREATE OR REPLACE FUNCTION ${schema}.emit_ddl_end(tag TEXT)\nRETURNS void AS $$\nDECLARE\n publications TEXT[];\n cmd RECORD;\n relevant RECORD;\n schema_specs TEXT;\n message TEXT;\n event TEXT;\nBEGIN\n publications := ARRAY[${lit(publications)}];\n\n SELECT objid, object_type, object_identity \n FROM pg_event_trigger_ddl_commands() \n WHERE object_type IN (\n 'table',\n 'table column',\n 'index',\n 'publication relation',\n 'publication namespace',\n 
'schema')\n LIMIT 1 INTO cmd;\n\n -- Filter DDL updates that are not relevant to the shard (i.e. publications) when possible.\n\n IF cmd.object_type = 'table' OR cmd.object_type = 'table column' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n cmd.object_type := 'table'; -- normalize the 'table column' target to 'table'\n\n ELSIF cmd.object_type = 'index' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_indexes as ind ON ind.schemaname = ns.nspname AND ind.indexname = c.relname\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = ind.tablename\n WHERE c.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication relation' THEN\n SELECT pb.pubname FROM pg_publication_rel AS rel\n JOIN pg_publication AS pb ON pb.oid = rel.prpubid\n WHERE rel.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'publication namespace' THEN\n SELECT pb.pubname FROM pg_publication_namespace AS ns\n JOIN pg_publication AS pb ON pb.oid = ns.pnpubid\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications) \n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF cmd.object_type = 'schema' THEN\n SELECT ns.nspname AS \"schema\", c.relname AS \"name\" FROM pg_class AS c\n JOIN pg_namespace AS ns ON c.relnamespace = ns.oid\n JOIN pg_publication_tables AS pb ON pb.schemaname = ns.nspname AND pb.tablename = c.relname\n WHERE ns.oid = cmd.objid AND pb.pubname = ANY (publications)\n INTO relevant;\n IF relevant IS NULL THEN\n PERFORM ${schema}.notice_ignore(cmd.object_identity);\n RETURN;\n END IF;\n\n ELSIF tag LIKE 'CREATE %' THEN\n PERFORM ${schema}.notice_ignore('noop ' || tag);\n RETURN;\n END IF;\n\n -- Construct and emit the DdlUpdateEvent message.\n SELECT json_build_object('tag', tag) INTO event;\n \n SELECT ${schema}.schema_specs() INTO schema_specs;\n\n SELECT json_build_object(\n 'type', 'ddlUpdate',\n 'version', ${PROTOCOL_VERSION},\n 'schema', schema_specs::json,\n 'event', event::json,\n 'context', ${schema}.get_trigger_context()\n ) INTO message;\n\n PERFORM pg_logical_emit_message(true, ${lit(\n `${appID}/${shardNum}`,\n )}, message);\nEND\n$$ LANGUAGE plpgsql;\n`;\n}\n\n// Exported for testing.\nexport const TAGS = [\n 'CREATE TABLE',\n 'ALTER TABLE',\n 'CREATE INDEX',\n 'DROP TABLE',\n 'DROP INDEX',\n 'ALTER PUBLICATION',\n 'ALTER SCHEMA',\n] as const;\n\nexport function createEventTriggerStatements(shard: ShardConfig) {\n // Better to assert here than get a cryptic syntax error from Postgres.\n assert(shard.publications.length, `shard publications must be non-empty`);\n\n // Unlike functions, which are namespaced in shard-specific schemas,\n // EVENT TRIGGER names are in the global namespace and thus must include\n // the appID and shardNum.\n 
const {appID, shardNum} = shard;\n const sharded = append(shardNum);\n const schema = id(upstreamSchema(shard));\n\n const triggers = [\n dropEventTriggerStatements(shard.appID, shard.shardNum),\n createEventFunctionStatements(shard),\n ];\n\n // A single ddl_command_start trigger covering all relevant tags.\n triggers.push(/*sql*/ `\nCREATE EVENT TRIGGER ${sharded(`${appID}_ddl_start`)}\n ON ddl_command_start\n WHEN TAG IN (${lit(TAGS)})\n EXECUTE PROCEDURE ${schema}.emit_ddl_start();\n`);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n triggers.push(/*sql*/ `\nCREATE OR REPLACE FUNCTION ${schema}.emit_${tagID}() \nRETURNS event_trigger AS $$\nBEGIN\n PERFORM ${schema}.emit_ddl_end(${lit(tag)});\nEND\n$$ LANGUAGE plpgsql;\n\nCREATE EVENT TRIGGER ${sharded(`${appID}_${tagID}`)}\n ON ddl_command_end\n WHEN TAG IN (${lit(tag)})\n EXECUTE PROCEDURE ${schema}.emit_${tagID}();\n`);\n }\n return triggers.join('');\n}\n\n// Exported for testing.\nexport function dropEventTriggerStatements(\n appID: string,\n shardID: string | number,\n) {\n const stmts: string[] = [];\n // A single ddl_command_start trigger covering all relevant tags.\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_ddl_start_${shardID}`)};\n `);\n\n // A per-tag ddl_command_end trigger that dispatches to ${schema}.emit_ddl_end(tag)\n for (const tag of TAGS) {\n const tagID = tag.toLowerCase().replace(' ', '_');\n stmts.push(/*sql*/ `\n DROP EVENT TRIGGER IF EXISTS ${id(`${appID}_${tagID}_${shardID}`)};\n `);\n }\n return stmts.join('');\n}\n"],"names":["v.object","v.string","v.literal","v.union","lit"],"mappings":";;;;;;;AAiBO,MAAM,mBAAmB;AAEhC,MAAM,eAAeA,OAAS;AAAA,EAC5B,SAASA,OAAS,EAAC,OAAOC,OAAE,GAAS,EAAE,KAAKA,QAAU;AACxD,CAAC;AAIM,MAAM,iBAAiB,aAAa,OAAO;AAAA,EAChD,SAASC,QAAU,gBAAgB;AAAA,EACnC,QAAQ;AACV,CAAC;AASM,MAAM,sBAAsB,eAAe,OAAO;AAAA,EACvD,MAAMA,QAAU,UAAU;AAC5B,CAAC;AAgBM,MAAM,uBAAuB,eAAe,OAAO;AAAA,EACxD,MAAMA,QAAU,WAAW;AAAA,EAC3B,OAAOF,OAAS,EAAC,KAAKC,OAAE,GAAS;AACnC,CAAC;AAIM,MAAM,yBAAyBE;AAAAA,EACpC;AAAA,EACA;AACF;AAMA,SAAS,OAAO,UAAkB;AAChC,SAAO,CAAC,SAAiB,GAAG,OAAO,MAAM,OAAO,QAAQ,CAAC;AAC3D;AAmBA,SAAS,8BAA8B,OAAoB;AACzD,QAAM,EAAC,OAAO,UAAU,aAAA,IAAgB;AACxC,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC;AAAA;AAAA,IAAe;AAAA,8BACa,MAAM;AAAA;AAAA,6BAEP,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BAWN,MAAM;AAAA;AAAA;AAAA,uCAGIC,UAAI,QAAQ,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,6BAKvB,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAM/B,oBAAoB,YAAY,CAAC;AAAA,IACjC,sBAAsB,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,6BASV,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAMxB,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA,iBAEhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AAAA;AAAA;AAAA;AAAA;AAAA,6BAK0B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,0BAUTA,UAAI,YAAY,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAsB3B,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAcN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAWN,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA,cAKR,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,WAOT,MAAM;AAAA;AAAA;AAAA;AAAA,iBAIA,gBAAgB;AAAA;AAAA;AAAA,iBAGhB,MAAM;AAAA;AAAA;AAAA,0CAGmBA;AAAAA,MACtC,GAAG,KAAK,IAAI,QAAQ;AAAA,IAAA,CACrB;AA
AA;AAAA;AAAA;AAAA;AAIH;AAGO,MAAM,OAAO;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEO,SAAS,6BAA6B,OAAoB;AAE/D,SAAO,MAAM,aAAa,QAAQ,sCAAsC;AAKxE,QAAM,EAAC,OAAO,SAAA,IAAY;AAC1B,QAAM,UAAU,OAAO,QAAQ;AAC/B,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AAEvC,QAAM,WAAW;AAAA,IACf,2BAA2B,MAAM,OAAO,MAAM,QAAQ;AAAA,IACtD,8BAA8B,KAAK;AAAA,EAAA;AAIrC,WAAS;AAAA;AAAA,IAAa;AAAA,uBACD,QAAQ,GAAG,KAAK,YAAY,CAAC;AAAA;AAAA,iBAEnCA,UAAI,IAAI,CAAC;AAAA,sBACJ,MAAM;AAAA;AAAA,EAAA;AAI1B,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,aAAS;AAAA;AAAA,MAAa;AAAA,6BACG,MAAM,SAAS,KAAK;AAAA;AAAA;AAAA,YAGrC,MAAM,iBAAiBA,UAAI,GAAG,CAAC;AAAA;AAAA;AAAA;AAAA,uBAIpB,QAAQ,GAAG,KAAK,IAAI,KAAK,EAAE,CAAC;AAAA;AAAA,iBAElCA,UAAI,GAAG,CAAC;AAAA,sBACH,MAAM,SAAS,KAAK;AAAA;AAAA,IAAA;AAAA,EAExC;AACA,SAAO,SAAS,KAAK,EAAE;AACzB;AAGO,SAAS,2BACd,OACA,SACA;AACA,QAAM,QAAkB,CAAA;AAExB,QAAM;AAAA;AAAA,IAAa;AAAA,mCACc,GAAG,GAAG,KAAK,cAAc,OAAO,EAAE,CAAC;AAAA;AAAA,EAAA;AAIpE,aAAW,OAAO,MAAM;AACtB,UAAM,QAAQ,IAAI,YAAA,EAAc,QAAQ,KAAK,GAAG;AAChD,UAAM;AAAA;AAAA,MAAa;AAAA,qCACc,GAAG,GAAG,KAAK,IAAI,KAAK,IAAI,OAAO,EAAE,CAAC;AAAA;AAAA,IAAA;AAAA,EAErE;AACA,SAAO,MAAM,KAAK,EAAE;AACtB;"}
@@ -1 +1 @@
- {"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAAiB,KAAK,WAAW,EAAC,MAAM,6BAA6B,CAAC;AAa7E;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,GACjB,OAAO,CAAC,IAAI,CAAC,CAgBf;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,IAAI,CAAC,CAqBf;AAuHD,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,iBAYnB"}
+ {"version":3,"file":"init.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,yBAAyB,CAAC;AACxD,OAAO,EAGL,KAAK,WAAW,EACjB,MAAM,6BAA6B,CAAC;AAarC;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,GACjB,OAAO,CAAC,IAAI,CAAC,CAgBf;AAED;;GAEG;AACH,wBAAsB,iBAAiB,CACrC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,EAClB,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,IAAI,CAAC,CAqBf;AA8HD,wBAAsB,uBAAuB,CAC3C,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,WAAW,iBAYnB"}
@@ -1,12 +1,12 @@
 import { assert } from "../../../../../../shared/src/asserts.js";
 import { parse } from "../../../../../../shared/src/valita.js";
 import { runSchemaMigrations, getVersionHistory } from "../../../../db/migration.js";
-import { upstreamSchema } from "../../../../types/shards.js";
+import { upstreamSchema, appSchema } from "../../../../types/shards.js";
+import { id } from "../../../../types/sql.js";
 import { AutoResetSignal } from "../../../change-streamer/schema/tables.js";
 import { decommissionShard } from "../decommission.js";
 import { publishedSchema } from "./published.js";
 import { getMutationsTableDefinition, metadataPublicationName, setupTriggers, legacyReplicationSlot, setupTablesAndReplication } from "./shard.js";
-import { id } from "../../../../types/sql.js";
 import { object, string } from "@badrap/valita";
 async function ensureShardSchema(lc, db, shard) {
   const initialSetup = {
@@ -149,6 +149,12 @@ function getIncrementalMigrations(shard, replicaVersion) {
         );
         lc.info?.("Upgraded schema with new mutations table");
       }
+    },
+    11: {
+      migrateSchema: async (lc, sql) => {
+        await sql`DROP TABLE IF EXISTS ${sql(appSchema(shard))}."schemaVersions"`;
+        lc.info?.(`Dropped legacy schemaVersions table`);
+      }
     }
   };
 }
@@ -1 +1 @@
- {"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {upstreamSchema, type ShardConfig} from '../../../../types/shards.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\nimport {id} from '../../../../types/sql.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. 
However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(result.length === 1);\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n 9: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. 
Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n }\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA0BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE,eAAO,OAAO,WAAW,CAAC;AAC1B,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA,IAMF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
+ {"version":3,"file":"init.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/init.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {\n getVersionHistory,\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../../../db/migration.ts';\nimport type {PostgresDB} from '../../../../types/pg.ts';\nimport {\n appSchema,\n upstreamSchema,\n type ShardConfig,\n} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {AutoResetSignal} from '../../../change-streamer/schema/tables.ts';\nimport {decommissionShard} from '../decommission.ts';\nimport {publishedSchema} from './published.ts';\nimport {\n getMutationsTableDefinition,\n legacyReplicationSlot,\n metadataPublicationName,\n setupTablesAndReplication,\n setupTriggers,\n} from './shard.ts';\n\n/**\n * Ensures that a shard is set up for initial sync.\n */\nexport async function ensureShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n): Promise<void> {\n const initialSetup: Migration = {\n migrateSchema: (lc, tx) => setupTablesAndReplication(lc, tx, shard),\n minSafeVersion: 1,\n };\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n initialSetup,\n // The incremental migration of any existing replicas will be replaced by\n // the incoming replica being synced, so the replicaVersion here is\n // unnecessary.\n getIncrementalMigrations(shard, 'obsolete'),\n );\n}\n\n/**\n * Updates the schema for an existing shard.\n */\nexport async function updateShardSchema(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n replicaVersion: string,\n): Promise<void> {\n await runSchemaMigrations(\n lc,\n `upstream-shard-${shard.appID}`,\n upstreamSchema(shard),\n db,\n {\n // If the expected existing shard is absent, throw an\n // AutoResetSignal to backtrack and initial sync.\n migrateSchema: () => {\n throw new AutoResetSignal(\n `upstream shard ${upstreamSchema(shard)} is not initialized`,\n );\n },\n },\n getIncrementalMigrations(shard, replicaVersion),\n );\n\n // The decommission check is run in updateShardSchema so that it happens\n // after initial sync, and not when the shard schema is initially set up.\n await decommissionLegacyShard(lc, db, shard);\n}\n\nfunction getIncrementalMigrations(\n shard: ShardConfig,\n replicaVersion?: string,\n): IncrementalMigrationMap {\n const shardConfigTable = `${upstreamSchema(shard)}.shardConfig`;\n\n return {\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('resetting to upgrade shard schema');\n },\n minSafeVersion: 3,\n },\n\n // v5 changes the upstream schema organization from \"zero_{SHARD_ID}\" to\n // the \"{APP_ID}_0\". An incremental migration indicates that the previous\n // SHARD_ID was \"0\" and the new APP_ID is \"zero\" (i.e. the default values\n // for those options). In this case, the upstream format is identical, and\n // no migration is necessary. 
However, the version is bumped to v5 to\n // indicate that it was created with the {APP_ID} configuration and should\n // not be decommissioned as a legacy shard.\n 5: {},\n\n 6: {\n migrateSchema: async (lc, sql) => {\n assert(\n replicaVersion,\n `replicaVersion is always passed for incremental migrations`,\n );\n await Promise.all([\n sql`\n ALTER TABLE ${sql(shardConfigTable)} ADD \"replicaVersion\" TEXT`,\n sql`\n UPDATE ${sql(shardConfigTable)} SET ${sql({replicaVersion})}`,\n ]);\n lc.info?.(\n `Recorded replicaVersion ${replicaVersion} in upstream shardConfig`,\n );\n },\n },\n\n // Updates the DDL event trigger protocol to v2, and adds support for\n // ALTER SCHEMA x RENAME TO y\n 7: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded to v2 event triggers`);\n },\n },\n\n // Adds support for non-disruptive resyncs, which tracks multiple\n // replicas with different slot names.\n 8: {\n migrateSchema: async (lc, sql) => {\n const legacyShardConfigSchema = v.object({\n replicaVersion: v.string().nullable(),\n initialSchema: publishedSchema.nullable(),\n });\n const result = await sql`\n SELECT \"replicaVersion\", \"initialSchema\" FROM ${sql(shardConfigTable)}`;\n assert(result.length === 1);\n const {replicaVersion, initialSchema} = v.parse(\n result[0],\n legacyShardConfigSchema,\n 'passthrough',\n );\n\n await Promise.all([\n sql`\n CREATE TABLE ${sql(upstreamSchema(shard))}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `,\n sql`\n INSERT INTO ${sql(upstreamSchema(shard))}.replicas ${sql({\n slot: legacyReplicationSlot(shard),\n version: replicaVersion,\n initialSchema,\n })}\n `,\n sql`\n ALTER TABLE ${sql(shardConfigTable)} DROP \"replicaVersion\", DROP \"initialSchema\"\n `,\n ]);\n lc.info?.(`Upgraded schema to support non-disruptive resyncs`);\n },\n },\n\n // Fixes field ordering of compound indexes. This incremental migration\n // only fixes indexes resulting from new schema changes. A full resync is\n // required to fix existing indexes.\n 9: {\n migrateSchema: async (lc, sql) => {\n const [{publications}] = await sql<{publications: string[]}[]>`\n SELECT publications FROM ${sql(shardConfigTable)}`;\n await setupTriggers(lc, sql, {...shard, publications});\n lc.info?.(`Upgraded DDL event triggers`);\n },\n },\n\n // Adds the `mutations` table used to track mutation results.\n 10: {\n migrateSchema: async (lc, sql) => {\n await sql.unsafe(/*sql*/ `\n ${getMutationsTableDefinition(upstreamSchema(shard))}\n ALTER PUBLICATION ${id(metadataPublicationName(shard.appID, shard.shardNum))} ADD TABLE ${id(upstreamSchema(shard))}.\"mutations\";\n `);\n lc.info?.('Upgraded schema with new mutations table');\n },\n },\n\n 11: {\n migrateSchema: async (lc, sql) => {\n await sql`DROP TABLE IF EXISTS ${sql(appSchema(shard))}.\"schemaVersions\"`;\n lc.info?.(`Dropped legacy schemaVersions table`);\n },\n },\n };\n}\n\nexport async function decommissionLegacyShard(\n lc: LogContext,\n db: PostgresDB,\n shard: ShardConfig,\n) {\n if (shard.appID !== 'zero') {\n // When migration from non-default shard ids, e.g. \"zero_prod\" => \"prod_0\",\n // clean up the old \"zero_prod\" shard if it is pre-v5. 
Note that the v5\n // check is important to guard against cleaning up a **new** \"zero_0\" app\n // that coexists with the current App (with app-id === \"0\").\n const versionHistory = await getVersionHistory(db, `zero_${shard.appID}`);\n if (versionHistory !== null && versionHistory.schemaVersion < 5) {\n await decommissionShard(lc, db, 'zero', shard.appID);\n }\n }\n}\n"],"names":["lc","v.object","v.string","replicaVersion","v.parse"],"mappings":";;;;;;;;;;AA8BA,eAAsB,kBACpB,IACA,IACA,OACe;AACf,QAAM,eAA0B;AAAA,IAC9B,eAAe,CAACA,KAAI,OAAO,0BAA0BA,KAAI,IAAI,KAAK;AAAA,IAClE,gBAAgB;AAAA,EAAA;AAElB,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA;AAAA,IAIA,yBAAyB,OAAO,UAAU;AAAA,EAAA;AAE9C;AAKA,eAAsB,kBACpB,IACA,IACA,OACA,gBACe;AACf,QAAM;AAAA,IACJ;AAAA,IACA,kBAAkB,MAAM,KAAK;AAAA,IAC7B,eAAe,KAAK;AAAA,IACpB;AAAA,IACA;AAAA;AAAA;AAAA,MAGE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR,kBAAkB,eAAe,KAAK,CAAC;AAAA,QAAA;AAAA,MAE3C;AAAA,IAAA;AAAA,IAEF,yBAAyB,OAAO,cAAc;AAAA,EAAA;AAKhD,QAAM,wBAAwB,IAAI,IAAI,KAAK;AAC7C;AAEA,SAAS,yBACP,OACA,gBACyB;AACzB,QAAM,mBAAmB,GAAG,eAAe,KAAK,CAAC;AAEjD,SAAO;AAAA,IACL,GAAG;AAAA,MACD,eAAe,MAAM;AACnB,cAAM,IAAI,gBAAgB,mCAAmC;AAAA,MAC/D;AAAA,MACA,gBAAgB;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAUlB,GAAG,CAAA;AAAA,IAEH,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC;AAAA,UACE;AAAA,UACA;AAAA,QAAA;AAEF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA,UACnC;AAAA,mBACS,IAAI,gBAAgB,CAAC,QAAQ,IAAI,EAAC,eAAA,CAAe,CAAC;AAAA,QAAA,CAC5D;AACD,WAAG;AAAA,UACD,2BAA2B,cAAc;AAAA,QAAA;AAAA,MAE7C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,+BAA+B;AAAA,MAC3C;AAAA,IAAA;AAAA;AAAA;AAAA,IAKF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,0BAA0BC,OAAS;AAAA,UACvC,gBAAgBC,OAAE,EAAS,SAAA;AAAA,UAC3B,eAAe,gBAAgB,SAAA;AAAA,QAAS,CACzC;AACD,cAAM,SAAS,MAAM;AAAA,0DAC6B,IAAI,gBAAgB,CAAC;AACvE,eAAO,OAAO,WAAW,CAAC;AAC1B,cAAM,EAAC,gBAAAC,iBAAgB,cAAA,IAAiBC;AAAAA,UACtC,OAAO,CAAC;AAAA,UACR;AAAA,UACA;AAAA,QAAA;AAGF,cAAM,QAAQ,IAAI;AAAA,UAChB;AAAA,yBACe,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,UAMzC;AAAA,wBACc,IAAI,eAAe,KAAK,CAAC,CAAC,aAAa,IAAI;AAAA,YACvD,MAAM,sBAAsB,KAAK;AAAA,YACjC,SAASD;AAAAA,YACT;AAAA,UAAA,CACD,CAAC;AAAA;AAAA,UAEF;AAAA,wBACc,IAAI,gBAAgB,CAAC;AAAA;AAAA,QAAA,CAEpC;AACD,WAAG,OAAO,mDAAmD;AAAA,MAC/D;AAAA,IAAA;AAAA;AAAA;AAAA;AAAA,IAMF,GAAG;AAAA,MACD,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,CAAC,EAAC,cAAa,IAAI,MAAM;AAAA,qCACF,IAAI,gBAAgB,CAAC;AAClD,cAAM,cAAc,IAAI,KAAK,EAAC,GAAG,OAAO,cAAa;AACrD,WAAG,OAAO,6BAA6B;AAAA,MACzC;AAAA,IAAA;AAAA;AAAA,IAIF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,IAAI;AAAA;AAAA,UAAe;AAAA,YACrB,4BAA4B,eAAe,KAAK,CAAC,CAAC;AAAA,8BAChC,GAAG,wBAAwB,MAAM,OAAO,MAAM,QAAQ,CAAC,CAAC,cAAc,GAAG,eAAe,KAAK,CAAC,CAAC;AAAA;AAAA,QAAA;AAErH,WAAG,OAAO,0CAA0C;AAAA,MACtD;AAAA,IAAA;AAAA,IAGF,IAAI;AAAA,MACF,eAAe,OAAO,IAAI,QAAQ;AAChC,cAAM,2BAA2B,IAAI,UAAU,KAAK,CAAC,CAAC;AACtD,WAAG,OAAO,qCAAqC;AAAA,MACjD;AAAA,IAAA;AAAA,EACF;AAEJ;AAEA,eAAsB,wBACpB,IACA,IACA,OACA;AACA,MAAI,MAAM,UAAU,QAAQ;AAK1B,UAAM,iBAAiB,MAAM,kBAAkB,IAAI,QAAQ,MAAM,KAAK,EAAE;AACxE,QAAI,mBAAmB,QAAQ,eAAe,gBAAgB,GAAG;AAC/D,YAAM,kBAAkB,IAAI,IAAI,QAAQ,MAAM,KAAK;AAAA,IACrD;AAAA,EACF;AACF;"}
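
The init.ts source embedded above extends the upstream incremental migration map to v11, a step whose only job is to drop the legacy schemaVersions table (which globalSetup() in shard.ts no longer creates; see the hunks below). A minimal sketch of that step, reusing the Migration type, appSchema() helper, and postgres.js sql tag that appear in the embedded source; `shard` is the ShardConfig in scope in getIncrementalMigrations():

    // Upstream migration v11, per the embedded source above: remove the
    // legacy {appID}.schemaVersions table from existing deployments.
    const v11: Migration = {
      migrateSchema: async (lc, sql) => {
        // appSchema(shard) resolves to the app schema name (e.g. "zero").
        await sql`DROP TABLE IF EXISTS ${sql(appSchema(shard))}."schemaVersions"`;
        lc.info?.(`Dropped legacy schemaVersions table`);
      },
    };
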
@@ -1 +1 @@
- {"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;AAGxB,wBAAgB,yBAAyB,CAAC,EAAC,KAAK,EAAC,EAAE,KAAK,UAEvD;AAED,wBAAgB,qBAAqB,CAAC,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,UAE/D;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,OAAO,UAGnD;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,OAAO,UAIvD;AAED,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,OAAO,UAEhD;AAMD,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAGzB;AAiDD,wBAAsB,kBAAkB,CAAC,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,KAAK,iBAEpE;AAED,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,MAAM,UASvD;AAED;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,UASzD;AAED,eAAO,MAAM,kBAAkB,gBAAgB,CAAC;AAEhD,wBAAgB,UAAU,CACxB,WAAW,EAAE,WAAW,EACxB,mBAAmB,EAAE,MAAM,GAC1B,MAAM,CAuCR;AAED,wBAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAYzE;AAED,QAAA,MAAM,yBAAyB;;;aAG7B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,QAAA,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAIjB,CAAC;AAEH,MAAM,MAAM,OAAO,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAcpD,wBAAsB,UAAU,CAC9B,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,MAAM,EACtB,EAAC,MAAM,EAAE,OAAO,EAAC,EAAE,eAAe,iBAOnC;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAczB;AAED,wBAAsB,sBAAsB,CAC1C,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,mBAAmB,CAAC,CAO9B;AAED;;;GAGG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,mBAAmB,EACxB,SAAS,EAAE,WAAW,iBAwDvB;AAED,wBAAsB,aAAa,CACjC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,mBAAmB,EACvB,KAAK,EAAE,WAAW,iBAqBnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,eAAe,QAkB3B;AAED,KAAK,iBAAiB,GAAG;IACvB,KAAK,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD,CAAC;AAEF,wBAAgB,4CAA4C,CAC1D,IAAI,EAAE,eAAe,GACpB,iBAAiB,GAAG,SAAS,CA+C/B"}
+ {"version":3,"file":"shard.d.ts","sourceRoot":"","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAIjD,OAAO,KAAK,CAAC,MAAM,wCAAwC,CAAC;AAE5D,OAAO,KAAK,EAAC,UAAU,EAAE,mBAAmB,EAAC,MAAM,yBAAyB,CAAC;AAC7E,OAAO,KAAK,EAAC,KAAK,EAAE,WAAW,EAAE,OAAO,EAAC,MAAM,6BAA6B,CAAC;AAI7E,OAAO,EAGL,KAAK,eAAe,EACpB,KAAK,eAAe,EACrB,MAAM,gBAAgB,CAAC;AAGxB,wBAAgB,yBAAyB,CAAC,EAAC,KAAK,EAAC,EAAE,KAAK,UAEvD;AAED,wBAAgB,qBAAqB,CAAC,EAAC,KAAK,EAAE,QAAQ,EAAC,EAAE,OAAO,UAE/D;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,OAAO,UAGnD;AAED;;;GAGG;AACH,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,OAAO,UAIvD;AAED,wBAAgB,kBAAkB,CAAC,KAAK,EAAE,OAAO,UAEhD;AAMD,wBAAgB,uBAAuB,CACrC,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,MAAM,GAAG,MAAM,UAGzB;AAoCD,wBAAsB,kBAAkB,CAAC,EAAE,EAAE,UAAU,EAAE,KAAK,EAAE,KAAK,iBAEpE;AAED,wBAAgB,yBAAyB,CAAC,MAAM,EAAE,MAAM,UASvD;AAED;;;;;;;GAOG;AACH,wBAAgB,2BAA2B,CAAC,MAAM,EAAE,MAAM,UASzD;AAED,eAAO,MAAM,kBAAkB,gBAAgB,CAAC;AAEhD,wBAAgB,UAAU,CACxB,WAAW,EAAE,WAAW,EACxB,mBAAmB,EAAE,MAAM,GAC1B,MAAM,CAuCR;AAED,wBAAgB,SAAS,CAAC,KAAK,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,GAAG,MAAM,GAAG,MAAM,CAYzE;AAED,QAAA,MAAM,yBAAyB;;;aAG7B,CAAC;AAEH,MAAM,MAAM,mBAAmB,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,yBAAyB,CAAC,CAAC;AAE5E,QAAA,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;aAIjB,CAAC;AAEH,MAAM,MAAM,OAAO,GAAG,CAAC,CAAC,KAAK,CAAC,OAAO,aAAa,CAAC,CAAC;AAcpD,wBAAsB,UAAU,CAC9B,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,IAAI,EAAE,MAAM,EACZ,cAAc,EAAE,MAAM,EACtB,EAAC,MAAM,EAAE,OAAO,EAAC,EAAE,eAAe,iBAOnC;AAED,wBAAsB,mBAAmB,CACvC,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,EACd,cAAc,EAAE,MAAM,GACrB,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAczB;AAED,wBAAsB,sBAAsB,CAC1C,GAAG,EAAE,UAAU,EACf,KAAK,EAAE,OAAO,GACb,OAAO,CAAC,mBAAmB,CAAC,CAO9B;AAED;;;GAGG;AACH,wBAAsB,yBAAyB,CAC7C,EAAE,EAAE,UAAU,EACd,GAAG,EAAE,mBAAmB,EACxB,SAAS,EAAE,WAAW,iBAwDvB;AAED,wBAAsB,aAAa,CACjC,EAAE,EAAE,UAAU,EACd,EAAE,EAAE,mBAAmB,EACvB,KAAK,EAAE,WAAW,iBAqBnB;AAED,wBAAgB,oBAAoB,CAClC,EAAE,EAAE,UAAU,EACd,SAAS,EAAE,eAAe,QAkB3B;AAED,KAAK,iBAAiB,GAAG;IACvB,KAAK,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC,CAAC;CACtD,CAAC;AAEF,wBAAgB,4CAA4C,CAC1D,IAAI,EAAE,eAAe,GACpB,iBAAiB,GAAG,SAAS,CA+C/B"}
@@ -39,19 +39,6 @@ function globalSetup(appID) {
  `
  CREATE SCHEMA IF NOT EXISTS ${app};
 
- CREATE TABLE IF NOT EXISTS ${app}."schemaVersions" (
- "minSupportedVersion" INT4,
- "maxSupportedVersion" INT4,
-
- -- Ensure that there is only a single row in the table.
- -- Application code can be agnostic to this column, and
- -- simply invoke UPDATE statements on the version columns.
- "lock" BOOL PRIMARY KEY DEFAULT true CHECK (lock)
- );
-
- INSERT INTO ${app}."schemaVersions" ("lock", "minSupportedVersion", "maxSupportedVersion")
- VALUES (true, 1, 1) ON CONFLICT DO NOTHING;
-
  CREATE TABLE IF NOT EXISTS ${app}.permissions (
  "permissions" JSONB,
  "hash" TEXT,
@@ -124,7 +111,7 @@ function shardSetup(shardConfig, metadataPublication) {
 
  DROP PUBLICATION IF EXISTS ${id(metadataPublication)};
  CREATE PUBLICATION ${id(metadataPublication)}
- FOR TABLE ${app}."schemaVersions", ${app}."permissions", TABLE ${shard}."clients", ${shard}."mutations";
+ FOR TABLE ${app}."permissions", TABLE ${shard}."clients", ${shard}."mutations";
 
  CREATE TABLE ${shard}."${SHARD_CONFIG_TABLE}" (
  "publications" TEXT[] NOT NULL,
@@ -1 +1 @@
- {"version":3,"file":"shard.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"sourcesContent":["import {PG_INSUFFICIENT_PRIVILEGE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {literal} from 'pg-format';\nimport postgres from 'postgres';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {Default} from '../../../../db/postgres-replica-identity-enum.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../../../types/pg.ts';\nimport type {AppID, ShardConfig, ShardID} from '../../../../types/shards.ts';\nimport {appSchema, check, upstreamSchema} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {createEventTriggerStatements} from './ddl.ts';\nimport {\n getPublicationInfo,\n publishedSchema,\n type PublicationInfo,\n type PublishedSchema,\n} from './published.ts';\nimport {validate} from './validation.ts';\n\nexport function internalPublicationPrefix({appID}: AppID) {\n return `_${appID}_`;\n}\n\nexport function legacyReplicationSlot({appID, shardNum}: ShardID) {\n return `${appID}_${shardNum}`;\n}\n\nexport function replicationSlotPrefix(shard: ShardID) {\n const {appID, shardNum} = check(shard);\n return `${appID}_${shardNum}_`;\n}\n\n/**\n * An expression used to match replication slots in the shard\n * in a Postgres `LIKE` operator.\n */\nexport function replicationSlotExpression(shard: ShardID) {\n // Underscores have a special meaning in LIKE values\n // so they have to be escaped.\n return `${replicationSlotPrefix(shard)}%`.replaceAll('_', '\\\\_');\n}\n\nexport function newReplicationSlot(shard: ShardID) {\n return replicationSlotPrefix(shard) + Date.now();\n}\n\nfunction defaultPublicationName(appID: string, shardID: string | number) {\n return `_${appID}_public_${shardID}`;\n}\n\nexport function metadataPublicationName(\n appID: string,\n shardID: string | number,\n) {\n return `_${appID}_metadata_${shardID}`;\n}\n\n// The GLOBAL_SETUP must be idempotent as it can be run multiple times for different shards.\nfunction globalSetup(appID: AppID): string {\n const app = id(appSchema(appID));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${app};\n\n CREATE TABLE IF NOT EXISTS ${app}.\"schemaVersions\" (\n \"minSupportedVersion\" INT4,\n \"maxSupportedVersion\" INT4,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${app}.\"schemaVersions\" (\"lock\", \"minSupportedVersion\", \"maxSupportedVersion\")\n VALUES (true, 1, 1) ON CONFLICT DO NOTHING;\n\n CREATE TABLE IF NOT EXISTS ${app}.permissions (\n \"permissions\" JSONB,\n \"hash\" TEXT,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n CREATE OR REPLACE FUNCTION ${app}.set_permissions_hash()\n RETURNS TRIGGER AS $$\n BEGIN\n NEW.hash = md5(NEW.permissions::text);\n RETURN NEW;\n END;\n $$ LANGUAGE plpgsql;\n\n CREATE OR REPLACE TRIGGER on_set_permissions \n BEFORE INSERT OR UPDATE ON ${app}.permissions\n FOR EACH ROW\n EXECUTE FUNCTION ${app}.set_permissions_hash();\n\n INSERT INTO ${app}.permissions 
(permissions) VALUES (NULL) ON CONFLICT DO NOTHING;\n`;\n}\n\nexport async function ensureGlobalTables(db: PostgresDB, appID: AppID) {\n await db.unsafe(globalSetup(appID));\n}\n\nexport function getClientsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"clients\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"lastMutationID\" BIGINT NOT NULL,\n \"userID\" TEXT,\n PRIMARY KEY(\"clientGroupID\", \"clientID\")\n );`;\n}\n\n/**\n * Tracks the results of mutations.\n * 1. It is an error for the same mutation ID to be used twice.\n * 2. The result is JSONB to allow for arbitrary results.\n *\n * The tables must be cleaned up as the clients\n * receive the mutation responses and as clients are removed.\n */\nexport function getMutationsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"mutations\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"mutationID\" BIGINT NOT NULL,\n \"result\" JSON NOT NULL,\n PRIMARY KEY(\"clientGroupID\", \"clientID\", \"mutationID\")\n );`;\n}\n\nexport const SHARD_CONFIG_TABLE = 'shardConfig';\n\nexport function shardSetup(\n shardConfig: ShardConfig,\n metadataPublication: string,\n): string {\n const app = id(appSchema(shardConfig));\n const shard = id(upstreamSchema(shardConfig));\n\n const pubs = [...shardConfig.publications].sort();\n assert(pubs.includes(metadataPublication));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${shard};\n\n ${getClientsTableDefinition(shard)}\n ${getMutationsTableDefinition(shard)}\n\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n CREATE PUBLICATION ${id(metadataPublication)}\n FOR TABLE ${app}.\"schemaVersions\", ${app}.\"permissions\", TABLE ${shard}.\"clients\", ${shard}.\"mutations\";\n\n CREATE TABLE ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\" TEXT[] NOT NULL,\n \"ddlDetection\" BOOL NOT NULL,\n\n -- Ensure that there is only a single row in the table.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\",\n \"ddlDetection\" \n ) VALUES (\n ARRAY[${literal(pubs)}], \n false -- set in SAVEPOINT with triggerSetup() statements\n );\n\n CREATE TABLE ${shard}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `;\n}\n\nexport function dropShard(appID: string, shardID: string | number): string {\n const schema = `${appID}_${shardID}`;\n const metadataPublication = metadataPublicationName(appID, shardID);\n const defaultPublication = defaultPublicationName(appID, shardID);\n\n // DROP SCHEMA ... CASCADE does not drop dependent PUBLICATIONS,\n // so PUBLICATIONs must be dropped explicitly.\n return /*sql*/ `\n DROP PUBLICATION IF EXISTS ${id(defaultPublication)};\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n DROP SCHEMA IF EXISTS ${id(schema)} CASCADE;\n `;\n}\n\nconst internalShardConfigSchema = v.object({\n publications: v.array(v.string()),\n ddlDetection: v.boolean(),\n});\n\nexport type InternalShardConfig = v.Infer<typeof internalShardConfigSchema>;\n\nconst replicaSchema = internalShardConfigSchema.extend({\n slot: v.string(),\n version: v.string(),\n initialSchema: publishedSchema,\n});\n\nexport type Replica = v.Infer<typeof replicaSchema>;\n\n// triggerSetup is run separately in a sub-transaction (i.e. SAVEPOINT) so\n// that a failure (e.g. 
due to lack of superuser permissions) can be handled\n// by continuing in a degraded mode (ddlDetection = false).\nfunction triggerSetup(shard: ShardConfig): string {\n const schema = id(upstreamSchema(shard));\n return (\n createEventTriggerStatements(shard) +\n /*sql*/ `UPDATE ${schema}.\"shardConfig\" SET \"ddlDetection\" = true;`\n );\n}\n\n// Called in initial-sync to store the exact schema that was initially synced.\nexport async function addReplica(\n sql: PostgresDB,\n shard: ShardID,\n slot: string,\n replicaVersion: string,\n {tables, indexes}: PublishedSchema,\n) {\n const schema = upstreamSchema(shard);\n const synced: PublishedSchema = {tables, indexes};\n await sql`\n INSERT INTO ${sql(schema)}.replicas (\"slot\", \"version\", \"initialSchema\")\n VALUES (${slot}, ${replicaVersion}, ${synced})`;\n}\n\nexport async function getReplicaAtVersion(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardID,\n replicaVersion: string,\n): Promise<Replica | null> {\n const schema = sql(upstreamSchema(shard));\n const result = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true\n WHERE version = ${replicaVersion};\n `;\n if (result.length === 0) {\n // log out all the replicas and the joined shardConfig\n const allReplicas = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true`;\n lc.debug?.(`Replica not found in: ${JSON.stringify(allReplicas)}`);\n return null;\n }\n return v.parse(result[0], replicaSchema, 'passthrough');\n}\n\nexport async function getInternalShardConfig(\n sql: PostgresDB,\n shard: ShardID,\n): Promise<InternalShardConfig> {\n const result = await sql`\n SELECT \"publications\", \"ddlDetection\"\n FROM ${sql(upstreamSchema(shard))}.\"shardConfig\";\n `;\n assert(result.length === 1);\n return v.parse(result[0], internalShardConfigSchema, 'passthrough');\n}\n\n/**\n * Sets up and returns all publications (including internal ones) for\n * the given shard.\n */\nexport async function setupTablesAndReplication(\n lc: LogContext,\n sql: PostgresTransaction,\n requested: ShardConfig,\n) {\n const {publications} = requested;\n // Validate requested publications.\n for (const pub of publications) {\n if (pub.startsWith('_')) {\n throw new Error(\n `Publication names starting with \"_\" are reserved for internal use.\\n` +\n `Please use a different name for publication \"${pub}\".`,\n );\n }\n }\n const allPublications: string[] = [];\n\n // Setup application publications.\n if (publications.length) {\n const results = await sql<{pubname: string}[]>`\n SELECT pubname from pg_publication WHERE pubname IN ${sql(\n publications,\n )}`.values();\n\n if (results.length !== publications.length) {\n throw new Error(\n `Unknown or invalid publications. Specified: [${publications}]. 
Found: [${results.flat()}]`,\n );\n }\n allPublications.push(...publications);\n } else {\n const defaultPublication = defaultPublicationName(\n requested.appID,\n requested.shardNum,\n );\n await sql`\n DROP PUBLICATION IF EXISTS ${sql(defaultPublication)}`;\n await sql`\n CREATE PUBLICATION ${sql(defaultPublication)} \n FOR TABLES IN SCHEMA public\n WITH (publish_via_partition_root = true)`;\n allPublications.push(defaultPublication);\n }\n\n const metadataPublication = metadataPublicationName(\n requested.appID,\n requested.shardNum,\n );\n allPublications.push(metadataPublication);\n\n const shard = {...requested, publications: allPublications};\n\n // Setup the global tables and shard tables / publications.\n await sql.unsafe(globalSetup(shard) + shardSetup(shard, metadataPublication));\n\n const pubs = await getPublicationInfo(sql, allPublications);\n await replicaIdentitiesForTablesWithoutPrimaryKeys(pubs)?.apply(lc, sql);\n\n await setupTriggers(lc, sql, shard);\n}\n\nexport async function setupTriggers(\n lc: LogContext,\n tx: PostgresTransaction,\n shard: ShardConfig,\n) {\n try {\n await tx.savepoint(sub => sub.unsafe(triggerSetup(shard)));\n } catch (e) {\n if (\n !(\n e instanceof postgres.PostgresError &&\n e.code === PG_INSUFFICIENT_PRIVILEGE\n )\n ) {\n throw e;\n }\n // If triggerSetup() fails, replication continues in ddlDetection=false mode.\n lc.warn?.(\n `Unable to create event triggers for schema change detection:\\n\\n` +\n `\"${e.hint ?? e.message}\"\\n\\n` +\n `Proceeding in degraded mode: schema changes will halt replication,\\n` +\n `requiring the replica to be reset (manually or with --auto-reset).`,\n );\n }\n}\n\nexport function validatePublications(\n lc: LogContext,\n published: PublicationInfo,\n) {\n // Verify that all publications export the proper events.\n published.publications.forEach(pub => {\n if (\n !pub.pubinsert ||\n !pub.pubtruncate ||\n !pub.pubdelete ||\n !pub.pubtruncate\n ) {\n // TODO: Make APIError?\n throw new Error(\n `PUBLICATION ${pub.pubname} must publish insert, update, delete, and truncate`,\n );\n }\n });\n\n published.tables.forEach(table => validate(lc, table, published.indexes));\n}\n\ntype ReplicaIdentities = {\n apply(lc: LogContext, db: PostgresDB): Promise<void>;\n};\n\nexport function replicaIdentitiesForTablesWithoutPrimaryKeys(\n pubs: PublishedSchema,\n): ReplicaIdentities | undefined {\n const replicaIdentities: {\n schema: string;\n tableName: string;\n indexName: string;\n }[] = [];\n for (const table of pubs.tables) {\n if (!table.primaryKey?.length && table.replicaIdentity === Default) {\n // Look for an index that can serve as the REPLICA IDENTITY USING INDEX. It must be:\n // - UNIQUE\n // - NOT NULL columns\n // - not deferrable (i.e. 
isImmediate)\n // - not partial (are already filtered out)\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY\n const {schema, name: tableName} = table;\n for (const {columns, name: indexName} of pubs.indexes.filter(\n idx =>\n idx.schema === schema &&\n idx.tableName === tableName &&\n idx.unique &&\n idx.isImmediate,\n )) {\n if (Object.keys(columns).some(col => !table.columns[col].notNull)) {\n continue; // Only indexes with all NOT NULL columns are suitable.\n }\n replicaIdentities.push({schema, tableName, indexName});\n break;\n }\n }\n }\n\n if (replicaIdentities.length === 0) {\n return undefined;\n }\n return {\n apply: async (lc: LogContext, sql: PostgresDB) => {\n for (const {schema, tableName, indexName} of replicaIdentities) {\n lc.info?.(\n `setting \"${indexName}\" as the REPLICA IDENTITY for \"${tableName}\"`,\n );\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(tableName)} \n REPLICA IDENTITY USING INDEX ${sql(indexName)}`;\n }\n },\n };\n}\n"],"names":["v.object","v.array","v.string","v.boolean","v.parse"],"mappings":";;;;;;;;;;;;AAoBO,SAAS,0BAA0B,EAAC,SAAe;AACxD,SAAO,IAAI,KAAK;AAClB;AAEO,SAAS,sBAAsB,EAAC,OAAO,YAAoB;AAChE,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAEO,SAAS,sBAAsB,OAAgB;AACpD,QAAM,EAAC,OAAO,aAAY,MAAM,KAAK;AACrC,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAMO,SAAS,0BAA0B,OAAgB;AAGxD,SAAO,GAAG,sBAAsB,KAAK,CAAC,IAAI,WAAW,KAAK,KAAK;AACjE;AAEO,SAAS,mBAAmB,OAAgB;AACjD,SAAO,sBAAsB,KAAK,IAAI,KAAK,IAAA;AAC7C;AAEA,SAAS,uBAAuB,OAAe,SAA0B;AACvE,SAAO,IAAI,KAAK,WAAW,OAAO;AACpC;AAEO,SAAS,wBACd,OACA,SACA;AACA,SAAO,IAAI,KAAK,aAAa,OAAO;AACtC;AAGA,SAAS,YAAY,OAAsB;AACzC,QAAM,MAAM,GAAG,UAAU,KAAK,CAAC;AAE/B;AAAA;AAAA,IAAe;AAAA,gCACe,GAAG;AAAA;AAAA,+BAEJ,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAUlB,GAAG;AAAA;AAAA;AAAA,+BAGY,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAUH,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iCASD,GAAG;AAAA;AAAA,uBAEb,GAAG;AAAA;AAAA,gBAEV,GAAG;AAAA;AAAA;AAEnB;AAEA,eAAsB,mBAAmB,IAAgB,OAAc;AACrE,QAAM,GAAG,OAAO,YAAY,KAAK,CAAC;AACpC;AAEO,SAAS,0BAA0B,QAAgB;AACxD;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAUO,SAAS,4BAA4B,QAAgB;AAC1D;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAEO,MAAM,qBAAqB;AAE3B,SAAS,WACd,aACA,qBACQ;AACR,QAAM,MAAM,GAAG,UAAU,WAAW,CAAC;AACrC,QAAM,QAAQ,GAAG,eAAe,WAAW,CAAC;AAE5C,QAAM,OAAO,CAAC,GAAG,YAAY,YAAY,EAAE,KAAA;AAC3C,SAAO,KAAK,SAAS,mBAAmB,CAAC;AAEzC;AAAA;AAAA,IAAe;AAAA,gCACe,KAAK;AAAA;AAAA,IAEjC,0BAA0B,KAAK,CAAC;AAAA,IAChC,4BAA4B,KAAK,CAAC;AAAA;AAAA,+BAEP,GAAG,mBAAmB,CAAC;AAAA,uBAC/B,GAAG,mBAAmB,CAAC;AAAA,gBAC9B,GAAG,sBAAsB,GAAG,yBAAyB,KAAK,eAAe,KAAK;AAAA;AAAA,iBAE7E,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAQ7B,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA,cAI9B,QAAQ,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA,iBAIV,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAMtB;AAEO,SAAS,UAAU,OAAe,SAAkC;AACzE,QAAM,SAAS,GAAG,KAAK,IAAI,OAAO;AAClC,QAAM,sBAAsB,wBAAwB,OAAO,OAAO;AAClE,QAAM,qBAAqB,uBAAuB,OAAO,OAAO;AAIhE;AAAA;AAAA,IAAe;AAAA,iCACgB,GAAG,kBAAkB,CAAC;AAAA,iCACtB,GAAG,mBAAmB,CAAC;AAAA,4BAC5B,GAAG,MAAM,CAAC;AAAA;AAAA;AAEtC;AAEA,MAAM,4BAA4BA,OAAS;AAAA,EACzC,cAAcC,MAAQC,QAAU;AAAA,EAChC,cAAcC,QAAE;AAClB,CAAC;AAID,MAAM,gBAAgB,0BAA0B,OAAO;AAAA,EACrD,MAAMD,OAAE;AAAA,EACR,SAASA,OAAE;AAAA,EACX,eAAe;AACjB,CAAC;AAOD,SAAS,aAAa,OAA4B;AAChD,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC,SACE,6BAA6B,KAAK;AAAA,EAC1B,UAAU,MAAM;AAE5B;AAGA,eAAsB,WACpB,KACA,OACA,MACA,gBACA,EAAC,QAAQ,WACT;AACA,QAAM,SAAS,eAAe,KAAK;AACnC,QAAM,SAA0B,EAAC,QAAQ,QAAA;AACzC,QAAM;AAAA,kBACU,IAAI,MA
AM,CAAC;AAAA,gBACb,IAAI,KAAK,cAAc,KAAK,MAAM;AAClD;AAEA,eAAsB,oBACpB,IACA,KACA,OACA,gBACyB;AACzB,QAAM,SAAS,IAAI,eAAe,KAAK,CAAC;AACxC,QAAM,SAAS,MAAM;AAAA,oBACH,MAAM,kBAAkB,MAAM;AAAA,wBAC1B,cAAc;AAAA;AAEpC,MAAI,OAAO,WAAW,GAAG;AAEvB,UAAM,cAAc,MAAM;AAAA,sBACR,MAAM,kBAAkB,MAAM;AAChD,OAAG,QAAQ,yBAAyB,KAAK,UAAU,WAAW,CAAC,EAAE;AACjE,WAAO;AAAA,EACT;AACA,SAAOE,MAAQ,OAAO,CAAC,GAAG,eAAe,aAAa;AACxD;AAEA,eAAsB,uBACpB,KACA,OAC8B;AAC9B,QAAM,SAAS,MAAM;AAAA;AAAA,aAEV,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAErC,SAAO,OAAO,WAAW,CAAC;AAC1B,SAAOA,MAAQ,OAAO,CAAC,GAAG,2BAA2B,aAAa;AACpE;AAMA,eAAsB,0BACpB,IACA,KACA,WACA;AACA,QAAM,EAAC,iBAAgB;AAEvB,aAAW,OAAO,cAAc;AAC9B,QAAI,IAAI,WAAW,GAAG,GAAG;AACvB,YAAM,IAAI;AAAA,QACR;AAAA,+CACkD,GAAG;AAAA,MAAA;AAAA,IAEzD;AAAA,EACF;AACA,QAAM,kBAA4B,CAAA;AAGlC,MAAI,aAAa,QAAQ;AACvB,UAAM,UAAU,MAAM;AAAA,0DACgC;AAAA,MACpD;AAAA,IAAA,CACD,GAAG,OAAA;AAEJ,QAAI,QAAQ,WAAW,aAAa,QAAQ;AAC1C,YAAM,IAAI;AAAA,QACR,gDAAgD,YAAY,cAAc,QAAQ,MAAM;AAAA,MAAA;AAAA,IAE5F;AACA,oBAAgB,KAAK,GAAG,YAAY;AAAA,EACtC,OAAO;AACL,UAAM,qBAAqB;AAAA,MACzB,UAAU;AAAA,MACV,UAAU;AAAA,IAAA;AAEZ,UAAM;AAAA,mCACyB,IAAI,kBAAkB,CAAC;AACtD,UAAM;AAAA,2BACiB,IAAI,kBAAkB,CAAC;AAAA;AAAA;AAG9C,oBAAgB,KAAK,kBAAkB;AAAA,EACzC;AAEA,QAAM,sBAAsB;AAAA,IAC1B,UAAU;AAAA,IACV,UAAU;AAAA,EAAA;AAEZ,kBAAgB,KAAK,mBAAmB;AAExC,QAAM,QAAQ,EAAC,GAAG,WAAW,cAAc,gBAAA;AAG3C,QAAM,IAAI,OAAO,YAAY,KAAK,IAAI,WAAW,OAAO,mBAAmB,CAAC;AAE5E,QAAM,OAAO,MAAM,mBAAmB,KAAK,eAAe;AAC1D,QAAM,6CAA6C,IAAI,GAAG,MAAM,IAAI,GAAG;AAEvE,QAAM,cAAc,IAAI,KAAK,KAAK;AACpC;AAEA,eAAsB,cACpB,IACA,IACA,OACA;AACA,MAAI;AACF,UAAM,GAAG,UAAU,CAAA,QAAO,IAAI,OAAO,aAAa,KAAK,CAAC,CAAC;AAAA,EAC3D,SAAS,GAAG;AACV,QACE,EACE,aAAa,SAAS,iBACtB,EAAE,SAAS,4BAEb;AACA,YAAM;AAAA,IACR;AAEA,OAAG;AAAA,MACD;AAAA;AAAA,GACM,EAAE,QAAQ,EAAE,OAAO;AAAA;AAAA;AAAA;AAAA,IAAA;AAAA,EAI7B;AACF;AAEO,SAAS,qBACd,IACA,WACA;AAEA,YAAU,aAAa,QAAQ,CAAA,QAAO;AACpC,QACE,CAAC,IAAI,aACL,CAAC,IAAI,eACL,CAAC,IAAI,aACL,CAAC,IAAI,aACL;AAEA,YAAM,IAAI;AAAA,QACR,eAAe,IAAI,OAAO;AAAA,MAAA;AAAA,IAE9B;AAAA,EACF,CAAC;AAED,YAAU,OAAO,QAAQ,CAAA,UAAS,SAAS,IAAI,OAAO,UAAU,OAAO,CAAC;AAC1E;AAMO,SAAS,6CACd,MAC+B;AAC/B,QAAM,oBAIA,CAAA;AACN,aAAW,SAAS,KAAK,QAAQ;AAC/B,QAAI,CAAC,MAAM,YAAY,UAAU,MAAM,oBAAoB,SAAS;AAQlE,YAAM,EAAC,QAAQ,MAAM,UAAA,IAAa;AAClC,iBAAW,EAAC,SAAS,MAAM,UAAA,KAAc,KAAK,QAAQ;AAAA,QACpD,CAAA,QACE,IAAI,WAAW,UACf,IAAI,cAAc,aAClB,IAAI,UACJ,IAAI;AAAA,MAAA,GACL;AACD,YAAI,OAAO,KAAK,OAAO,EAAE,KAAK,CAAA,QAAO,CAAC,MAAM,QAAQ,GAAG,EAAE,OAAO,GAAG;AACjE;AAAA,QACF;AACA,0BAAkB,KAAK,EAAC,QAAQ,WAAW,WAAU;AACrD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,MAAI,kBAAkB,WAAW,GAAG;AAClC,WAAO;AAAA,EACT;AACA,SAAO;AAAA,IACL,OAAO,OAAO,IAAgB,QAAoB;AAChD,iBAAW,EAAC,QAAQ,WAAW,UAAA,KAAc,mBAAmB;AAC9D,WAAG;AAAA,UACD,YAAY,SAAS,kCAAkC,SAAS;AAAA,QAAA;AAElE,cAAM;AAAA,sBACQ,IAAI,MAAM,CAAC,IAAI,IAAI,SAAS,CAAC;AAAA,yCACV,IAAI,SAAS,CAAC;AAAA,MACjD;AAAA,IACF;AAAA,EAAA;AAEJ;"}
+ {"version":3,"file":"shard.js","sources":["../../../../../../../../zero-cache/src/services/change-source/pg/schema/shard.ts"],"sourcesContent":["import {PG_INSUFFICIENT_PRIVILEGE} from '@drdgvhbh/postgres-error-codes';\nimport type {LogContext} from '@rocicorp/logger';\nimport {literal} from 'pg-format';\nimport postgres from 'postgres';\nimport {assert} from '../../../../../../shared/src/asserts.ts';\nimport * as v from '../../../../../../shared/src/valita.ts';\nimport {Default} from '../../../../db/postgres-replica-identity-enum.ts';\nimport type {PostgresDB, PostgresTransaction} from '../../../../types/pg.ts';\nimport type {AppID, ShardConfig, ShardID} from '../../../../types/shards.ts';\nimport {appSchema, check, upstreamSchema} from '../../../../types/shards.ts';\nimport {id} from '../../../../types/sql.ts';\nimport {createEventTriggerStatements} from './ddl.ts';\nimport {\n getPublicationInfo,\n publishedSchema,\n type PublicationInfo,\n type PublishedSchema,\n} from './published.ts';\nimport {validate} from './validation.ts';\n\nexport function internalPublicationPrefix({appID}: AppID) {\n return `_${appID}_`;\n}\n\nexport function legacyReplicationSlot({appID, shardNum}: ShardID) {\n return `${appID}_${shardNum}`;\n}\n\nexport function replicationSlotPrefix(shard: ShardID) {\n const {appID, shardNum} = check(shard);\n return `${appID}_${shardNum}_`;\n}\n\n/**\n * An expression used to match replication slots in the shard\n * in a Postgres `LIKE` operator.\n */\nexport function replicationSlotExpression(shard: ShardID) {\n // Underscores have a special meaning in LIKE values\n // so they have to be escaped.\n return `${replicationSlotPrefix(shard)}%`.replaceAll('_', '\\\\_');\n}\n\nexport function newReplicationSlot(shard: ShardID) {\n return replicationSlotPrefix(shard) + Date.now();\n}\n\nfunction defaultPublicationName(appID: string, shardID: string | number) {\n return `_${appID}_public_${shardID}`;\n}\n\nexport function metadataPublicationName(\n appID: string,\n shardID: string | number,\n) {\n return `_${appID}_metadata_${shardID}`;\n}\n\n// The GLOBAL_SETUP must be idempotent as it can be run multiple times for different shards.\nfunction globalSetup(appID: AppID): string {\n const app = id(appSchema(appID));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${app};\n\n CREATE TABLE IF NOT EXISTS ${app}.permissions (\n \"permissions\" JSONB,\n \"hash\" TEXT,\n\n -- Ensure that there is only a single row in the table.\n -- Application code can be agnostic to this column, and\n -- simply invoke UPDATE statements on the version columns.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n CREATE OR REPLACE FUNCTION ${app}.set_permissions_hash()\n RETURNS TRIGGER AS $$\n BEGIN\n NEW.hash = md5(NEW.permissions::text);\n RETURN NEW;\n END;\n $$ LANGUAGE plpgsql;\n\n CREATE OR REPLACE TRIGGER on_set_permissions \n BEFORE INSERT OR UPDATE ON ${app}.permissions\n FOR EACH ROW\n EXECUTE FUNCTION ${app}.set_permissions_hash();\n\n INSERT INTO ${app}.permissions (permissions) VALUES (NULL) ON CONFLICT DO NOTHING;\n`;\n}\n\nexport async function ensureGlobalTables(db: PostgresDB, appID: AppID) {\n await db.unsafe(globalSetup(appID));\n}\n\nexport function getClientsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"clients\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"lastMutationID\" BIGINT NOT NULL,\n \"userID\" TEXT,\n PRIMARY KEY(\"clientGroupID\", \"clientID\")\n );`;\n}\n\n/**\n * Tracks the results of 
mutations.\n * 1. It is an error for the same mutation ID to be used twice.\n * 2. The result is JSONB to allow for arbitrary results.\n *\n * The tables must be cleaned up as the clients\n * receive the mutation responses and as clients are removed.\n */\nexport function getMutationsTableDefinition(schema: string) {\n return /*sql*/ `\n CREATE TABLE ${schema}.\"mutations\" (\n \"clientGroupID\" TEXT NOT NULL,\n \"clientID\" TEXT NOT NULL,\n \"mutationID\" BIGINT NOT NULL,\n \"result\" JSON NOT NULL,\n PRIMARY KEY(\"clientGroupID\", \"clientID\", \"mutationID\")\n );`;\n}\n\nexport const SHARD_CONFIG_TABLE = 'shardConfig';\n\nexport function shardSetup(\n shardConfig: ShardConfig,\n metadataPublication: string,\n): string {\n const app = id(appSchema(shardConfig));\n const shard = id(upstreamSchema(shardConfig));\n\n const pubs = [...shardConfig.publications].sort();\n assert(pubs.includes(metadataPublication));\n\n return /*sql*/ `\n CREATE SCHEMA IF NOT EXISTS ${shard};\n\n ${getClientsTableDefinition(shard)}\n ${getMutationsTableDefinition(shard)}\n\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n CREATE PUBLICATION ${id(metadataPublication)}\n FOR TABLE ${app}.\"permissions\", TABLE ${shard}.\"clients\", ${shard}.\"mutations\";\n\n CREATE TABLE ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\" TEXT[] NOT NULL,\n \"ddlDetection\" BOOL NOT NULL,\n\n -- Ensure that there is only a single row in the table.\n \"lock\" BOOL PRIMARY KEY DEFAULT true CHECK (lock)\n );\n\n INSERT INTO ${shard}.\"${SHARD_CONFIG_TABLE}\" (\n \"publications\",\n \"ddlDetection\" \n ) VALUES (\n ARRAY[${literal(pubs)}], \n false -- set in SAVEPOINT with triggerSetup() statements\n );\n\n CREATE TABLE ${shard}.replicas (\n \"slot\" TEXT PRIMARY KEY,\n \"version\" TEXT NOT NULL,\n \"initialSchema\" JSON NOT NULL\n );\n `;\n}\n\nexport function dropShard(appID: string, shardID: string | number): string {\n const schema = `${appID}_${shardID}`;\n const metadataPublication = metadataPublicationName(appID, shardID);\n const defaultPublication = defaultPublicationName(appID, shardID);\n\n // DROP SCHEMA ... CASCADE does not drop dependent PUBLICATIONS,\n // so PUBLICATIONs must be dropped explicitly.\n return /*sql*/ `\n DROP PUBLICATION IF EXISTS ${id(defaultPublication)};\n DROP PUBLICATION IF EXISTS ${id(metadataPublication)};\n DROP SCHEMA IF EXISTS ${id(schema)} CASCADE;\n `;\n}\n\nconst internalShardConfigSchema = v.object({\n publications: v.array(v.string()),\n ddlDetection: v.boolean(),\n});\n\nexport type InternalShardConfig = v.Infer<typeof internalShardConfigSchema>;\n\nconst replicaSchema = internalShardConfigSchema.extend({\n slot: v.string(),\n version: v.string(),\n initialSchema: publishedSchema,\n});\n\nexport type Replica = v.Infer<typeof replicaSchema>;\n\n// triggerSetup is run separately in a sub-transaction (i.e. SAVEPOINT) so\n// that a failure (e.g. 
due to lack of superuser permissions) can be handled\n// by continuing in a degraded mode (ddlDetection = false).\nfunction triggerSetup(shard: ShardConfig): string {\n const schema = id(upstreamSchema(shard));\n return (\n createEventTriggerStatements(shard) +\n /*sql*/ `UPDATE ${schema}.\"shardConfig\" SET \"ddlDetection\" = true;`\n );\n}\n\n// Called in initial-sync to store the exact schema that was initially synced.\nexport async function addReplica(\n sql: PostgresDB,\n shard: ShardID,\n slot: string,\n replicaVersion: string,\n {tables, indexes}: PublishedSchema,\n) {\n const schema = upstreamSchema(shard);\n const synced: PublishedSchema = {tables, indexes};\n await sql`\n INSERT INTO ${sql(schema)}.replicas (\"slot\", \"version\", \"initialSchema\")\n VALUES (${slot}, ${replicaVersion}, ${synced})`;\n}\n\nexport async function getReplicaAtVersion(\n lc: LogContext,\n sql: PostgresDB,\n shard: ShardID,\n replicaVersion: string,\n): Promise<Replica | null> {\n const schema = sql(upstreamSchema(shard));\n const result = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true\n WHERE version = ${replicaVersion};\n `;\n if (result.length === 0) {\n // log out all the replicas and the joined shardConfig\n const allReplicas = await sql`\n SELECT * FROM ${schema}.replicas JOIN ${schema}.\"shardConfig\" ON true`;\n lc.debug?.(`Replica not found in: ${JSON.stringify(allReplicas)}`);\n return null;\n }\n return v.parse(result[0], replicaSchema, 'passthrough');\n}\n\nexport async function getInternalShardConfig(\n sql: PostgresDB,\n shard: ShardID,\n): Promise<InternalShardConfig> {\n const result = await sql`\n SELECT \"publications\", \"ddlDetection\"\n FROM ${sql(upstreamSchema(shard))}.\"shardConfig\";\n `;\n assert(result.length === 1);\n return v.parse(result[0], internalShardConfigSchema, 'passthrough');\n}\n\n/**\n * Sets up and returns all publications (including internal ones) for\n * the given shard.\n */\nexport async function setupTablesAndReplication(\n lc: LogContext,\n sql: PostgresTransaction,\n requested: ShardConfig,\n) {\n const {publications} = requested;\n // Validate requested publications.\n for (const pub of publications) {\n if (pub.startsWith('_')) {\n throw new Error(\n `Publication names starting with \"_\" are reserved for internal use.\\n` +\n `Please use a different name for publication \"${pub}\".`,\n );\n }\n }\n const allPublications: string[] = [];\n\n // Setup application publications.\n if (publications.length) {\n const results = await sql<{pubname: string}[]>`\n SELECT pubname from pg_publication WHERE pubname IN ${sql(\n publications,\n )}`.values();\n\n if (results.length !== publications.length) {\n throw new Error(\n `Unknown or invalid publications. Specified: [${publications}]. 
Found: [${results.flat()}]`,\n );\n }\n allPublications.push(...publications);\n } else {\n const defaultPublication = defaultPublicationName(\n requested.appID,\n requested.shardNum,\n );\n await sql`\n DROP PUBLICATION IF EXISTS ${sql(defaultPublication)}`;\n await sql`\n CREATE PUBLICATION ${sql(defaultPublication)} \n FOR TABLES IN SCHEMA public\n WITH (publish_via_partition_root = true)`;\n allPublications.push(defaultPublication);\n }\n\n const metadataPublication = metadataPublicationName(\n requested.appID,\n requested.shardNum,\n );\n allPublications.push(metadataPublication);\n\n const shard = {...requested, publications: allPublications};\n\n // Setup the global tables and shard tables / publications.\n await sql.unsafe(globalSetup(shard) + shardSetup(shard, metadataPublication));\n\n const pubs = await getPublicationInfo(sql, allPublications);\n await replicaIdentitiesForTablesWithoutPrimaryKeys(pubs)?.apply(lc, sql);\n\n await setupTriggers(lc, sql, shard);\n}\n\nexport async function setupTriggers(\n lc: LogContext,\n tx: PostgresTransaction,\n shard: ShardConfig,\n) {\n try {\n await tx.savepoint(sub => sub.unsafe(triggerSetup(shard)));\n } catch (e) {\n if (\n !(\n e instanceof postgres.PostgresError &&\n e.code === PG_INSUFFICIENT_PRIVILEGE\n )\n ) {\n throw e;\n }\n // If triggerSetup() fails, replication continues in ddlDetection=false mode.\n lc.warn?.(\n `Unable to create event triggers for schema change detection:\\n\\n` +\n `\"${e.hint ?? e.message}\"\\n\\n` +\n `Proceeding in degraded mode: schema changes will halt replication,\\n` +\n `requiring the replica to be reset (manually or with --auto-reset).`,\n );\n }\n}\n\nexport function validatePublications(\n lc: LogContext,\n published: PublicationInfo,\n) {\n // Verify that all publications export the proper events.\n published.publications.forEach(pub => {\n if (\n !pub.pubinsert ||\n !pub.pubtruncate ||\n !pub.pubdelete ||\n !pub.pubtruncate\n ) {\n // TODO: Make APIError?\n throw new Error(\n `PUBLICATION ${pub.pubname} must publish insert, update, delete, and truncate`,\n );\n }\n });\n\n published.tables.forEach(table => validate(lc, table, published.indexes));\n}\n\ntype ReplicaIdentities = {\n apply(lc: LogContext, db: PostgresDB): Promise<void>;\n};\n\nexport function replicaIdentitiesForTablesWithoutPrimaryKeys(\n pubs: PublishedSchema,\n): ReplicaIdentities | undefined {\n const replicaIdentities: {\n schema: string;\n tableName: string;\n indexName: string;\n }[] = [];\n for (const table of pubs.tables) {\n if (!table.primaryKey?.length && table.replicaIdentity === Default) {\n // Look for an index that can serve as the REPLICA IDENTITY USING INDEX. It must be:\n // - UNIQUE\n // - NOT NULL columns\n // - not deferrable (i.e. 
isImmediate)\n // - not partial (are already filtered out)\n //\n // https://www.postgresql.org/docs/current/sql-altertable.html#SQL-ALTERTABLE-REPLICA-IDENTITY\n const {schema, name: tableName} = table;\n for (const {columns, name: indexName} of pubs.indexes.filter(\n idx =>\n idx.schema === schema &&\n idx.tableName === tableName &&\n idx.unique &&\n idx.isImmediate,\n )) {\n if (Object.keys(columns).some(col => !table.columns[col].notNull)) {\n continue; // Only indexes with all NOT NULL columns are suitable.\n }\n replicaIdentities.push({schema, tableName, indexName});\n break;\n }\n }\n }\n\n if (replicaIdentities.length === 0) {\n return undefined;\n }\n return {\n apply: async (lc: LogContext, sql: PostgresDB) => {\n for (const {schema, tableName, indexName} of replicaIdentities) {\n lc.info?.(\n `setting \"${indexName}\" as the REPLICA IDENTITY for \"${tableName}\"`,\n );\n await sql`\n ALTER TABLE ${sql(schema)}.${sql(tableName)} \n REPLICA IDENTITY USING INDEX ${sql(indexName)}`;\n }\n },\n };\n}\n"],"names":["v.object","v.array","v.string","v.boolean","v.parse"],"mappings":";;;;;;;;;;;;AAoBO,SAAS,0BAA0B,EAAC,SAAe;AACxD,SAAO,IAAI,KAAK;AAClB;AAEO,SAAS,sBAAsB,EAAC,OAAO,YAAoB;AAChE,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAEO,SAAS,sBAAsB,OAAgB;AACpD,QAAM,EAAC,OAAO,aAAY,MAAM,KAAK;AACrC,SAAO,GAAG,KAAK,IAAI,QAAQ;AAC7B;AAMO,SAAS,0BAA0B,OAAgB;AAGxD,SAAO,GAAG,sBAAsB,KAAK,CAAC,IAAI,WAAW,KAAK,KAAK;AACjE;AAEO,SAAS,mBAAmB,OAAgB;AACjD,SAAO,sBAAsB,KAAK,IAAI,KAAK,IAAA;AAC7C;AAEA,SAAS,uBAAuB,OAAe,SAA0B;AACvE,SAAO,IAAI,KAAK,WAAW,OAAO;AACpC;AAEO,SAAS,wBACd,OACA,SACA;AACA,SAAO,IAAI,KAAK,aAAa,OAAO;AACtC;AAGA,SAAS,YAAY,OAAsB;AACzC,QAAM,MAAM,GAAG,UAAU,KAAK,CAAC;AAE/B;AAAA;AAAA,IAAe;AAAA,gCACe,GAAG;AAAA;AAAA,+BAEJ,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAUH,GAAG;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,iCASD,GAAG;AAAA;AAAA,uBAEb,GAAG;AAAA;AAAA,gBAEV,GAAG;AAAA;AAAA;AAEnB;AAEA,eAAsB,mBAAmB,IAAgB,OAAc;AACrE,QAAM,GAAG,OAAO,YAAY,KAAK,CAAC;AACpC;AAEO,SAAS,0BAA0B,QAAgB;AACxD;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAUO,SAAS,4BAA4B,QAAgB;AAC1D;AAAA;AAAA,IAAe;AAAA,iBACA,MAAM;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOvB;AAEO,MAAM,qBAAqB;AAE3B,SAAS,WACd,aACA,qBACQ;AACR,QAAM,MAAM,GAAG,UAAU,WAAW,CAAC;AACrC,QAAM,QAAQ,GAAG,eAAe,WAAW,CAAC;AAE5C,QAAM,OAAO,CAAC,GAAG,YAAY,YAAY,EAAE,KAAA;AAC3C,SAAO,KAAK,SAAS,mBAAmB,CAAC;AAEzC;AAAA;AAAA,IAAe;AAAA,gCACe,KAAK;AAAA;AAAA,IAEjC,0BAA0B,KAAK,CAAC;AAAA,IAChC,4BAA4B,KAAK,CAAC;AAAA;AAAA,+BAEP,GAAG,mBAAmB,CAAC;AAAA,uBAC/B,GAAG,mBAAmB,CAAC;AAAA,gBAC9B,GAAG,yBAAyB,KAAK,eAAe,KAAK;AAAA;AAAA,iBAEpD,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,gBAQ7B,KAAK,KAAK,kBAAkB;AAAA;AAAA;AAAA;AAAA,cAI9B,QAAQ,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA,iBAIV,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAMtB;AAEO,SAAS,UAAU,OAAe,SAAkC;AACzE,QAAM,SAAS,GAAG,KAAK,IAAI,OAAO;AAClC,QAAM,sBAAsB,wBAAwB,OAAO,OAAO;AAClE,QAAM,qBAAqB,uBAAuB,OAAO,OAAO;AAIhE;AAAA;AAAA,IAAe;AAAA,iCACgB,GAAG,kBAAkB,CAAC;AAAA,iCACtB,GAAG,mBAAmB,CAAC;AAAA,4BAC5B,GAAG,MAAM,CAAC;AAAA;AAAA;AAEtC;AAEA,MAAM,4BAA4BA,OAAS;AAAA,EACzC,cAAcC,MAAQC,QAAU;AAAA,EAChC,cAAcC,QAAE;AAClB,CAAC;AAID,MAAM,gBAAgB,0BAA0B,OAAO;AAAA,EACrD,MAAMD,OAAE;AAAA,EACR,SAASA,OAAE;AAAA,EACX,eAAe;AACjB,CAAC;AAOD,SAAS,aAAa,OAA4B;AAChD,QAAM,SAAS,GAAG,eAAe,KAAK,CAAC;AACvC,SACE,6BAA6B,KAAK;AAAA,EAC1B,UAAU,MAAM;AAE5B;AAGA,eAAsB,WACpB,KACA,OACA,MACA,gBACA,EAAC,QAAQ,WACT;AACA,QAAM,SAAS,eAAe,KAAK;AACnC,QAAM,SAA0B,EAAC,QAAQ,QAAA;AACzC,QAAM;AAAA,kBACU,IAAI,MAAM,CAAC;AAAA,gBACb,IAAI,KAAK,cAAc,KAAK,MAAM;AAClD;AAEA,eAAsB,oBACpB,IACA,KACA,OACA,gBACyB;AACzB,QAAM
,SAAS,IAAI,eAAe,KAAK,CAAC;AACxC,QAAM,SAAS,MAAM;AAAA,oBACH,MAAM,kBAAkB,MAAM;AAAA,wBAC1B,cAAc;AAAA;AAEpC,MAAI,OAAO,WAAW,GAAG;AAEvB,UAAM,cAAc,MAAM;AAAA,sBACR,MAAM,kBAAkB,MAAM;AAChD,OAAG,QAAQ,yBAAyB,KAAK,UAAU,WAAW,CAAC,EAAE;AACjE,WAAO;AAAA,EACT;AACA,SAAOE,MAAQ,OAAO,CAAC,GAAG,eAAe,aAAa;AACxD;AAEA,eAAsB,uBACpB,KACA,OAC8B;AAC9B,QAAM,SAAS,MAAM;AAAA;AAAA,aAEV,IAAI,eAAe,KAAK,CAAC,CAAC;AAAA;AAErC,SAAO,OAAO,WAAW,CAAC;AAC1B,SAAOA,MAAQ,OAAO,CAAC,GAAG,2BAA2B,aAAa;AACpE;AAMA,eAAsB,0BACpB,IACA,KACA,WACA;AACA,QAAM,EAAC,iBAAgB;AAEvB,aAAW,OAAO,cAAc;AAC9B,QAAI,IAAI,WAAW,GAAG,GAAG;AACvB,YAAM,IAAI;AAAA,QACR;AAAA,+CACkD,GAAG;AAAA,MAAA;AAAA,IAEzD;AAAA,EACF;AACA,QAAM,kBAA4B,CAAA;AAGlC,MAAI,aAAa,QAAQ;AACvB,UAAM,UAAU,MAAM;AAAA,0DACgC;AAAA,MACpD;AAAA,IAAA,CACD,GAAG,OAAA;AAEJ,QAAI,QAAQ,WAAW,aAAa,QAAQ;AAC1C,YAAM,IAAI;AAAA,QACR,gDAAgD,YAAY,cAAc,QAAQ,MAAM;AAAA,MAAA;AAAA,IAE5F;AACA,oBAAgB,KAAK,GAAG,YAAY;AAAA,EACtC,OAAO;AACL,UAAM,qBAAqB;AAAA,MACzB,UAAU;AAAA,MACV,UAAU;AAAA,IAAA;AAEZ,UAAM;AAAA,mCACyB,IAAI,kBAAkB,CAAC;AACtD,UAAM;AAAA,2BACiB,IAAI,kBAAkB,CAAC;AAAA;AAAA;AAG9C,oBAAgB,KAAK,kBAAkB;AAAA,EACzC;AAEA,QAAM,sBAAsB;AAAA,IAC1B,UAAU;AAAA,IACV,UAAU;AAAA,EAAA;AAEZ,kBAAgB,KAAK,mBAAmB;AAExC,QAAM,QAAQ,EAAC,GAAG,WAAW,cAAc,gBAAA;AAG3C,QAAM,IAAI,OAAO,YAAY,KAAK,IAAI,WAAW,OAAO,mBAAmB,CAAC;AAE5E,QAAM,OAAO,MAAM,mBAAmB,KAAK,eAAe;AAC1D,QAAM,6CAA6C,IAAI,GAAG,MAAM,IAAI,GAAG;AAEvE,QAAM,cAAc,IAAI,KAAK,KAAK;AACpC;AAEA,eAAsB,cACpB,IACA,IACA,OACA;AACA,MAAI;AACF,UAAM,GAAG,UAAU,CAAA,QAAO,IAAI,OAAO,aAAa,KAAK,CAAC,CAAC;AAAA,EAC3D,SAAS,GAAG;AACV,QACE,EACE,aAAa,SAAS,iBACtB,EAAE,SAAS,4BAEb;AACA,YAAM;AAAA,IACR;AAEA,OAAG;AAAA,MACD;AAAA;AAAA,GACM,EAAE,QAAQ,EAAE,OAAO;AAAA;AAAA;AAAA;AAAA,IAAA;AAAA,EAI7B;AACF;AAEO,SAAS,qBACd,IACA,WACA;AAEA,YAAU,aAAa,QAAQ,CAAA,QAAO;AACpC,QACE,CAAC,IAAI,aACL,CAAC,IAAI,eACL,CAAC,IAAI,aACL,CAAC,IAAI,aACL;AAEA,YAAM,IAAI;AAAA,QACR,eAAe,IAAI,OAAO;AAAA,MAAA;AAAA,IAE9B;AAAA,EACF,CAAC;AAED,YAAU,OAAO,QAAQ,CAAA,UAAS,SAAS,IAAI,OAAO,UAAU,OAAO,CAAC;AAC1E;AAMO,SAAS,6CACd,MAC+B;AAC/B,QAAM,oBAIA,CAAA;AACN,aAAW,SAAS,KAAK,QAAQ;AAC/B,QAAI,CAAC,MAAM,YAAY,UAAU,MAAM,oBAAoB,SAAS;AAQlE,YAAM,EAAC,QAAQ,MAAM,UAAA,IAAa;AAClC,iBAAW,EAAC,SAAS,MAAM,UAAA,KAAc,KAAK,QAAQ;AAAA,QACpD,CAAA,QACE,IAAI,WAAW,UACf,IAAI,cAAc,aAClB,IAAI,UACJ,IAAI;AAAA,MAAA,GACL;AACD,YAAI,OAAO,KAAK,OAAO,EAAE,KAAK,CAAA,QAAO,CAAC,MAAM,QAAQ,GAAG,EAAE,OAAO,GAAG;AACjE;AAAA,QACF;AACA,0BAAkB,KAAK,EAAC,QAAQ,WAAW,WAAU;AACrD;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAEA,MAAI,kBAAkB,WAAW,GAAG;AAClC,WAAO;AAAA,EACT;AACA,SAAO;AAAA,IACL,OAAO,OAAO,IAAgB,QAAoB;AAChD,iBAAW,EAAC,QAAQ,WAAW,UAAA,KAAc,mBAAmB;AAC9D,WAAG;AAAA,UACD,YAAY,SAAS,kCAAkC,SAAS;AAAA,QAAA;AAElE,cAAM;AAAA,sBACQ,IAAI,MAAM,CAAC,IAAI,IAAI,SAAS,CAAC;AAAA,yCACV,IAAI,SAAS,CAAC;AAAA,MACjD;AAAA,IACF;AAAA,EAAA;AAEJ;"}
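
Taken together, the shard.js hunks above remove the schemaVersions table from globalSetup() and from the shard's metadata publication; upstream databases created before this release shed the table via the v11 step sketched earlier. In template form (copied from the hunk; the resolved publication name is an assumption based on metadataPublicationName(), e.g. "_zero_metadata_0" for the default appID "zero" and shard 0), the publication now reads:

    // Metadata publication after this change. ${app} and ${shard} are the
    // quoted app and upstream-shard schema names from shardSetup();
    // "schemaVersions" is no longer published.
    const publicationSql = /*sql*/ `
      DROP PUBLICATION IF EXISTS ${id(metadataPublication)};
      CREATE PUBLICATION ${id(metadataPublication)}
        FOR TABLE ${app}."permissions", TABLE ${shard}."clients", ${shard}."mutations";
    `;
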
@@ -1 +1 @@
- {"version":3,"file":"replica-schema.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAGjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAC3D,OAAO,EAEL,KAAK,uBAAuB,EAE7B,MAAM,4BAA4B,CAAC;AAYpC,wBAAsB,WAAW,CAC/B,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,QAAQ,KAAK,OAAO,CAAC,IAAI,CAAC,GAC3D,OAAO,CAAC,IAAI,CAAC,CAoBf;AAED,wBAAsB,cAAc,CAClC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,iBAgBf;AAED,eAAO,MAAM,yBAAyB,EAAE,uBA4BvC,CAAC"}
+ {"version":3,"file":"replica-schema.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAGjD,OAAO,KAAK,EAAC,QAAQ,EAAC,MAAM,8BAA8B,CAAC;AAE3D,OAAO,EAEL,KAAK,uBAAuB,EAE7B,MAAM,4BAA4B,CAAC;AAYpC,wBAAsB,WAAW,CAC/B,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,EACd,WAAW,EAAE,CAAC,EAAE,EAAE,UAAU,EAAE,EAAE,EAAE,QAAQ,KAAK,OAAO,CAAC,IAAI,CAAC,GAC3D,OAAO,CAAC,IAAI,CAAC,CAoBf;AAED,wBAAsB,cAAc,CAClC,GAAG,EAAE,UAAU,EACf,SAAS,EAAE,MAAM,EACjB,MAAM,EAAE,MAAM,iBAgBf;AAED,eAAO,MAAM,yBAAyB,EAAE,uBAsCvC,CAAC"}
@@ -1,8 +1,9 @@
  import { SqliteError } from "@rocicorp/zero-sqlite3";
  import { must } from "../../../../shared/src/must.js";
- import { runSchemaMigrations } from "../../db/migration-lite.js";
  import { listTables } from "../../db/lite-tables.js";
+ import { runSchemaMigrations } from "../../db/migration-lite.js";
  import { AutoResetSignal } from "../change-streamer/schema/tables.js";
+ import { initChangeLog } from "../replicator/schema/change-log.js";
  import { recordEvent, CREATE_RUNTIME_EVENTS_TABLE } from "../replicator/schema/replication-state.js";
  import { ColumnMetadataStore, CREATE_COLUMN_METADATA_TABLE } from "./column-metadata.js";
  async function initReplica(log, debugName, dbPath, initialSync) {
@@ -66,6 +67,12 @@ const schemaVersionMigrationMap = {
  const tables = listTables(db);
  must(store).populateFromExistingTables(tables);
  }
+ },
+ 7: {
+ migrateSchema: (_, db) => {
+ db.exec(`DELETE FROM "_zero.changeLog"`);
+ initChangeLog(db);
+ }
  }
  };
  export {
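
The hunk above adds step 7 to the SQLite replica's schemaVersionMigrationMap. Per the comments in the source map that follows, the legacy _zero.changeLog table is kept so the replica file stays readable by older zero-caches, but its rows are deleted for space savings, and initChangeLog() creates the replacement _zero.changeLog2 table. The new entry, restated with those comments inline:

    // Lite-schema migration 7 (comments per the embedded source below).
    7: {
      migrateSchema: (_, db) => {
        // Keep the legacy table for compatibility with older zero-caches,
        // but truncate it: historic changes are never read back.
        db.exec(`DELETE FROM "_zero.changeLog"`);
        // Creates the new _zero.changeLog2 table.
        initChangeLog(db);
      },
    },
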
@@ -1 +1 @@
- {"version":3,"file":"replica-schema.js","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../db/migration-lite.ts';\nimport {listTables} from '../../db/lite-tables.ts';\nimport {AutoResetSignal} from '../change-streamer/schema/tables.ts';\nimport {\n CREATE_RUNTIME_EVENTS_TABLE,\n recordEvent,\n} from '../replicator/schema/replication-state.ts';\nimport {\n ColumnMetadataStore,\n CREATE_COLUMN_METADATA_TABLE,\n} from './column-metadata.ts';\n\nexport async function initReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n initialSync: (lc: LogContext, tx: Database) => Promise<void>,\n): Promise<void> {\n const setupMigration: Migration = {\n migrateSchema: (log, tx) => initialSync(log, tx),\n minSafeVersion: 1,\n };\n\n try {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n setupMigration,\n schemaVersionMigrationMap,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_CORRUPT') {\n throw new AutoResetSignal(e.message);\n }\n throw e;\n }\n}\n\nexport async function upgradeReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n) {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n // setupMigration should never be invoked\n {\n migrateSchema: () => {\n throw new Error(\n 'This should only be called for already synced replicas',\n );\n },\n },\n schemaVersionMigrationMap,\n );\n}\n\nexport const schemaVersionMigrationMap: IncrementalMigrationMap = {\n // There's no incremental migration from v1. Just reset the replica.\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('upgrading replica to new schema');\n },\n minSafeVersion: 3,\n },\n\n 5: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_RUNTIME_EVENTS_TABLE);\n },\n migrateData: (_, db) => {\n recordEvent(db, 'upgrade');\n },\n },\n\n 6: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_COLUMN_METADATA_TABLE);\n },\n migrateData: (_, db) => {\n const store = ColumnMetadataStore.getInstance(db);\n const tables = listTables(db);\n must(store).populateFromExistingTables(tables);\n },\n },\n};\n"],"names":["log"],"mappings":";;;;;;;AAoBA,eAAsB,YACpB,KACA,WACA,QACA,aACe;AACf,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAACA,MAAK,OAAO,YAAYA,MAAK,EAAE;AAAA,IAC/C,gBAAgB;AAAA,EAAA;AAGlB,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AACV,QAAI,aAAa,eAAe,EAAE,SAAS,kBAAkB;AAC3D,YAAM,IAAI,gBAAgB,EAAE,OAAO;AAAA,IACrC;AACA,UAAM;AAAA,EACR;AACF;AAEA,eAAsB,eACpB,KACA,WACA,QACA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR;AAAA,QAAA;AAAA,MAEJ;AAAA,IAAA;AAAA,IAEF;AAAA,EAAA;AAEJ;AAEO,MAAM,4BAAqD;AAAA;AAAA,EAEhE,GAAG;AAAA,IACD,eAAe,MAAM;AACnB,YAAM,IAAI,gBAAgB,iCAAiC;AAAA,IAC7D;AAAA,IACA,gBAAgB;AAAA,EAAA;AAAA,EAGlB,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,2BAA2B;AAAA,IACrC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,kBAAY,IAAI,SAAS;AAAA,IAC3B;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,4BAA4B;AAAA,IACtC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,YAAM,QAAQ,oBAAoB,YAAY,EAAE;AAChD,YAAM,SAAS,WAAW,EAAE;AAC5B,WAAK,KAAK,EAAE,2BAA2B,MAAM;AAAA,IAC/C;AAAA,EAAA;AAEJ;"}
+ {"version":3,"file":"replica-schema.js","sources":["../../../../../../zero-cache/src/services/change-source/replica-schema.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {SqliteError} from '@rocicorp/zero-sqlite3';\nimport {must} from '../../../../shared/src/must.ts';\nimport type {Database} from '../../../../zqlite/src/db.ts';\nimport {listTables} from '../../db/lite-tables.ts';\nimport {\n runSchemaMigrations,\n type IncrementalMigrationMap,\n type Migration,\n} from '../../db/migration-lite.ts';\nimport {AutoResetSignal} from '../change-streamer/schema/tables.ts';\nimport {initChangeLog} from '../replicator/schema/change-log.ts';\nimport {\n CREATE_RUNTIME_EVENTS_TABLE,\n recordEvent,\n} from '../replicator/schema/replication-state.ts';\nimport {\n ColumnMetadataStore,\n CREATE_COLUMN_METADATA_TABLE,\n} from './column-metadata.ts';\n\nexport async function initReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n initialSync: (lc: LogContext, tx: Database) => Promise<void>,\n): Promise<void> {\n const setupMigration: Migration = {\n migrateSchema: (log, tx) => initialSync(log, tx),\n minSafeVersion: 1,\n };\n\n try {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n setupMigration,\n schemaVersionMigrationMap,\n );\n } catch (e) {\n if (e instanceof SqliteError && e.code === 'SQLITE_CORRUPT') {\n throw new AutoResetSignal(e.message);\n }\n throw e;\n }\n}\n\nexport async function upgradeReplica(\n log: LogContext,\n debugName: string,\n dbPath: string,\n) {\n await runSchemaMigrations(\n log,\n debugName,\n dbPath,\n // setupMigration should never be invoked\n {\n migrateSchema: () => {\n throw new Error(\n 'This should only be called for already synced replicas',\n );\n },\n },\n schemaVersionMigrationMap,\n );\n}\n\nexport const schemaVersionMigrationMap: IncrementalMigrationMap = {\n // There's no incremental migration from v1. Just reset the replica.\n 4: {\n migrateSchema: () => {\n throw new AutoResetSignal('upgrading replica to new schema');\n },\n minSafeVersion: 3,\n },\n\n 5: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_RUNTIME_EVENTS_TABLE);\n },\n migrateData: (_, db) => {\n recordEvent(db, 'upgrade');\n },\n },\n\n 6: {\n migrateSchema: (_, db) => {\n db.exec(CREATE_COLUMN_METADATA_TABLE);\n },\n migrateData: (_, db) => {\n const store = ColumnMetadataStore.getInstance(db);\n const tables = listTables(db);\n must(store).populateFromExistingTables(tables);\n },\n },\n\n 7: {\n migrateSchema: (_, db) => {\n // Note: The original \"changeLog\" table is kept so that the replica file\n // is compatible with older zero-caches. 
However, it is truncated for\n // space savings (since historic changes were never read).\n db.exec(`DELETE FROM \"_zero.changeLog\"`);\n initChangeLog(db); // Creates _zero.changeLog2\n },\n },\n};\n"],"names":["log"],"mappings":";;;;;;;;AAqBA,eAAsB,YACpB,KACA,WACA,QACA,aACe;AACf,QAAM,iBAA4B;AAAA,IAChC,eAAe,CAACA,MAAK,OAAO,YAAYA,MAAK,EAAE;AAAA,IAC/C,gBAAgB;AAAA,EAAA;AAGlB,MAAI;AACF,UAAM;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAAA,EAEJ,SAAS,GAAG;AACV,QAAI,aAAa,eAAe,EAAE,SAAS,kBAAkB;AAC3D,YAAM,IAAI,gBAAgB,EAAE,OAAO;AAAA,IACrC;AACA,UAAM;AAAA,EACR;AACF;AAEA,eAAsB,eACpB,KACA,WACA,QACA;AACA,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,eAAe,MAAM;AACnB,cAAM,IAAI;AAAA,UACR;AAAA,QAAA;AAAA,MAEJ;AAAA,IAAA;AAAA,IAEF;AAAA,EAAA;AAEJ;AAEO,MAAM,4BAAqD;AAAA;AAAA,EAEhE,GAAG;AAAA,IACD,eAAe,MAAM;AACnB,YAAM,IAAI,gBAAgB,iCAAiC;AAAA,IAC7D;AAAA,IACA,gBAAgB;AAAA,EAAA;AAAA,EAGlB,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,2BAA2B;AAAA,IACrC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,kBAAY,IAAI,SAAS;AAAA,IAC3B;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AACxB,SAAG,KAAK,4BAA4B;AAAA,IACtC;AAAA,IACA,aAAa,CAAC,GAAG,OAAO;AACtB,YAAM,QAAQ,oBAAoB,YAAY,EAAE;AAChD,YAAM,SAAS,WAAW,EAAE;AAC5B,WAAK,KAAK,EAAE,2BAA2B,MAAM;AAAA,IAC/C;AAAA,EAAA;AAAA,EAGF,GAAG;AAAA,IACD,eAAe,CAAC,GAAG,OAAO;AAIxB,SAAG,KAAK,+BAA+B;AACvC,oBAAc,EAAE;AAAA,IAClB;AAAA,EAAA;AAEJ;"}
@@ -9,7 +9,7 @@ import { type ChangeStreamerService } from './change-streamer.ts';
  /**
  * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.
  */
- export declare function initializeStreamer(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, changeDB: PostgresDB, changeSource: ChangeSource, subscriptionState: SubscriptionState, autoReset: boolean, setTimeoutFn?: typeof setTimeout): Promise<ChangeStreamerService>;
+ export declare function initializeStreamer(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, changeDB: PostgresDB, changeSource: ChangeSource, subscriptionState: SubscriptionState, autoReset: boolean, backPressureThreshold: number, setTimeoutFn?: typeof setTimeout): Promise<ChangeStreamerService>;
13
13
  /**
14
14
  * Internally all Downstream messages (not just commits) are given a watermark.
15
15
  * These are used for internal ordering for:
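The hunk above threads a new required `backPressureThreshold: number` parameter into `initializeStreamer`, just before the optional `setTimeoutFn`. The sketch below only illustrates where the new argument sits in a call; the placeholder values and the `any`-typed dependencies are assumptions, not zero-cache's actual wiring.

```ts
// Illustrative call only: the positional arguments match the declaration
// above, but every literal is a placeholder and the deps are typed `any` to
// avoid depending on package-internal types.
import {initializeStreamer} from './change-streamer-service.ts';

async function startStreamer(deps: {
  lc: any;                // LogContext
  shard: any;             // ShardID
  changeDB: any;          // PostgresDB
  changeSource: any;      // ChangeSource
  subscriptionState: any; // SubscriptionState
}) {
  return initializeStreamer(
    deps.lc,
    deps.shard,
    'task-0',                   // taskID (placeholder)
    'replication-manager:4849', // discoveryAddress (placeholder)
    'ws',                       // discoveryProtocol (placeholder)
    deps.changeDB,
    deps.changeSource,
    deps.subscriptionState,
    true,                       // autoReset
    1000,                       // backPressureThreshold (new required parameter in this diff)
    // setTimeoutFn stays optional and defaults to setTimeout
  );
}
```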
@@ -1 +1 @@
- {"version":3,"file":"change-streamer-service.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAClD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,uBAAuB,CAAC;AACnD,OAAO,KAAK,EAAC,IAAI,EAAE,MAAM,EAAC,MAAM,wBAAwB,CAAC;AAEzD,OAAO,EAEL,KAAK,gBAAgB,EACrB,KAAK,mBAAmB,EACzB,MAAM,iDAAiD,CAAC;AACzD,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,+CAA+C,CAAC;AAExF,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,2CAA2C,CAAC;AAMjF,OAAO,EACL,KAAK,qBAAqB,EAG3B,MAAM,sBAAsB,CAAC;AAY9B;;GAEG;AACH,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,QAAQ,EAAE,UAAU,EACpB,YAAY,EAAE,YAAY,EAC1B,iBAAiB,EAAE,iBAAiB,EACpC,SAAS,EAAE,OAAO,EAClB,YAAY,oBAAa,GACxB,OAAO,CAAC,qBAAqB,CAAC,CAwBhC;AAED;;;;;;;;;GASG;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,SAAS,EAAE,MAAM,EAAE,gBAAgB,CAAC,CAAC;AAEtE,MAAM,MAAM,YAAY,GAAG;IACzB,OAAO,EAAE,MAAM,CAAC,mBAAmB,CAAC,CAAC;IAErC;;;;OAIG;IACH,IAAI,EAAE,IAAI,CAAC,oBAAoB,CAAC,CAAC;CAClC,CAAC;AAEF,+EAA+E;AAC/E,MAAM,WAAW,YAAY;IAC3B;;;OAGG;IACH,WAAW,CAAC,cAAc,EAAE,MAAM,GAAG,OAAO,CAAC,YAAY,CAAC,CAAC;CAC5D"}
+ {"version":3,"file":"change-streamer-service.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AASjD,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAClD,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,uBAAuB,CAAC;AACnD,OAAO,KAAK,EAAC,IAAI,EAAE,MAAM,EAAC,MAAM,wBAAwB,CAAC;AAEzD,OAAO,EAEL,KAAK,gBAAgB,EACrB,KAAK,mBAAmB,EACzB,MAAM,iDAAiD,CAAC;AACzD,OAAO,KAAK,EAAC,oBAAoB,EAAC,MAAM,+CAA+C,CAAC;AAExF,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,2CAA2C,CAAC;AAMjF,OAAO,EACL,KAAK,qBAAqB,EAG3B,MAAM,sBAAsB,CAAC;AAY9B;;GAEG;AACH,wBAAsB,kBAAkB,CACtC,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,QAAQ,EAAE,UAAU,EACpB,YAAY,EAAE,YAAY,EAC1B,iBAAiB,EAAE,iBAAiB,EACpC,SAAS,EAAE,OAAO,EAClB,qBAAqB,EAAE,MAAM,EAC7B,YAAY,oBAAa,GACxB,OAAO,CAAC,qBAAqB,CAAC,CAyBhC;AAED;;;;;;;;;GASG;AACH,MAAM,MAAM,iBAAiB,GAAG,CAAC,SAAS,EAAE,MAAM,EAAE,gBAAgB,CAAC,CAAC;AAEtE,MAAM,MAAM,YAAY,GAAG;IACzB,OAAO,EAAE,MAAM,CAAC,mBAAmB,CAAC,CAAC;IAErC;;;;OAIG;IACH,IAAI,EAAE,IAAI,CAAC,oBAAoB,CAAC,CAAC;CAClC,CAAC;AAEF,+EAA+E;AAC/E,MAAM,WAAW,YAAY;IAC3B;;;OAGG;IACH,WAAW,CAAC,cAAc,EAAE,MAAM,GAAG,OAAO,CAAC,YAAY,CAAC,CAAC;CAC5D"}
@@ -13,7 +13,7 @@ import { initChangeStreamerSchema } from "./schema/init.js";
  import { ensureReplicationConfig, markResetRequired, AutoResetSignal } from "./schema/tables.js";
  import { Storer } from "./storer.js";
  import { Subscriber } from "./subscriber.js";
- async function initializeStreamer(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, changeSource, subscriptionState, autoReset, setTimeoutFn = setTimeout) {
+ async function initializeStreamer(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, changeSource, subscriptionState, autoReset, backPressureThreshold, setTimeoutFn = setTimeout) {
  await initChangeStreamerSchema(lc, changeDB, shard);
  await ensureReplicationConfig(
  lc,
@@ -33,6 +33,7 @@ async function initializeStreamer(lc, shard, taskID, discoveryAddress, discovery
  replicaVersion,
  changeSource,
  autoReset,
+ backPressureThreshold,
  setTimeoutFn
  );
  }
@@ -63,7 +64,7 @@ class ChangeStreamerImpl {
  "Count of replicated transactions"
  );
  #stream;
- constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, source, autoReset, setTimeoutFn = setTimeout) {
+ constructor(lc, shard, taskID, discoveryAddress, discoveryProtocol, changeDB, replicaVersion, source, autoReset, backPressureThreshold, setTimeoutFn = setTimeout) {
  this.id = `change-streamer`;
  this.#lc = lc.withContext("component", "change-streamer");
  this.#shard = shard;
@@ -79,7 +80,8 @@ class ChangeStreamerImpl {
  changeDB,
  replicaVersion,
  (consumed) => this.#stream?.acks.push(["status", consumed[1], consumed[2]]),
- (err) => this.stop(err)
+ (err) => this.stop(err),
+ backPressureThreshold
  );
  this.#forwarder = new Forwarder();
  this.#autoReset = autoReset;
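The hunk above passes the new `backPressureThreshold` through to the `Storer`. In the full source (embedded in the source map below), the run loop already calls `this.#storer.readyForMore()` after each forwarded change and awaits the returned Promise so the Storer can exert back pressure. The sketch below illustrates that producer/consumer pattern in isolation; how the Storer actually derives "backlogged" from `backPressureThreshold` is not shown in this diff, so the queue-length check here is an assumption.

```ts
// Sketch of the back-pressure pattern in the run() loop: readyForMore()
// returns undefined when there is headroom, or a Promise that the (single)
// producer awaits until the consumer drains below the threshold.
class BoundedQueue<T> {
  readonly #items: T[] = [];
  #drained: (() => void) | undefined;

  constructor(readonly backPressureThreshold: number) {}

  push(item: T): void {
    this.#items.push(item);
  }

  // Consumer side: pop an item and release the producer once under the threshold.
  pop(): T | undefined {
    const item = this.#items.shift();
    if (this.#items.length < this.backPressureThreshold && this.#drained) {
      this.#drained();
      this.#drained = undefined;
    }
    return item;
  }

  // Producer side: undefined means "no back pressure"; otherwise await the Promise.
  readyForMore(): Promise<void> | undefined {
    if (this.#items.length < this.backPressureThreshold) {
      return undefined;
    }
    return new Promise(resolve => (this.#drained = resolve));
  }
}

// Producer loop, mirroring the shape used by ChangeStreamerImpl.run().
async function produce(queue: BoundedQueue<string>, changes: AsyncIterable<string>) {
  for await (const change of changes) {
    queue.push(change);
    const readyForMore = queue.readyForMore();
    if (readyForMore) {
      await readyForMore; // let the consumer exert back pressure
    }
  }
}
```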
@@ -1 +1 @@
- {"version":3,"file":"change-streamer-service.js","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {unreachable} from '../../../../shared/src/asserts.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport {\n min,\n type AtLeastOne,\n type LexiVersion,\n} from '../../types/lexi-version.ts';\nimport type {PostgresDB} from '../../types/pg.ts';\nimport type {ShardID} from '../../types/shards.ts';\nimport type {Sink, Source} from '../../types/streams.ts';\nimport {Subscription} from '../../types/subscription.ts';\nimport {\n type ChangeStreamControl,\n type ChangeStreamData,\n type ChangeStreamMessage,\n} from '../change-source/protocol/current/downstream.ts';\nimport type {ChangeSourceUpstream} from '../change-source/protocol/current/upstream.ts';\nimport {publishReplicationError} from '../replicator/replication-status.ts';\nimport type {SubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {\n DEFAULT_MAX_RETRY_DELAY_MS,\n RunningState,\n UnrecoverableError,\n} from '../running-state.ts';\nimport {\n type ChangeStreamerService,\n type Downstream,\n type SubscriberContext,\n} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {Forwarder} from './forwarder.ts';\nimport {initChangeStreamerSchema} from './schema/init.ts';\nimport {\n AutoResetSignal,\n ensureReplicationConfig,\n markResetRequired,\n} from './schema/tables.ts';\nimport {Storer} from './storer.ts';\nimport {Subscriber} from './subscriber.ts';\n\n/**\n * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.\n */\nexport async function initializeStreamer(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n changeSource: ChangeSource,\n subscriptionState: SubscriptionState,\n autoReset: boolean,\n setTimeoutFn = setTimeout,\n): Promise<ChangeStreamerService> {\n // Make sure the ChangeLog DB is set up.\n await initChangeStreamerSchema(lc, changeDB, shard);\n await ensureReplicationConfig(\n lc,\n changeDB,\n subscriptionState,\n shard,\n autoReset,\n );\n\n const {replicaVersion} = subscriptionState;\n return new ChangeStreamerImpl(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n changeSource,\n autoReset,\n setTimeoutFn,\n );\n}\n\n/**\n * Internally all Downstream messages (not just commits) are given a watermark.\n * These are used for internal ordering for:\n * 1. Replaying new changes in the Storer\n * 2. Filtering old changes in the Subscriber\n *\n * However, only the watermark for `Commit` messages are exposed to\n * subscribers, as that is the only semantically correct watermark to\n * use for tracking a position in a replication stream.\n */\nexport type WatermarkedChange = [watermark: string, ChangeStreamData];\n\nexport type ChangeStream = {\n changes: Source<ChangeStreamMessage>;\n\n /**\n * A Sink to push the {@link StatusMessage}s that reflect Commits\n * that have been successfully stored by the {@link Storer}, or\n * downstream {@link StatusMessage}s henceforth.\n */\n acks: Sink<ChangeSourceUpstream>;\n};\n\n/** Encapsulates an upstream-specific implementation of a stream of Changes. 
*/\nexport interface ChangeSource {\n /**\n * Starts a stream of changes starting after the specific watermark,\n * with a corresponding sink for upstream acknowledgements.\n */\n startStream(afterWatermark: string): Promise<ChangeStream>;\n}\n\n/**\n * Upstream-agnostic dispatch of messages in a {@link ChangeStreamMessage} to a\n * {@link Forwarder} and {@link Storer} to execute the forward-store-ack\n * procedure described in {@link ChangeStreamer}.\n *\n * ### Subscriber Catchup\n *\n * Connecting clients first need to be \"caught up\" to the current watermark\n * (from stored change log entries) before new entries are forwarded to\n * them. This is non-trivial because the replication stream may be in the\n * middle of a pending streamed Transaction for which some entries have\n * already been forwarded but are not yet committed to the store.\n *\n *\n * ```\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * | Historic changes in storage | Pending (streamed) tx | Next tx\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * Replication stream\n * > > > > > > > > >\n * ^ ---> required catchup ---> ^\n * Subscriber watermark Subscription begins\n * ```\n *\n * Preemptively buffering the changes of every pending transaction\n * would be wasteful and consume too much memory for large transactions.\n *\n * Instead, the streamer synchronously dispatches changes and subscriptions\n * to the {@link Forwarder} and the {@link Storer} such that the two\n * components are aligned as to where in the stream the subscription started.\n * The two components then coordinate catchup and handoff via the\n * {@link Subscriber} object with the following algorithm:\n *\n * * If the streamer is in the middle of a pending Transaction, the\n * Subscriber is \"queued\" on both the Forwarder and the Storer. In this\n * state, new changes are *not* forwarded to the Subscriber, and catchup\n * is not yet executed.\n * * Once the commit message for the pending Transaction is processed\n * by the Storer, it begins catchup on the Subscriber (with a READONLY\n * snapshot so that it does not block subsequent storage operations).\n * This catchup is thus guaranteed to load the change log entries of\n * that last Transaction.\n * * When the Forwarder processes that same commit message, it moves the\n * Subscriber from the \"queued\" to the \"active\" set of clients such that\n * the Subscriber begins receiving new changes, starting from the next\n * Transaction.\n * * The Subscriber does not forward those changes, however, if its catchup\n * is not complete. Until then, it buffers the changes in memory.\n * * Once catchup is complete, the buffered changes are immediately sent\n * and the Subscriber henceforth forwards changes as they are received.\n *\n * In the (common) case where the streamer is not in the middle of a pending\n * transaction when a subscription begins, the Storer begins catchup\n * immediately and the Forwarder directly adds the Subscriber to its active\n * set. However, the Subscriber still buffers any forwarded messages until\n * its catchup is complete.\n *\n * ### Watermarks and ordering\n *\n * The ChangeStreamerService depends on its {@link ChangeSource} to send\n * changes in contiguous [`begin`, `data` ..., `data`, `commit`] sequences\n * in commit order. 
This follows Postgres's Logical Replication Protocol\n * Message Flow:\n *\n * https://www.postgresql.org/docs/16/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW\n *\n * > The logical replication protocol sends individual transactions one by one.\n * > This means that all messages between a pair of Begin and Commit messages belong to the same transaction.\n *\n * In order to correctly replay (new) and filter (old) messages to subscribers\n * at different points in the replication stream, these changes must be assigned\n * watermarks such that they preserve the order in which they were received\n * from the ChangeSource.\n *\n * A previous implementation incorrectly derived these watermarks from the Postgres\n * Log Sequence Numbers (LSN) of each message. However, LSNs from concurrent,\n * non-conflicting transactions can overlap, which can result in a `begin` message\n * with an earlier LSN arriving after a `commit` message. For example, the\n * changes for these transactions:\n *\n * ```\n * LSN: 1 2 3 4 5 6 7 8 9 10\n * tx1: begin data data data commit\n * tx2: begin data data data commit\n * ```\n *\n * will arrive as:\n *\n * ```\n * begin1, data2, data4, data6, commit8, begin3, data5, data7, data9, commit10\n * ```\n *\n * Thus, LSN of non-commit messages are not suitable for tracking the sorting\n * order of the replication stream.\n *\n * Instead, the ChangeStreamer uses the following algorithm for deterministic\n * catchup and filtering of changes:\n *\n * * A `commit` message is assigned to a watermark corresponding to its LSN.\n * These are guaranteed to be in commit order by definition.\n *\n * * `begin` and `data` messages are assigned to the watermark of the\n * preceding `commit` (the previous transaction, or the replication\n * slot's starting LSN) plus 1. This guarantees that they will be sorted\n * after the previously commit transaction even if their LSNs came before it.\n * This is referred to as the `preCommitWatermark`.\n *\n * * In the ChangeLog DB, messages have a secondary sort column `pos`, which is\n * the position of the message within its transaction, with the `begin` message\n * starting at `0`. This guarantees that `begin` and `data` messages will be\n * fetched in the original ChangeSource order during catchup.\n *\n * `begin` and `data` messages share the same watermark, but this is sufficient for\n * Subscriber filtering because subscribers only know about the `commit` watermarks\n * exposed in the `Downstream` `Commit` message. The Subscriber object thus compares\n * the internal watermarks of the incoming messages against the commit watermark of\n * the caller, updating the watermark at every `Commit` message that is forwarded.\n *\n * ### Cleanup\n *\n * As mentioned in the {@link ChangeStreamer} documentation: \"the ChangeStreamer\n * uses a combination of [the \"initial\", i.e. 
backup-derived watermark and] ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\"\n *\n * More concretely:\n *\n * * The `initial`, backup-derived watermark is the earliest to which cleanup\n * should ever happen.\n *\n * * However, it is possible for the replica backup to be *ahead* of a connected\n * subscriber; and if a network error causes that subscriber to retry from its\n * last watermark, the change streamer must support it.\n *\n * Thus, before cleaning up to an `initial` backup-derived watermark, the change\n * streamer first confirms that all connected subscribers have also passed\n * that watermark.\n */\nclass ChangeStreamerImpl implements ChangeStreamerService {\n readonly id: string;\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #changeDB: PostgresDB;\n readonly #replicaVersion: string;\n readonly #source: ChangeSource;\n readonly #storer: Storer;\n readonly #forwarder: Forwarder;\n\n readonly #autoReset: boolean;\n readonly #state: RunningState;\n readonly #initialWatermarks = new Set<string>();\n\n // Starting the (Postgres) ChangeStream results in killing the previous\n // Postgres subscriber, potentially creating a gap in which the old\n // change-streamer has shut down and the new change-streamer has not yet\n // been recognized as \"healthy\" (and thus does not get any requests).\n //\n // To minimize this gap, delay starting the ChangeStream until the first\n // request from a `serving` replicator, indicating that higher level\n // load-balancing / routing logic has begun routing requests to this task.\n readonly #serving = resolver();\n\n readonly #txCounter = getOrCreateCounter(\n 'replication',\n 'transactions',\n 'Count of replicated transactions',\n );\n\n #stream: ChangeStream | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n replicaVersion: string,\n source: ChangeSource,\n autoReset: boolean,\n setTimeoutFn = setTimeout,\n ) {\n this.id = `change-streamer`;\n this.#lc = lc.withContext('component', 'change-streamer');\n this.#shard = shard;\n this.#changeDB = changeDB;\n this.#replicaVersion = replicaVersion;\n this.#source = source;\n this.#storer = new Storer(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n consumed => this.#stream?.acks.push(['status', consumed[1], consumed[2]]),\n err => this.stop(err),\n );\n this.#forwarder = new Forwarder();\n this.#autoReset = autoReset;\n this.#state = new RunningState(this.id, undefined, setTimeoutFn);\n }\n\n async run() {\n this.#lc.info?.('starting change stream');\n\n // Once this change-streamer acquires \"ownership\" of the change DB,\n // it is safe to start the storer.\n await this.#storer.assumeOwnership();\n // The storer will, in turn, detect changes to ownership and stop\n // the change-streamer appropriately.\n this.#storer\n .run()\n .then(() => this.stop())\n .catch(e => this.stop(e));\n\n while (this.#state.shouldRun()) {\n let err: unknown;\n let watermark: string | null = null;\n try {\n const startAfter = await this.#storer.getLastWatermarkToStartStream();\n const stream = await this.#source.startStream(startAfter);\n this.#stream = stream;\n this.#state.resetBackoff();\n watermark = null;\n\n for await (const change of stream.changes) {\n const [type, msg] = change;\n switch (type) {\n case 'status':\n this.#storer.status(change); // 
storer acks once it gets through its queue\n continue;\n case 'control':\n await this.#handleControlMessage(msg);\n continue; // control messages are not stored/forwarded\n case 'begin':\n watermark = change[2].commitWatermark;\n break;\n case 'commit':\n if (watermark !== change[2].watermark) {\n throw new UnrecoverableError(\n `commit watermark ${change[2].watermark} does not match 'begin' watermark ${watermark}`,\n );\n }\n this.#txCounter.add(1);\n break;\n default:\n if (watermark === null) {\n throw new UnrecoverableError(\n `${type} change (${msg.tag}) received before 'begin' message`,\n );\n }\n break;\n }\n\n this.#storer.store([watermark, change]);\n this.#forwarder.forward([watermark, change]);\n\n if (type === 'commit') {\n watermark = null;\n }\n\n // Allow the storer to exert back pressure.\n const readyForMore = this.#storer.readyForMore();\n if (readyForMore) {\n await readyForMore;\n }\n }\n } catch (e) {\n err = e;\n } finally {\n this.#stream?.changes.cancel();\n this.#stream = undefined;\n }\n\n // When the change stream is interrupted, abort any pending transaction.\n if (watermark) {\n this.#lc.warn?.(`aborting interrupted transaction ${watermark}`);\n this.#storer.abort();\n this.#forwarder.forward([watermark, ['rollback', {tag: 'rollback'}]]);\n }\n\n await this.#state.backoff(this.#lc, err);\n }\n this.#lc.info?.('ChangeStreamer stopped');\n }\n\n async #handleControlMessage(msg: ChangeStreamControl[1]) {\n this.#lc.info?.('received control message', msg);\n const {tag} = msg;\n\n switch (tag) {\n case 'reset-required':\n await markResetRequired(this.#changeDB, this.#shard);\n await publishReplicationError(\n this.#lc,\n 'Replicating',\n msg.message ?? 'Resync required',\n msg.errorDetails,\n );\n if (this.#autoReset) {\n this.#lc.warn?.('shutting down for auto-reset');\n await this.stop(new AutoResetSignal());\n }\n break;\n default:\n unreachable(tag);\n }\n }\n\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {\n const {protocolVersion, id, mode, replicaVersion, watermark, initial} = ctx;\n if (mode === 'serving') {\n this.#serving.resolve();\n }\n const downstream = Subscription.create<Downstream>({\n cleanup: () => this.#forwarder.remove(subscriber),\n });\n const subscriber = new Subscriber(\n protocolVersion,\n id,\n watermark,\n downstream,\n );\n if (replicaVersion !== this.#replicaVersion) {\n this.#lc.warn?.(\n `rejecting subscriber at replica version ${replicaVersion}`,\n );\n subscriber.close(\n ErrorType.WrongReplicaVersion,\n `current replica version is ${\n this.#replicaVersion\n } (requested ${replicaVersion})`,\n );\n } else {\n this.#lc.debug?.(`adding subscriber ${subscriber.id}`);\n\n this.#forwarder.add(subscriber);\n this.#storer.catchup(subscriber, mode);\n\n if (initial) {\n this.scheduleCleanup(watermark);\n }\n }\n return Promise.resolve(downstream);\n }\n\n scheduleCleanup(watermark: string) {\n const origSize = this.#initialWatermarks.size;\n this.#initialWatermarks.add(watermark);\n\n if (origSize === 0) {\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n\n async getChangeLogState(): Promise<{\n replicaVersion: string;\n minWatermark: string;\n }> {\n const minWatermark = await this.#storer.getMinWatermarkForCatchup();\n return {\n replicaVersion: this.#replicaVersion,\n minWatermark: minWatermark ?? 
this.#replicaVersion,\n };\n }\n\n async #purgeOldChanges(): Promise<void> {\n const initial = [...this.#initialWatermarks];\n if (initial.length === 0) {\n this.#lc.warn?.('No initial watermarks to check for cleanup'); // Not expected.\n return;\n }\n const current = [...this.#forwarder.getAcks()];\n if (current.length === 0) {\n // Also not expected, but possible (e.g. subscriber connects, then disconnects).\n // Bail to be safe.\n this.#lc.warn?.('No subscribers to confirm cleanup');\n return;\n }\n try {\n const earliestInitial = min(...(initial as AtLeastOne<LexiVersion>));\n const earliestCurrent = min(...(current as AtLeastOne<LexiVersion>));\n if (earliestCurrent < earliestInitial) {\n this.#lc.info?.(\n `At least one client is behind backup (${earliestCurrent} < ${earliestInitial})`,\n );\n } else {\n const deleted = await this.#storer.purgeRecordsBefore(earliestInitial);\n this.#lc.info?.(`Purged ${deleted} changes before ${earliestInitial}`);\n this.#initialWatermarks.delete(earliestInitial);\n }\n } finally {\n if (this.#initialWatermarks.size) {\n // If there are unpurged watermarks to check, schedule the next purge.\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n }\n\n async stop(err?: unknown) {\n this.#state.stop(this.#lc, err);\n this.#stream?.changes.cancel();\n await this.#storer.stop();\n }\n}\n\n// The delay between receiving an initial, backup-based watermark\n// and performing a check of whether to purge records before it.\n// This delay should be long enough to handle situations like the following:\n//\n// 1. `litestream restore` downloads a backup for the `replication-manager`\n// 2. `replication-manager` starts up and runs this `change-streamer`\n// 3. `zero-cache`s that are running on a different replica connect to this\n// `change-streamer` after exponential backoff retries.\n//\n// It is possible for a `zero-cache`[3] to be behind the backup restored [1].\n// This cleanup delay (30 seconds) is thus set to be a value comfortably\n// longer than the max delay for exponential backoff (10 seconds) in\n// `services/running-state.ts`. 
This allows the `zero-cache` [3] to reconnect\n// so that the `change-streamer` can track its progress and know when it has\n// surpassed the initial watermark of the backup [1].\nconst CLEANUP_DELAY_MS = DEFAULT_MAX_RETRY_DELAY_MS * 3;\n"],"names":["ErrorType.WrongReplicaVersion"],"mappings":";;;;;;;;;;;;;;;AA6CA,eAAsB,mBACpB,IACA,OACA,QACA,kBACA,mBACA,UACA,cACA,mBACA,WACA,eAAe,YACiB;AAEhC,QAAM,yBAAyB,IAAI,UAAU,KAAK;AAClD,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAGF,QAAM,EAAC,mBAAkB;AACzB,SAAO,IAAI;AAAA,IACT;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;AA4KA,MAAM,mBAAoD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,EACA;AAAA,EACA,yCAAyB,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUzB,WAAW,SAAA;AAAA,EAEX,aAAa;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGF;AAAA,EAEA,YACE,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,QACA,WACA,eAAe,YACf;AACA,SAAK,KAAK;AACV,SAAK,MAAM,GAAG,YAAY,aAAa,iBAAiB;AACxD,SAAK,SAAS;AACd,SAAK,YAAY;AACjB,SAAK,kBAAkB;AACvB,SAAK,UAAU;AACf,SAAK,UAAU,IAAI;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,CAAA,aAAY,KAAK,SAAS,KAAK,KAAK,CAAC,UAAU,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC;AAAA,MACxE,CAAA,QAAO,KAAK,KAAK,GAAG;AAAA,IAAA;AAEtB,SAAK,aAAa,IAAI,UAAA;AACtB,SAAK,aAAa;AAClB,SAAK,SAAS,IAAI,aAAa,KAAK,IAAI,QAAW,YAAY;AAAA,EACjE;AAAA,EAEA,MAAM,MAAM;AACV,SAAK,IAAI,OAAO,wBAAwB;AAIxC,UAAM,KAAK,QAAQ,gBAAA;AAGnB,SAAK,QACF,IAAA,EACA,KAAK,MAAM,KAAK,KAAA,CAAM,EACtB,MAAM,CAAA,MAAK,KAAK,KAAK,CAAC,CAAC;AAE1B,WAAO,KAAK,OAAO,aAAa;AAC9B,UAAI;AACJ,UAAI,YAA2B;AAC/B,UAAI;AACF,cAAM,aAAa,MAAM,KAAK,QAAQ,8BAAA;AACtC,cAAM,SAAS,MAAM,KAAK,QAAQ,YAAY,UAAU;AACxD,aAAK,UAAU;AACf,aAAK,OAAO,aAAA;AACZ,oBAAY;AAEZ,yBAAiB,UAAU,OAAO,SAAS;AACzC,gBAAM,CAAC,MAAM,GAAG,IAAI;AACpB,kBAAQ,MAAA;AAAA,YACN,KAAK;AACH,mBAAK,QAAQ,OAAO,MAAM;AAC1B;AAAA,YACF,KAAK;AACH,oBAAM,KAAK,sBAAsB,GAAG;AACpC;AAAA;AAAA,YACF,KAAK;AACH,0BAAY,OAAO,CAAC,EAAE;AACtB;AAAA,YACF,KAAK;AACH,kBAAI,cAAc,OAAO,CAAC,EAAE,WAAW;AACrC,sBAAM,IAAI;AAAA,kBACR,oBAAoB,OAAO,CAAC,EAAE,SAAS,qCAAqC,SAAS;AAAA,gBAAA;AAAA,cAEzF;AACA,mBAAK,WAAW,IAAI,CAAC;AACrB;AAAA,YACF;AACE,kBAAI,cAAc,MAAM;AACtB,sBAAM,IAAI;AAAA,kBACR,GAAG,IAAI,YAAY,IAAI,GAAG;AAAA,gBAAA;AAAA,cAE9B;AACA;AAAA,UAAA;AAGJ,eAAK,QAAQ,MAAM,CAAC,WAAW,MAAM,CAAC;AACtC,eAAK,WAAW,QAAQ,CAAC,WAAW,MAAM,CAAC;AAE3C,cAAI,SAAS,UAAU;AACrB,wBAAY;AAAA,UACd;AAGA,gBAAM,eAAe,KAAK,QAAQ,aAAA;AAClC,cAAI,cAAc;AAChB,kBAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF,SAAS,GAAG;AACV,cAAM;AAAA,MACR,UAAA;AACE,aAAK,SAAS,QAAQ,OAAA;AACtB,aAAK,UAAU;AAAA,MACjB;AAGA,UAAI,WAAW;AACb,aAAK,IAAI,OAAO,oCAAoC,SAAS,EAAE;AAC/D,aAAK,QAAQ,MAAA;AACb,aAAK,WAAW,QAAQ,CAAC,WAAW,CAAC,YAAY,EAAC,KAAK,WAAA,CAAW,CAAC,CAAC;AAAA,MACtE;AAEA,YAAM,KAAK,OAAO,QAAQ,KAAK,KAAK,GAAG;AAAA,IACzC;AACA,SAAK,IAAI,OAAO,wBAAwB;AAAA,EAC1C;AAAA,EAEA,MAAM,sBAAsB,KAA6B;AACvD,SAAK,IAAI,OAAO,4BAA4B,GAAG;AAC/C,UAAM,EAAC,QAAO;AAEd,YAAQ,KAAA;AAAA,MACN,KAAK;AACH,cAAM,kBAAkB,KAAK,WAAW,KAAK,MAAM;AACnD,cAAM;AAAA,UACJ,KAAK;AAAA,UACL;AAAA,UACA,IAAI,WAAW;AAAA,UACf,IAAI;AAAA,QAAA;AAEN,YAAI,KAAK,YAAY;AACnB,eAAK,IAAI,OAAO,8BAA8B;AAC9C,gBAAM,KAAK,KAAK,IAAI,iBAAiB;AAAA,QACvC;AACA;AAAA,MACF;AACE,oBAAe;AAAA,IAAA;AAAA,EAErB;AAAA,EAEA,UAAU,KAAqD;AAC7D,UAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,WAAW,YAAW;AACxE,QAAI,SAAS,WAAW;AACtB,WAAK,SAAS,QAAA;AAAA,IAChB;AACA,UAAM,aAAa,aAAa,OAAmB;AAAA,MACjD,SAAS,MAAM,KAAK,WAAW,OAAO,UAAU;AAAA,IAAA,CACjD;AACD,UAAM,aAAa,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI,mBAAmB,KAAK,iBAAiB;AAC3C,WAAK,IAAI;AAAA,QACP,2CAA2C,cAAc;AA
AA,MAAA;AAE3D,iBAAW;AAAA,QACTA;AAAAA,QACA,8BACE,KAAK,eACP,eAAe,cAAc;AAAA,MAAA;AAAA,IAEjC,OAAO;AACL,WAAK,IAAI,QAAQ,qBAAqB,WAAW,EAAE,EAAE;AAErD,WAAK,WAAW,IAAI,UAAU;AAC9B,WAAK,QAAQ,QAAQ,YAAY,IAAI;AAErC,UAAI,SAAS;AACX,aAAK,gBAAgB,SAAS;AAAA,MAChC;AAAA,IACF;AACA,WAAO,QAAQ,QAAQ,UAAU;AAAA,EACnC;AAAA,EAEA,gBAAgB,WAAmB;AACjC,UAAM,WAAW,KAAK,mBAAmB;AACzC,SAAK,mBAAmB,IAAI,SAAS;AAErC,QAAI,aAAa,GAAG;AAClB,WAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,IACxE;AAAA,EACF;AAAA,EAEA,MAAM,oBAGH;AACD,UAAM,eAAe,MAAM,KAAK,QAAQ,0BAAA;AACxC,WAAO;AAAA,MACL,gBAAgB,KAAK;AAAA,MACrB,cAAc,gBAAgB,KAAK;AAAA,IAAA;AAAA,EAEvC;AAAA,EAEA,MAAM,mBAAkC;AACtC,UAAM,UAAU,CAAC,GAAG,KAAK,kBAAkB;AAC3C,QAAI,QAAQ,WAAW,GAAG;AACxB,WAAK,IAAI,OAAO,4CAA4C;AAC5D;AAAA,IACF;AACA,UAAM,UAAU,CAAC,GAAG,KAAK,WAAW,SAAS;AAC7C,QAAI,QAAQ,WAAW,GAAG;AAGxB,WAAK,IAAI,OAAO,mCAAmC;AACnD;AAAA,IACF;AACA,QAAI;AACF,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,UAAI,kBAAkB,iBAAiB;AACrC,aAAK,IAAI;AAAA,UACP,yCAAyC,eAAe,MAAM,eAAe;AAAA,QAAA;AAAA,MAEjF,OAAO;AACL,cAAM,UAAU,MAAM,KAAK,QAAQ,mBAAmB,eAAe;AACrE,aAAK,IAAI,OAAO,UAAU,OAAO,mBAAmB,eAAe,EAAE;AACrE,aAAK,mBAAmB,OAAO,eAAe;AAAA,MAChD;AAAA,IACF,UAAA;AACE,UAAI,KAAK,mBAAmB,MAAM;AAEhC,aAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,MACxE;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,KAAK,KAAe;AACxB,SAAK,OAAO,KAAK,KAAK,KAAK,GAAG;AAC9B,SAAK,SAAS,QAAQ,OAAA;AACtB,UAAM,KAAK,QAAQ,KAAA;AAAA,EACrB;AACF;AAiBA,MAAM,mBAAmB,6BAA6B;"}
+ {"version":3,"file":"change-streamer-service.js","sources":["../../../../../../zero-cache/src/services/change-streamer/change-streamer-service.ts"],"sourcesContent":["import type {LogContext} from '@rocicorp/logger';\nimport {resolver} from '@rocicorp/resolver';\nimport {unreachable} from '../../../../shared/src/asserts.ts';\nimport {getOrCreateCounter} from '../../observability/metrics.ts';\nimport {\n min,\n type AtLeastOne,\n type LexiVersion,\n} from '../../types/lexi-version.ts';\nimport type {PostgresDB} from '../../types/pg.ts';\nimport type {ShardID} from '../../types/shards.ts';\nimport type {Sink, Source} from '../../types/streams.ts';\nimport {Subscription} from '../../types/subscription.ts';\nimport {\n type ChangeStreamControl,\n type ChangeStreamData,\n type ChangeStreamMessage,\n} from '../change-source/protocol/current/downstream.ts';\nimport type {ChangeSourceUpstream} from '../change-source/protocol/current/upstream.ts';\nimport {publishReplicationError} from '../replicator/replication-status.ts';\nimport type {SubscriptionState} from '../replicator/schema/replication-state.ts';\nimport {\n DEFAULT_MAX_RETRY_DELAY_MS,\n RunningState,\n UnrecoverableError,\n} from '../running-state.ts';\nimport {\n type ChangeStreamerService,\n type Downstream,\n type SubscriberContext,\n} from './change-streamer.ts';\nimport * as ErrorType from './error-type-enum.ts';\nimport {Forwarder} from './forwarder.ts';\nimport {initChangeStreamerSchema} from './schema/init.ts';\nimport {\n AutoResetSignal,\n ensureReplicationConfig,\n markResetRequired,\n} from './schema/tables.ts';\nimport {Storer} from './storer.ts';\nimport {Subscriber} from './subscriber.ts';\n\n/**\n * Performs initialization and schema migrations to initialize a ChangeStreamerImpl.\n */\nexport async function initializeStreamer(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n changeSource: ChangeSource,\n subscriptionState: SubscriptionState,\n autoReset: boolean,\n backPressureThreshold: number,\n setTimeoutFn = setTimeout,\n): Promise<ChangeStreamerService> {\n // Make sure the ChangeLog DB is set up.\n await initChangeStreamerSchema(lc, changeDB, shard);\n await ensureReplicationConfig(\n lc,\n changeDB,\n subscriptionState,\n shard,\n autoReset,\n );\n\n const {replicaVersion} = subscriptionState;\n return new ChangeStreamerImpl(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n changeSource,\n autoReset,\n backPressureThreshold,\n setTimeoutFn,\n );\n}\n\n/**\n * Internally all Downstream messages (not just commits) are given a watermark.\n * These are used for internal ordering for:\n * 1. Replaying new changes in the Storer\n * 2. Filtering old changes in the Subscriber\n *\n * However, only the watermark for `Commit` messages are exposed to\n * subscribers, as that is the only semantically correct watermark to\n * use for tracking a position in a replication stream.\n */\nexport type WatermarkedChange = [watermark: string, ChangeStreamData];\n\nexport type ChangeStream = {\n changes: Source<ChangeStreamMessage>;\n\n /**\n * A Sink to push the {@link StatusMessage}s that reflect Commits\n * that have been successfully stored by the {@link Storer}, or\n * downstream {@link StatusMessage}s henceforth.\n */\n acks: Sink<ChangeSourceUpstream>;\n};\n\n/** Encapsulates an upstream-specific implementation of a stream of Changes. 
*/\nexport interface ChangeSource {\n /**\n * Starts a stream of changes starting after the specific watermark,\n * with a corresponding sink for upstream acknowledgements.\n */\n startStream(afterWatermark: string): Promise<ChangeStream>;\n}\n\n/**\n * Upstream-agnostic dispatch of messages in a {@link ChangeStreamMessage} to a\n * {@link Forwarder} and {@link Storer} to execute the forward-store-ack\n * procedure described in {@link ChangeStreamer}.\n *\n * ### Subscriber Catchup\n *\n * Connecting clients first need to be \"caught up\" to the current watermark\n * (from stored change log entries) before new entries are forwarded to\n * them. This is non-trivial because the replication stream may be in the\n * middle of a pending streamed Transaction for which some entries have\n * already been forwarded but are not yet committed to the store.\n *\n *\n * ```\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * | Historic changes in storage | Pending (streamed) tx | Next tx\n * ------------------------------- - - - - - - - - - - - - - - - - - - -\n * Replication stream\n * > > > > > > > > >\n * ^ ---> required catchup ---> ^\n * Subscriber watermark Subscription begins\n * ```\n *\n * Preemptively buffering the changes of every pending transaction\n * would be wasteful and consume too much memory for large transactions.\n *\n * Instead, the streamer synchronously dispatches changes and subscriptions\n * to the {@link Forwarder} and the {@link Storer} such that the two\n * components are aligned as to where in the stream the subscription started.\n * The two components then coordinate catchup and handoff via the\n * {@link Subscriber} object with the following algorithm:\n *\n * * If the streamer is in the middle of a pending Transaction, the\n * Subscriber is \"queued\" on both the Forwarder and the Storer. In this\n * state, new changes are *not* forwarded to the Subscriber, and catchup\n * is not yet executed.\n * * Once the commit message for the pending Transaction is processed\n * by the Storer, it begins catchup on the Subscriber (with a READONLY\n * snapshot so that it does not block subsequent storage operations).\n * This catchup is thus guaranteed to load the change log entries of\n * that last Transaction.\n * * When the Forwarder processes that same commit message, it moves the\n * Subscriber from the \"queued\" to the \"active\" set of clients such that\n * the Subscriber begins receiving new changes, starting from the next\n * Transaction.\n * * The Subscriber does not forward those changes, however, if its catchup\n * is not complete. Until then, it buffers the changes in memory.\n * * Once catchup is complete, the buffered changes are immediately sent\n * and the Subscriber henceforth forwards changes as they are received.\n *\n * In the (common) case where the streamer is not in the middle of a pending\n * transaction when a subscription begins, the Storer begins catchup\n * immediately and the Forwarder directly adds the Subscriber to its active\n * set. However, the Subscriber still buffers any forwarded messages until\n * its catchup is complete.\n *\n * ### Watermarks and ordering\n *\n * The ChangeStreamerService depends on its {@link ChangeSource} to send\n * changes in contiguous [`begin`, `data` ..., `data`, `commit`] sequences\n * in commit order. 
This follows Postgres's Logical Replication Protocol\n * Message Flow:\n *\n * https://www.postgresql.org/docs/16/protocol-logical-replication.html#PROTOCOL-LOGICAL-MESSAGES-FLOW\n *\n * > The logical replication protocol sends individual transactions one by one.\n * > This means that all messages between a pair of Begin and Commit messages belong to the same transaction.\n *\n * In order to correctly replay (new) and filter (old) messages to subscribers\n * at different points in the replication stream, these changes must be assigned\n * watermarks such that they preserve the order in which they were received\n * from the ChangeSource.\n *\n * A previous implementation incorrectly derived these watermarks from the Postgres\n * Log Sequence Numbers (LSN) of each message. However, LSNs from concurrent,\n * non-conflicting transactions can overlap, which can result in a `begin` message\n * with an earlier LSN arriving after a `commit` message. For example, the\n * changes for these transactions:\n *\n * ```\n * LSN: 1 2 3 4 5 6 7 8 9 10\n * tx1: begin data data data commit\n * tx2: begin data data data commit\n * ```\n *\n * will arrive as:\n *\n * ```\n * begin1, data2, data4, data6, commit8, begin3, data5, data7, data9, commit10\n * ```\n *\n * Thus, LSN of non-commit messages are not suitable for tracking the sorting\n * order of the replication stream.\n *\n * Instead, the ChangeStreamer uses the following algorithm for deterministic\n * catchup and filtering of changes:\n *\n * * A `commit` message is assigned to a watermark corresponding to its LSN.\n * These are guaranteed to be in commit order by definition.\n *\n * * `begin` and `data` messages are assigned to the watermark of the\n * preceding `commit` (the previous transaction, or the replication\n * slot's starting LSN) plus 1. This guarantees that they will be sorted\n * after the previously commit transaction even if their LSNs came before it.\n * This is referred to as the `preCommitWatermark`.\n *\n * * In the ChangeLog DB, messages have a secondary sort column `pos`, which is\n * the position of the message within its transaction, with the `begin` message\n * starting at `0`. This guarantees that `begin` and `data` messages will be\n * fetched in the original ChangeSource order during catchup.\n *\n * `begin` and `data` messages share the same watermark, but this is sufficient for\n * Subscriber filtering because subscribers only know about the `commit` watermarks\n * exposed in the `Downstream` `Commit` message. The Subscriber object thus compares\n * the internal watermarks of the incoming messages against the commit watermark of\n * the caller, updating the watermark at every `Commit` message that is forwarded.\n *\n * ### Cleanup\n *\n * As mentioned in the {@link ChangeStreamer} documentation: \"the ChangeStreamer\n * uses a combination of [the \"initial\", i.e. 
backup-derived watermark and] ACK\n * responses from connected subscribers to determine the watermark up\n * to which it is safe to purge old change log entries.\"\n *\n * More concretely:\n *\n * * The `initial`, backup-derived watermark is the earliest to which cleanup\n * should ever happen.\n *\n * * However, it is possible for the replica backup to be *ahead* of a connected\n * subscriber; and if a network error causes that subscriber to retry from its\n * last watermark, the change streamer must support it.\n *\n * Thus, before cleaning up to an `initial` backup-derived watermark, the change\n * streamer first confirms that all connected subscribers have also passed\n * that watermark.\n */\nclass ChangeStreamerImpl implements ChangeStreamerService {\n readonly id: string;\n readonly #lc: LogContext;\n readonly #shard: ShardID;\n readonly #changeDB: PostgresDB;\n readonly #replicaVersion: string;\n readonly #source: ChangeSource;\n readonly #storer: Storer;\n readonly #forwarder: Forwarder;\n\n readonly #autoReset: boolean;\n readonly #state: RunningState;\n readonly #initialWatermarks = new Set<string>();\n\n // Starting the (Postgres) ChangeStream results in killing the previous\n // Postgres subscriber, potentially creating a gap in which the old\n // change-streamer has shut down and the new change-streamer has not yet\n // been recognized as \"healthy\" (and thus does not get any requests).\n //\n // To minimize this gap, delay starting the ChangeStream until the first\n // request from a `serving` replicator, indicating that higher level\n // load-balancing / routing logic has begun routing requests to this task.\n readonly #serving = resolver();\n\n readonly #txCounter = getOrCreateCounter(\n 'replication',\n 'transactions',\n 'Count of replicated transactions',\n );\n\n #stream: ChangeStream | undefined;\n\n constructor(\n lc: LogContext,\n shard: ShardID,\n taskID: string,\n discoveryAddress: string,\n discoveryProtocol: string,\n changeDB: PostgresDB,\n replicaVersion: string,\n source: ChangeSource,\n autoReset: boolean,\n backPressureThreshold: number,\n setTimeoutFn = setTimeout,\n ) {\n this.id = `change-streamer`;\n this.#lc = lc.withContext('component', 'change-streamer');\n this.#shard = shard;\n this.#changeDB = changeDB;\n this.#replicaVersion = replicaVersion;\n this.#source = source;\n this.#storer = new Storer(\n lc,\n shard,\n taskID,\n discoveryAddress,\n discoveryProtocol,\n changeDB,\n replicaVersion,\n consumed => this.#stream?.acks.push(['status', consumed[1], consumed[2]]),\n err => this.stop(err),\n backPressureThreshold,\n );\n this.#forwarder = new Forwarder();\n this.#autoReset = autoReset;\n this.#state = new RunningState(this.id, undefined, setTimeoutFn);\n }\n\n async run() {\n this.#lc.info?.('starting change stream');\n\n // Once this change-streamer acquires \"ownership\" of the change DB,\n // it is safe to start the storer.\n await this.#storer.assumeOwnership();\n // The storer will, in turn, detect changes to ownership and stop\n // the change-streamer appropriately.\n this.#storer\n .run()\n .then(() => this.stop())\n .catch(e => this.stop(e));\n\n while (this.#state.shouldRun()) {\n let err: unknown;\n let watermark: string | null = null;\n try {\n const startAfter = await this.#storer.getLastWatermarkToStartStream();\n const stream = await this.#source.startStream(startAfter);\n this.#stream = stream;\n this.#state.resetBackoff();\n watermark = null;\n\n for await (const change of stream.changes) {\n const [type, msg] = change;\n switch 
(type) {\n case 'status':\n this.#storer.status(change); // storer acks once it gets through its queue\n continue;\n case 'control':\n await this.#handleControlMessage(msg);\n continue; // control messages are not stored/forwarded\n case 'begin':\n watermark = change[2].commitWatermark;\n break;\n case 'commit':\n if (watermark !== change[2].watermark) {\n throw new UnrecoverableError(\n `commit watermark ${change[2].watermark} does not match 'begin' watermark ${watermark}`,\n );\n }\n this.#txCounter.add(1);\n break;\n default:\n if (watermark === null) {\n throw new UnrecoverableError(\n `${type} change (${msg.tag}) received before 'begin' message`,\n );\n }\n break;\n }\n\n this.#storer.store([watermark, change]);\n this.#forwarder.forward([watermark, change]);\n\n if (type === 'commit') {\n watermark = null;\n }\n\n // Allow the storer to exert back pressure.\n const readyForMore = this.#storer.readyForMore();\n if (readyForMore) {\n await readyForMore;\n }\n }\n } catch (e) {\n err = e;\n } finally {\n this.#stream?.changes.cancel();\n this.#stream = undefined;\n }\n\n // When the change stream is interrupted, abort any pending transaction.\n if (watermark) {\n this.#lc.warn?.(`aborting interrupted transaction ${watermark}`);\n this.#storer.abort();\n this.#forwarder.forward([watermark, ['rollback', {tag: 'rollback'}]]);\n }\n\n await this.#state.backoff(this.#lc, err);\n }\n this.#lc.info?.('ChangeStreamer stopped');\n }\n\n async #handleControlMessage(msg: ChangeStreamControl[1]) {\n this.#lc.info?.('received control message', msg);\n const {tag} = msg;\n\n switch (tag) {\n case 'reset-required':\n await markResetRequired(this.#changeDB, this.#shard);\n await publishReplicationError(\n this.#lc,\n 'Replicating',\n msg.message ?? 'Resync required',\n msg.errorDetails,\n );\n if (this.#autoReset) {\n this.#lc.warn?.('shutting down for auto-reset');\n await this.stop(new AutoResetSignal());\n }\n break;\n default:\n unreachable(tag);\n }\n }\n\n subscribe(ctx: SubscriberContext): Promise<Source<Downstream>> {\n const {protocolVersion, id, mode, replicaVersion, watermark, initial} = ctx;\n if (mode === 'serving') {\n this.#serving.resolve();\n }\n const downstream = Subscription.create<Downstream>({\n cleanup: () => this.#forwarder.remove(subscriber),\n });\n const subscriber = new Subscriber(\n protocolVersion,\n id,\n watermark,\n downstream,\n );\n if (replicaVersion !== this.#replicaVersion) {\n this.#lc.warn?.(\n `rejecting subscriber at replica version ${replicaVersion}`,\n );\n subscriber.close(\n ErrorType.WrongReplicaVersion,\n `current replica version is ${\n this.#replicaVersion\n } (requested ${replicaVersion})`,\n );\n } else {\n this.#lc.debug?.(`adding subscriber ${subscriber.id}`);\n\n this.#forwarder.add(subscriber);\n this.#storer.catchup(subscriber, mode);\n\n if (initial) {\n this.scheduleCleanup(watermark);\n }\n }\n return Promise.resolve(downstream);\n }\n\n scheduleCleanup(watermark: string) {\n const origSize = this.#initialWatermarks.size;\n this.#initialWatermarks.add(watermark);\n\n if (origSize === 0) {\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n\n async getChangeLogState(): Promise<{\n replicaVersion: string;\n minWatermark: string;\n }> {\n const minWatermark = await this.#storer.getMinWatermarkForCatchup();\n return {\n replicaVersion: this.#replicaVersion,\n minWatermark: minWatermark ?? 
this.#replicaVersion,\n };\n }\n\n async #purgeOldChanges(): Promise<void> {\n const initial = [...this.#initialWatermarks];\n if (initial.length === 0) {\n this.#lc.warn?.('No initial watermarks to check for cleanup'); // Not expected.\n return;\n }\n const current = [...this.#forwarder.getAcks()];\n if (current.length === 0) {\n // Also not expected, but possible (e.g. subscriber connects, then disconnects).\n // Bail to be safe.\n this.#lc.warn?.('No subscribers to confirm cleanup');\n return;\n }\n try {\n const earliestInitial = min(...(initial as AtLeastOne<LexiVersion>));\n const earliestCurrent = min(...(current as AtLeastOne<LexiVersion>));\n if (earliestCurrent < earliestInitial) {\n this.#lc.info?.(\n `At least one client is behind backup (${earliestCurrent} < ${earliestInitial})`,\n );\n } else {\n const deleted = await this.#storer.purgeRecordsBefore(earliestInitial);\n this.#lc.info?.(`Purged ${deleted} changes before ${earliestInitial}`);\n this.#initialWatermarks.delete(earliestInitial);\n }\n } finally {\n if (this.#initialWatermarks.size) {\n // If there are unpurged watermarks to check, schedule the next purge.\n this.#state.setTimeout(() => this.#purgeOldChanges(), CLEANUP_DELAY_MS);\n }\n }\n }\n\n async stop(err?: unknown) {\n this.#state.stop(this.#lc, err);\n this.#stream?.changes.cancel();\n await this.#storer.stop();\n }\n}\n\n// The delay between receiving an initial, backup-based watermark\n// and performing a check of whether to purge records before it.\n// This delay should be long enough to handle situations like the following:\n//\n// 1. `litestream restore` downloads a backup for the `replication-manager`\n// 2. `replication-manager` starts up and runs this `change-streamer`\n// 3. `zero-cache`s that are running on a different replica connect to this\n// `change-streamer` after exponential backoff retries.\n//\n// It is possible for a `zero-cache`[3] to be behind the backup restored [1].\n// This cleanup delay (30 seconds) is thus set to be a value comfortably\n// longer than the max delay for exponential backoff (10 seconds) in\n// `services/running-state.ts`. 
This allows the `zero-cache` [3] to reconnect\n// so that the `change-streamer` can track its progress and know when it has\n// surpassed the initial watermark of the backup [1].\nconst CLEANUP_DELAY_MS = DEFAULT_MAX_RETRY_DELAY_MS * 3;\n"],"names":["ErrorType.WrongReplicaVersion"],"mappings":";;;;;;;;;;;;;;;AA6CA,eAAsB,mBACpB,IACA,OACA,QACA,kBACA,mBACA,UACA,cACA,mBACA,WACA,uBACA,eAAe,YACiB;AAEhC,QAAM,yBAAyB,IAAI,UAAU,KAAK;AAClD,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAGF,QAAM,EAAC,mBAAkB;AACzB,SAAO,IAAI;AAAA,IACT;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAEJ;AA4KA,MAAM,mBAAoD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEA;AAAA,EACA;AAAA,EACA,yCAAyB,IAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUzB,WAAW,SAAA;AAAA,EAEX,aAAa;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,EAAA;AAAA,EAGF;AAAA,EAEA,YACE,IACA,OACA,QACA,kBACA,mBACA,UACA,gBACA,QACA,WACA,uBACA,eAAe,YACf;AACA,SAAK,KAAK;AACV,SAAK,MAAM,GAAG,YAAY,aAAa,iBAAiB;AACxD,SAAK,SAAS;AACd,SAAK,YAAY;AACjB,SAAK,kBAAkB;AACvB,SAAK,UAAU;AACf,SAAK,UAAU,IAAI;AAAA,MACjB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,CAAA,aAAY,KAAK,SAAS,KAAK,KAAK,CAAC,UAAU,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC,CAAC;AAAA,MACxE,CAAA,QAAO,KAAK,KAAK,GAAG;AAAA,MACpB;AAAA,IAAA;AAEF,SAAK,aAAa,IAAI,UAAA;AACtB,SAAK,aAAa;AAClB,SAAK,SAAS,IAAI,aAAa,KAAK,IAAI,QAAW,YAAY;AAAA,EACjE;AAAA,EAEA,MAAM,MAAM;AACV,SAAK,IAAI,OAAO,wBAAwB;AAIxC,UAAM,KAAK,QAAQ,gBAAA;AAGnB,SAAK,QACF,IAAA,EACA,KAAK,MAAM,KAAK,KAAA,CAAM,EACtB,MAAM,CAAA,MAAK,KAAK,KAAK,CAAC,CAAC;AAE1B,WAAO,KAAK,OAAO,aAAa;AAC9B,UAAI;AACJ,UAAI,YAA2B;AAC/B,UAAI;AACF,cAAM,aAAa,MAAM,KAAK,QAAQ,8BAAA;AACtC,cAAM,SAAS,MAAM,KAAK,QAAQ,YAAY,UAAU;AACxD,aAAK,UAAU;AACf,aAAK,OAAO,aAAA;AACZ,oBAAY;AAEZ,yBAAiB,UAAU,OAAO,SAAS;AACzC,gBAAM,CAAC,MAAM,GAAG,IAAI;AACpB,kBAAQ,MAAA;AAAA,YACN,KAAK;AACH,mBAAK,QAAQ,OAAO,MAAM;AAC1B;AAAA,YACF,KAAK;AACH,oBAAM,KAAK,sBAAsB,GAAG;AACpC;AAAA;AAAA,YACF,KAAK;AACH,0BAAY,OAAO,CAAC,EAAE;AACtB;AAAA,YACF,KAAK;AACH,kBAAI,cAAc,OAAO,CAAC,EAAE,WAAW;AACrC,sBAAM,IAAI;AAAA,kBACR,oBAAoB,OAAO,CAAC,EAAE,SAAS,qCAAqC,SAAS;AAAA,gBAAA;AAAA,cAEzF;AACA,mBAAK,WAAW,IAAI,CAAC;AACrB;AAAA,YACF;AACE,kBAAI,cAAc,MAAM;AACtB,sBAAM,IAAI;AAAA,kBACR,GAAG,IAAI,YAAY,IAAI,GAAG;AAAA,gBAAA;AAAA,cAE9B;AACA;AAAA,UAAA;AAGJ,eAAK,QAAQ,MAAM,CAAC,WAAW,MAAM,CAAC;AACtC,eAAK,WAAW,QAAQ,CAAC,WAAW,MAAM,CAAC;AAE3C,cAAI,SAAS,UAAU;AACrB,wBAAY;AAAA,UACd;AAGA,gBAAM,eAAe,KAAK,QAAQ,aAAA;AAClC,cAAI,cAAc;AAChB,kBAAM;AAAA,UACR;AAAA,QACF;AAAA,MACF,SAAS,GAAG;AACV,cAAM;AAAA,MACR,UAAA;AACE,aAAK,SAAS,QAAQ,OAAA;AACtB,aAAK,UAAU;AAAA,MACjB;AAGA,UAAI,WAAW;AACb,aAAK,IAAI,OAAO,oCAAoC,SAAS,EAAE;AAC/D,aAAK,QAAQ,MAAA;AACb,aAAK,WAAW,QAAQ,CAAC,WAAW,CAAC,YAAY,EAAC,KAAK,WAAA,CAAW,CAAC,CAAC;AAAA,MACtE;AAEA,YAAM,KAAK,OAAO,QAAQ,KAAK,KAAK,GAAG;AAAA,IACzC;AACA,SAAK,IAAI,OAAO,wBAAwB;AAAA,EAC1C;AAAA,EAEA,MAAM,sBAAsB,KAA6B;AACvD,SAAK,IAAI,OAAO,4BAA4B,GAAG;AAC/C,UAAM,EAAC,QAAO;AAEd,YAAQ,KAAA;AAAA,MACN,KAAK;AACH,cAAM,kBAAkB,KAAK,WAAW,KAAK,MAAM;AACnD,cAAM;AAAA,UACJ,KAAK;AAAA,UACL;AAAA,UACA,IAAI,WAAW;AAAA,UACf,IAAI;AAAA,QAAA;AAEN,YAAI,KAAK,YAAY;AACnB,eAAK,IAAI,OAAO,8BAA8B;AAC9C,gBAAM,KAAK,KAAK,IAAI,iBAAiB;AAAA,QACvC;AACA;AAAA,MACF;AACE,oBAAe;AAAA,IAAA;AAAA,EAErB;AAAA,EAEA,UAAU,KAAqD;AAC7D,UAAM,EAAC,iBAAiB,IAAI,MAAM,gBAAgB,WAAW,YAAW;AACxE,QAAI,SAAS,WAAW;AACtB,WAAK,SAAS,QAAA;AAAA,IAChB;AACA,UAAM,aAAa,aAAa,OAAmB;AAAA,MACjD,SAAS,MAAM,KAAK,WAAW,OAAO,UAAU;AAAA,IAAA,CACjD;AACD,UAAM,aAAa,IAAI;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IAAA;AAEF,QAAI,mBAAmB,KAAK,iBAAiB;AAC3C,WA
AK,IAAI;AAAA,QACP,2CAA2C,cAAc;AAAA,MAAA;AAE3D,iBAAW;AAAA,QACTA;AAAAA,QACA,8BACE,KAAK,eACP,eAAe,cAAc;AAAA,MAAA;AAAA,IAEjC,OAAO;AACL,WAAK,IAAI,QAAQ,qBAAqB,WAAW,EAAE,EAAE;AAErD,WAAK,WAAW,IAAI,UAAU;AAC9B,WAAK,QAAQ,QAAQ,YAAY,IAAI;AAErC,UAAI,SAAS;AACX,aAAK,gBAAgB,SAAS;AAAA,MAChC;AAAA,IACF;AACA,WAAO,QAAQ,QAAQ,UAAU;AAAA,EACnC;AAAA,EAEA,gBAAgB,WAAmB;AACjC,UAAM,WAAW,KAAK,mBAAmB;AACzC,SAAK,mBAAmB,IAAI,SAAS;AAErC,QAAI,aAAa,GAAG;AAClB,WAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,IACxE;AAAA,EACF;AAAA,EAEA,MAAM,oBAGH;AACD,UAAM,eAAe,MAAM,KAAK,QAAQ,0BAAA;AACxC,WAAO;AAAA,MACL,gBAAgB,KAAK;AAAA,MACrB,cAAc,gBAAgB,KAAK;AAAA,IAAA;AAAA,EAEvC;AAAA,EAEA,MAAM,mBAAkC;AACtC,UAAM,UAAU,CAAC,GAAG,KAAK,kBAAkB;AAC3C,QAAI,QAAQ,WAAW,GAAG;AACxB,WAAK,IAAI,OAAO,4CAA4C;AAC5D;AAAA,IACF;AACA,UAAM,UAAU,CAAC,GAAG,KAAK,WAAW,SAAS;AAC7C,QAAI,QAAQ,WAAW,GAAG;AAGxB,WAAK,IAAI,OAAO,mCAAmC;AACnD;AAAA,IACF;AACA,QAAI;AACF,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,YAAM,kBAAkB,IAAI,GAAI,OAAmC;AACnE,UAAI,kBAAkB,iBAAiB;AACrC,aAAK,IAAI;AAAA,UACP,yCAAyC,eAAe,MAAM,eAAe;AAAA,QAAA;AAAA,MAEjF,OAAO;AACL,cAAM,UAAU,MAAM,KAAK,QAAQ,mBAAmB,eAAe;AACrE,aAAK,IAAI,OAAO,UAAU,OAAO,mBAAmB,eAAe,EAAE;AACrE,aAAK,mBAAmB,OAAO,eAAe;AAAA,MAChD;AAAA,IACF,UAAA;AACE,UAAI,KAAK,mBAAmB,MAAM;AAEhC,aAAK,OAAO,WAAW,MAAM,KAAK,iBAAA,GAAoB,gBAAgB;AAAA,MACxE;AAAA,IACF;AAAA,EACF;AAAA,EAEA,MAAM,KAAK,KAAe;AACxB,SAAK,OAAO,KAAK,KAAK,KAAK,GAAG;AAC9B,SAAK,SAAS,QAAQ,OAAA;AACtB,UAAM,KAAK,QAAQ,KAAA;AAAA,EACrB;AACF;AAiBA,MAAM,mBAAmB,6BAA6B;"}
@@ -40,7 +40,7 @@ import type { Subscriber } from './subscriber.ts';
  export declare class Storer implements Service {
  #private;
  readonly id = "storer";
- constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | StatusMessage) => void, onFatal: (err: Error) => void);
+ constructor(lc: LogContext, shard: ShardID, taskID: string, discoveryAddress: string, discoveryProtocol: string, db: PostgresDB, replicaVersion: string, onConsumed: (c: Commit | StatusMessage) => void, onFatal: (err: Error) => void, backPressureThreshold: number);
  assumeOwnership(): Promise<void>;
  getLastWatermarkToStartStream(): Promise<string>;
  getMinWatermarkForCatchup(): Promise<string | null>;
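Correspondingly, the `Storer` constructor now takes `backPressureThreshold` as a final required parameter after the `onConsumed` and `onFatal` callbacks. A hedged construction sketch, matching only the declaration above; every concrete value is a placeholder rather than a zero-cache default:

```ts
// Placeholder wiring only; LogContext/ShardID/PostgresDB values are stubbed
// as `any`, and the string/number literals are illustrative.
import {Storer} from './storer.ts';

declare const lc: any;       // LogContext
declare const shard: any;    // ShardID
declare const changeDB: any; // PostgresDB

const storer = new Storer(
  lc,
  shard,
  'task-0',                   // taskID (placeholder)
  'replication-manager:4849', // discoveryAddress (placeholder)
  'ws',                       // discoveryProtocol (placeholder)
  changeDB,
  '0a1b2c3d',                 // replicaVersion (placeholder)
  c => void c,                // onConsumed: (c: Commit | StatusMessage) => void
  err => {
    throw err;                // onFatal: (err: Error) => void
  },
  1000,                       // backPressureThreshold (new required parameter)
);
```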
@@ -1 +1 @@
- {"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAUjD,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAC3E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,6CAA6C,CAAC;AAC/E,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAQpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAoChD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAerB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,aAAa,KAAK,IAAI,EAC/C,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI;IAkBzB,eAAe;IAcf,6BAA6B,IAAI,OAAO,CAAC,MAAM,CAAC;IAahD,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAQzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAyBtD,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAI9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,aAAa;IAIvB,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IA8BnC,GAAG;IA6PT,IAAI;CAIL"}
+ {"version":3,"file":"storer.d.ts","sourceRoot":"","sources":["../../../../../../zero-cache/src/services/change-streamer/storer.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,kBAAkB,CAAC;AAUjD,OAAO,EAA0B,KAAK,UAAU,EAAC,MAAM,mBAAmB,CAAC;AAC3E,OAAO,EAAY,KAAK,OAAO,EAAC,MAAM,uBAAuB,CAAC;AAC9D,OAAO,EAAC,KAAK,MAAM,EAAC,MAAM,iDAAiD,CAAC;AAC5E,OAAO,KAAK,EAAC,aAAa,EAAC,MAAM,6CAA6C,CAAC;AAC/E,OAAO,KAAK,EAAC,cAAc,EAAC,MAAM,6BAA6B,CAAC;AAChE,OAAO,KAAK,EAAC,OAAO,EAAC,MAAM,eAAe,CAAC;AAC3C,OAAO,KAAK,EAAC,iBAAiB,EAAC,MAAM,8BAA8B,CAAC;AAQpE,OAAO,KAAK,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAsBhD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GA6BG;AACH,qBAAa,MAAO,YAAW,OAAO;;IACpC,QAAQ,CAAC,EAAE,YAAY;gBAgBrB,EAAE,EAAE,UAAU,EACd,KAAK,EAAE,OAAO,EACd,MAAM,EAAE,MAAM,EACd,gBAAgB,EAAE,MAAM,EACxB,iBAAiB,EAAE,MAAM,EACzB,EAAE,EAAE,UAAU,EACd,cAAc,EAAE,MAAM,EACtB,UAAU,EAAE,CAAC,CAAC,EAAE,MAAM,GAAG,aAAa,KAAK,IAAI,EAC/C,OAAO,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,EAC7B,qBAAqB,EAAE,MAAM;IAmBzB,eAAe;IAcf,6BAA6B,IAAI,OAAO,CAAC,MAAM,CAAC;IAahD,yBAAyB,IAAI,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC;IAQzD,kBAAkB,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC;IAyBtD,KAAK,CAAC,KAAK,EAAE,iBAAiB;IAI9B,KAAK;IAIL,MAAM,CAAC,CAAC,EAAE,aAAa;IAIvB,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,IAAI,EAAE,cAAc;IAMpD,YAAY,IAAI,OAAO,CAAC,IAAI,CAAC,GAAG,SAAS;IAwCnC,GAAG;IA6PT,IAAI;CAIL"}