@openhi/constructs 0.0.109 → 0.0.111

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,79 @@
+ // src/components/postgres/data-store-postgres-schema.ts
+ var SCHEMA_NAME_PATTERN = /^[a-z_][a-z0-9_]{0,62}$/;
+ function quoteSchemaIdentifier(schemaName) {
+ if (!SCHEMA_NAME_PATTERN.test(schemaName)) {
+ throw new Error(
+ `Invalid Postgres schema name: ${JSON.stringify(schemaName)}; expected /[a-z_][a-z0-9_]{0,62}/`
+ );
+ }
+ return `"${schemaName}"`;
+ }
+ function buildSchemaBootstrapStatements(schemaName) {
+ const s = quoteSchemaIdentifier(schemaName);
+ return [
+ `CREATE SCHEMA IF NOT EXISTS ${s};`,
+ [
+ `CREATE TABLE IF NOT EXISTS ${s}.resources (`,
+ ` tenant_id text NOT NULL,`,
+ ` workspace_id text NOT NULL,`,
+ ` resource_type text NOT NULL,`,
+ ` resource_id text NOT NULL,`,
+ ` version text NOT NULL,`,
+ ` last_updated timestamptz NOT NULL,`,
+ ` deleted_at timestamptz,`,
+ ` resource jsonb NOT NULL,`,
+ ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,
+ `);`
+ ].join("\n"),
+ [
+ `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,
+ ` ON ${s}.resources USING gin (resource);`
+ ].join("\n"),
+ [
+ `CREATE INDEX IF NOT EXISTS resources_listing`,
+ ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`
+ ].join("\n")
+ ];
+ }
+ function buildSchemaBootstrapSql(schemaName) {
+ return buildSchemaBootstrapStatements(schemaName).join("\n");
+ }
+ function buildResourceUpsertSql(schemaName) {
+ const s = quoteSchemaIdentifier(schemaName);
+ return [
+ `INSERT INTO ${s}.resources`,
+ ` (tenant_id, workspace_id, resource_type, resource_id, version, last_updated, deleted_at, resource)`,
+ `VALUES ($1, $2, $3, $4, $5, $6, NULL, $7::jsonb)`,
+ `ON CONFLICT (tenant_id, workspace_id, resource_type, resource_id)`,
+ `DO UPDATE SET`,
+ ` version = EXCLUDED.version,`,
+ ` last_updated = EXCLUDED.last_updated,`,
+ ` deleted_at = NULL,`,
+ ` resource = EXCLUDED.resource`,
+ `WHERE EXCLUDED.version > ${s}.resources.version;`
+ ].join("\n");
+ }
+ function buildResourceSoftDeleteSql(schemaName) {
+ const s = quoteSchemaIdentifier(schemaName);
+ return [
+ `UPDATE ${s}.resources`,
+ `SET deleted_at = $6,`,
+ ` version = $5`,
+ `WHERE tenant_id = $1`,
+ ` AND workspace_id = $2`,
+ ` AND resource_type = $3`,
+ ` AND resource_id = $4`,
+ ` AND $5 > version;`
+ ].join("\n");
+ }
+ async function ensureSchemaBootstrap(client, schemaName) {
+ await client.query(buildSchemaBootstrapSql(schemaName));
+ }
+
+ export {
+ buildSchemaBootstrapStatements,
+ buildResourceUpsertSql,
+ buildResourceSoftDeleteSql,
+ ensureSchemaBootstrap
+ };
+ //# sourceMappingURL=chunk-2O3CXY2C.mjs.map
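
For orientation, here is a minimal consumer sketch of the helpers this chunk exports, using node-postgres. The import specifier, connection string, tenant/workspace identifiers, and the ULID are illustrative assumptions, not part of the published API surface:

```ts
// Hypothetical usage of the new SQL builders. The import path is illustrative;
// consumers would use whatever entry point of @openhi/constructs re-exports
// this chunk.
import { Pool } from "pg";
import {
  ensureSchemaBootstrap,
  buildResourceUpsertSql,
} from "./chunk-2O3CXY2C.mjs";

const pool = new Pool({ connectionString: process.env.DATABASE_URL });
const schema = "b_a1b2c3"; // must match /^[a-z_][a-z0-9_]{0,62}$/

// Idempotent DDL: the CREATE ... IF NOT EXISTS statements make repeats no-ops.
await ensureSchemaBootstrap(pool, schema);

// Param order per the builder: $1 tenant, $2 workspace, $3 type, $4 id,
// $5 version (ULID), $6 last_updated, $7 resource JSON.
await pool.query(buildResourceUpsertSql(schema), [
  "tenant-1",
  "workspace-1",
  "Patient",
  "pat-123",
  "01HZZZZZZZZZZZZZZZZZZZZZZZ", // placeholder ULID
  new Date(),
  JSON.stringify({ resourceType: "Patient", id: "pat-123" }),
]);
```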
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/components/postgres/data-store-postgres-schema.ts"],"sourcesContent":["import type { Pool, PoolClient } from \"pg\";\n\n/**\n * @see sites/www-docs/content/architecture/adr/2026-04-17-01-ad-hoc-query-support-fhir-api.md\n *\n * SQL strings for the Postgres replication tier (ADR 2026-04-17-01, phase 1):\n * the JSONB `resources` table that mirrors current FHIR resources from the\n * DynamoDB single-table store. Phase 1 covers replication only; query routing\n * and indexes for individual SearchParameters are deferred to a follow-on phase.\n *\n * The `version` column stores the resource's `vid` (a ULID — see\n * `data-entity-common.ts`). ULIDs are monotonically lexically sortable, so the\n * UPSERT/UPDATE guards use plain `>` text comparison to reject out-of-order\n * Kinesis deliveries.\n */\n\nexport const POSTGRES_REPLICATION_SCHEMA_VERSION = 1;\n\nconst SCHEMA_NAME_PATTERN = /^[a-z_][a-z0-9_]{0,62}$/;\n\n/**\n * Validate that a schema name is a safe Postgres identifier and quote it for\n * inclusion in DDL/DML. Throws on anything that doesn't match the lower-snake\n * pattern OpenHI uses for branch-derived schema names (e.g. `b_a1b2c3`).\n */\nexport function quoteSchemaIdentifier(schemaName: string): string {\n if (!SCHEMA_NAME_PATTERN.test(schemaName)) {\n throw new Error(\n `Invalid Postgres schema name: ${JSON.stringify(schemaName)}; expected /[a-z_][a-z0-9_]{0,62}/`,\n );\n }\n return `\"${schemaName}\"`;\n}\n\n/**\n * Build the bootstrap DDL as a list of independent statements. Use this form\n * when sending the DDL through a transport that does not accept multi-statement\n * SQL in a single call — most importantly the AWS RDS Data API's\n * `ExecuteStatement`, which the REST API runner uses.\n *\n * Each statement is fully self-contained and idempotent (`IF NOT EXISTS`),\n * so the array can be executed in order with no transaction wrapper.\n */\nexport function buildSchemaBootstrapStatements(\n schemaName: string,\n): ReadonlyArray<string> {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `CREATE SCHEMA IF NOT EXISTS ${s};`,\n [\n `CREATE TABLE IF NOT EXISTS ${s}.resources (`,\n ` tenant_id text NOT NULL,`,\n ` workspace_id text NOT NULL,`,\n ` resource_type text NOT NULL,`,\n ` resource_id text NOT NULL,`,\n ` version text NOT NULL,`,\n ` last_updated timestamptz NOT NULL,`,\n ` deleted_at timestamptz,`,\n ` resource jsonb NOT NULL,`,\n ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,\n `);`,\n ].join(\"\\n\"),\n [\n `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,\n ` ON ${s}.resources USING gin (resource);`,\n ].join(\"\\n\"),\n [\n `CREATE INDEX IF NOT EXISTS resources_listing`,\n ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`,\n ].join(\"\\n\"),\n ];\n}\n\nexport function buildSchemaBootstrapSql(schemaName: string): string {\n return buildSchemaBootstrapStatements(schemaName).join(\"\\n\");\n}\n\n/**\n * INSERT/MODIFY UPSERT with a monotonic `version` (ULID) guard. If a record\n * arrives out of order — e.g. 
a retried Kinesis delivery for an older `vid`\n * after a newer one — the WHERE clause in DO UPDATE leaves the row unchanged.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version, $6 last_updated, $7 resource (jsonb-serialized).\n */\nexport function buildResourceUpsertSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `INSERT INTO ${s}.resources`,\n ` (tenant_id, workspace_id, resource_type, resource_id, version, last_updated, deleted_at, resource)`,\n `VALUES ($1, $2, $3, $4, $5, $6, NULL, $7::jsonb)`,\n `ON CONFLICT (tenant_id, workspace_id, resource_type, resource_id)`,\n `DO UPDATE SET`,\n ` version = EXCLUDED.version,`,\n ` last_updated = EXCLUDED.last_updated,`,\n ` deleted_at = NULL,`,\n ` resource = EXCLUDED.resource`,\n `WHERE EXCLUDED.version > ${s}.resources.version;`,\n ].join(\"\\n\");\n}\n\n/**\n * REMOVE soft-delete with the same monotonic `version` guard. Using `>` (not\n * `>=`) means a duplicate REMOVE for the same version is a no-op. Param order\n * matches {@link buildResourceUpsertSql} for the first five params.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version (incoming), $6 deleted_at.\n */\nexport function buildResourceSoftDeleteSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `UPDATE ${s}.resources`,\n `SET deleted_at = $6,`,\n ` version = $5`,\n `WHERE tenant_id = $1`,\n ` AND workspace_id = $2`,\n ` AND resource_type = $3`,\n ` AND resource_id = $4`,\n ` AND $5 > version;`,\n ].join(\"\\n\");\n}\n\n/**\n * Run schema DDL idempotently. Safe to call from every Lambda cold start; the\n * `IF NOT EXISTS` clauses make repeats no-ops. Caller is responsible for\n * memoizing across invocations on the same warm container.\n */\nexport async function ensureSchemaBootstrap(\n client: Pool | PoolClient,\n schemaName: string,\n): Promise<void> {\n await client.query(buildSchemaBootstrapSql(schemaName));\n}\n"],"mappings":";AAkBA,IAAM,sBAAsB;AAOrB,SAAS,sBAAsB,YAA4B;AAChE,MAAI,CAAC,oBAAoB,KAAK,UAAU,GAAG;AACzC,UAAM,IAAI;AAAA,MACR,iCAAiC,KAAK,UAAU,UAAU,CAAC;AAAA,IAC7D;AAAA,EACF;AACA,SAAO,IAAI,UAAU;AACvB;AAWO,SAAS,+BACd,YACuB;AACvB,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,+BAA+B,CAAC;AAAA,IAChC;AAAA,MACE,8BAA8B,CAAC;AAAA,MAC/B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,EAAE,KAAK,IAAI;AAAA,IACX;AAAA,MACE;AAAA,MACA,QAAQ,CAAC;AAAA,IACX,EAAE,KAAK,IAAI;AAAA,IACX;AAAA,MACE;AAAA,MACA,QAAQ,CAAC;AAAA,IACX,EAAE,KAAK,IAAI;AAAA,EACb;AACF;AAEO,SAAS,wBAAwB,YAA4B;AAClE,SAAO,+BAA+B,UAAU,EAAE,KAAK,IAAI;AAC7D;AAUO,SAAS,uBAAuB,YAA4B;AACjE,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,eAAe,CAAC;AAAA,IAChB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,4BAA4B,CAAC;AAAA,EAC/B,EAAE,KAAK,IAAI;AACb;AAUO,SAAS,2BAA2B,YAA4B;AACrE,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,UAAU,CAAC;AAAA,IACX;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAOA,eAAsB,sBACpB,QACA,YACe;AACf,QAAM,OAAO,MAAM,wBAAwB,UAAU,CAAC;AACxD;","names":[]}
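
The inlined source above explains the write guards: `version` holds a ULID, and ULIDs sort lexicographically by creation time, so plain text `>` rejects out-of-order deliveries. A small sketch of the property being relied on (both ULID values are fabricated):

```ts
// ULIDs start with a 48-bit millisecond timestamp encoded as 10 Crockford
// base32 characters, so later ULIDs sort strictly higher as plain strings.
const older = "01HV8G9M2JXKQ4R7T9W1Y3Z5AB"; // minted earlier (fabricated)
const newer = "01HV8G9M2KXKQ4R7T9W1Y3Z5AB"; // minted later (fabricated)

console.log(newer > older); // true — the same text comparison Postgres applies

// A redelivered write carrying `older` after `newer` has been applied fails
// the `EXCLUDED.version > resources.version` guard and leaves the row as-is.
```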
@@ -39,26 +39,35 @@ function quoteSchemaIdentifier(schemaName) {
  }
  return `"${schemaName}"`;
  }
- function buildSchemaBootstrapSql(schemaName) {
+ function buildSchemaBootstrapStatements(schemaName) {
  const s = quoteSchemaIdentifier(schemaName);
  return [
  `CREATE SCHEMA IF NOT EXISTS ${s};`,
- `CREATE TABLE IF NOT EXISTS ${s}.resources (`,
- ` tenant_id text NOT NULL,`,
- ` workspace_id text NOT NULL,`,
- ` resource_type text NOT NULL,`,
- ` resource_id text NOT NULL,`,
- ` version text NOT NULL,`,
- ` last_updated timestamptz NOT NULL,`,
- ` deleted_at timestamptz,`,
- ` resource jsonb NOT NULL,`,
- ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,
- `);`,
- `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,
- ` ON ${s}.resources USING gin (resource);`,
- `CREATE INDEX IF NOT EXISTS resources_listing`,
- ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`
- ].join("\n");
+ [
+ `CREATE TABLE IF NOT EXISTS ${s}.resources (`,
+ ` tenant_id text NOT NULL,`,
+ ` workspace_id text NOT NULL,`,
+ ` resource_type text NOT NULL,`,
+ ` resource_id text NOT NULL,`,
+ ` version text NOT NULL,`,
+ ` last_updated timestamptz NOT NULL,`,
+ ` deleted_at timestamptz,`,
+ ` resource jsonb NOT NULL,`,
+ ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,
+ `);`
+ ].join("\n"),
+ [
+ `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,
+ ` ON ${s}.resources USING gin (resource);`
+ ].join("\n"),
+ [
+ `CREATE INDEX IF NOT EXISTS resources_listing`,
+ ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`
+ ].join("\n")
+ ];
+ }
+ function buildSchemaBootstrapSql(schemaName) {
+ return buildSchemaBootstrapStatements(schemaName).join("\n");
  }
  function buildResourceUpsertSql(schemaName) {
  const s = quoteSchemaIdentifier(schemaName);
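
This hunk is the substantive change in the release: the bootstrap DDL is now returned as an array of self-contained statements, with `buildSchemaBootstrapSql` retained as a join of that array. Per the doc comment in the source maps, the motivation is transports that accept only one statement per call, such as the RDS Data API's `ExecuteStatement`. A sketch of that usage, assuming the chunk's exports are reachable; the ARNs and database name are placeholders:

```ts
// Each bootstrap statement is executed individually because the RDS Data
// API's ExecuteStatement accepts a single SQL statement per call.
import {
  ExecuteStatementCommand,
  RDSDataClient,
} from "@aws-sdk/client-rds-data";
// Hypothetical import path; consumers would use the package's public entry point.
import { buildSchemaBootstrapStatements } from "./chunk-2O3CXY2C.mjs";

const client = new RDSDataClient({});

async function bootstrapViaDataApi(schemaName: string): Promise<void> {
  for (const sql of buildSchemaBootstrapStatements(schemaName)) {
    // Statements are ordered and idempotent (IF NOT EXISTS), so sequential
    // execution without a transaction wrapper is safe to retry.
    await client.send(
      new ExecuteStatementCommand({
        resourceArn: "arn:aws:rds:us-east-1:123456789012:cluster:example", // placeholder
        secretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:example", // placeholder
        database: "openhi", // placeholder
        sql,
      }),
    );
  }
}
```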
@@ -1 +1 @@
- {"version":3,"sources":["../src/components/postgres/data-store-postgres-replication.handler.ts","../src/components/postgres/data-store-postgres-schema.ts","../src/components/dynamodb/dynamodb-stream-record.ts","../src/components/dynamodb/firehose-archive-transform.handler.ts","../src/components/dynamodb/data-store-change-events.ts"],"sourcesContent":["import {\n GetSecretValueCommand,\n SecretsManagerClient,\n} from \"@aws-sdk/client-secrets-manager\";\nimport type {\n KinesisStreamBatchResponse,\n KinesisStreamEvent,\n KinesisStreamRecord,\n} from \"aws-lambda\";\nimport { Pool } from \"pg\";\nimport {\n buildResourceSoftDeleteSql,\n buildResourceUpsertSql,\n ensureSchemaBootstrap,\n} from \"./data-store-postgres-schema\";\nimport {\n type DynamoDbStreamKinesisRecord,\n dynamodbImageToPlain,\n} from \"../dynamodb/dynamodb-stream-record\";\nimport {\n parseCurrentResourceKeys,\n shouldDropAsGlobalTableReplicationRecord,\n} from \"../dynamodb/firehose-archive-transform.handler\";\n\n/**\n * Postgres replication consumer for the data store change Kinesis stream\n * (ADR 2026-04-17-01, phase 1). Reads the same stream that feeds the\n * Firehose-to-S3 archive and projects each current FHIR resource into the\n * `resources` JSONB table on the Aurora Serverless v2 cluster provisioned by\n * {@link DataStorePostgresReplica}. This phase implements replication only;\n * no read-side query routing is wired up here.\n */\n\nconst REQUIRED_ENV_VARS = [\n \"OPENHI_PG_HOST\",\n \"OPENHI_PG_PORT\",\n \"OPENHI_PG_DATABASE\",\n \"OPENHI_PG_SCHEMA\",\n] as const;\n\ntype RequiredEnvVar = (typeof REQUIRED_ENV_VARS)[number];\n\ninterface PostgresConnectionConfig {\n host: string;\n port: number;\n database: string;\n schema: string;\n user: string;\n password: string;\n ssl: boolean;\n}\n\ninterface SecretsManagerCredentials {\n username: string;\n password: string;\n}\n\nlet cachedConfig: PostgresConnectionConfig | undefined;\nlet pool: Pool | undefined;\nlet bootstrapPromise: Promise<void> | undefined;\nlet secretsClient: SecretsManagerClient | undefined;\n\nfunction readEnv(name: RequiredEnvVar): string {\n const v = process.env[name]?.trim();\n if (!v) {\n throw new Error(\n `Missing required environment variable for postgres replication: ${name}`,\n );\n }\n return v;\n}\n\nasync function loadConfig(): Promise<PostgresConnectionConfig> {\n if (cachedConfig) {\n return cachedConfig;\n }\n const host = readEnv(\"OPENHI_PG_HOST\");\n const port = Number.parseInt(readEnv(\"OPENHI_PG_PORT\"), 10);\n const database = readEnv(\"OPENHI_PG_DATABASE\");\n const schema = readEnv(\"OPENHI_PG_SCHEMA\");\n\n // Test/integration setups can supply user/password directly via env to skip\n // the Secrets Manager round-trip. 
Production always goes through the secret.\n const directUser = process.env.OPENHI_PG_USER?.trim();\n const directPassword = process.env.OPENHI_PG_PASSWORD;\n if (directUser && directPassword !== undefined) {\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: directUser,\n password: directPassword,\n ssl: process.env.OPENHI_PG_SSL?.trim() === \"true\",\n };\n return cachedConfig;\n }\n\n const secretArn = process.env.OPENHI_PG_SECRET_ARN?.trim();\n if (!secretArn) {\n throw new Error(\n \"Either OPENHI_PG_SECRET_ARN or both OPENHI_PG_USER and OPENHI_PG_PASSWORD must be set\",\n );\n }\n if (!secretsClient) {\n secretsClient = new SecretsManagerClient({});\n }\n const out = await secretsClient.send(\n new GetSecretValueCommand({ SecretId: secretArn }),\n );\n if (!out.SecretString) {\n throw new Error(\n `Secret ${secretArn} returned no SecretString; binary secrets are not supported`,\n );\n }\n const parsed = JSON.parse(out.SecretString) as SecretsManagerCredentials;\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: parsed.username,\n password: parsed.password,\n ssl: true,\n };\n return cachedConfig;\n}\n\nasync function getPool(): Promise<Pool> {\n if (pool) {\n return pool;\n }\n const cfg = await loadConfig();\n pool = new Pool({\n host: cfg.host,\n port: cfg.port,\n database: cfg.database,\n user: cfg.user,\n password: cfg.password,\n ssl: cfg.ssl ? { rejectUnauthorized: false } : false,\n max: 2,\n idleTimeoutMillis: 60_000,\n connectionTimeoutMillis: 10_000,\n });\n return pool;\n}\n\nasync function ensureBootstrapped(): Promise<void> {\n if (!bootstrapPromise) {\n bootstrapPromise = (async () => {\n const cfg = await loadConfig();\n const p = await getPool();\n await ensureSchemaBootstrap(p, cfg.schema);\n })().catch((err) => {\n // Reset so a subsequent invocation retries the bootstrap on a fresh\n // container; otherwise a transient failure during cold start would\n // poison every record forever.\n bootstrapPromise = undefined;\n throw err;\n });\n }\n return bootstrapPromise;\n}\n\n/**\n * Reset module-scoped caches. Test-only helper; do not invoke from production\n * code paths. 
Exposed so unit and integration tests can run multiple\n * scenarios within a single Jest worker without leaking state across cases.\n */\nexport function __resetReplicationModuleStateForTests(): void {\n cachedConfig = undefined;\n pool = undefined;\n bootstrapPromise = undefined;\n secretsClient = undefined;\n}\n\nfunction decodeKinesisRecord(\n record: KinesisStreamRecord,\n): DynamoDbStreamKinesisRecord {\n const json = Buffer.from(record.kinesis.data, \"base64\").toString(\"utf8\");\n return JSON.parse(json) as DynamoDbStreamKinesisRecord;\n}\n\nfunction deriveLastUpdated(\n resource: unknown,\n approximateCreationEpochSec: number | undefined,\n): Date {\n if (resource && typeof resource === \"object\") {\n const meta = (resource as { meta?: { lastUpdated?: unknown } }).meta;\n const lu = meta?.lastUpdated;\n if (typeof lu === \"string\") {\n const parsed = new Date(lu);\n if (!Number.isNaN(parsed.getTime())) {\n return parsed;\n }\n }\n }\n if (\n typeof approximateCreationEpochSec === \"number\" &&\n Number.isFinite(approximateCreationEpochSec)\n ) {\n return new Date(approximateCreationEpochSec * 1000);\n }\n return new Date();\n}\n\ninterface UpsertParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n lastUpdated: Date;\n resourceJson: string;\n}\n\ninterface SoftDeleteParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n deletedAt: Date;\n}\n\ntype WriteIntent =\n | { kind: \"upsert\"; params: UpsertParams }\n | { kind: \"delete\"; params: SoftDeleteParams }\n | { kind: \"drop\" };\n\n/**\n * Translate a single Kinesis-wrapped DynamoDB stream record into a write\n * intent against the `resources` table. Returns `drop` for replica-side\n * global-table records, non-CURRENT items, and any record whose resource\n * payload cannot be decoded as JSON.\n */\nexport function buildWriteIntent(\n change: DynamoDbStreamKinesisRecord,\n awsRegion: string,\n): WriteIntent {\n if (shouldDropAsGlobalTableReplicationRecord(change, awsRegion)) {\n return { kind: \"drop\" };\n }\n const keys = parseCurrentResourceKeys(change);\n if (!keys) {\n return { kind: \"drop\" };\n }\n\n const isRemove = change.eventName === \"REMOVE\";\n const image = isRemove\n ? change.dynamodb?.OldImage\n : change.dynamodb?.NewImage;\n if (!image) {\n return { kind: \"drop\" };\n }\n\n const plain = dynamodbImageToPlain(image);\n const approxEpochSec = change.dynamodb?.ApproximateCreationDateTime;\n\n if (isRemove) {\n return {\n kind: \"delete\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n deletedAt: deriveLastUpdated(undefined, approxEpochSec),\n },\n };\n }\n\n if (typeof plain.resource !== \"string\") {\n return { kind: \"drop\" };\n }\n let resourceObj: unknown;\n try {\n resourceObj = JSON.parse(plain.resource);\n } catch {\n return { kind: \"drop\" };\n }\n\n return {\n kind: \"upsert\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n lastUpdated: deriveLastUpdated(resourceObj, approxEpochSec),\n resourceJson: plain.resource,\n },\n };\n}\n\ninterface PgQueryable {\n query: (\n text: string,\n values?: ReadonlyArray<unknown>,\n ) => Promise<unknown> | unknown;\n}\n\n/**\n * Execute a write intent against an open Postgres connection or pool. 
Pure\n * SQL dispatch — extracted from the handler so unit tests can drive it with a\n * mock client and integration tests can drive it with a real pool.\n */\nexport async function applyWriteIntent(\n client: PgQueryable,\n schemaName: string,\n intent: WriteIntent,\n): Promise<void> {\n if (intent.kind === \"drop\") {\n return;\n }\n if (intent.kind === \"upsert\") {\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceUpsertSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.lastUpdated,\n p.resourceJson,\n ]),\n );\n return;\n }\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceSoftDeleteSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.deletedAt,\n ]),\n );\n}\n\nexport async function handler(\n event: KinesisStreamEvent,\n): Promise<KinesisStreamBatchResponse> {\n const awsRegion = process.env.AWS_REGION ?? \"\";\n const batchItemFailures: Array<{ itemIdentifier: string }> = [];\n\n await ensureBootstrapped();\n const cfg = await loadConfig();\n const p = await getPool();\n\n for (const record of event.Records) {\n try {\n const change = decodeKinesisRecord(record);\n const intent = buildWriteIntent(change, awsRegion);\n await applyWriteIntent(p, cfg.schema, intent);\n } catch (err) {\n // eslint-disable-next-line no-console\n console.error(\n \"postgres replication record failed\",\n record.kinesis.sequenceNumber,\n err,\n );\n batchItemFailures.push({\n itemIdentifier: record.kinesis.sequenceNumber,\n });\n }\n }\n\n return { batchItemFailures };\n}\n","import type { Pool, PoolClient } from \"pg\";\n\n/**\n * @see sites/www-docs/content/architecture/adr/2026-04-17-01-ad-hoc-query-support-fhir-api.md\n *\n * SQL strings for the Postgres replication tier (ADR 2026-04-17-01, phase 1):\n * the JSONB `resources` table that mirrors current FHIR resources from the\n * DynamoDB single-table store. Phase 1 covers replication only; query routing\n * and indexes for individual SearchParameters are deferred to a follow-on phase.\n *\n * The `version` column stores the resource's `vid` (a ULID — see\n * `data-entity-common.ts`). ULIDs are monotonically lexically sortable, so the\n * UPSERT/UPDATE guards use plain `>` text comparison to reject out-of-order\n * Kinesis deliveries.\n */\n\nexport const POSTGRES_REPLICATION_SCHEMA_VERSION = 1;\n\nconst SCHEMA_NAME_PATTERN = /^[a-z_][a-z0-9_]{0,62}$/;\n\n/**\n * Validate that a schema name is a safe Postgres identifier and quote it for\n * inclusion in DDL/DML. Throws on anything that doesn't match the lower-snake\n * pattern OpenHI uses for branch-derived schema names (e.g. 
`b_a1b2c3`).\n */\nexport function quoteSchemaIdentifier(schemaName: string): string {\n if (!SCHEMA_NAME_PATTERN.test(schemaName)) {\n throw new Error(\n `Invalid Postgres schema name: ${JSON.stringify(schemaName)}; expected /[a-z_][a-z0-9_]{0,62}/`,\n );\n }\n return `\"${schemaName}\"`;\n}\n\nexport function buildSchemaBootstrapSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `CREATE SCHEMA IF NOT EXISTS ${s};`,\n `CREATE TABLE IF NOT EXISTS ${s}.resources (`,\n ` tenant_id text NOT NULL,`,\n ` workspace_id text NOT NULL,`,\n ` resource_type text NOT NULL,`,\n ` resource_id text NOT NULL,`,\n ` version text NOT NULL,`,\n ` last_updated timestamptz NOT NULL,`,\n ` deleted_at timestamptz,`,\n ` resource jsonb NOT NULL,`,\n ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,\n `);`,\n `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,\n ` ON ${s}.resources USING gin (resource);`,\n `CREATE INDEX IF NOT EXISTS resources_listing`,\n ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`,\n ].join(\"\\n\");\n}\n\n/**\n * INSERT/MODIFY UPSERT with a monotonic `version` (ULID) guard. If a record\n * arrives out of order — e.g. a retried Kinesis delivery for an older `vid`\n * after a newer one — the WHERE clause in DO UPDATE leaves the row unchanged.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version, $6 last_updated, $7 resource (jsonb-serialized).\n */\nexport function buildResourceUpsertSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `INSERT INTO ${s}.resources`,\n ` (tenant_id, workspace_id, resource_type, resource_id, version, last_updated, deleted_at, resource)`,\n `VALUES ($1, $2, $3, $4, $5, $6, NULL, $7::jsonb)`,\n `ON CONFLICT (tenant_id, workspace_id, resource_type, resource_id)`,\n `DO UPDATE SET`,\n ` version = EXCLUDED.version,`,\n ` last_updated = EXCLUDED.last_updated,`,\n ` deleted_at = NULL,`,\n ` resource = EXCLUDED.resource`,\n `WHERE EXCLUDED.version > ${s}.resources.version;`,\n ].join(\"\\n\");\n}\n\n/**\n * REMOVE soft-delete with the same monotonic `version` guard. Using `>` (not\n * `>=`) means a duplicate REMOVE for the same version is a no-op. Param order\n * matches {@link buildResourceUpsertSql} for the first five params.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version (incoming), $6 deleted_at.\n */\nexport function buildResourceSoftDeleteSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `UPDATE ${s}.resources`,\n `SET deleted_at = $6,`,\n ` version = $5`,\n `WHERE tenant_id = $1`,\n ` AND workspace_id = $2`,\n ` AND resource_type = $3`,\n ` AND resource_id = $4`,\n ` AND $5 > version;`,\n ].join(\"\\n\");\n}\n\n/**\n * Run schema DDL idempotently. Safe to call from every Lambda cold start; the\n * `IF NOT EXISTS` clauses make repeats no-ops. 
Caller is responsible for\n * memoizing across invocations on the same warm container.\n */\nexport async function ensureSchemaBootstrap(\n client: Pool | PoolClient,\n schemaName: string,\n): Promise<void> {\n await client.query(buildSchemaBootstrapSql(schemaName));\n}\n","import type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\n\n/**\n * Shape of a DynamoDB change record as delivered inside Kinesis (table stream\n * destination) and decoded by the Firehose transform Lambda.\n */\nexport interface DynamoDbStreamKinesisRecord {\n eventName?: string;\n userIdentity?: unknown;\n dynamodb?: {\n Keys?: Record<string, AttributeValue>;\n NewImage?: Record<string, AttributeValue>;\n OldImage?: Record<string, AttributeValue>;\n SequenceNumber?: string;\n ApproximateCreationDateTime?: number;\n };\n}\n\nexport function dynamodbValueToJs(av: AttributeValue): unknown {\n if (av.S !== undefined) {\n return av.S;\n }\n if (av.N !== undefined) {\n return av.N.includes(\".\")\n ? Number.parseFloat(av.N)\n : Number.parseInt(av.N, 10);\n }\n if (av.BOOL !== undefined) {\n return av.BOOL;\n }\n if (av.NULL !== undefined) {\n return null;\n }\n if (av.M !== undefined) {\n return dynamodbImageToPlain(av.M);\n }\n if (av.L !== undefined) {\n return av.L.map((x: AttributeValue) => dynamodbValueToJs(x));\n }\n if (av.SS !== undefined) {\n return av.SS;\n }\n if (av.NS !== undefined) {\n return av.NS.map((n: string) =>\n n.includes(\".\") ? Number.parseFloat(n) : Number.parseInt(n, 10),\n );\n }\n return undefined;\n}\n\nexport function dynamodbImageToPlain(\n image: Record<string, AttributeValue>,\n): Record<string, unknown> {\n const out: Record<string, unknown> = {};\n for (const [k, v] of Object.entries(image)) {\n out[k] = dynamodbValueToJs(v);\n }\n return out;\n}\n","import { randomUUID } from \"node:crypto\";\nimport type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\nimport {\n EventBridgeClient,\n PutEventsCommand,\n type PutEventsRequestEntry,\n type PutEventsResultEntry,\n} from \"@aws-sdk/client-eventbridge\";\nimport { PutObjectCommand, S3Client } from \"@aws-sdk/client-s3\";\nimport type {\n FirehoseTransformationEvent,\n FirehoseTransformationResult,\n FirehoseTransformationResultRecord,\n} from \"aws-lambda\";\nimport {\n DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES,\n DATA_STORE_CHANGE_DETAIL_TYPE,\n DATA_STORE_CHANGE_EVENT_SOURCE,\n buildFhirCurrentResourceChangeDetail,\n} from \"./data-store-change-events\";\nimport {\n type DynamoDbStreamKinesisRecord,\n dynamodbImageToPlain,\n} from \"./dynamodb-stream-record\";\n\nexport type { DynamoDbStreamKinesisRecord } from \"./dynamodb-stream-record\";\n\n/**\n * Firehose data-transformation handler: filters DynamoDB change records to\n * current FHIR resource items (SK = CURRENT, TID#…#WID#…#RT#…#ID#… PK),\n * writes archive JSON to S3 via Firehose, sets dynamic partition keys per\n * ADR 2026-03-11-02, and publishes de-identified change notifications to the\n * data event bus via PutEvents per ADR 2026-03-02-01, with retries and an S3\n * dead-letter bucket for entries that still fail.\n */\n\nconst CURRENT_SK = \"CURRENT\";\nconst PK_PATTERN =\n /^TID#(?<tenantId>[^#]+)#WID#(?<workspaceId>[^#]+)#RT#(?<resourceType>[^#]+)#ID#(?<resourceId>.+)$/;\n\n/** DynamoDB-managed attribute on global table items (see AWS Global Tables legacy / replication docs). 
*/\nconst AWS_REP_UPDATE_REGION = \"aws:rep:updateregion\";\n\nfunction getDynamoDbStringAttr(\n image: Record<string, AttributeValue> | undefined,\n name: string,\n): string | undefined {\n if (!image) {\n return undefined;\n }\n const av = image[name];\n if (typeof av?.S === \"string\" && av.S.trim() !== \"\") {\n return av.S.trim();\n }\n return undefined;\n}\n\nfunction primaryImageForReplicationCheck(\n record: DynamoDbStreamKinesisRecord,\n): Record<string, AttributeValue> | undefined {\n if (record.eventName === \"REMOVE\") {\n return record.dynamodb?.OldImage;\n }\n return record.dynamodb?.NewImage;\n}\n\n/**\n * Returns true when this stream/Kinesis record should not be archived because it\n * represents a **replica-side application** of a global-table change (the logical\n * write originated in another Region).\n *\n * - If `aws:rep:updateregion` is present on the item image and differs from\n * `archiveLambdaRegion`, the change was replicated into this Region (archive\n * only in the Region that matches `aws:rep:updateregion`).\n * - Otherwise, if `userIdentity` matches the DynamoDB replication service SLR,\n * treat as replication. **Excluded:** TTL deletes (`type` Service and\n * `principalId` `dynamodb.amazonaws.com`) per AWS stream Identity docs.\n *\n * For MREC global tables version 2019.11.21, AWS documents that stream records\n * may not carry distinguishable metadata; the recommended approach is a custom\n * “write region” attribute on items. If neither that attribute nor\n * `aws:rep:updateregion` nor replication `userIdentity` applies, this function\n * returns false (no drop)—duplicate archives are possible if identical pipelines\n * run in every Region without those signals.\n */\nexport function shouldDropAsGlobalTableReplicationRecord(\n record: DynamoDbStreamKinesisRecord,\n archiveLambdaRegion: string,\n): boolean {\n const image = primaryImageForReplicationCheck(record);\n const updateRegion = getDynamoDbStringAttr(image, AWS_REP_UPDATE_REGION);\n if (\n updateRegion &&\n archiveLambdaRegion &&\n updateRegion !== archiveLambdaRegion\n ) {\n return true;\n }\n\n return isDynamoDbReplicationUserIdentity(record.userIdentity);\n}\n\nfunction isDynamoDbReplicationUserIdentity(userIdentity: unknown): boolean {\n if (!userIdentity || typeof userIdentity !== \"object\") {\n return false;\n }\n const ui = userIdentity as Record<string, unknown>;\n const principalRaw = ui.principalId ?? ui.PrincipalId;\n const typeRaw = ui.type ?? ui.Type;\n const principal =\n typeof principalRaw === \"string\" ? principalRaw.toLowerCase() : \"\";\n const type = typeof typeRaw === \"string\" ? 
typeRaw.toLowerCase() : \"\";\n\n if (type === \"service\" && principal === \"dynamodb.amazonaws.com\") {\n return false;\n }\n\n const replicationMarkers = [\n \"awsservicerolefordynamodbreplication\",\n \"replication.dynamodb.amazonaws.com\",\n ];\n return replicationMarkers.some((m) => principal.includes(m));\n}\n\nexport function parseCurrentResourceKeys(record: DynamoDbStreamKinesisRecord): {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n} | null {\n const keys = record.dynamodb?.Keys;\n if (!keys) {\n return null;\n }\n const pkAttr = keys.PK?.S;\n const skAttr = keys.SK?.S;\n if (!pkAttr || skAttr !== CURRENT_SK) {\n return null;\n }\n const m = PK_PATTERN.exec(pkAttr);\n if (!m?.groups) {\n return null;\n }\n const { tenantId, workspaceId, resourceType, resourceId } = m.groups;\n const image =\n record.eventName === \"REMOVE\"\n ? record.dynamodb?.OldImage\n : record.dynamodb?.NewImage;\n if (!image) {\n return null;\n }\n const plain = dynamodbImageToPlain(image as Record<string, AttributeValue>);\n const version = typeof plain.vid === \"string\" ? plain.vid : null;\n if (!version) {\n return null;\n }\n return { tenantId, workspaceId, resourceType, resourceId, version };\n}\n\nfunction partitionToken(value: string): string {\n if (!value || value.trim() === \"\") {\n return \"-\";\n }\n return value.replace(/[/\\\\]/g, \"_\");\n}\n\nfunction buildArchivePayload(\n record: DynamoDbStreamKinesisRecord,\n keys: ReturnType<typeof parseCurrentResourceKeys>,\n): Record<string, unknown> {\n const newImage = record.dynamodb?.NewImage;\n const oldImage = record.dynamodb?.OldImage;\n const resourceImage = record.eventName === \"REMOVE\" ? oldImage : newImage;\n const resourcePlain = resourceImage\n ? dynamodbImageToPlain(resourceImage as Record<string, AttributeValue>)\n : {};\n\n if (typeof resourcePlain.resource === \"string\") {\n try {\n resourcePlain.resource = JSON.parse(resourcePlain.resource) as unknown;\n } catch {\n /* keep raw string if not valid JSON */\n }\n }\n\n return {\n eventName: record.eventName,\n archivedAt: new Date().toISOString(),\n tenantId: keys!.tenantId,\n workspaceId: keys!.workspaceId,\n resourceType: keys!.resourceType,\n resourceId: keys!.resourceId,\n version: keys!.version,\n resource: resourcePlain,\n };\n}\n\nconst PUT_EVENTS_BATCH_SIZE = 10;\n\n/** Full PutEvents rounds per chunk (initial attempt + failure-driven retries). 
*/\nconst MAX_PUT_EVENTS_ROUNDS = 3;\n\nlet eventBridgeClient: EventBridgeClient | undefined;\n\nfunction getEventBridgeClient(): EventBridgeClient | undefined {\n const bus = process.env.DATA_EVENT_BUS_NAME?.trim();\n if (!bus) {\n return undefined;\n }\n if (!eventBridgeClient) {\n eventBridgeClient = new EventBridgeClient({});\n }\n return eventBridgeClient;\n}\n\nlet s3ClientForDlq: S3Client | undefined;\n\nfunction getS3ClientForDlq(): S3Client | undefined {\n const bucket = process.env.DATA_STORE_PUT_EVENTS_DLQ_BUCKET?.trim();\n if (!bucket) {\n return undefined;\n }\n if (!s3ClientForDlq) {\n s3ClientForDlq = new S3Client({});\n }\n return s3ClientForDlq;\n}\n\ntype PutEventsEntry = PutEventsRequestEntry;\n\ninterface PutEventsDlqPayload {\n dlqSchemaVersion: 1;\n failedAt: string;\n reason: \"put_events_partial_failure\" | \"put_events_sdk_error\";\n attemptRounds: number;\n entries: PutEventsEntry[];\n putEventsResultEntries?: PutEventsResultEntry[];\n sdkError?: string;\n}\n\nasync function writePutEventsFailuresToDlq(\n payload: PutEventsDlqPayload,\n): Promise<void> {\n const bucket = process.env.DATA_STORE_PUT_EVENTS_DLQ_BUCKET?.trim();\n const client = getS3ClientForDlq();\n if (!bucket || !client) {\n throw new Error(\n `PutEvents exhausted retries but DATA_STORE_PUT_EVENTS_DLQ_BUCKET is not set (${payload.reason})`,\n );\n }\n const day = payload.failedAt.slice(0, 10);\n const key = `put-events-failed/${day}/${randomUUID()}.json`;\n await client.send(\n new PutObjectCommand({\n Bucket: bucket,\n Key: key,\n Body: JSON.stringify(payload),\n ContentType: \"application/json\",\n }),\n );\n}\n\n/**\n * Sends one PutEvents batch (≤10 entries) with up to {@link MAX_PUT_EVENTS_ROUNDS}\n * rounds. After the last round, remaining failures or a final SDK error are\n * written to the DLQ S3 bucket (if configured); DLQ write failure throws.\n */\nasync function putEventsChunkWithRetriesAndDlq(\n client: EventBridgeClient,\n entries: PutEventsEntry[],\n): Promise<void> {\n if (entries.length === 0) {\n return;\n }\n\n let pending = [...entries];\n\n for (let round = 1; round <= MAX_PUT_EVENTS_ROUNDS; round++) {\n try {\n const out = await client.send(new PutEventsCommand({ Entries: pending }));\n const failed = out.FailedEntryCount ?? 0;\n if (failed === 0) {\n return;\n }\n\n const nextPending: PutEventsEntry[] = [];\n out.Entries?.forEach((e: PutEventsResultEntry | undefined, i: number) => {\n if (e?.ErrorCode && pending[i]) {\n nextPending.push(pending[i]!);\n }\n });\n pending = nextPending;\n\n if (pending.length === 0) {\n return;\n }\n\n if (round === MAX_PUT_EVENTS_ROUNDS) {\n await writePutEventsFailuresToDlq({\n dlqSchemaVersion: 1,\n failedAt: new Date().toISOString(),\n reason: \"put_events_partial_failure\",\n attemptRounds: MAX_PUT_EVENTS_ROUNDS,\n entries: pending,\n putEventsResultEntries: out.Entries,\n });\n return;\n }\n } catch (sdkErr) {\n const sdkMessage =\n sdkErr instanceof Error ? 
sdkErr.message : String(sdkErr);\n if (round === MAX_PUT_EVENTS_ROUNDS) {\n await writePutEventsFailuresToDlq({\n dlqSchemaVersion: 1,\n failedAt: new Date().toISOString(),\n reason: \"put_events_sdk_error\",\n attemptRounds: MAX_PUT_EVENTS_ROUNDS,\n entries: pending,\n sdkError: sdkMessage,\n });\n return;\n }\n await new Promise((r) => setTimeout(r, 50 * round));\n }\n }\n}\n\nasync function publishDataStoreChangeEvents(\n pending: Array<{\n change: DynamoDbStreamKinesisRecord;\n keys: NonNullable<ReturnType<typeof parseCurrentResourceKeys>>;\n }>,\n): Promise<void> {\n const client = getEventBridgeClient();\n const busName = process.env.DATA_EVENT_BUS_NAME?.trim();\n if (!client || !busName || pending.length === 0) {\n return;\n }\n\n const entries: PutEventsEntry[] = [];\n for (const { change, keys } of pending) {\n const detailObj = buildFhirCurrentResourceChangeDetail(change, keys);\n const detail = JSON.stringify(detailObj);\n const detailBytes = Buffer.byteLength(detail, \"utf8\");\n if (detailBytes > DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES) {\n throw new Error(\n `Event detail is ${detailBytes} bytes (max ${DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES}); ` +\n `oversize strategy deferred per ADR 2026-03-02-01 (${keys.resourceType}/${keys.resourceId}).`,\n );\n }\n entries.push({\n Source: DATA_STORE_CHANGE_EVENT_SOURCE,\n DetailType: DATA_STORE_CHANGE_DETAIL_TYPE,\n Detail: detail,\n EventBusName: busName,\n });\n }\n\n for (let i = 0; i < entries.length; i += PUT_EVENTS_BATCH_SIZE) {\n const chunk = entries.slice(i, i + PUT_EVENTS_BATCH_SIZE);\n await putEventsChunkWithRetriesAndDlq(client, chunk);\n }\n}\n\nexport async function handler(\n event: FirehoseTransformationEvent,\n): Promise<FirehoseTransformationResult> {\n const records: FirehoseTransformationResultRecord[] = [];\n const archiveLambdaRegion = process.env.AWS_REGION ?? 
\"\";\n const pendingPublish: Array<{\n change: DynamoDbStreamKinesisRecord;\n keys: NonNullable<ReturnType<typeof parseCurrentResourceKeys>>;\n }> = [];\n\n for (const rec of event.records) {\n try {\n const payload = Buffer.from(rec.data, \"base64\").toString(\"utf8\");\n const change = JSON.parse(payload) as DynamoDbStreamKinesisRecord;\n\n if (\n shouldDropAsGlobalTableReplicationRecord(change, archiveLambdaRegion)\n ) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const keys = parseCurrentResourceKeys(change);\n\n if (!keys) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const archive = buildArchivePayload(change, keys);\n const out = Buffer.from(`${JSON.stringify(archive)}\\n`).toString(\n \"base64\",\n );\n\n pendingPublish.push({ change, keys });\n\n records.push({\n recordId: rec.recordId,\n result: \"Ok\",\n data: out,\n metadata: {\n partitionKeys: {\n tenantId: partitionToken(keys.tenantId),\n workspaceId: partitionToken(keys.workspaceId),\n resourceType: partitionToken(keys.resourceType),\n resourceId: partitionToken(keys.resourceId),\n version: partitionToken(keys.version),\n },\n },\n });\n } catch {\n records.push({\n recordId: rec.recordId,\n result: \"ProcessingFailed\",\n data: rec.data,\n });\n }\n }\n\n await publishDataStoreChangeEvents(pendingPublish);\n\n return { records };\n}\n","import type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\nimport type { DynamoDbStreamKinesisRecord } from \"./dynamodb-stream-record\";\nimport { dynamodbImageToPlain } from \"./dynamodb-stream-record\";\n\n/**\n * EventBridge envelope constants for data-store CDC (no CDK imports).\n * @see docs/architecture/adr/2026-03-02-01-dynamodb-stream-to-data-event-bus.md\n */\nexport const DATA_STORE_CHANGE_EVENT_SOURCE = \"openhi.data.store\";\n\nexport const DATA_STORE_CHANGE_DETAIL_TYPE = \"FhirCurrentResourceChanged\";\n\n/** AWS PutEvents per-entry detail limit is 256 KiB; stay under for headroom. */\nexport const DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES = 250 * 1024;\n\nconst EXCLUDED_CHANGE_DETAIL_KEYS = new Set([\n \"PK\",\n \"SK\",\n \"GSI1PK\",\n \"GSI1SK\",\n \"GSI2PK\",\n \"GSI2SK\",\n /** Full FHIR JSON may contain PII; never list or ship in the bus payload. */\n \"resource\",\n]);\n\nfunction shallowValueEqual(a: unknown, b: unknown): boolean {\n return JSON.stringify(a) === JSON.stringify(b);\n}\n\nfunction changedNonResourceAttributeNames(\n oldImage: Record<string, unknown> | undefined,\n newImage: Record<string, unknown> | undefined,\n): string[] | undefined {\n if (!oldImage || !newImage) {\n return undefined;\n }\n const names = new Set<string>();\n const keys = new Set([...Object.keys(oldImage), ...Object.keys(newImage)]);\n for (const k of keys) {\n if (EXCLUDED_CHANGE_DETAIL_KEYS.has(k)) {\n continue;\n }\n if (!shallowValueEqual(oldImage[k], newImage[k])) {\n names.add(k);\n }\n }\n return names.size > 0 ? [...names].sort() : undefined;\n}\n\n/** Non-excluded attribute names present on an item (for INSERT / REMOVE). */\nfunction presentMetadataAttributeNames(\n image: Record<string, unknown> | undefined,\n): string[] | undefined {\n if (!image) {\n return undefined;\n }\n const names = Object.keys(image).filter(\n (k) => !EXCLUDED_CHANGE_DETAIL_KEYS.has(k),\n );\n return names.length > 0 ? 
names.sort() : undefined;\n}\n\nfunction plainImage(\n image: Record<string, AttributeValue> | undefined,\n): Record<string, unknown> | undefined {\n if (!image) {\n return undefined;\n }\n return dynamodbImageToPlain(image);\n}\n\nexport interface FhirCurrentResourceChangeDetail {\n changeType: \"INSERT\" | \"MODIFY\" | \"REMOVE\";\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n resourceVersion: string;\n streamSequenceNumber?: string;\n /** Seconds since UNIX epoch (DynamoDB Streams `ApproximateCreationDateTime`). */\n approximateCreationEpochSec?: number;\n /**\n * MODIFY: attributes whose values differ between old and new images.\n * INSERT / REMOVE: attributes present on the written or removed image (metadata only).\n */\n changedAttributeNames?: string[];\n}\n\nexport function buildFhirCurrentResourceChangeDetail(\n record: DynamoDbStreamKinesisRecord,\n keys: {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n },\n): FhirCurrentResourceChangeDetail {\n const rawName = record.eventName;\n const changeType =\n rawName === \"INSERT\" || rawName === \"MODIFY\" || rawName === \"REMOVE\"\n ? rawName\n : \"MODIFY\";\n\n const seq = record.dynamodb?.SequenceNumber;\n const approxEpochSec = record.dynamodb?.ApproximateCreationDateTime;\n\n const newPlain = plainImage(\n record.dynamodb?.NewImage as Record<string, AttributeValue> | undefined,\n );\n const oldPlain = plainImage(\n record.dynamodb?.OldImage as Record<string, AttributeValue> | undefined,\n );\n\n let changedAttributeNames: string[] | undefined;\n if (changeType === \"MODIFY\") {\n changedAttributeNames = changedNonResourceAttributeNames(\n oldPlain,\n newPlain,\n );\n } else if (changeType === \"INSERT\") {\n changedAttributeNames = presentMetadataAttributeNames(newPlain);\n } else {\n changedAttributeNames = presentMetadataAttributeNames(oldPlain);\n }\n\n return {\n changeType,\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n resourceVersion: keys.version,\n ...(typeof seq === \"string\" && seq.length > 0\n ? { streamSequenceNumber: seq }\n : {}),\n ...(typeof approxEpochSec === \"number\" && Number.isFinite(approxEpochSec)\n ? { approximateCreationEpochSec: approxEpochSec }\n : {}),\n ...(changedAttributeNames ? 
{ changedAttributeNames } : {}),\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oCAGO;AAMP,gBAAqB;;;ACSrB,IAAM,sBAAsB;AAOrB,SAAS,sBAAsB,YAA4B;AAChE,MAAI,CAAC,oBAAoB,KAAK,UAAU,GAAG;AACzC,UAAM,IAAI;AAAA,MACR,iCAAiC,KAAK,UAAU,UAAU,CAAC;AAAA,IAC7D;AAAA,EACF;AACA,SAAO,IAAI,UAAU;AACvB;AAEO,SAAS,wBAAwB,YAA4B;AAClE,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,+BAA+B,CAAC;AAAA,IAChC,8BAA8B,CAAC;AAAA,IAC/B;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,QAAQ,CAAC;AAAA,IACT;AAAA,IACA,QAAQ,CAAC;AAAA,EACX,EAAE,KAAK,IAAI;AACb;AAUO,SAAS,uBAAuB,YAA4B;AACjE,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,eAAe,CAAC;AAAA,IAChB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,4BAA4B,CAAC;AAAA,EAC/B,EAAE,KAAK,IAAI;AACb;AAUO,SAAS,2BAA2B,YAA4B;AACrE,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,UAAU,CAAC;AAAA,IACX;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAOA,eAAsB,sBACpB,QACA,YACe;AACf,QAAM,OAAO,MAAM,wBAAwB,UAAU,CAAC;AACxD;;;AC9FO,SAAS,kBAAkB,IAA6B;AAC7D,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,SAAS,GAAG,IACpB,OAAO,WAAW,GAAG,CAAC,IACtB,OAAO,SAAS,GAAG,GAAG,EAAE;AAAA,EAC9B;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO;AAAA,EACT;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,qBAAqB,GAAG,CAAC;AAAA,EAClC;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,IAAI,CAAC,MAAsB,kBAAkB,CAAC,CAAC;AAAA,EAC7D;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG,GAAG;AAAA,MAAI,CAAC,MAChB,EAAE,SAAS,GAAG,IAAI,OAAO,WAAW,CAAC,IAAI,OAAO,SAAS,GAAG,EAAE;AAAA,IAChE;AAAA,EACF;AACA,SAAO;AACT;AAEO,SAAS,qBACd,OACyB;AACzB,QAAM,MAA+B,CAAC;AACtC,aAAW,CAAC,GAAG,CAAC,KAAK,OAAO,QAAQ,KAAK,GAAG;AAC1C,QAAI,CAAC,IAAI,kBAAkB,CAAC;AAAA,EAC9B;AACA,SAAO;AACT;;;AC1DA,yBAA2B;AAE3B,gCAKO;AACP,uBAA2C;;;ACKpC,IAAM,0CAA0C,MAAM;;;ADuB7D,IAAM,aAAa;AACnB,IAAM,aACJ;AAGF,IAAM,wBAAwB;AAE9B,SAAS,sBACP,OACA,MACoB;AACpB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,KAAK,MAAM,IAAI;AACrB,MAAI,OAAO,IAAI,MAAM,YAAY,GAAG,EAAE,KAAK,MAAM,IAAI;AACnD,WAAO,GAAG,EAAE,KAAK;AAAA,EACnB;AACA,SAAO;AACT;AAEA,SAAS,gCACP,QAC4C;AAC5C,MAAI,OAAO,cAAc,UAAU;AACjC,WAAO,OAAO,UAAU;AAAA,EAC1B;AACA,SAAO,OAAO,UAAU;AAC1B;AAqBO,SAAS,yCACd,QACA,qBACS;AACT,QAAM,QAAQ,gCAAgC,MAAM;AACpD,QAAM,eAAe,sBAAsB,OAAO,qBAAqB;AACvE,MACE,gBACA,uBACA,iBAAiB,qBACjB;AACA,WAAO;AAAA,EACT;AAEA,SAAO,kCAAkC,OAAO,YAAY;AAC9D;AAEA,SAAS,kCAAkC,cAAgC;AACzE,MAAI,CAAC,gBAAgB,OAAO,iBAAiB,UAAU;AACrD,WAAO;AAAA,EACT;AACA,QAAM,KAAK;AACX,QAAM,eAAe,GAAG,eAAe,GAAG;AAC1C,QAAM,UAAU,GAAG,QAAQ,GAAG;AAC9B,QAAM,YACJ,OAAO,iBAAiB,WAAW,aAAa,YAAY,IAAI;AAClE,QAAM,OAAO,OAAO,YAAY,WAAW,QAAQ,YAAY,IAAI;AAEnE,MAAI,SAAS,aAAa,cAAc,0BAA0B;AAChE,WAAO;AAAA,EACT;AAEA,QAAM,qBAAqB;AAAA,IACzB;AAAA,IACA;AAAA,EACF;AACA,SAAO,mBAAmB,KAAK,CAAC,MAAM,UAAU,SAAS,CAAC,CAAC;AAC7D;AAEO,SAAS,yBAAyB,QAMhC;AACP,QAAM,OAAO,OAAO,UAAU;AAC9B,MAAI,CAAC,MAAM;AACT,WAAO;AAAA,EACT;AACA,QAAM,SAAS,KAAK,IAAI;AACxB,QAAM,SAAS,KAAK,IAAI;AACxB,MAAI,CAAC,UAAU,WAAW,YAAY;AACpC,WAAO;AAAA,EACT;AACA,QAAM,IAAI,WAAW,KAAK,MAAM;AAChC,MAAI,CAAC,GAAG,QAAQ;AACd,WAAO;AAAA,EACT;AACA,QAAM,EAAE,UAAU,aAAa,cAAc,WAAW,IAAI,EAAE;AAC9D,QAAM,QACJ,OAAO,cAAc,WACjB,OAAO,UAAU,WACjB,OAAO,UAAU;AACvB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,qBAAqB,KAAuC;AAC1E,QAAM,UAAU,OAAO,MAAM,QAAQ,WAAW,MAAM,MAAM;AAC5D,MAAI,CAAC,SAAS;AACZ,WAAO;AAAA,EACT;AACA,SAAO,EAAE,UAAU,aAAa,cAAc,YAAY,QAAQ;AACpE;;;AHrGA,IAAI;AACJ,IAAI;AACJ,IAAI;AACJ,IAAI;AAEJ,SAAS,Q
AAQ,MAA8B;AAC7C,QAAM,IAAI,QAAQ,IAAI,IAAI,GAAG,KAAK;AAClC,MAAI,CAAC,GAAG;AACN,UAAM,IAAI;AAAA,MACR,mEAAmE,IAAI;AAAA,IACzE;AAAA,EACF;AACA,SAAO;AACT;AAEA,eAAe,aAAgD;AAC7D,MAAI,cAAc;AAChB,WAAO;AAAA,EACT;AACA,QAAM,OAAO,QAAQ,gBAAgB;AACrC,QAAM,OAAO,OAAO,SAAS,QAAQ,gBAAgB,GAAG,EAAE;AAC1D,QAAM,WAAW,QAAQ,oBAAoB;AAC7C,QAAM,SAAS,QAAQ,kBAAkB;AAIzC,QAAM,aAAa,QAAQ,IAAI,gBAAgB,KAAK;AACpD,QAAM,iBAAiB,QAAQ,IAAI;AACnC,MAAI,cAAc,mBAAmB,QAAW;AAC9C,mBAAe;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM;AAAA,MACN,UAAU;AAAA,MACV,KAAK,QAAQ,IAAI,eAAe,KAAK,MAAM;AAAA,IAC7C;AACA,WAAO;AAAA,EACT;AAEA,QAAM,YAAY,QAAQ,IAAI,sBAAsB,KAAK;AACzD,MAAI,CAAC,WAAW;AACd,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACA,MAAI,CAAC,eAAe;AAClB,oBAAgB,IAAI,mDAAqB,CAAC,CAAC;AAAA,EAC7C;AACA,QAAM,MAAM,MAAM,cAAc;AAAA,IAC9B,IAAI,oDAAsB,EAAE,UAAU,UAAU,CAAC;AAAA,EACnD;AACA,MAAI,CAAC,IAAI,cAAc;AACrB,UAAM,IAAI;AAAA,MACR,UAAU,SAAS;AAAA,IACrB;AAAA,EACF;AACA,QAAM,SAAS,KAAK,MAAM,IAAI,YAAY;AAC1C,iBAAe;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,OAAO;AAAA,IACb,UAAU,OAAO;AAAA,IACjB,KAAK;AAAA,EACP;AACA,SAAO;AACT;AAEA,eAAe,UAAyB;AACtC,MAAI,MAAM;AACR,WAAO;AAAA,EACT;AACA,QAAM,MAAM,MAAM,WAAW;AAC7B,SAAO,IAAI,eAAK;AAAA,IACd,MAAM,IAAI;AAAA,IACV,MAAM,IAAI;AAAA,IACV,UAAU,IAAI;AAAA,IACd,MAAM,IAAI;AAAA,IACV,UAAU,IAAI;AAAA,IACd,KAAK,IAAI,MAAM,EAAE,oBAAoB,MAAM,IAAI;AAAA,IAC/C,KAAK;AAAA,IACL,mBAAmB;AAAA,IACnB,yBAAyB;AAAA,EAC3B,CAAC;AACD,SAAO;AACT;AAEA,eAAe,qBAAoC;AACjD,MAAI,CAAC,kBAAkB;AACrB,wBAAoB,YAAY;AAC9B,YAAM,MAAM,MAAM,WAAW;AAC7B,YAAM,IAAI,MAAM,QAAQ;AACxB,YAAM,sBAAsB,GAAG,IAAI,MAAM;AAAA,IAC3C,GAAG,EAAE,MAAM,CAAC,QAAQ;AAIlB,yBAAmB;AACnB,YAAM;AAAA,IACR,CAAC;AAAA,EACH;AACA,SAAO;AACT;AAOO,SAAS,wCAA8C;AAC5D,iBAAe;AACf,SAAO;AACP,qBAAmB;AACnB,kBAAgB;AAClB;AAEA,SAAS,oBACP,QAC6B;AAC7B,QAAM,OAAO,OAAO,KAAK,OAAO,QAAQ,MAAM,QAAQ,EAAE,SAAS,MAAM;AACvE,SAAO,KAAK,MAAM,IAAI;AACxB;AAEA,SAAS,kBACP,UACA,6BACM;AACN,MAAI,YAAY,OAAO,aAAa,UAAU;AAC5C,UAAM,OAAQ,SAAkD;AAChE,UAAM,KAAK,MAAM;AACjB,QAAI,OAAO,OAAO,UAAU;AAC1B,YAAM,SAAS,IAAI,KAAK,EAAE;AAC1B,UAAI,CAAC,OAAO,MAAM,OAAO,QAAQ,CAAC,GAAG;AACnC,eAAO;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACA,MACE,OAAO,gCAAgC,YACvC,OAAO,SAAS,2BAA2B,GAC3C;AACA,WAAO,IAAI,KAAK,8BAA8B,GAAI;AAAA,EACpD;AACA,SAAO,oBAAI,KAAK;AAClB;AAgCO,SAAS,iBACd,QACA,WACa;AACb,MAAI,yCAAyC,QAAQ,SAAS,GAAG;AAC/D,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AACA,QAAM,OAAO,yBAAyB,MAAM;AAC5C,MAAI,CAAC,MAAM;AACT,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,QAAM,WAAW,OAAO,cAAc;AACtC,QAAM,QAAQ,WACV,OAAO,UAAU,WACjB,OAAO,UAAU;AACrB,MAAI,CAAC,OAAO;AACV,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,QAAM,QAAQ,qBAAqB,KAAK;AACxC,QAAM,iBAAiB,OAAO,UAAU;AAExC,MAAI,UAAU;AACZ,WAAO;AAAA,MACL,MAAM;AAAA,MACN,QAAQ;AAAA,QACN,UAAU,KAAK;AAAA,QACf,aAAa,KAAK;AAAA,QAClB,cAAc,KAAK;AAAA,QACnB,YAAY,KAAK;AAAA,QACjB,SAAS,KAAK;AAAA,QACd,WAAW,kBAAkB,QAAW,cAAc;AAAA,MACxD;AAAA,IACF;AAAA,EACF;AAEA,MAAI,OAAO,MAAM,aAAa,UAAU;AACtC,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AACA,MAAI;AACJ,MAAI;AACF,kBAAc,KAAK,MAAM,MAAM,QAAQ;AAAA,EACzC,QAAQ;AACN,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,SAAO;AAAA,IACL,MAAM;AAAA,IACN,QAAQ;AAAA,MACN,UAAU,KAAK;AAAA,MACf,aAAa,KAAK;AAAA,MAClB,cAAc,KAAK;AAAA,MACnB,YAAY,KAAK;AAAA,MACjB,SAAS,KAAK;AAAA,MACd,aAAa,kBAAkB,aAAa,cAAc;AAAA,MAC1D,cAAc,MAAM;AAAA,IACtB;AAAA,EACF;AACF;AAcA,eAAsB,iBACpB,QACA,YACA,QACe;AACf,MAAI,OAAO,SAAS,QAAQ;AAC1B;AAAA,EACF;AACA,MAAI,OAAO,SAAS,UAAU;AAC5B,UAAMA,KAAI,OAAO;AACjB,UAAM,QAAQ;AAAA,MACZ,OAAO,MAAM,uBAAuB,UAAU,GAAG;AAAA,QAC/CA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,MACJ,CAAC;AAAA,IACH;AACA;AAAA,EACF;AACA,QAAM,IAAI,OAAO;AACjB,QAAM,QAAQ;AAAA,IACZ,OAAO,MAAM,
2BAA2B,UAAU,GAAG;AAAA,MACnD,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,IACJ,CAAC;AAAA,EACH;AACF;AAEA,eAAsB,QACpB,OACqC;AACrC,QAAM,YAAY,QAAQ,IAAI,cAAc;AAC5C,QAAM,oBAAuD,CAAC;AAE9D,QAAM,mBAAmB;AACzB,QAAM,MAAM,MAAM,WAAW;AAC7B,QAAM,IAAI,MAAM,QAAQ;AAExB,aAAW,UAAU,MAAM,SAAS;AAClC,QAAI;AACF,YAAM,SAAS,oBAAoB,MAAM;AACzC,YAAM,SAAS,iBAAiB,QAAQ,SAAS;AACjD,YAAM,iBAAiB,GAAG,IAAI,QAAQ,MAAM;AAAA,IAC9C,SAAS,KAAK;AAEZ,cAAQ;AAAA,QACN;AAAA,QACA,OAAO,QAAQ;AAAA,QACf;AAAA,MACF;AACA,wBAAkB,KAAK;AAAA,QACrB,gBAAgB,OAAO,QAAQ;AAAA,MACjC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,EAAE,kBAAkB;AAC7B;","names":["p"]}
+ {"version":3,"sources":["../src/components/postgres/data-store-postgres-replication.handler.ts","../src/components/postgres/data-store-postgres-schema.ts","../src/components/dynamodb/dynamodb-stream-record.ts","../src/components/dynamodb/firehose-archive-transform.handler.ts","../src/components/dynamodb/data-store-change-events.ts"],"sourcesContent":["import {\n GetSecretValueCommand,\n SecretsManagerClient,\n} from \"@aws-sdk/client-secrets-manager\";\nimport type {\n KinesisStreamBatchResponse,\n KinesisStreamEvent,\n KinesisStreamRecord,\n} from \"aws-lambda\";\nimport { Pool } from \"pg\";\nimport {\n buildResourceSoftDeleteSql,\n buildResourceUpsertSql,\n ensureSchemaBootstrap,\n} from \"./data-store-postgres-schema\";\nimport {\n type DynamoDbStreamKinesisRecord,\n dynamodbImageToPlain,\n} from \"../dynamodb/dynamodb-stream-record\";\nimport {\n parseCurrentResourceKeys,\n shouldDropAsGlobalTableReplicationRecord,\n} from \"../dynamodb/firehose-archive-transform.handler\";\n\n/**\n * Postgres replication consumer for the data store change Kinesis stream\n * (ADR 2026-04-17-01, phase 1). Reads the same stream that feeds the\n * Firehose-to-S3 archive and projects each current FHIR resource into the\n * `resources` JSONB table on the Aurora Serverless v2 cluster provisioned by\n * {@link DataStorePostgresReplica}. This phase implements replication only;\n * no read-side query routing is wired up here.\n */\n\nconst REQUIRED_ENV_VARS = [\n \"OPENHI_PG_HOST\",\n \"OPENHI_PG_PORT\",\n \"OPENHI_PG_DATABASE\",\n \"OPENHI_PG_SCHEMA\",\n] as const;\n\ntype RequiredEnvVar = (typeof REQUIRED_ENV_VARS)[number];\n\ninterface PostgresConnectionConfig {\n host: string;\n port: number;\n database: string;\n schema: string;\n user: string;\n password: string;\n ssl: boolean;\n}\n\ninterface SecretsManagerCredentials {\n username: string;\n password: string;\n}\n\nlet cachedConfig: PostgresConnectionConfig | undefined;\nlet pool: Pool | undefined;\nlet bootstrapPromise: Promise<void> | undefined;\nlet secretsClient: SecretsManagerClient | undefined;\n\nfunction readEnv(name: RequiredEnvVar): string {\n const v = process.env[name]?.trim();\n if (!v) {\n throw new Error(\n `Missing required environment variable for postgres replication: ${name}`,\n );\n }\n return v;\n}\n\nasync function loadConfig(): Promise<PostgresConnectionConfig> {\n if (cachedConfig) {\n return cachedConfig;\n }\n const host = readEnv(\"OPENHI_PG_HOST\");\n const port = Number.parseInt(readEnv(\"OPENHI_PG_PORT\"), 10);\n const database = readEnv(\"OPENHI_PG_DATABASE\");\n const schema = readEnv(\"OPENHI_PG_SCHEMA\");\n\n // Test/integration setups can supply user/password directly via env to skip\n // the Secrets Manager round-trip. 
Production always goes through the secret.\n const directUser = process.env.OPENHI_PG_USER?.trim();\n const directPassword = process.env.OPENHI_PG_PASSWORD;\n if (directUser && directPassword !== undefined) {\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: directUser,\n password: directPassword,\n ssl: process.env.OPENHI_PG_SSL?.trim() === \"true\",\n };\n return cachedConfig;\n }\n\n const secretArn = process.env.OPENHI_PG_SECRET_ARN?.trim();\n if (!secretArn) {\n throw new Error(\n \"Either OPENHI_PG_SECRET_ARN or both OPENHI_PG_USER and OPENHI_PG_PASSWORD must be set\",\n );\n }\n if (!secretsClient) {\n secretsClient = new SecretsManagerClient({});\n }\n const out = await secretsClient.send(\n new GetSecretValueCommand({ SecretId: secretArn }),\n );\n if (!out.SecretString) {\n throw new Error(\n `Secret ${secretArn} returned no SecretString; binary secrets are not supported`,\n );\n }\n const parsed = JSON.parse(out.SecretString) as SecretsManagerCredentials;\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: parsed.username,\n password: parsed.password,\n ssl: true,\n };\n return cachedConfig;\n}\n\nasync function getPool(): Promise<Pool> {\n if (pool) {\n return pool;\n }\n const cfg = await loadConfig();\n pool = new Pool({\n host: cfg.host,\n port: cfg.port,\n database: cfg.database,\n user: cfg.user,\n password: cfg.password,\n ssl: cfg.ssl ? { rejectUnauthorized: false } : false,\n max: 2,\n idleTimeoutMillis: 60_000,\n connectionTimeoutMillis: 10_000,\n });\n return pool;\n}\n\nasync function ensureBootstrapped(): Promise<void> {\n if (!bootstrapPromise) {\n bootstrapPromise = (async () => {\n const cfg = await loadConfig();\n const p = await getPool();\n await ensureSchemaBootstrap(p, cfg.schema);\n })().catch((err) => {\n // Reset so a subsequent invocation retries the bootstrap on a fresh\n // container; otherwise a transient failure during cold start would\n // poison every record forever.\n bootstrapPromise = undefined;\n throw err;\n });\n }\n return bootstrapPromise;\n}\n\n/**\n * Reset module-scoped caches. Test-only helper; do not invoke from production\n * code paths. 
Exposed so unit and integration tests can run multiple\n * scenarios within a single Jest worker without leaking state across cases.\n */\nexport function __resetReplicationModuleStateForTests(): void {\n cachedConfig = undefined;\n pool = undefined;\n bootstrapPromise = undefined;\n secretsClient = undefined;\n}\n\nfunction decodeKinesisRecord(\n record: KinesisStreamRecord,\n): DynamoDbStreamKinesisRecord {\n const json = Buffer.from(record.kinesis.data, \"base64\").toString(\"utf8\");\n return JSON.parse(json) as DynamoDbStreamKinesisRecord;\n}\n\nfunction deriveLastUpdated(\n resource: unknown,\n approximateCreationEpochSec: number | undefined,\n): Date {\n if (resource && typeof resource === \"object\") {\n const meta = (resource as { meta?: { lastUpdated?: unknown } }).meta;\n const lu = meta?.lastUpdated;\n if (typeof lu === \"string\") {\n const parsed = new Date(lu);\n if (!Number.isNaN(parsed.getTime())) {\n return parsed;\n }\n }\n }\n if (\n typeof approximateCreationEpochSec === \"number\" &&\n Number.isFinite(approximateCreationEpochSec)\n ) {\n return new Date(approximateCreationEpochSec * 1000);\n }\n return new Date();\n}\n\ninterface UpsertParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n lastUpdated: Date;\n resourceJson: string;\n}\n\ninterface SoftDeleteParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n deletedAt: Date;\n}\n\ntype WriteIntent =\n | { kind: \"upsert\"; params: UpsertParams }\n | { kind: \"delete\"; params: SoftDeleteParams }\n | { kind: \"drop\" };\n\n/**\n * Translate a single Kinesis-wrapped DynamoDB stream record into a write\n * intent against the `resources` table. Returns `drop` for replica-side\n * global-table records, non-CURRENT items, and any record whose resource\n * payload cannot be decoded as JSON.\n */\nexport function buildWriteIntent(\n change: DynamoDbStreamKinesisRecord,\n awsRegion: string,\n): WriteIntent {\n if (shouldDropAsGlobalTableReplicationRecord(change, awsRegion)) {\n return { kind: \"drop\" };\n }\n const keys = parseCurrentResourceKeys(change);\n if (!keys) {\n return { kind: \"drop\" };\n }\n\n const isRemove = change.eventName === \"REMOVE\";\n const image = isRemove\n ? change.dynamodb?.OldImage\n : change.dynamodb?.NewImage;\n if (!image) {\n return { kind: \"drop\" };\n }\n\n const plain = dynamodbImageToPlain(image);\n const approxEpochSec = change.dynamodb?.ApproximateCreationDateTime;\n\n if (isRemove) {\n return {\n kind: \"delete\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n deletedAt: deriveLastUpdated(undefined, approxEpochSec),\n },\n };\n }\n\n if (typeof plain.resource !== \"string\") {\n return { kind: \"drop\" };\n }\n let resourceObj: unknown;\n try {\n resourceObj = JSON.parse(plain.resource);\n } catch {\n return { kind: \"drop\" };\n }\n\n return {\n kind: \"upsert\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n lastUpdated: deriveLastUpdated(resourceObj, approxEpochSec),\n resourceJson: plain.resource,\n },\n };\n}\n\ninterface PgQueryable {\n query: (\n text: string,\n values?: ReadonlyArray<unknown>,\n ) => Promise<unknown> | unknown;\n}\n\n/**\n * Execute a write intent against an open Postgres connection or pool. 
Pure\n * SQL dispatch — extracted from the handler so unit tests can drive it with a\n * mock client and integration tests can drive it with a real pool.\n */\nexport async function applyWriteIntent(\n client: PgQueryable,\n schemaName: string,\n intent: WriteIntent,\n): Promise<void> {\n if (intent.kind === \"drop\") {\n return;\n }\n if (intent.kind === \"upsert\") {\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceUpsertSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.lastUpdated,\n p.resourceJson,\n ]),\n );\n return;\n }\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceSoftDeleteSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.deletedAt,\n ]),\n );\n}\n\nexport async function handler(\n event: KinesisStreamEvent,\n): Promise<KinesisStreamBatchResponse> {\n const awsRegion = process.env.AWS_REGION ?? \"\";\n const batchItemFailures: Array<{ itemIdentifier: string }> = [];\n\n await ensureBootstrapped();\n const cfg = await loadConfig();\n const p = await getPool();\n\n for (const record of event.Records) {\n try {\n const change = decodeKinesisRecord(record);\n const intent = buildWriteIntent(change, awsRegion);\n await applyWriteIntent(p, cfg.schema, intent);\n } catch (err) {\n // eslint-disable-next-line no-console\n console.error(\n \"postgres replication record failed\",\n record.kinesis.sequenceNumber,\n err,\n );\n batchItemFailures.push({\n itemIdentifier: record.kinesis.sequenceNumber,\n });\n }\n }\n\n return { batchItemFailures };\n}\n","import type { Pool, PoolClient } from \"pg\";\n\n/**\n * @see sites/www-docs/content/architecture/adr/2026-04-17-01-ad-hoc-query-support-fhir-api.md\n *\n * SQL strings for the Postgres replication tier (ADR 2026-04-17-01, phase 1):\n * the JSONB `resources` table that mirrors current FHIR resources from the\n * DynamoDB single-table store. Phase 1 covers replication only; query routing\n * and indexes for individual SearchParameters are deferred to a follow-on phase.\n *\n * The `version` column stores the resource's `vid` (a ULID — see\n * `data-entity-common.ts`). ULIDs are monotonically lexically sortable, so the\n * UPSERT/UPDATE guards use plain `>` text comparison to reject out-of-order\n * Kinesis deliveries.\n */\n\nexport const POSTGRES_REPLICATION_SCHEMA_VERSION = 1;\n\nconst SCHEMA_NAME_PATTERN = /^[a-z_][a-z0-9_]{0,62}$/;\n\n/**\n * Validate that a schema name is a safe Postgres identifier and quote it for\n * inclusion in DDL/DML. Throws on anything that doesn't match the lower-snake\n * pattern OpenHI uses for branch-derived schema names (e.g. `b_a1b2c3`).\n */\nexport function quoteSchemaIdentifier(schemaName: string): string {\n if (!SCHEMA_NAME_PATTERN.test(schemaName)) {\n throw new Error(\n `Invalid Postgres schema name: ${JSON.stringify(schemaName)}; expected /[a-z_][a-z0-9_]{0,62}/`,\n );\n }\n return `\"${schemaName}\"`;\n}\n\n/**\n * Build the bootstrap DDL as a list of independent statements. 
Use this form\n * when sending the DDL through a transport that does not accept multi-statement\n * SQL in a single call — most importantly the AWS RDS Data API's\n * `ExecuteStatement`, which the REST API runner uses.\n *\n * Each statement is fully self-contained and idempotent (`IF NOT EXISTS`),\n * so the array can be executed in order with no transaction wrapper.\n */\nexport function buildSchemaBootstrapStatements(\n schemaName: string,\n): ReadonlyArray<string> {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `CREATE SCHEMA IF NOT EXISTS ${s};`,\n [\n `CREATE TABLE IF NOT EXISTS ${s}.resources (`,\n ` tenant_id text NOT NULL,`,\n ` workspace_id text NOT NULL,`,\n ` resource_type text NOT NULL,`,\n ` resource_id text NOT NULL,`,\n ` version text NOT NULL,`,\n ` last_updated timestamptz NOT NULL,`,\n ` deleted_at timestamptz,`,\n ` resource jsonb NOT NULL,`,\n ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,\n `);`,\n ].join(\"\\n\"),\n [\n `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,\n ` ON ${s}.resources USING gin (resource);`,\n ].join(\"\\n\"),\n [\n `CREATE INDEX IF NOT EXISTS resources_listing`,\n ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`,\n ].join(\"\\n\"),\n ];\n}\n\nexport function buildSchemaBootstrapSql(schemaName: string): string {\n return buildSchemaBootstrapStatements(schemaName).join(\"\\n\");\n}\n\n/**\n * INSERT/MODIFY UPSERT with a monotonic `version` (ULID) guard. If a record\n * arrives out of order — e.g. a retried Kinesis delivery for an older `vid`\n * after a newer one — the WHERE clause in DO UPDATE leaves the row unchanged.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version, $6 last_updated, $7 resource (jsonb-serialized).\n */\nexport function buildResourceUpsertSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `INSERT INTO ${s}.resources`,\n ` (tenant_id, workspace_id, resource_type, resource_id, version, last_updated, deleted_at, resource)`,\n `VALUES ($1, $2, $3, $4, $5, $6, NULL, $7::jsonb)`,\n `ON CONFLICT (tenant_id, workspace_id, resource_type, resource_id)`,\n `DO UPDATE SET`,\n ` version = EXCLUDED.version,`,\n ` last_updated = EXCLUDED.last_updated,`,\n ` deleted_at = NULL,`,\n ` resource = EXCLUDED.resource`,\n `WHERE EXCLUDED.version > ${s}.resources.version;`,\n ].join(\"\\n\");\n}\n\n/**\n * REMOVE soft-delete with the same monotonic `version` guard. Using `>` (not\n * `>=`) means a duplicate REMOVE for the same version is a no-op. Param order\n * matches {@link buildResourceUpsertSql} for the first five params.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version (incoming), $6 deleted_at.\n */\nexport function buildResourceSoftDeleteSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `UPDATE ${s}.resources`,\n `SET deleted_at = $6,`,\n ` version = $5`,\n `WHERE tenant_id = $1`,\n ` AND workspace_id = $2`,\n ` AND resource_type = $3`,\n ` AND resource_id = $4`,\n ` AND $5 > version;`,\n ].join(\"\\n\");\n}\n\n/**\n * Run schema DDL idempotently. Safe to call from every Lambda cold start; the\n * `IF NOT EXISTS` clauses make repeats no-ops. 
Caller is responsible for\n * memoizing across invocations on the same warm container.\n */\nexport async function ensureSchemaBootstrap(\n client: Pool | PoolClient,\n schemaName: string,\n): Promise<void> {\n await client.query(buildSchemaBootstrapSql(schemaName));\n}\n","import type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\n\n/**\n * Shape of a DynamoDB change record as delivered inside Kinesis (table stream\n * destination) and decoded by the Firehose transform Lambda.\n */\nexport interface DynamoDbStreamKinesisRecord {\n eventName?: string;\n userIdentity?: unknown;\n dynamodb?: {\n Keys?: Record<string, AttributeValue>;\n NewImage?: Record<string, AttributeValue>;\n OldImage?: Record<string, AttributeValue>;\n SequenceNumber?: string;\n ApproximateCreationDateTime?: number;\n };\n}\n\nexport function dynamodbValueToJs(av: AttributeValue): unknown {\n if (av.S !== undefined) {\n return av.S;\n }\n if (av.N !== undefined) {\n return av.N.includes(\".\")\n ? Number.parseFloat(av.N)\n : Number.parseInt(av.N, 10);\n }\n if (av.BOOL !== undefined) {\n return av.BOOL;\n }\n if (av.NULL !== undefined) {\n return null;\n }\n if (av.M !== undefined) {\n return dynamodbImageToPlain(av.M);\n }\n if (av.L !== undefined) {\n return av.L.map((x: AttributeValue) => dynamodbValueToJs(x));\n }\n if (av.SS !== undefined) {\n return av.SS;\n }\n if (av.NS !== undefined) {\n return av.NS.map((n: string) =>\n n.includes(\".\") ? Number.parseFloat(n) : Number.parseInt(n, 10),\n );\n }\n return undefined;\n}\n\nexport function dynamodbImageToPlain(\n image: Record<string, AttributeValue>,\n): Record<string, unknown> {\n const out: Record<string, unknown> = {};\n for (const [k, v] of Object.entries(image)) {\n out[k] = dynamodbValueToJs(v);\n }\n return out;\n}\n","import { randomUUID } from \"node:crypto\";\nimport type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\nimport {\n EventBridgeClient,\n PutEventsCommand,\n type PutEventsRequestEntry,\n type PutEventsResultEntry,\n} from \"@aws-sdk/client-eventbridge\";\nimport { PutObjectCommand, S3Client } from \"@aws-sdk/client-s3\";\nimport type {\n FirehoseTransformationEvent,\n FirehoseTransformationResult,\n FirehoseTransformationResultRecord,\n} from \"aws-lambda\";\nimport {\n DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES,\n DATA_STORE_CHANGE_DETAIL_TYPE,\n DATA_STORE_CHANGE_EVENT_SOURCE,\n buildFhirCurrentResourceChangeDetail,\n} from \"./data-store-change-events\";\nimport {\n type DynamoDbStreamKinesisRecord,\n dynamodbImageToPlain,\n} from \"./dynamodb-stream-record\";\n\nexport type { DynamoDbStreamKinesisRecord } from \"./dynamodb-stream-record\";\n\n/**\n * Firehose data-transformation handler: filters DynamoDB change records to\n * current FHIR resource items (SK = CURRENT, TID#…#WID#…#RT#…#ID#… PK),\n * writes archive JSON to S3 via Firehose, sets dynamic partition keys per\n * ADR 2026-03-11-02, and publishes de-identified change notifications to the\n * data event bus via PutEvents per ADR 2026-03-02-01, with retries and an S3\n * dead-letter bucket for entries that still fail.\n */\n\nconst CURRENT_SK = \"CURRENT\";\nconst PK_PATTERN =\n /^TID#(?<tenantId>[^#]+)#WID#(?<workspaceId>[^#]+)#RT#(?<resourceType>[^#]+)#ID#(?<resourceId>.+)$/;\n\n/** DynamoDB-managed attribute on global table items (see AWS Global Tables legacy / replication docs). 
*/\nconst AWS_REP_UPDATE_REGION = \"aws:rep:updateregion\";\n\nfunction getDynamoDbStringAttr(\n image: Record<string, AttributeValue> | undefined,\n name: string,\n): string | undefined {\n if (!image) {\n return undefined;\n }\n const av = image[name];\n if (typeof av?.S === \"string\" && av.S.trim() !== \"\") {\n return av.S.trim();\n }\n return undefined;\n}\n\nfunction primaryImageForReplicationCheck(\n record: DynamoDbStreamKinesisRecord,\n): Record<string, AttributeValue> | undefined {\n if (record.eventName === \"REMOVE\") {\n return record.dynamodb?.OldImage;\n }\n return record.dynamodb?.NewImage;\n}\n\n/**\n * Returns true when this stream/Kinesis record should not be archived because it\n * represents a **replica-side application** of a global-table change (the logical\n * write originated in another Region).\n *\n * - If `aws:rep:updateregion` is present on the item image and differs from\n * `archiveLambdaRegion`, the change was replicated into this Region (archive\n * only in the Region that matches `aws:rep:updateregion`).\n * - Otherwise, if `userIdentity` matches the DynamoDB replication service SLR,\n * treat as replication. **Excluded:** TTL deletes (`type` Service and\n * `principalId` `dynamodb.amazonaws.com`) per AWS stream Identity docs.\n *\n * For MREC global tables version 2019.11.21, AWS documents that stream records\n * may not carry distinguishable metadata; the recommended approach is a custom\n * “write region” attribute on items. If neither that attribute nor\n * `aws:rep:updateregion` nor replication `userIdentity` applies, this function\n * returns false (no drop)—duplicate archives are possible if identical pipelines\n * run in every Region without those signals.\n */\nexport function shouldDropAsGlobalTableReplicationRecord(\n record: DynamoDbStreamKinesisRecord,\n archiveLambdaRegion: string,\n): boolean {\n const image = primaryImageForReplicationCheck(record);\n const updateRegion = getDynamoDbStringAttr(image, AWS_REP_UPDATE_REGION);\n if (\n updateRegion &&\n archiveLambdaRegion &&\n updateRegion !== archiveLambdaRegion\n ) {\n return true;\n }\n\n return isDynamoDbReplicationUserIdentity(record.userIdentity);\n}\n\nfunction isDynamoDbReplicationUserIdentity(userIdentity: unknown): boolean {\n if (!userIdentity || typeof userIdentity !== \"object\") {\n return false;\n }\n const ui = userIdentity as Record<string, unknown>;\n const principalRaw = ui.principalId ?? ui.PrincipalId;\n const typeRaw = ui.type ?? ui.Type;\n const principal =\n typeof principalRaw === \"string\" ? principalRaw.toLowerCase() : \"\";\n const type = typeof typeRaw === \"string\" ? 
typeRaw.toLowerCase() : \"\";\n\n if (type === \"service\" && principal === \"dynamodb.amazonaws.com\") {\n return false;\n }\n\n const replicationMarkers = [\n \"awsservicerolefordynamodbreplication\",\n \"replication.dynamodb.amazonaws.com\",\n ];\n return replicationMarkers.some((m) => principal.includes(m));\n}\n\nexport function parseCurrentResourceKeys(record: DynamoDbStreamKinesisRecord): {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n} | null {\n const keys = record.dynamodb?.Keys;\n if (!keys) {\n return null;\n }\n const pkAttr = keys.PK?.S;\n const skAttr = keys.SK?.S;\n if (!pkAttr || skAttr !== CURRENT_SK) {\n return null;\n }\n const m = PK_PATTERN.exec(pkAttr);\n if (!m?.groups) {\n return null;\n }\n const { tenantId, workspaceId, resourceType, resourceId } = m.groups;\n const image =\n record.eventName === \"REMOVE\"\n ? record.dynamodb?.OldImage\n : record.dynamodb?.NewImage;\n if (!image) {\n return null;\n }\n const plain = dynamodbImageToPlain(image as Record<string, AttributeValue>);\n const version = typeof plain.vid === \"string\" ? plain.vid : null;\n if (!version) {\n return null;\n }\n return { tenantId, workspaceId, resourceType, resourceId, version };\n}\n\nfunction partitionToken(value: string): string {\n if (!value || value.trim() === \"\") {\n return \"-\";\n }\n return value.replace(/[/\\\\]/g, \"_\");\n}\n\nfunction buildArchivePayload(\n record: DynamoDbStreamKinesisRecord,\n keys: ReturnType<typeof parseCurrentResourceKeys>,\n): Record<string, unknown> {\n const newImage = record.dynamodb?.NewImage;\n const oldImage = record.dynamodb?.OldImage;\n const resourceImage = record.eventName === \"REMOVE\" ? oldImage : newImage;\n const resourcePlain = resourceImage\n ? dynamodbImageToPlain(resourceImage as Record<string, AttributeValue>)\n : {};\n\n if (typeof resourcePlain.resource === \"string\") {\n try {\n resourcePlain.resource = JSON.parse(resourcePlain.resource) as unknown;\n } catch {\n /* keep raw string if not valid JSON */\n }\n }\n\n return {\n eventName: record.eventName,\n archivedAt: new Date().toISOString(),\n tenantId: keys!.tenantId,\n workspaceId: keys!.workspaceId,\n resourceType: keys!.resourceType,\n resourceId: keys!.resourceId,\n version: keys!.version,\n resource: resourcePlain,\n };\n}\n\nconst PUT_EVENTS_BATCH_SIZE = 10;\n\n/** Full PutEvents rounds per chunk (initial attempt + failure-driven retries). 
*/\nconst MAX_PUT_EVENTS_ROUNDS = 3;\n\nlet eventBridgeClient: EventBridgeClient | undefined;\n\nfunction getEventBridgeClient(): EventBridgeClient | undefined {\n const bus = process.env.DATA_EVENT_BUS_NAME?.trim();\n if (!bus) {\n return undefined;\n }\n if (!eventBridgeClient) {\n eventBridgeClient = new EventBridgeClient({});\n }\n return eventBridgeClient;\n}\n\nlet s3ClientForDlq: S3Client | undefined;\n\nfunction getS3ClientForDlq(): S3Client | undefined {\n const bucket = process.env.DATA_STORE_PUT_EVENTS_DLQ_BUCKET?.trim();\n if (!bucket) {\n return undefined;\n }\n if (!s3ClientForDlq) {\n s3ClientForDlq = new S3Client({});\n }\n return s3ClientForDlq;\n}\n\ntype PutEventsEntry = PutEventsRequestEntry;\n\ninterface PutEventsDlqPayload {\n dlqSchemaVersion: 1;\n failedAt: string;\n reason: \"put_events_partial_failure\" | \"put_events_sdk_error\";\n attemptRounds: number;\n entries: PutEventsEntry[];\n putEventsResultEntries?: PutEventsResultEntry[];\n sdkError?: string;\n}\n\nasync function writePutEventsFailuresToDlq(\n payload: PutEventsDlqPayload,\n): Promise<void> {\n const bucket = process.env.DATA_STORE_PUT_EVENTS_DLQ_BUCKET?.trim();\n const client = getS3ClientForDlq();\n if (!bucket || !client) {\n throw new Error(\n `PutEvents exhausted retries but DATA_STORE_PUT_EVENTS_DLQ_BUCKET is not set (${payload.reason})`,\n );\n }\n const day = payload.failedAt.slice(0, 10);\n const key = `put-events-failed/${day}/${randomUUID()}.json`;\n await client.send(\n new PutObjectCommand({\n Bucket: bucket,\n Key: key,\n Body: JSON.stringify(payload),\n ContentType: \"application/json\",\n }),\n );\n}\n\n/**\n * Sends one PutEvents batch (≤10 entries) with up to {@link MAX_PUT_EVENTS_ROUNDS}\n * rounds. After the last round, remaining failures or a final SDK error are\n * written to the DLQ S3 bucket (if configured); DLQ write failure throws.\n */\nasync function putEventsChunkWithRetriesAndDlq(\n client: EventBridgeClient,\n entries: PutEventsEntry[],\n): Promise<void> {\n if (entries.length === 0) {\n return;\n }\n\n let pending = [...entries];\n\n for (let round = 1; round <= MAX_PUT_EVENTS_ROUNDS; round++) {\n try {\n const out = await client.send(new PutEventsCommand({ Entries: pending }));\n const failed = out.FailedEntryCount ?? 0;\n if (failed === 0) {\n return;\n }\n\n const nextPending: PutEventsEntry[] = [];\n out.Entries?.forEach((e: PutEventsResultEntry | undefined, i: number) => {\n if (e?.ErrorCode && pending[i]) {\n nextPending.push(pending[i]!);\n }\n });\n pending = nextPending;\n\n if (pending.length === 0) {\n return;\n }\n\n if (round === MAX_PUT_EVENTS_ROUNDS) {\n await writePutEventsFailuresToDlq({\n dlqSchemaVersion: 1,\n failedAt: new Date().toISOString(),\n reason: \"put_events_partial_failure\",\n attemptRounds: MAX_PUT_EVENTS_ROUNDS,\n entries: pending,\n putEventsResultEntries: out.Entries,\n });\n return;\n }\n } catch (sdkErr) {\n const sdkMessage =\n sdkErr instanceof Error ? 
sdkErr.message : String(sdkErr);\n if (round === MAX_PUT_EVENTS_ROUNDS) {\n await writePutEventsFailuresToDlq({\n dlqSchemaVersion: 1,\n failedAt: new Date().toISOString(),\n reason: \"put_events_sdk_error\",\n attemptRounds: MAX_PUT_EVENTS_ROUNDS,\n entries: pending,\n sdkError: sdkMessage,\n });\n return;\n }\n await new Promise((r) => setTimeout(r, 50 * round));\n }\n }\n}\n\nasync function publishDataStoreChangeEvents(\n pending: Array<{\n change: DynamoDbStreamKinesisRecord;\n keys: NonNullable<ReturnType<typeof parseCurrentResourceKeys>>;\n }>,\n): Promise<void> {\n const client = getEventBridgeClient();\n const busName = process.env.DATA_EVENT_BUS_NAME?.trim();\n if (!client || !busName || pending.length === 0) {\n return;\n }\n\n const entries: PutEventsEntry[] = [];\n for (const { change, keys } of pending) {\n const detailObj = buildFhirCurrentResourceChangeDetail(change, keys);\n const detail = JSON.stringify(detailObj);\n const detailBytes = Buffer.byteLength(detail, \"utf8\");\n if (detailBytes > DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES) {\n throw new Error(\n `Event detail is ${detailBytes} bytes (max ${DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES}); ` +\n `oversize strategy deferred per ADR 2026-03-02-01 (${keys.resourceType}/${keys.resourceId}).`,\n );\n }\n entries.push({\n Source: DATA_STORE_CHANGE_EVENT_SOURCE,\n DetailType: DATA_STORE_CHANGE_DETAIL_TYPE,\n Detail: detail,\n EventBusName: busName,\n });\n }\n\n for (let i = 0; i < entries.length; i += PUT_EVENTS_BATCH_SIZE) {\n const chunk = entries.slice(i, i + PUT_EVENTS_BATCH_SIZE);\n await putEventsChunkWithRetriesAndDlq(client, chunk);\n }\n}\n\nexport async function handler(\n event: FirehoseTransformationEvent,\n): Promise<FirehoseTransformationResult> {\n const records: FirehoseTransformationResultRecord[] = [];\n const archiveLambdaRegion = process.env.AWS_REGION ?? 
\"\";\n const pendingPublish: Array<{\n change: DynamoDbStreamKinesisRecord;\n keys: NonNullable<ReturnType<typeof parseCurrentResourceKeys>>;\n }> = [];\n\n for (const rec of event.records) {\n try {\n const payload = Buffer.from(rec.data, \"base64\").toString(\"utf8\");\n const change = JSON.parse(payload) as DynamoDbStreamKinesisRecord;\n\n if (\n shouldDropAsGlobalTableReplicationRecord(change, archiveLambdaRegion)\n ) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const keys = parseCurrentResourceKeys(change);\n\n if (!keys) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const archive = buildArchivePayload(change, keys);\n const out = Buffer.from(`${JSON.stringify(archive)}\\n`).toString(\n \"base64\",\n );\n\n pendingPublish.push({ change, keys });\n\n records.push({\n recordId: rec.recordId,\n result: \"Ok\",\n data: out,\n metadata: {\n partitionKeys: {\n tenantId: partitionToken(keys.tenantId),\n workspaceId: partitionToken(keys.workspaceId),\n resourceType: partitionToken(keys.resourceType),\n resourceId: partitionToken(keys.resourceId),\n version: partitionToken(keys.version),\n },\n },\n });\n } catch {\n records.push({\n recordId: rec.recordId,\n result: \"ProcessingFailed\",\n data: rec.data,\n });\n }\n }\n\n await publishDataStoreChangeEvents(pendingPublish);\n\n return { records };\n}\n","import type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\nimport type { DynamoDbStreamKinesisRecord } from \"./dynamodb-stream-record\";\nimport { dynamodbImageToPlain } from \"./dynamodb-stream-record\";\n\n/**\n * EventBridge envelope constants for data-store CDC (no CDK imports).\n * @see docs/architecture/adr/2026-03-02-01-dynamodb-stream-to-data-event-bus.md\n */\nexport const DATA_STORE_CHANGE_EVENT_SOURCE = \"openhi.data.store\";\n\nexport const DATA_STORE_CHANGE_DETAIL_TYPE = \"FhirCurrentResourceChanged\";\n\n/** AWS PutEvents per-entry detail limit is 256 KiB; stay under for headroom. */\nexport const DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES = 250 * 1024;\n\nconst EXCLUDED_CHANGE_DETAIL_KEYS = new Set([\n \"PK\",\n \"SK\",\n \"GSI1PK\",\n \"GSI1SK\",\n \"GSI2PK\",\n \"GSI2SK\",\n /** Full FHIR JSON may contain PII; never list or ship in the bus payload. */\n \"resource\",\n]);\n\nfunction shallowValueEqual(a: unknown, b: unknown): boolean {\n return JSON.stringify(a) === JSON.stringify(b);\n}\n\nfunction changedNonResourceAttributeNames(\n oldImage: Record<string, unknown> | undefined,\n newImage: Record<string, unknown> | undefined,\n): string[] | undefined {\n if (!oldImage || !newImage) {\n return undefined;\n }\n const names = new Set<string>();\n const keys = new Set([...Object.keys(oldImage), ...Object.keys(newImage)]);\n for (const k of keys) {\n if (EXCLUDED_CHANGE_DETAIL_KEYS.has(k)) {\n continue;\n }\n if (!shallowValueEqual(oldImage[k], newImage[k])) {\n names.add(k);\n }\n }\n return names.size > 0 ? [...names].sort() : undefined;\n}\n\n/** Non-excluded attribute names present on an item (for INSERT / REMOVE). */\nfunction presentMetadataAttributeNames(\n image: Record<string, unknown> | undefined,\n): string[] | undefined {\n if (!image) {\n return undefined;\n }\n const names = Object.keys(image).filter(\n (k) => !EXCLUDED_CHANGE_DETAIL_KEYS.has(k),\n );\n return names.length > 0 ? 
names.sort() : undefined;\n}\n\nfunction plainImage(\n image: Record<string, AttributeValue> | undefined,\n): Record<string, unknown> | undefined {\n if (!image) {\n return undefined;\n }\n return dynamodbImageToPlain(image);\n}\n\nexport interface FhirCurrentResourceChangeDetail {\n changeType: \"INSERT\" | \"MODIFY\" | \"REMOVE\";\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n resourceVersion: string;\n streamSequenceNumber?: string;\n /** Seconds since UNIX epoch (DynamoDB Streams `ApproximateCreationDateTime`). */\n approximateCreationEpochSec?: number;\n /**\n * MODIFY: attributes whose values differ between old and new images.\n * INSERT / REMOVE: attributes present on the written or removed image (metadata only).\n */\n changedAttributeNames?: string[];\n}\n\nexport function buildFhirCurrentResourceChangeDetail(\n record: DynamoDbStreamKinesisRecord,\n keys: {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n },\n): FhirCurrentResourceChangeDetail {\n const rawName = record.eventName;\n const changeType =\n rawName === \"INSERT\" || rawName === \"MODIFY\" || rawName === \"REMOVE\"\n ? rawName\n : \"MODIFY\";\n\n const seq = record.dynamodb?.SequenceNumber;\n const approxEpochSec = record.dynamodb?.ApproximateCreationDateTime;\n\n const newPlain = plainImage(\n record.dynamodb?.NewImage as Record<string, AttributeValue> | undefined,\n );\n const oldPlain = plainImage(\n record.dynamodb?.OldImage as Record<string, AttributeValue> | undefined,\n );\n\n let changedAttributeNames: string[] | undefined;\n if (changeType === \"MODIFY\") {\n changedAttributeNames = changedNonResourceAttributeNames(\n oldPlain,\n newPlain,\n );\n } else if (changeType === \"INSERT\") {\n changedAttributeNames = presentMetadataAttributeNames(newPlain);\n } else {\n changedAttributeNames = presentMetadataAttributeNames(oldPlain);\n }\n\n return {\n changeType,\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n resourceVersion: keys.version,\n ...(typeof seq === \"string\" && seq.length > 0\n ? { streamSequenceNumber: seq }\n : {}),\n ...(typeof approxEpochSec === \"number\" && Number.isFinite(approxEpochSec)\n ? { approximateCreationEpochSec: approxEpochSec }\n : {}),\n ...(changedAttributeNames ? 
{ changedAttributeNames } : {}),\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oCAGO;AAMP,gBAAqB;;;ACSrB,IAAM,sBAAsB;AAOrB,SAAS,sBAAsB,YAA4B;AAChE,MAAI,CAAC,oBAAoB,KAAK,UAAU,GAAG;AACzC,UAAM,IAAI;AAAA,MACR,iCAAiC,KAAK,UAAU,UAAU,CAAC;AAAA,IAC7D;AAAA,EACF;AACA,SAAO,IAAI,UAAU;AACvB;AAWO,SAAS,+BACd,YACuB;AACvB,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,+BAA+B,CAAC;AAAA,IAChC;AAAA,MACE,8BAA8B,CAAC;AAAA,MAC/B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF,EAAE,KAAK,IAAI;AAAA,IACX;AAAA,MACE;AAAA,MACA,QAAQ,CAAC;AAAA,IACX,EAAE,KAAK,IAAI;AAAA,IACX;AAAA,MACE;AAAA,MACA,QAAQ,CAAC;AAAA,IACX,EAAE,KAAK,IAAI;AAAA,EACb;AACF;AAEO,SAAS,wBAAwB,YAA4B;AAClE,SAAO,+BAA+B,UAAU,EAAE,KAAK,IAAI;AAC7D;AAUO,SAAS,uBAAuB,YAA4B;AACjE,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,eAAe,CAAC;AAAA,IAChB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,4BAA4B,CAAC;AAAA,EAC/B,EAAE,KAAK,IAAI;AACb;AAUO,SAAS,2BAA2B,YAA4B;AACrE,QAAM,IAAI,sBAAsB,UAAU;AAC1C,SAAO;AAAA,IACL,UAAU,CAAC;AAAA,IACX;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAOA,eAAsB,sBACpB,QACA,YACe;AACf,QAAM,OAAO,MAAM,wBAAwB,UAAU,CAAC;AACxD;;;ACnHO,SAAS,kBAAkB,IAA6B;AAC7D,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,SAAS,GAAG,IACpB,OAAO,WAAW,GAAG,CAAC,IACtB,OAAO,SAAS,GAAG,GAAG,EAAE;AAAA,EAC9B;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO;AAAA,EACT;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,qBAAqB,GAAG,CAAC;AAAA,EAClC;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,IAAI,CAAC,MAAsB,kBAAkB,CAAC,CAAC;AAAA,EAC7D;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG,GAAG;AAAA,MAAI,CAAC,MAChB,EAAE,SAAS,GAAG,IAAI,OAAO,WAAW,CAAC,IAAI,OAAO,SAAS,GAAG,EAAE;AAAA,IAChE;AAAA,EACF;AACA,SAAO;AACT;AAEO,SAAS,qBACd,OACyB;AACzB,QAAM,MAA+B,CAAC;AACtC,aAAW,CAAC,GAAG,CAAC,KAAK,OAAO,QAAQ,KAAK,GAAG;AAC1C,QAAI,CAAC,IAAI,kBAAkB,CAAC;AAAA,EAC9B;AACA,SAAO;AACT;;;AC1DA,yBAA2B;AAE3B,gCAKO;AACP,uBAA2C;;;ACKpC,IAAM,0CAA0C,MAAM;;;ADuB7D,IAAM,aAAa;AACnB,IAAM,aACJ;AAGF,IAAM,wBAAwB;AAE9B,SAAS,sBACP,OACA,MACoB;AACpB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,KAAK,MAAM,IAAI;AACrB,MAAI,OAAO,IAAI,MAAM,YAAY,GAAG,EAAE,KAAK,MAAM,IAAI;AACnD,WAAO,GAAG,EAAE,KAAK;AAAA,EACnB;AACA,SAAO;AACT;AAEA,SAAS,gCACP,QAC4C;AAC5C,MAAI,OAAO,cAAc,UAAU;AACjC,WAAO,OAAO,UAAU;AAAA,EAC1B;AACA,SAAO,OAAO,UAAU;AAC1B;AAqBO,SAAS,yCACd,QACA,qBACS;AACT,QAAM,QAAQ,gCAAgC,MAAM;AACpD,QAAM,eAAe,sBAAsB,OAAO,qBAAqB;AACvE,MACE,gBACA,uBACA,iBAAiB,qBACjB;AACA,WAAO;AAAA,EACT;AAEA,SAAO,kCAAkC,OAAO,YAAY;AAC9D;AAEA,SAAS,kCAAkC,cAAgC;AACzE,MAAI,CAAC,gBAAgB,OAAO,iBAAiB,UAAU;AACrD,WAAO;AAAA,EACT;AACA,QAAM,KAAK;AACX,QAAM,eAAe,GAAG,eAAe,GAAG;AAC1C,QAAM,UAAU,GAAG,QAAQ,GAAG;AAC9B,QAAM,YACJ,OAAO,iBAAiB,WAAW,aAAa,YAAY,IAAI;AAClE,QAAM,OAAO,OAAO,YAAY,WAAW,QAAQ,YAAY,IAAI;AAEnE,MAAI,SAAS,aAAa,cAAc,0BAA0B;AAChE,WAAO;AAAA,EACT;AAEA,QAAM,qBAAqB;AAAA,IACzB;AAAA,IACA;AAAA,EACF;AACA,SAAO,mBAAmB,KAAK,CAAC,MAAM,UAAU,SAAS,CAAC,CAAC;AAC7D;AAEO,SAAS,yBAAyB,QAMhC;AACP,QAAM,OAAO,OAAO,UAAU;AAC9B,MAAI,CAAC,MAAM;AACT,WAAO;AAAA,EACT;AACA,QAAM,SAAS,KAAK,IAAI;AACxB,QAAM,SAAS,KAAK,IAAI;AACxB,MAAI,CAAC,UAAU,WAAW,YAAY;AACpC,WAAO;AAAA,EACT;AACA,QAAM,IAAI,WAAW,KAAK,MAAM;AAChC,MAAI,CAAC,GAAG,QAAQ;AACd,WAAO;AAAA,EACT;AACA,QAAM,EAAE,UAAU,aAAa,cAAc,WAAW,IAAI,EAAE;AAC9D,QAAM,QACJ,OAAO,cAAc,WACjB,OAAO,UAAU,WACjB,OAAO,UAAU;AACvB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,qBAAqB,KAAuC;AAC1E,QAAM,UAAU,OAAO,MAAM,QAAQ,
WAAW,MAAM,MAAM;AAC5D,MAAI,CAAC,SAAS;AACZ,WAAO;AAAA,EACT;AACA,SAAO,EAAE,UAAU,aAAa,cAAc,YAAY,QAAQ;AACpE;;;AHrGA,IAAI;AACJ,IAAI;AACJ,IAAI;AACJ,IAAI;AAEJ,SAAS,QAAQ,MAA8B;AAC7C,QAAM,IAAI,QAAQ,IAAI,IAAI,GAAG,KAAK;AAClC,MAAI,CAAC,GAAG;AACN,UAAM,IAAI;AAAA,MACR,mEAAmE,IAAI;AAAA,IACzE;AAAA,EACF;AACA,SAAO;AACT;AAEA,eAAe,aAAgD;AAC7D,MAAI,cAAc;AAChB,WAAO;AAAA,EACT;AACA,QAAM,OAAO,QAAQ,gBAAgB;AACrC,QAAM,OAAO,OAAO,SAAS,QAAQ,gBAAgB,GAAG,EAAE;AAC1D,QAAM,WAAW,QAAQ,oBAAoB;AAC7C,QAAM,SAAS,QAAQ,kBAAkB;AAIzC,QAAM,aAAa,QAAQ,IAAI,gBAAgB,KAAK;AACpD,QAAM,iBAAiB,QAAQ,IAAI;AACnC,MAAI,cAAc,mBAAmB,QAAW;AAC9C,mBAAe;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM;AAAA,MACN,UAAU;AAAA,MACV,KAAK,QAAQ,IAAI,eAAe,KAAK,MAAM;AAAA,IAC7C;AACA,WAAO;AAAA,EACT;AAEA,QAAM,YAAY,QAAQ,IAAI,sBAAsB,KAAK;AACzD,MAAI,CAAC,WAAW;AACd,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACA,MAAI,CAAC,eAAe;AAClB,oBAAgB,IAAI,mDAAqB,CAAC,CAAC;AAAA,EAC7C;AACA,QAAM,MAAM,MAAM,cAAc;AAAA,IAC9B,IAAI,oDAAsB,EAAE,UAAU,UAAU,CAAC;AAAA,EACnD;AACA,MAAI,CAAC,IAAI,cAAc;AACrB,UAAM,IAAI;AAAA,MACR,UAAU,SAAS;AAAA,IACrB;AAAA,EACF;AACA,QAAM,SAAS,KAAK,MAAM,IAAI,YAAY;AAC1C,iBAAe;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,OAAO;AAAA,IACb,UAAU,OAAO;AAAA,IACjB,KAAK;AAAA,EACP;AACA,SAAO;AACT;AAEA,eAAe,UAAyB;AACtC,MAAI,MAAM;AACR,WAAO;AAAA,EACT;AACA,QAAM,MAAM,MAAM,WAAW;AAC7B,SAAO,IAAI,eAAK;AAAA,IACd,MAAM,IAAI;AAAA,IACV,MAAM,IAAI;AAAA,IACV,UAAU,IAAI;AAAA,IACd,MAAM,IAAI;AAAA,IACV,UAAU,IAAI;AAAA,IACd,KAAK,IAAI,MAAM,EAAE,oBAAoB,MAAM,IAAI;AAAA,IAC/C,KAAK;AAAA,IACL,mBAAmB;AAAA,IACnB,yBAAyB;AAAA,EAC3B,CAAC;AACD,SAAO;AACT;AAEA,eAAe,qBAAoC;AACjD,MAAI,CAAC,kBAAkB;AACrB,wBAAoB,YAAY;AAC9B,YAAM,MAAM,MAAM,WAAW;AAC7B,YAAM,IAAI,MAAM,QAAQ;AACxB,YAAM,sBAAsB,GAAG,IAAI,MAAM;AAAA,IAC3C,GAAG,EAAE,MAAM,CAAC,QAAQ;AAIlB,yBAAmB;AACnB,YAAM;AAAA,IACR,CAAC;AAAA,EACH;AACA,SAAO;AACT;AAOO,SAAS,wCAA8C;AAC5D,iBAAe;AACf,SAAO;AACP,qBAAmB;AACnB,kBAAgB;AAClB;AAEA,SAAS,oBACP,QAC6B;AAC7B,QAAM,OAAO,OAAO,KAAK,OAAO,QAAQ,MAAM,QAAQ,EAAE,SAAS,MAAM;AACvE,SAAO,KAAK,MAAM,IAAI;AACxB;AAEA,SAAS,kBACP,UACA,6BACM;AACN,MAAI,YAAY,OAAO,aAAa,UAAU;AAC5C,UAAM,OAAQ,SAAkD;AAChE,UAAM,KAAK,MAAM;AACjB,QAAI,OAAO,OAAO,UAAU;AAC1B,YAAM,SAAS,IAAI,KAAK,EAAE;AAC1B,UAAI,CAAC,OAAO,MAAM,OAAO,QAAQ,CAAC,GAAG;AACnC,eAAO;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACA,MACE,OAAO,gCAAgC,YACvC,OAAO,SAAS,2BAA2B,GAC3C;AACA,WAAO,IAAI,KAAK,8BAA8B,GAAI;AAAA,EACpD;AACA,SAAO,oBAAI,KAAK;AAClB;AAgCO,SAAS,iBACd,QACA,WACa;AACb,MAAI,yCAAyC,QAAQ,SAAS,GAAG;AAC/D,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AACA,QAAM,OAAO,yBAAyB,MAAM;AAC5C,MAAI,CAAC,MAAM;AACT,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,QAAM,WAAW,OAAO,cAAc;AACtC,QAAM,QAAQ,WACV,OAAO,UAAU,WACjB,OAAO,UAAU;AACrB,MAAI,CAAC,OAAO;AACV,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,QAAM,QAAQ,qBAAqB,KAAK;AACxC,QAAM,iBAAiB,OAAO,UAAU;AAExC,MAAI,UAAU;AACZ,WAAO;AAAA,MACL,MAAM;AAAA,MACN,QAAQ;AAAA,QACN,UAAU,KAAK;AAAA,QACf,aAAa,KAAK;AAAA,QAClB,cAAc,KAAK;AAAA,QACnB,YAAY,KAAK;AAAA,QACjB,SAAS,KAAK;AAAA,QACd,WAAW,kBAAkB,QAAW,cAAc;AAAA,MACxD;AAAA,IACF;AAAA,EACF;AAEA,MAAI,OAAO,MAAM,aAAa,UAAU;AACtC,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AACA,MAAI;AACJ,MAAI;AACF,kBAAc,KAAK,MAAM,MAAM,QAAQ;AAAA,EACzC,QAAQ;AACN,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,SAAO;AAAA,IACL,MAAM;AAAA,IACN,QAAQ;AAAA,MACN,UAAU,KAAK;AAAA,MACf,aAAa,KAAK;AAAA,MAClB,cAAc,KAAK;AAAA,MACnB,YAAY,KAAK;AAAA,MACjB,SAAS,KAAK;AAAA,MACd,aAAa,kBAAkB,aAAa,cAAc;AAAA,MAC1D,cAAc,MAAM;AAAA,IACtB;AAAA,EACF;AACF;AAcA,eAAsB,iBACpB,QACA,YACA,QACe;AACf,MAAI,OAAO,SAAS,QAAQ;AAC1B;AAAA,EACF;AACA,MAAI,OAAO,SAAS,UAAU;AAC5B,UAAMA,KAAI,OAAO;AACjB,UAAM,QAAQ;AAAA,MACZ,OAAO,MAAM,uBAAuB,UAAU,GAAG;AAAA,QAC/CA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA
,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,MACJ,CAAC;AAAA,IACH;AACA;AAAA,EACF;AACA,QAAM,IAAI,OAAO;AACjB,QAAM,QAAQ;AAAA,IACZ,OAAO,MAAM,2BAA2B,UAAU,GAAG;AAAA,MACnD,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,IACJ,CAAC;AAAA,EACH;AACF;AAEA,eAAsB,QACpB,OACqC;AACrC,QAAM,YAAY,QAAQ,IAAI,cAAc;AAC5C,QAAM,oBAAuD,CAAC;AAE9D,QAAM,mBAAmB;AACzB,QAAM,MAAM,MAAM,WAAW;AAC7B,QAAM,IAAI,MAAM,QAAQ;AAExB,aAAW,UAAU,MAAM,SAAS;AAClC,QAAI;AACF,YAAM,SAAS,oBAAoB,MAAM;AACzC,YAAM,SAAS,iBAAiB,QAAQ,SAAS;AACjD,YAAM,iBAAiB,GAAG,IAAI,QAAQ,MAAM;AAAA,IAC9C,SAAS,KAAK;AAEZ,cAAQ;AAAA,QACN;AAAA,QACA,OAAO,QAAQ;AAAA,QACf;AAAA,MACF;AACA,wBAAkB,KAAK;AAAA,QACrB,gBAAgB,OAAO,QAAQ;AAAA,MACjC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,EAAE,kBAAkB;AAC7B;","names":["p"]}
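
The map above embeds the replication handler together with the schema SQL and stream-decoding helpers it executes. The upsert path leans on the ULID `vid` ordering: because ULIDs sort lexicographically, plain text `>` in SQL is enough to reject stale deliveries. A minimal sketch of that contract, driving the exported applyWriteIntent with a stub client — the package-root import path and all IDs/ULIDs below are assumptions for illustration, not part of the published API surface:

import { applyWriteIntent } from "@openhi/constructs"; // assumed re-export path

// Stub matching the PgQueryable shape in the handler source: records the SQL
// text and positional values instead of hitting Postgres.
const captured: Array<{ text: string; values?: ReadonlyArray<unknown> }> = [];
const stubClient = {
  query: (text: string, values?: ReadonlyArray<unknown>) => {
    captured.push({ text, values });
    return Promise.resolve();
  },
};

await applyWriteIntent(stubClient, "b_a1b2c3", {
  kind: "upsert",
  params: {
    tenantId: "t1",
    workspaceId: "w1",
    resourceType: "Patient",
    resourceId: "p1",
    version: "01HZXAAAAAAAAAAAAAAAAAAAAB", // hypothetical ULID vid
    lastUpdated: new Date("2026-04-18T00:00:00Z"),
    resourceJson: JSON.stringify({ resourceType: "Patient", id: "p1" }),
  },
});

// captured[0].values is [tenantId, workspaceId, resourceType, resourceId,
// version, lastUpdated, resourceJson] — the $1..$7 order the upsert SQL
// documents. Replaying an older ULID later (e.g. "…AAAA" < "…AAAB" as text)
// would fall through the `EXCLUDED.version > resources.version` guard and
// leave the row unchanged.
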
@@ -5,6 +5,11 @@ import {
5
5
  import {
6
6
  dynamodbImageToPlain
7
7
  } from "./chunk-CEOAGPYY.mjs";
8
+ import {
9
+ buildResourceSoftDeleteSql,
10
+ buildResourceUpsertSql,
11
+ ensureSchemaBootstrap
12
+ } from "./chunk-2O3CXY2C.mjs";
8
13
  import "./chunk-LZOMFHX3.mjs";
9
14
 
10
15
  // src/components/postgres/data-store-postgres-replication.handler.ts
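
The added import above pulls the schema helpers out of this bundle and into the shared chunk-2O3CXY2C.mjs (the inlined copies are removed in the next hunk). That chunk also gains buildSchemaBootstrapStatements, which splits the bootstrap DDL into one statement per array entry for transports that reject multi-statement SQL. A hypothetical runner over the RDS Data API might look like the following — the env-var names and the package-root import are assumptions, not part of this package:

import {
  ExecuteStatementCommand,
  RDSDataClient,
} from "@aws-sdk/client-rds-data";
import { buildSchemaBootstrapStatements } from "@openhi/constructs"; // assumed re-export path

const dataApi = new RDSDataClient({});

function requiredEnv(name: string): string {
  const v = process.env[name];
  if (!v) {
    throw new Error(`Missing required environment variable: ${name}`);
  }
  return v;
}

// The Data API's ExecuteStatement accepts one statement per call, so the
// array form is executed sequentially. Every statement is independently
// idempotent (IF NOT EXISTS), so no transaction wrapper is needed and reruns
// are no-ops — matching the contract documented on the builder itself.
export async function bootstrapViaDataApi(schemaName: string): Promise<void> {
  for (const sql of buildSchemaBootstrapStatements(schemaName)) {
    await dataApi.send(
      new ExecuteStatementCommand({
        resourceArn: requiredEnv("PG_CLUSTER_ARN"), // hypothetical env var
        secretArn: requiredEnv("PG_SECRET_ARN"), // hypothetical env var
        database: requiredEnv("PG_DATABASE"), // hypothetical env var
        sql,
      }),
    );
  }
}
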
@@ -13,71 +18,6 @@ import {
13
18
  SecretsManagerClient
14
19
  } from "@aws-sdk/client-secrets-manager";
15
20
  import { Pool } from "pg";
16
-
17
- // src/components/postgres/data-store-postgres-schema.ts
18
- var SCHEMA_NAME_PATTERN = /^[a-z_][a-z0-9_]{0,62}$/;
19
- function quoteSchemaIdentifier(schemaName) {
20
- if (!SCHEMA_NAME_PATTERN.test(schemaName)) {
21
- throw new Error(
22
- `Invalid Postgres schema name: ${JSON.stringify(schemaName)}; expected /[a-z_][a-z0-9_]{0,62}/`
23
- );
24
- }
25
- return `"${schemaName}"`;
26
- }
27
- function buildSchemaBootstrapSql(schemaName) {
28
- const s = quoteSchemaIdentifier(schemaName);
29
- return [
30
- `CREATE SCHEMA IF NOT EXISTS ${s};`,
31
- `CREATE TABLE IF NOT EXISTS ${s}.resources (`,
32
- ` tenant_id text NOT NULL,`,
33
- ` workspace_id text NOT NULL,`,
34
- ` resource_type text NOT NULL,`,
35
- ` resource_id text NOT NULL,`,
36
- ` version text NOT NULL,`,
37
- ` last_updated timestamptz NOT NULL,`,
38
- ` deleted_at timestamptz,`,
39
- ` resource jsonb NOT NULL,`,
40
- ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,
41
- `);`,
42
- `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,
43
- ` ON ${s}.resources USING gin (resource);`,
44
- `CREATE INDEX IF NOT EXISTS resources_listing`,
45
- ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`
46
- ].join("\n");
47
- }
48
- function buildResourceUpsertSql(schemaName) {
49
- const s = quoteSchemaIdentifier(schemaName);
50
- return [
51
- `INSERT INTO ${s}.resources`,
52
- ` (tenant_id, workspace_id, resource_type, resource_id, version, last_updated, deleted_at, resource)`,
53
- `VALUES ($1, $2, $3, $4, $5, $6, NULL, $7::jsonb)`,
54
- `ON CONFLICT (tenant_id, workspace_id, resource_type, resource_id)`,
55
- `DO UPDATE SET`,
56
- ` version = EXCLUDED.version,`,
57
- ` last_updated = EXCLUDED.last_updated,`,
58
- ` deleted_at = NULL,`,
59
- ` resource = EXCLUDED.resource`,
60
- `WHERE EXCLUDED.version > ${s}.resources.version;`
61
- ].join("\n");
62
- }
63
- function buildResourceSoftDeleteSql(schemaName) {
64
- const s = quoteSchemaIdentifier(schemaName);
65
- return [
66
- `UPDATE ${s}.resources`,
67
- `SET deleted_at = $6,`,
68
- ` version = $5`,
69
- `WHERE tenant_id = $1`,
70
- ` AND workspace_id = $2`,
71
- ` AND resource_type = $3`,
72
- ` AND resource_id = $4`,
73
- ` AND $5 > version;`
74
- ].join("\n");
75
- }
76
- async function ensureSchemaBootstrap(client, schemaName) {
77
- await client.query(buildSchemaBootstrapSql(schemaName));
78
- }
79
-
80
- // src/components/postgres/data-store-postgres-replication.handler.ts
81
21
  var cachedConfig;
82
22
  var pool;
83
23
  var bootstrapPromise;
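
Before the source-map diff that follows: with the schema helpers now imported from the shared chunk, the handler's exported buildWriteIntent is the seam between stream decoding and SQL dispatch. A sketch of how a Kinesis-wrapped record is classified — the PK/SK shapes follow the embedded source (PK `TID#…#WID#…#RT#…#ID#…`, SK `CURRENT`), while the concrete IDs, ULID, Region, and epoch are invented:

import { buildWriteIntent } from "@openhi/constructs"; // assumed re-export path

const change = {
  eventName: "MODIFY",
  dynamodb: {
    Keys: {
      PK: { S: "TID#t1#WID#w1#RT#Patient#ID#p1" },
      SK: { S: "CURRENT" },
    },
    NewImage: {
      vid: { S: "01HZXAAAAAAAAAAAAAAAAAAAAB" }, // hypothetical ULID
      resource: { S: JSON.stringify({ resourceType: "Patient", id: "p1" }) },
    },
    ApproximateCreationDateTime: 1_776_000_000, // seconds since epoch
  },
};

const intent = buildWriteIntent(change, "us-east-1");
// intent.kind === "upsert" here. The same call returns { kind: "drop" } for a
// record carrying another Region's aws:rep:updateregion, a non-CURRENT SK, a
// missing vid, or a resource payload that fails JSON.parse — all of which the
// handler skips silently rather than reporting as batch item failures.
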
@@ -1 +1 @@
1
- {"version":3,"sources":["../src/components/postgres/data-store-postgres-replication.handler.ts","../src/components/postgres/data-store-postgres-schema.ts"],"sourcesContent":["import {\n GetSecretValueCommand,\n SecretsManagerClient,\n} from \"@aws-sdk/client-secrets-manager\";\nimport type {\n KinesisStreamBatchResponse,\n KinesisStreamEvent,\n KinesisStreamRecord,\n} from \"aws-lambda\";\nimport { Pool } from \"pg\";\nimport {\n buildResourceSoftDeleteSql,\n buildResourceUpsertSql,\n ensureSchemaBootstrap,\n} from \"./data-store-postgres-schema\";\nimport {\n type DynamoDbStreamKinesisRecord,\n dynamodbImageToPlain,\n} from \"../dynamodb/dynamodb-stream-record\";\nimport {\n parseCurrentResourceKeys,\n shouldDropAsGlobalTableReplicationRecord,\n} from \"../dynamodb/firehose-archive-transform.handler\";\n\n/**\n * Postgres replication consumer for the data store change Kinesis stream\n * (ADR 2026-04-17-01, phase 1). Reads the same stream that feeds the\n * Firehose-to-S3 archive and projects each current FHIR resource into the\n * `resources` JSONB table on the Aurora Serverless v2 cluster provisioned by\n * {@link DataStorePostgresReplica}. This phase implements replication only;\n * no read-side query routing is wired up here.\n */\n\nconst REQUIRED_ENV_VARS = [\n \"OPENHI_PG_HOST\",\n \"OPENHI_PG_PORT\",\n \"OPENHI_PG_DATABASE\",\n \"OPENHI_PG_SCHEMA\",\n] as const;\n\ntype RequiredEnvVar = (typeof REQUIRED_ENV_VARS)[number];\n\ninterface PostgresConnectionConfig {\n host: string;\n port: number;\n database: string;\n schema: string;\n user: string;\n password: string;\n ssl: boolean;\n}\n\ninterface SecretsManagerCredentials {\n username: string;\n password: string;\n}\n\nlet cachedConfig: PostgresConnectionConfig | undefined;\nlet pool: Pool | undefined;\nlet bootstrapPromise: Promise<void> | undefined;\nlet secretsClient: SecretsManagerClient | undefined;\n\nfunction readEnv(name: RequiredEnvVar): string {\n const v = process.env[name]?.trim();\n if (!v) {\n throw new Error(\n `Missing required environment variable for postgres replication: ${name}`,\n );\n }\n return v;\n}\n\nasync function loadConfig(): Promise<PostgresConnectionConfig> {\n if (cachedConfig) {\n return cachedConfig;\n }\n const host = readEnv(\"OPENHI_PG_HOST\");\n const port = Number.parseInt(readEnv(\"OPENHI_PG_PORT\"), 10);\n const database = readEnv(\"OPENHI_PG_DATABASE\");\n const schema = readEnv(\"OPENHI_PG_SCHEMA\");\n\n // Test/integration setups can supply user/password directly via env to skip\n // the Secrets Manager round-trip. 
Production always goes through the secret.\n const directUser = process.env.OPENHI_PG_USER?.trim();\n const directPassword = process.env.OPENHI_PG_PASSWORD;\n if (directUser && directPassword !== undefined) {\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: directUser,\n password: directPassword,\n ssl: process.env.OPENHI_PG_SSL?.trim() === \"true\",\n };\n return cachedConfig;\n }\n\n const secretArn = process.env.OPENHI_PG_SECRET_ARN?.trim();\n if (!secretArn) {\n throw new Error(\n \"Either OPENHI_PG_SECRET_ARN or both OPENHI_PG_USER and OPENHI_PG_PASSWORD must be set\",\n );\n }\n if (!secretsClient) {\n secretsClient = new SecretsManagerClient({});\n }\n const out = await secretsClient.send(\n new GetSecretValueCommand({ SecretId: secretArn }),\n );\n if (!out.SecretString) {\n throw new Error(\n `Secret ${secretArn} returned no SecretString; binary secrets are not supported`,\n );\n }\n const parsed = JSON.parse(out.SecretString) as SecretsManagerCredentials;\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: parsed.username,\n password: parsed.password,\n ssl: true,\n };\n return cachedConfig;\n}\n\nasync function getPool(): Promise<Pool> {\n if (pool) {\n return pool;\n }\n const cfg = await loadConfig();\n pool = new Pool({\n host: cfg.host,\n port: cfg.port,\n database: cfg.database,\n user: cfg.user,\n password: cfg.password,\n ssl: cfg.ssl ? { rejectUnauthorized: false } : false,\n max: 2,\n idleTimeoutMillis: 60_000,\n connectionTimeoutMillis: 10_000,\n });\n return pool;\n}\n\nasync function ensureBootstrapped(): Promise<void> {\n if (!bootstrapPromise) {\n bootstrapPromise = (async () => {\n const cfg = await loadConfig();\n const p = await getPool();\n await ensureSchemaBootstrap(p, cfg.schema);\n })().catch((err) => {\n // Reset so a subsequent invocation retries the bootstrap on a fresh\n // container; otherwise a transient failure during cold start would\n // poison every record forever.\n bootstrapPromise = undefined;\n throw err;\n });\n }\n return bootstrapPromise;\n}\n\n/**\n * Reset module-scoped caches. Test-only helper; do not invoke from production\n * code paths. 
Exposed so unit and integration tests can run multiple\n * scenarios within a single Jest worker without leaking state across cases.\n */\nexport function __resetReplicationModuleStateForTests(): void {\n cachedConfig = undefined;\n pool = undefined;\n bootstrapPromise = undefined;\n secretsClient = undefined;\n}\n\nfunction decodeKinesisRecord(\n record: KinesisStreamRecord,\n): DynamoDbStreamKinesisRecord {\n const json = Buffer.from(record.kinesis.data, \"base64\").toString(\"utf8\");\n return JSON.parse(json) as DynamoDbStreamKinesisRecord;\n}\n\nfunction deriveLastUpdated(\n resource: unknown,\n approximateCreationEpochSec: number | undefined,\n): Date {\n if (resource && typeof resource === \"object\") {\n const meta = (resource as { meta?: { lastUpdated?: unknown } }).meta;\n const lu = meta?.lastUpdated;\n if (typeof lu === \"string\") {\n const parsed = new Date(lu);\n if (!Number.isNaN(parsed.getTime())) {\n return parsed;\n }\n }\n }\n if (\n typeof approximateCreationEpochSec === \"number\" &&\n Number.isFinite(approximateCreationEpochSec)\n ) {\n return new Date(approximateCreationEpochSec * 1000);\n }\n return new Date();\n}\n\ninterface UpsertParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n lastUpdated: Date;\n resourceJson: string;\n}\n\ninterface SoftDeleteParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n deletedAt: Date;\n}\n\ntype WriteIntent =\n | { kind: \"upsert\"; params: UpsertParams }\n | { kind: \"delete\"; params: SoftDeleteParams }\n | { kind: \"drop\" };\n\n/**\n * Translate a single Kinesis-wrapped DynamoDB stream record into a write\n * intent against the `resources` table. Returns `drop` for replica-side\n * global-table records, non-CURRENT items, and any record whose resource\n * payload cannot be decoded as JSON.\n */\nexport function buildWriteIntent(\n change: DynamoDbStreamKinesisRecord,\n awsRegion: string,\n): WriteIntent {\n if (shouldDropAsGlobalTableReplicationRecord(change, awsRegion)) {\n return { kind: \"drop\" };\n }\n const keys = parseCurrentResourceKeys(change);\n if (!keys) {\n return { kind: \"drop\" };\n }\n\n const isRemove = change.eventName === \"REMOVE\";\n const image = isRemove\n ? change.dynamodb?.OldImage\n : change.dynamodb?.NewImage;\n if (!image) {\n return { kind: \"drop\" };\n }\n\n const plain = dynamodbImageToPlain(image);\n const approxEpochSec = change.dynamodb?.ApproximateCreationDateTime;\n\n if (isRemove) {\n return {\n kind: \"delete\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n deletedAt: deriveLastUpdated(undefined, approxEpochSec),\n },\n };\n }\n\n if (typeof plain.resource !== \"string\") {\n return { kind: \"drop\" };\n }\n let resourceObj: unknown;\n try {\n resourceObj = JSON.parse(plain.resource);\n } catch {\n return { kind: \"drop\" };\n }\n\n return {\n kind: \"upsert\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n lastUpdated: deriveLastUpdated(resourceObj, approxEpochSec),\n resourceJson: plain.resource,\n },\n };\n}\n\ninterface PgQueryable {\n query: (\n text: string,\n values?: ReadonlyArray<unknown>,\n ) => Promise<unknown> | unknown;\n}\n\n/**\n * Execute a write intent against an open Postgres connection or pool. 
Pure\n * SQL dispatch — extracted from the handler so unit tests can drive it with a\n * mock client and integration tests can drive it with a real pool.\n */\nexport async function applyWriteIntent(\n client: PgQueryable,\n schemaName: string,\n intent: WriteIntent,\n): Promise<void> {\n if (intent.kind === \"drop\") {\n return;\n }\n if (intent.kind === \"upsert\") {\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceUpsertSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.lastUpdated,\n p.resourceJson,\n ]),\n );\n return;\n }\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceSoftDeleteSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.deletedAt,\n ]),\n );\n}\n\nexport async function handler(\n event: KinesisStreamEvent,\n): Promise<KinesisStreamBatchResponse> {\n const awsRegion = process.env.AWS_REGION ?? \"\";\n const batchItemFailures: Array<{ itemIdentifier: string }> = [];\n\n await ensureBootstrapped();\n const cfg = await loadConfig();\n const p = await getPool();\n\n for (const record of event.Records) {\n try {\n const change = decodeKinesisRecord(record);\n const intent = buildWriteIntent(change, awsRegion);\n await applyWriteIntent(p, cfg.schema, intent);\n } catch (err) {\n // eslint-disable-next-line no-console\n console.error(\n \"postgres replication record failed\",\n record.kinesis.sequenceNumber,\n err,\n );\n batchItemFailures.push({\n itemIdentifier: record.kinesis.sequenceNumber,\n });\n }\n }\n\n return { batchItemFailures };\n}\n","import type { Pool, PoolClient } from \"pg\";\n\n/**\n * @see sites/www-docs/content/architecture/adr/2026-04-17-01-ad-hoc-query-support-fhir-api.md\n *\n * SQL strings for the Postgres replication tier (ADR 2026-04-17-01, phase 1):\n * the JSONB `resources` table that mirrors current FHIR resources from the\n * DynamoDB single-table store. Phase 1 covers replication only; query routing\n * and indexes for individual SearchParameters are deferred to a follow-on phase.\n *\n * The `version` column stores the resource's `vid` (a ULID — see\n * `data-entity-common.ts`). ULIDs are monotonically lexically sortable, so the\n * UPSERT/UPDATE guards use plain `>` text comparison to reject out-of-order\n * Kinesis deliveries.\n */\n\nexport const POSTGRES_REPLICATION_SCHEMA_VERSION = 1;\n\nconst SCHEMA_NAME_PATTERN = /^[a-z_][a-z0-9_]{0,62}$/;\n\n/**\n * Validate that a schema name is a safe Postgres identifier and quote it for\n * inclusion in DDL/DML. Throws on anything that doesn't match the lower-snake\n * pattern OpenHI uses for branch-derived schema names (e.g. 
`b_a1b2c3`).\n */\nexport function quoteSchemaIdentifier(schemaName: string): string {\n if (!SCHEMA_NAME_PATTERN.test(schemaName)) {\n throw new Error(\n `Invalid Postgres schema name: ${JSON.stringify(schemaName)}; expected /[a-z_][a-z0-9_]{0,62}/`,\n );\n }\n return `\"${schemaName}\"`;\n}\n\nexport function buildSchemaBootstrapSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `CREATE SCHEMA IF NOT EXISTS ${s};`,\n `CREATE TABLE IF NOT EXISTS ${s}.resources (`,\n ` tenant_id text NOT NULL,`,\n ` workspace_id text NOT NULL,`,\n ` resource_type text NOT NULL,`,\n ` resource_id text NOT NULL,`,\n ` version text NOT NULL,`,\n ` last_updated timestamptz NOT NULL,`,\n ` deleted_at timestamptz,`,\n ` resource jsonb NOT NULL,`,\n ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,\n `);`,\n `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,\n ` ON ${s}.resources USING gin (resource);`,\n `CREATE INDEX IF NOT EXISTS resources_listing`,\n ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`,\n ].join(\"\\n\");\n}\n\n/**\n * INSERT/MODIFY UPSERT with a monotonic `version` (ULID) guard. If a record\n * arrives out of order — e.g. a retried Kinesis delivery for an older `vid`\n * after a newer one — the WHERE clause in DO UPDATE leaves the row unchanged.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version, $6 last_updated, $7 resource (jsonb-serialized).\n */\nexport function buildResourceUpsertSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `INSERT INTO ${s}.resources`,\n ` (tenant_id, workspace_id, resource_type, resource_id, version, last_updated, deleted_at, resource)`,\n `VALUES ($1, $2, $3, $4, $5, $6, NULL, $7::jsonb)`,\n `ON CONFLICT (tenant_id, workspace_id, resource_type, resource_id)`,\n `DO UPDATE SET`,\n ` version = EXCLUDED.version,`,\n ` last_updated = EXCLUDED.last_updated,`,\n ` deleted_at = NULL,`,\n ` resource = EXCLUDED.resource`,\n `WHERE EXCLUDED.version > ${s}.resources.version;`,\n ].join(\"\\n\");\n}\n\n/**\n * REMOVE soft-delete with the same monotonic `version` guard. Using `>` (not\n * `>=`) means a duplicate REMOVE for the same version is a no-op. Param order\n * matches {@link buildResourceUpsertSql} for the first five params.\n *\n * Param order: $1 tenant_id, $2 workspace_id, $3 resource_type, $4 resource_id,\n * $5 version (incoming), $6 deleted_at.\n */\nexport function buildResourceSoftDeleteSql(schemaName: string): string {\n const s = quoteSchemaIdentifier(schemaName);\n return [\n `UPDATE ${s}.resources`,\n `SET deleted_at = $6,`,\n ` version = $5`,\n `WHERE tenant_id = $1`,\n ` AND workspace_id = $2`,\n ` AND resource_type = $3`,\n ` AND resource_id = $4`,\n ` AND $5 > version;`,\n ].join(\"\\n\");\n}\n\n/**\n * Run schema DDL idempotently. Safe to call from every Lambda cold start; the\n * `IF NOT EXISTS` clauses make repeats no-ops. 
Caller is responsible for\n * memoizing across invocations on the same warm container.\n */\nexport async function ensureSchemaBootstrap(\n client: Pool | PoolClient,\n schemaName: string,\n): Promise<void> {\n await client.query(buildSchemaBootstrapSql(schemaName));\n}\n"],"mappings":"<base64-VLQ mappings omitted>","names":["p"]}
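The sourcemap above embeds the schema module's source, whose closing doc comment leaves bootstrap memoization to the caller. A minimal caller-side sketch of that contract, assuming `ensureSchemaBootstrap` is importable from the package entry point (the import path and the `bootstrapped` helper are illustrative, not part of the package): keep one promise per warm container and clear it on failure so the next invocation retries, the same pattern the replication handler's `ensureBootstrapped` uses below.

import { Pool } from "pg";
import { ensureSchemaBootstrap } from "@openhi/constructs"; // illustrative import path

const pool = new Pool(); // connection settings supplied via PG* env vars
let bootstrapOnce: Promise<void> | undefined;

// Memoize the idempotent DDL once per warm container, as the doc comment asks.
export function bootstrapped(schemaName: string): Promise<void> {
  if (!bootstrapOnce) {
    bootstrapOnce = ensureSchemaBootstrap(pool, schemaName).catch((err) => {
      bootstrapOnce = undefined; // clear so a later call can retry
      throw err;
    });
  }
  return bootstrapOnce;
}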
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/components/postgres/data-store-postgres-replication.handler.ts"],"sourcesContent":["import {\n GetSecretValueCommand,\n SecretsManagerClient,\n} from \"@aws-sdk/client-secrets-manager\";\nimport type {\n KinesisStreamBatchResponse,\n KinesisStreamEvent,\n KinesisStreamRecord,\n} from \"aws-lambda\";\nimport { Pool } from \"pg\";\nimport {\n buildResourceSoftDeleteSql,\n buildResourceUpsertSql,\n ensureSchemaBootstrap,\n} from \"./data-store-postgres-schema\";\nimport {\n type DynamoDbStreamKinesisRecord,\n dynamodbImageToPlain,\n} from \"../dynamodb/dynamodb-stream-record\";\nimport {\n parseCurrentResourceKeys,\n shouldDropAsGlobalTableReplicationRecord,\n} from \"../dynamodb/firehose-archive-transform.handler\";\n\n/**\n * Postgres replication consumer for the data store change Kinesis stream\n * (ADR 2026-04-17-01, phase 1). Reads the same stream that feeds the\n * Firehose-to-S3 archive and projects each current FHIR resource into the\n * `resources` JSONB table on the Aurora Serverless v2 cluster provisioned by\n * {@link DataStorePostgresReplica}. This phase implements replication only;\n * no read-side query routing is wired up here.\n */\n\nconst REQUIRED_ENV_VARS = [\n \"OPENHI_PG_HOST\",\n \"OPENHI_PG_PORT\",\n \"OPENHI_PG_DATABASE\",\n \"OPENHI_PG_SCHEMA\",\n] as const;\n\ntype RequiredEnvVar = (typeof REQUIRED_ENV_VARS)[number];\n\ninterface PostgresConnectionConfig {\n host: string;\n port: number;\n database: string;\n schema: string;\n user: string;\n password: string;\n ssl: boolean;\n}\n\ninterface SecretsManagerCredentials {\n username: string;\n password: string;\n}\n\nlet cachedConfig: PostgresConnectionConfig | undefined;\nlet pool: Pool | undefined;\nlet bootstrapPromise: Promise<void> | undefined;\nlet secretsClient: SecretsManagerClient | undefined;\n\nfunction readEnv(name: RequiredEnvVar): string {\n const v = process.env[name]?.trim();\n if (!v) {\n throw new Error(\n `Missing required environment variable for postgres replication: ${name}`,\n );\n }\n return v;\n}\n\nasync function loadConfig(): Promise<PostgresConnectionConfig> {\n if (cachedConfig) {\n return cachedConfig;\n }\n const host = readEnv(\"OPENHI_PG_HOST\");\n const port = Number.parseInt(readEnv(\"OPENHI_PG_PORT\"), 10);\n const database = readEnv(\"OPENHI_PG_DATABASE\");\n const schema = readEnv(\"OPENHI_PG_SCHEMA\");\n\n // Test/integration setups can supply user/password directly via env to skip\n // the Secrets Manager round-trip. 
Production always goes through the secret.\n const directUser = process.env.OPENHI_PG_USER?.trim();\n const directPassword = process.env.OPENHI_PG_PASSWORD;\n if (directUser && directPassword !== undefined) {\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: directUser,\n password: directPassword,\n ssl: process.env.OPENHI_PG_SSL?.trim() === \"true\",\n };\n return cachedConfig;\n }\n\n const secretArn = process.env.OPENHI_PG_SECRET_ARN?.trim();\n if (!secretArn) {\n throw new Error(\n \"Either OPENHI_PG_SECRET_ARN or both OPENHI_PG_USER and OPENHI_PG_PASSWORD must be set\",\n );\n }\n if (!secretsClient) {\n secretsClient = new SecretsManagerClient({});\n }\n const out = await secretsClient.send(\n new GetSecretValueCommand({ SecretId: secretArn }),\n );\n if (!out.SecretString) {\n throw new Error(\n `Secret ${secretArn} returned no SecretString; binary secrets are not supported`,\n );\n }\n const parsed = JSON.parse(out.SecretString) as SecretsManagerCredentials;\n cachedConfig = {\n host,\n port,\n database,\n schema,\n user: parsed.username,\n password: parsed.password,\n ssl: true,\n };\n return cachedConfig;\n}\n\nasync function getPool(): Promise<Pool> {\n if (pool) {\n return pool;\n }\n const cfg = await loadConfig();\n pool = new Pool({\n host: cfg.host,\n port: cfg.port,\n database: cfg.database,\n user: cfg.user,\n password: cfg.password,\n ssl: cfg.ssl ? { rejectUnauthorized: false } : false,\n max: 2,\n idleTimeoutMillis: 60_000,\n connectionTimeoutMillis: 10_000,\n });\n return pool;\n}\n\nasync function ensureBootstrapped(): Promise<void> {\n if (!bootstrapPromise) {\n bootstrapPromise = (async () => {\n const cfg = await loadConfig();\n const p = await getPool();\n await ensureSchemaBootstrap(p, cfg.schema);\n })().catch((err) => {\n // Reset so a subsequent invocation retries the bootstrap on a fresh\n // container; otherwise a transient failure during cold start would\n // poison every record forever.\n bootstrapPromise = undefined;\n throw err;\n });\n }\n return bootstrapPromise;\n}\n\n/**\n * Reset module-scoped caches. Test-only helper; do not invoke from production\n * code paths. 
Exposed so unit and integration tests can run multiple\n * scenarios within a single Jest worker without leaking state across cases.\n */\nexport function __resetReplicationModuleStateForTests(): void {\n cachedConfig = undefined;\n pool = undefined;\n bootstrapPromise = undefined;\n secretsClient = undefined;\n}\n\nfunction decodeKinesisRecord(\n record: KinesisStreamRecord,\n): DynamoDbStreamKinesisRecord {\n const json = Buffer.from(record.kinesis.data, \"base64\").toString(\"utf8\");\n return JSON.parse(json) as DynamoDbStreamKinesisRecord;\n}\n\nfunction deriveLastUpdated(\n resource: unknown,\n approximateCreationEpochSec: number | undefined,\n): Date {\n if (resource && typeof resource === \"object\") {\n const meta = (resource as { meta?: { lastUpdated?: unknown } }).meta;\n const lu = meta?.lastUpdated;\n if (typeof lu === \"string\") {\n const parsed = new Date(lu);\n if (!Number.isNaN(parsed.getTime())) {\n return parsed;\n }\n }\n }\n if (\n typeof approximateCreationEpochSec === \"number\" &&\n Number.isFinite(approximateCreationEpochSec)\n ) {\n return new Date(approximateCreationEpochSec * 1000);\n }\n return new Date();\n}\n\ninterface UpsertParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n lastUpdated: Date;\n resourceJson: string;\n}\n\ninterface SoftDeleteParams {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n deletedAt: Date;\n}\n\ntype WriteIntent =\n | { kind: \"upsert\"; params: UpsertParams }\n | { kind: \"delete\"; params: SoftDeleteParams }\n | { kind: \"drop\" };\n\n/**\n * Translate a single Kinesis-wrapped DynamoDB stream record into a write\n * intent against the `resources` table. Returns `drop` for replica-side\n * global-table records, non-CURRENT items, and any record whose resource\n * payload cannot be decoded as JSON.\n */\nexport function buildWriteIntent(\n change: DynamoDbStreamKinesisRecord,\n awsRegion: string,\n): WriteIntent {\n if (shouldDropAsGlobalTableReplicationRecord(change, awsRegion)) {\n return { kind: \"drop\" };\n }\n const keys = parseCurrentResourceKeys(change);\n if (!keys) {\n return { kind: \"drop\" };\n }\n\n const isRemove = change.eventName === \"REMOVE\";\n const image = isRemove\n ? change.dynamodb?.OldImage\n : change.dynamodb?.NewImage;\n if (!image) {\n return { kind: \"drop\" };\n }\n\n const plain = dynamodbImageToPlain(image);\n const approxEpochSec = change.dynamodb?.ApproximateCreationDateTime;\n\n if (isRemove) {\n return {\n kind: \"delete\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n deletedAt: deriveLastUpdated(undefined, approxEpochSec),\n },\n };\n }\n\n if (typeof plain.resource !== \"string\") {\n return { kind: \"drop\" };\n }\n let resourceObj: unknown;\n try {\n resourceObj = JSON.parse(plain.resource);\n } catch {\n return { kind: \"drop\" };\n }\n\n return {\n kind: \"upsert\",\n params: {\n tenantId: keys.tenantId,\n workspaceId: keys.workspaceId,\n resourceType: keys.resourceType,\n resourceId: keys.resourceId,\n version: keys.version,\n lastUpdated: deriveLastUpdated(resourceObj, approxEpochSec),\n resourceJson: plain.resource,\n },\n };\n}\n\ninterface PgQueryable {\n query: (\n text: string,\n values?: ReadonlyArray<unknown>,\n ) => Promise<unknown> | unknown;\n}\n\n/**\n * Execute a write intent against an open Postgres connection or pool. 
Pure\n * SQL dispatch — extracted from the handler so unit tests can drive it with a\n * mock client and integration tests can drive it with a real pool.\n */\nexport async function applyWriteIntent(\n client: PgQueryable,\n schemaName: string,\n intent: WriteIntent,\n): Promise<void> {\n if (intent.kind === \"drop\") {\n return;\n }\n if (intent.kind === \"upsert\") {\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceUpsertSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.lastUpdated,\n p.resourceJson,\n ]),\n );\n return;\n }\n const p = intent.params;\n await Promise.resolve(\n client.query(buildResourceSoftDeleteSql(schemaName), [\n p.tenantId,\n p.workspaceId,\n p.resourceType,\n p.resourceId,\n p.version,\n p.deletedAt,\n ]),\n );\n}\n\nexport async function handler(\n event: KinesisStreamEvent,\n): Promise<KinesisStreamBatchResponse> {\n const awsRegion = process.env.AWS_REGION ?? \"\";\n const batchItemFailures: Array<{ itemIdentifier: string }> = [];\n\n await ensureBootstrapped();\n const cfg = await loadConfig();\n const p = await getPool();\n\n for (const record of event.Records) {\n try {\n const change = decodeKinesisRecord(record);\n const intent = buildWriteIntent(change, awsRegion);\n await applyWriteIntent(p, cfg.schema, intent);\n } catch (err) {\n // eslint-disable-next-line no-console\n console.error(\n \"postgres replication record failed\",\n record.kinesis.sequenceNumber,\n err,\n );\n batchItemFailures.push({\n itemIdentifier: record.kinesis.sequenceNumber,\n });\n }\n }\n\n return { batchItemFailures };\n}\n"],"mappings":";;;;;;;;;;;;;;;AAAA;AAAA,EACE;AAAA,EACA;AAAA,OACK;AAMP,SAAS,YAAY;AAgDrB,IAAI;AACJ,IAAI;AACJ,IAAI;AACJ,IAAI;AAEJ,SAAS,QAAQ,MAA8B;AAC7C,QAAM,IAAI,QAAQ,IAAI,IAAI,GAAG,KAAK;AAClC,MAAI,CAAC,GAAG;AACN,UAAM,IAAI;AAAA,MACR,mEAAmE,IAAI;AAAA,IACzE;AAAA,EACF;AACA,SAAO;AACT;AAEA,eAAe,aAAgD;AAC7D,MAAI,cAAc;AAChB,WAAO;AAAA,EACT;AACA,QAAM,OAAO,QAAQ,gBAAgB;AACrC,QAAM,OAAO,OAAO,SAAS,QAAQ,gBAAgB,GAAG,EAAE;AAC1D,QAAM,WAAW,QAAQ,oBAAoB;AAC7C,QAAM,SAAS,QAAQ,kBAAkB;AAIzC,QAAM,aAAa,QAAQ,IAAI,gBAAgB,KAAK;AACpD,QAAM,iBAAiB,QAAQ,IAAI;AACnC,MAAI,cAAc,mBAAmB,QAAW;AAC9C,mBAAe;AAAA,MACb;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,MAAM;AAAA,MACN,UAAU;AAAA,MACV,KAAK,QAAQ,IAAI,eAAe,KAAK,MAAM;AAAA,IAC7C;AACA,WAAO;AAAA,EACT;AAEA,QAAM,YAAY,QAAQ,IAAI,sBAAsB,KAAK;AACzD,MAAI,CAAC,WAAW;AACd,UAAM,IAAI;AAAA,MACR;AAAA,IACF;AAAA,EACF;AACA,MAAI,CAAC,eAAe;AAClB,oBAAgB,IAAI,qBAAqB,CAAC,CAAC;AAAA,EAC7C;AACA,QAAM,MAAM,MAAM,cAAc;AAAA,IAC9B,IAAI,sBAAsB,EAAE,UAAU,UAAU,CAAC;AAAA,EACnD;AACA,MAAI,CAAC,IAAI,cAAc;AACrB,UAAM,IAAI;AAAA,MACR,UAAU,SAAS;AAAA,IACrB;AAAA,EACF;AACA,QAAM,SAAS,KAAK,MAAM,IAAI,YAAY;AAC1C,iBAAe;AAAA,IACb;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA,MAAM,OAAO;AAAA,IACb,UAAU,OAAO;AAAA,IACjB,KAAK;AAAA,EACP;AACA,SAAO;AACT;AAEA,eAAe,UAAyB;AACtC,MAAI,MAAM;AACR,WAAO;AAAA,EACT;AACA,QAAM,MAAM,MAAM,WAAW;AAC7B,SAAO,IAAI,KAAK;AAAA,IACd,MAAM,IAAI;AAAA,IACV,MAAM,IAAI;AAAA,IACV,UAAU,IAAI;AAAA,IACd,MAAM,IAAI;AAAA,IACV,UAAU,IAAI;AAAA,IACd,KAAK,IAAI,MAAM,EAAE,oBAAoB,MAAM,IAAI;AAAA,IAC/C,KAAK;AAAA,IACL,mBAAmB;AAAA,IACnB,yBAAyB;AAAA,EAC3B,CAAC;AACD,SAAO;AACT;AAEA,eAAe,qBAAoC;AACjD,MAAI,CAAC,kBAAkB;AACrB,wBAAoB,YAAY;AAC9B,YAAM,MAAM,MAAM,WAAW;AAC7B,YAAM,IAAI,MAAM,QAAQ;AACxB,YAAM,sBAAsB,GAAG,IAAI,MAAM;AAAA,IAC3C,GAAG,EAAE,MAAM,CAAC,QAAQ;AAIlB,yBAAmB;AACnB,YAAM;AAAA,IACR,CAAC;AAAA,EACH;AACA,SAAO;AACT;AAOO,SAAS,wCAA8C;AAC5D,iBAAe;AACf,SAAO;AACP,qBAAmB;AACnB,kBAAgB;AAClB;AAEA,SAAS,oBACP,QAC6B;AAC7B,QAAM,OAAO,OAAO,KAAK,OAAO,QAAQ,MAAM,QAAQ,
EAAE,SAAS,MAAM;AACvE,SAAO,KAAK,MAAM,IAAI;AACxB;AAEA,SAAS,kBACP,UACA,6BACM;AACN,MAAI,YAAY,OAAO,aAAa,UAAU;AAC5C,UAAM,OAAQ,SAAkD;AAChE,UAAM,KAAK,MAAM;AACjB,QAAI,OAAO,OAAO,UAAU;AAC1B,YAAM,SAAS,IAAI,KAAK,EAAE;AAC1B,UAAI,CAAC,OAAO,MAAM,OAAO,QAAQ,CAAC,GAAG;AACnC,eAAO;AAAA,MACT;AAAA,IACF;AAAA,EACF;AACA,MACE,OAAO,gCAAgC,YACvC,OAAO,SAAS,2BAA2B,GAC3C;AACA,WAAO,IAAI,KAAK,8BAA8B,GAAI;AAAA,EACpD;AACA,SAAO,oBAAI,KAAK;AAClB;AAgCO,SAAS,iBACd,QACA,WACa;AACb,MAAI,yCAAyC,QAAQ,SAAS,GAAG;AAC/D,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AACA,QAAM,OAAO,yBAAyB,MAAM;AAC5C,MAAI,CAAC,MAAM;AACT,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,QAAM,WAAW,OAAO,cAAc;AACtC,QAAM,QAAQ,WACV,OAAO,UAAU,WACjB,OAAO,UAAU;AACrB,MAAI,CAAC,OAAO;AACV,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,QAAM,QAAQ,qBAAqB,KAAK;AACxC,QAAM,iBAAiB,OAAO,UAAU;AAExC,MAAI,UAAU;AACZ,WAAO;AAAA,MACL,MAAM;AAAA,MACN,QAAQ;AAAA,QACN,UAAU,KAAK;AAAA,QACf,aAAa,KAAK;AAAA,QAClB,cAAc,KAAK;AAAA,QACnB,YAAY,KAAK;AAAA,QACjB,SAAS,KAAK;AAAA,QACd,WAAW,kBAAkB,QAAW,cAAc;AAAA,MACxD;AAAA,IACF;AAAA,EACF;AAEA,MAAI,OAAO,MAAM,aAAa,UAAU;AACtC,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AACA,MAAI;AACJ,MAAI;AACF,kBAAc,KAAK,MAAM,MAAM,QAAQ;AAAA,EACzC,QAAQ;AACN,WAAO,EAAE,MAAM,OAAO;AAAA,EACxB;AAEA,SAAO;AAAA,IACL,MAAM;AAAA,IACN,QAAQ;AAAA,MACN,UAAU,KAAK;AAAA,MACf,aAAa,KAAK;AAAA,MAClB,cAAc,KAAK;AAAA,MACnB,YAAY,KAAK;AAAA,MACjB,SAAS,KAAK;AAAA,MACd,aAAa,kBAAkB,aAAa,cAAc;AAAA,MAC1D,cAAc,MAAM;AAAA,IACtB;AAAA,EACF;AACF;AAcA,eAAsB,iBACpB,QACA,YACA,QACe;AACf,MAAI,OAAO,SAAS,QAAQ;AAC1B;AAAA,EACF;AACA,MAAI,OAAO,SAAS,UAAU;AAC5B,UAAMA,KAAI,OAAO;AACjB,UAAM,QAAQ;AAAA,MACZ,OAAO,MAAM,uBAAuB,UAAU,GAAG;AAAA,QAC/CA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,QACFA,GAAE;AAAA,MACJ,CAAC;AAAA,IACH;AACA;AAAA,EACF;AACA,QAAM,IAAI,OAAO;AACjB,QAAM,QAAQ;AAAA,IACZ,OAAO,MAAM,2BAA2B,UAAU,GAAG;AAAA,MACnD,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,MACF,EAAE;AAAA,IACJ,CAAC;AAAA,EACH;AACF;AAEA,eAAsB,QACpB,OACqC;AACrC,QAAM,YAAY,QAAQ,IAAI,cAAc;AAC5C,QAAM,oBAAuD,CAAC;AAE9D,QAAM,mBAAmB;AACzB,QAAM,MAAM,MAAM,WAAW;AAC7B,QAAM,IAAI,MAAM,QAAQ;AAExB,aAAW,UAAU,MAAM,SAAS;AAClC,QAAI;AACF,YAAM,SAAS,oBAAoB,MAAM;AACzC,YAAM,SAAS,iBAAiB,QAAQ,SAAS;AACjD,YAAM,iBAAiB,GAAG,IAAI,QAAQ,MAAM;AAAA,IAC9C,SAAS,KAAK;AAEZ,cAAQ;AAAA,QACN;AAAA,QACA,OAAO,QAAQ;AAAA,QACf;AAAA,MACF;AACA,wBAAkB,KAAK;AAAA,QACrB,gBAAgB,OAAO,QAAQ;AAAA,MACjC,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,EAAE,kBAAkB;AAC7B;","names":["p"]}
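The `applyWriteIntent` doc comment in the source above notes it was extracted from the handler so unit tests can drive it with a mock client. A minimal sketch of such a test body, assuming the module's exports are reachable from the test; the import path and every value below are illustrative, not from the package.

import { applyWriteIntent } from "./data-store-postgres-replication.handler"; // illustrative path

async function demo(): Promise<void> {
  const captured: Array<{ text: string; values?: ReadonlyArray<unknown> }> = [];
  // Structurally satisfies PgQueryable: query(text, values?) returning a promise.
  const mockClient = {
    query: (text: string, values?: ReadonlyArray<unknown>) => {
      captured.push({ text, values });
      return Promise.resolve();
    },
  };

  await applyWriteIntent(mockClient, "b_test", {
    kind: "upsert",
    params: {
      tenantId: "tenant-1",
      workspaceId: "ws-1",
      resourceType: "Patient",
      resourceId: "pat-1",
      version: "v2", // resource version string
      lastUpdated: new Date("2026-04-17T00:00:00Z"),
      resourceJson: JSON.stringify({ resourceType: "Patient", id: "pat-1" }),
    },
  });

  // captured[0].text holds the parameterized UPSERT against "b_test".resources;
  // captured[0].values holds the seven positional parameters in declaration order.
}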
@@ -6446,6 +6446,46 @@ async function listAppointmentsOperation(params) {
6446
6446
 
6447
6447
  // src/data/postgres/data-api-postgres-query-runner.ts
6448
6448
  var import_client_rds_data = require("@aws-sdk/client-rds-data");
6449
+
6450
+ // src/components/postgres/data-store-postgres-schema.ts
6451
+ var SCHEMA_NAME_PATTERN = /^[a-z_][a-z0-9_]{0,62}$/;
6452
+ function quoteSchemaIdentifier(schemaName) {
6453
+ if (!SCHEMA_NAME_PATTERN.test(schemaName)) {
6454
+ throw new Error(
6455
+ `Invalid Postgres schema name: ${JSON.stringify(schemaName)}; expected /[a-z_][a-z0-9_]{0,62}/`
6456
+ );
6457
+ }
6458
+ return `"${schemaName}"`;
6459
+ }
6460
+ function buildSchemaBootstrapStatements(schemaName) {
6461
+ const s = quoteSchemaIdentifier(schemaName);
6462
+ return [
6463
+ `CREATE SCHEMA IF NOT EXISTS ${s};`,
6464
+ [
6465
+ `CREATE TABLE IF NOT EXISTS ${s}.resources (`,
6466
+ ` tenant_id text NOT NULL,`,
6467
+ ` workspace_id text NOT NULL,`,
6468
+ ` resource_type text NOT NULL,`,
6469
+ ` resource_id text NOT NULL,`,
6470
+ ` version text NOT NULL,`,
6471
+ ` last_updated timestamptz NOT NULL,`,
6472
+ ` deleted_at timestamptz,`,
6473
+ ` resource jsonb NOT NULL,`,
6474
+ ` PRIMARY KEY (tenant_id, workspace_id, resource_type, resource_id)`,
6475
+ `);`
6476
+ ].join("\n"),
6477
+ [
6478
+ `CREATE INDEX IF NOT EXISTS resources_jsonb_gin`,
6479
+ ` ON ${s}.resources USING gin (resource);`
6480
+ ].join("\n"),
6481
+ [
6482
+ `CREATE INDEX IF NOT EXISTS resources_listing`,
6483
+ ` ON ${s}.resources (tenant_id, workspace_id, resource_type, last_updated);`
6484
+ ].join("\n")
6485
+ ];
6486
+ }
6487
+
6488
+ // src/data/postgres/data-api-postgres-query-runner.ts
6449
6489
  var DataApiPostgresQueryRunner = class {
6450
6490
  constructor(options) {
6451
6491
  this.client = options.client ?? new import_client_rds_data.RDSDataClient({});
@@ -6455,6 +6495,7 @@ var DataApiPostgresQueryRunner = class {
6455
6495
  this.schema = options.schema;
6456
6496
  }
6457
6497
  async query(sql, params) {
6498
+ await this.ensureSchemaBootstrapped();
6458
6499
  const out = await this.client.send(
6459
6500
  new import_client_rds_data.ExecuteStatementCommand({
6460
6501
  resourceArn: this.clusterArn,
@@ -6480,6 +6521,42 @@ var DataApiPostgresQueryRunner = class {
6480
6521
  }
6481
6522
  return JSON.parse(out.formattedRecords);
6482
6523
  }
6524
+ /**
6525
+ * Run `CREATE SCHEMA / CREATE TABLE / CREATE INDEX` idempotent DDL once per
6526
+ * runner instance. The replication Lambda's cold start does the same thing
6527
+ * (data-store-postgres-replication.handler) but in a fresh environment with
6528
+ * no DynamoDB writes yet, the read path can be the first thing that touches
6529
+ * the cluster — without this, every search returns
6530
+ * `relation "<schema>.resources" does not exist` until something has been
6531
+ * written. Memoized so subsequent queries on a warm container don't pay the
6532
+ * DDL cost; resets the promise on failure so the next call retries.
6533
+ *
6534
+ * Data API's `ExecuteStatement` accepts only a single SQL statement per
6535
+ * call, so each DDL statement from `buildSchemaBootstrapStatements` is
6536
+ * issued individually. `IF NOT EXISTS` on every statement makes this safe
6537
+ * to race across concurrent containers.
6538
+ */
6539
+ async ensureSchemaBootstrapped() {
6540
+ if (!this.bootstrapPromise) {
6541
+ this.bootstrapPromise = this.runBootstrap().catch((err) => {
6542
+ this.bootstrapPromise = void 0;
6543
+ throw err;
6544
+ });
6545
+ }
6546
+ return this.bootstrapPromise;
6547
+ }
6548
+ async runBootstrap() {
6549
+ for (const statement of buildSchemaBootstrapStatements(this.schema)) {
6550
+ await this.client.send(
6551
+ new import_client_rds_data.ExecuteStatementCommand({
6552
+ resourceArn: this.clusterArn,
6553
+ secretArn: this.secretArn,
6554
+ database: this.database,
6555
+ sql: statement
6556
+ })
6557
+ );
6558
+ }
6559
+ }
6483
6560
  };
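For reference, a hypothetical wiring of the runner. The option names below are inferred from the assignments and field reads visible in this diff (`client`, `schema`, `clusterArn`, `secretArn`, `database`) and may not match the published option types exactly; the ARNs and database name are made up.

import { RDSDataClient } from "@aws-sdk/client-rds-data";

async function demo(): Promise<void> {
  const runner = new DataApiPostgresQueryRunner({
    client: new RDSDataClient({}),
    clusterArn: "arn:aws:rds:us-east-1:123456789012:cluster:example", // illustrative
    secretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:example", // illustrative
    database: "openhi", // illustrative
    schema: "b_test",
  });

  // The first query on a fresh runner issues the idempotent bootstrap DDL
  // statement-by-statement through the Data API, then runs the query itself.
  const rows = await runner.query(
    'SELECT count(*) AS n FROM "b_test".resources'
  );
  console.log(rows);
}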
6484
6561
  function qualifyResourcesTable(sql, schema) {
6485
6562
  return sql.replace(