@openhi/constructs 0.0.85 → 0.0.86

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,295 +1,10 @@
  import {
-   DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES,
-   DATA_STORE_CHANGE_DETAIL_TYPE,
-   DATA_STORE_CHANGE_EVENT_SOURCE,
-   buildFhirCurrentResourceChangeDetail,
-   dynamodbImageToPlain
- } from "./chunk-SWSN6GDD.mjs";
+   handler,
+   parseCurrentResourceKeys,
+   shouldDropAsGlobalTableReplicationRecord
+ } from "./chunk-X5MHU7DA.mjs";
+ import "./chunk-CEOAGPYY.mjs";
  import "./chunk-LZOMFHX3.mjs";
-
- // src/components/dynamodb/firehose-archive-transform.handler.ts
- import { randomUUID } from "crypto";
- import {
-   EventBridgeClient,
-   PutEventsCommand
- } from "@aws-sdk/client-eventbridge";
- import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3";
- var CURRENT_SK = "CURRENT";
- var PK_PATTERN = /^TID#(?<tenantId>[^#]+)#WID#(?<workspaceId>[^#]+)#RT#(?<resourceType>[^#]+)#ID#(?<resourceId>.+)$/;
- var AWS_REP_UPDATE_REGION = "aws:rep:updateregion";
- function getDynamoDbStringAttr(image, name) {
-   if (!image) {
-     return void 0;
-   }
-   const av = image[name];
-   if (typeof av?.S === "string" && av.S.trim() !== "") {
-     return av.S.trim();
-   }
-   return void 0;
- }
- function primaryImageForReplicationCheck(record) {
-   if (record.eventName === "REMOVE") {
-     return record.dynamodb?.OldImage;
-   }
-   return record.dynamodb?.NewImage;
- }
- function shouldDropAsGlobalTableReplicationRecord(record, archiveLambdaRegion) {
-   const image = primaryImageForReplicationCheck(record);
-   const updateRegion = getDynamoDbStringAttr(image, AWS_REP_UPDATE_REGION);
-   if (updateRegion && archiveLambdaRegion && updateRegion !== archiveLambdaRegion) {
-     return true;
-   }
-   return isDynamoDbReplicationUserIdentity(record.userIdentity);
- }
- function isDynamoDbReplicationUserIdentity(userIdentity) {
-   if (!userIdentity || typeof userIdentity !== "object") {
-     return false;
-   }
-   const ui = userIdentity;
-   const principalRaw = ui.principalId ?? ui.PrincipalId;
-   const typeRaw = ui.type ?? ui.Type;
-   const principal = typeof principalRaw === "string" ? principalRaw.toLowerCase() : "";
-   const type = typeof typeRaw === "string" ? typeRaw.toLowerCase() : "";
-   if (type === "service" && principal === "dynamodb.amazonaws.com") {
-     return false;
-   }
-   const replicationMarkers = [
-     "awsservicerolefordynamodbreplication",
-     "replication.dynamodb.amazonaws.com"
-   ];
-   return replicationMarkers.some((m) => principal.includes(m));
- }
- function parseCurrentResourceKeys(record) {
-   const keys = record.dynamodb?.Keys;
-   if (!keys) {
-     return null;
-   }
-   const pkAttr = keys.PK?.S;
-   const skAttr = keys.SK?.S;
-   if (!pkAttr || skAttr !== CURRENT_SK) {
-     return null;
-   }
-   const m = PK_PATTERN.exec(pkAttr);
-   if (!m?.groups) {
-     return null;
-   }
-   const { tenantId, workspaceId, resourceType, resourceId } = m.groups;
-   const image = record.eventName === "REMOVE" ? record.dynamodb?.OldImage : record.dynamodb?.NewImage;
-   if (!image) {
-     return null;
-   }
-   const plain = dynamodbImageToPlain(image);
-   const version = typeof plain.vid === "string" ? plain.vid : null;
-   if (!version) {
-     return null;
-   }
-   return { tenantId, workspaceId, resourceType, resourceId, version };
- }
- function partitionToken(value) {
-   if (!value || value.trim() === "") {
-     return "-";
-   }
-   return value.replace(/[/\\]/g, "_");
- }
- function buildArchivePayload(record, keys) {
-   const newImage = record.dynamodb?.NewImage;
-   const oldImage = record.dynamodb?.OldImage;
-   const resourceImage = record.eventName === "REMOVE" ? oldImage : newImage;
-   const resourcePlain = resourceImage ? dynamodbImageToPlain(resourceImage) : {};
-   if (typeof resourcePlain.resource === "string") {
-     try {
-       resourcePlain.resource = JSON.parse(resourcePlain.resource);
-     } catch {
-     }
-   }
-   return {
-     eventName: record.eventName,
-     archivedAt: (/* @__PURE__ */ new Date()).toISOString(),
-     tenantId: keys.tenantId,
-     workspaceId: keys.workspaceId,
-     resourceType: keys.resourceType,
-     resourceId: keys.resourceId,
-     version: keys.version,
-     resource: resourcePlain
-   };
- }
- var PUT_EVENTS_BATCH_SIZE = 10;
- var MAX_PUT_EVENTS_ROUNDS = 3;
- var eventBridgeClient;
- function getEventBridgeClient() {
-   const bus = process.env.DATA_EVENT_BUS_NAME?.trim();
-   if (!bus) {
-     return void 0;
-   }
-   if (!eventBridgeClient) {
-     eventBridgeClient = new EventBridgeClient({});
-   }
-   return eventBridgeClient;
- }
- var s3ClientForDlq;
- function getS3ClientForDlq() {
-   const bucket = process.env.DATA_STORE_PUT_EVENTS_DLQ_BUCKET?.trim();
-   if (!bucket) {
-     return void 0;
-   }
-   if (!s3ClientForDlq) {
-     s3ClientForDlq = new S3Client({});
-   }
-   return s3ClientForDlq;
- }
- async function writePutEventsFailuresToDlq(payload) {
-   const bucket = process.env.DATA_STORE_PUT_EVENTS_DLQ_BUCKET?.trim();
-   const client = getS3ClientForDlq();
-   if (!bucket || !client) {
-     throw new Error(
-       `PutEvents exhausted retries but DATA_STORE_PUT_EVENTS_DLQ_BUCKET is not set (${payload.reason})`
-     );
-   }
-   const day = payload.failedAt.slice(0, 10);
-   const key = `put-events-failed/${day}/${randomUUID()}.json`;
-   await client.send(
-     new PutObjectCommand({
-       Bucket: bucket,
-       Key: key,
-       Body: JSON.stringify(payload),
-       ContentType: "application/json"
-     })
-   );
- }
- async function putEventsChunkWithRetriesAndDlq(client, entries) {
-   if (entries.length === 0) {
-     return;
-   }
-   let pending = [...entries];
-   for (let round = 1; round <= MAX_PUT_EVENTS_ROUNDS; round++) {
-     try {
-       const out = await client.send(new PutEventsCommand({ Entries: pending }));
-       const failed = out.FailedEntryCount ?? 0;
-       if (failed === 0) {
-         return;
-       }
-       const nextPending = [];
-       out.Entries?.forEach((e, i) => {
-         if (e?.ErrorCode && pending[i]) {
-           nextPending.push(pending[i]);
-         }
-       });
-       pending = nextPending;
-       if (pending.length === 0) {
-         return;
-       }
-       if (round === MAX_PUT_EVENTS_ROUNDS) {
-         await writePutEventsFailuresToDlq({
-           dlqSchemaVersion: 1,
-           failedAt: (/* @__PURE__ */ new Date()).toISOString(),
-           reason: "put_events_partial_failure",
-           attemptRounds: MAX_PUT_EVENTS_ROUNDS,
-           entries: pending,
-           putEventsResultEntries: out.Entries
-         });
-         return;
-       }
-     } catch (sdkErr) {
-       const sdkMessage = sdkErr instanceof Error ? sdkErr.message : String(sdkErr);
-       if (round === MAX_PUT_EVENTS_ROUNDS) {
-         await writePutEventsFailuresToDlq({
-           dlqSchemaVersion: 1,
-           failedAt: (/* @__PURE__ */ new Date()).toISOString(),
-           reason: "put_events_sdk_error",
-           attemptRounds: MAX_PUT_EVENTS_ROUNDS,
-           entries: pending,
-           sdkError: sdkMessage
-         });
-         return;
-       }
-       await new Promise((r) => setTimeout(r, 50 * round));
-     }
-   }
- }
- async function publishDataStoreChangeEvents(pending) {
-   const client = getEventBridgeClient();
-   const busName = process.env.DATA_EVENT_BUS_NAME?.trim();
-   if (!client || !busName || pending.length === 0) {
-     return;
-   }
-   const entries = [];
-   for (const { change, keys } of pending) {
-     const detailObj = buildFhirCurrentResourceChangeDetail(change, keys);
-     const detail = JSON.stringify(detailObj);
-     const detailBytes = Buffer.byteLength(detail, "utf8");
-     if (detailBytes > DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES) {
-       throw new Error(
-         `Event detail is ${detailBytes} bytes (max ${DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES}); oversize strategy deferred per ADR 2026-03-02-01 (${keys.resourceType}/${keys.resourceId}).`
-       );
-     }
-     entries.push({
-       Source: DATA_STORE_CHANGE_EVENT_SOURCE,
-       DetailType: DATA_STORE_CHANGE_DETAIL_TYPE,
-       Detail: detail,
-       EventBusName: busName
-     });
-   }
-   for (let i = 0; i < entries.length; i += PUT_EVENTS_BATCH_SIZE) {
-     const chunk = entries.slice(i, i + PUT_EVENTS_BATCH_SIZE);
-     await putEventsChunkWithRetriesAndDlq(client, chunk);
-   }
- }
- async function handler(event) {
-   const records = [];
-   const archiveLambdaRegion = process.env.AWS_REGION ?? "";
-   const pendingPublish = [];
-   for (const rec of event.records) {
-     try {
-       const payload = Buffer.from(rec.data, "base64").toString("utf8");
-       const change = JSON.parse(payload);
-       if (shouldDropAsGlobalTableReplicationRecord(change, archiveLambdaRegion)) {
-         records.push({
-           recordId: rec.recordId,
-           result: "Dropped",
-           data: rec.data
-         });
-         continue;
-       }
-       const keys = parseCurrentResourceKeys(change);
-       if (!keys) {
-         records.push({
-           recordId: rec.recordId,
-           result: "Dropped",
-           data: rec.data
-         });
-         continue;
-       }
-       const archive = buildArchivePayload(change, keys);
-       const out = Buffer.from(`${JSON.stringify(archive)}
- `).toString(
-         "base64"
-       );
-       pendingPublish.push({ change, keys });
-       records.push({
-         recordId: rec.recordId,
-         result: "Ok",
-         data: out,
-         metadata: {
-           partitionKeys: {
-             tenantId: partitionToken(keys.tenantId),
-             workspaceId: partitionToken(keys.workspaceId),
-             resourceType: partitionToken(keys.resourceType),
-             resourceId: partitionToken(keys.resourceId),
-             version: partitionToken(keys.version)
-           }
-         }
-       });
-     } catch {
-       records.push({
-         recordId: rec.recordId,
-         result: "ProcessingFailed",
-         data: rec.data
-       });
-     }
-   }
-   await publishDataStoreChangeEvents(pendingPublish);
-   return { records };
- }
  export {
    handler,
    parseCurrentResourceKeys,
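
The transform logic itself is unchanged in 0.0.86; it moved out of this entry chunk into chunk-X5MHU7DA.mjs, which the entry now re-imports and re-exports. A minimal sketch of exercising the re-exported handler in a local test, assuming the chunk path shown above (the import specifier below is hypothetical; resolve it against the installed package layout):

```ts
// Hypothetical import path; the chunk name comes from the diff above.
import { handler } from "@openhi/constructs/lib/chunk-X5MHU7DA.mjs";

// Synthetic DynamoDB stream record shaped like a current FHIR resource item.
const change = {
  eventName: "INSERT",
  dynamodb: {
    Keys: { PK: { S: "TID#t1#WID#w1#RT#Patient#ID#p1" }, SK: { S: "CURRENT" } },
    NewImage: { vid: { S: "1" }, resource: { S: '{"resourceType":"Patient"}' } },
  },
};

// Firehose delivers records base64-encoded; the transform decodes them.
const event = {
  records: [
    { recordId: "r-1", data: Buffer.from(JSON.stringify(change)).toString("base64") },
  ],
};

const { records } = await handler(event);
console.log(records[0]?.result); // "Ok": SK is CURRENT, PK parses, vid present
```

With DATA_EVENT_BUS_NAME unset, publishDataStoreChangeEvents returns before sending anything, so this exercises only the parse/drop/archive path.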
@@ -1 +1 @@
- {"version":3,"sources":["../src/components/dynamodb/firehose-archive-transform.handler.ts"],"sourcesContent":["…full inlined TypeScript source elided…"],"mappings":"…elided…","names":[]}
+ {"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
package/lib/index.d.mts CHANGED
@@ -4,7 +4,7 @@ import { IConstruct, Construct } from 'constructs';
  import { Certificate, CertificateProps, ICertificate } from 'aws-cdk-lib/aws-certificatemanager';
  import { HttpApiProps, HttpApi, IHttpApi, DomainName } from 'aws-cdk-lib/aws-apigatewayv2';
  import { GraphqlApi, IGraphqlApi, GraphqlApiProps } from 'aws-cdk-lib/aws-appsync';
- import { UserPool, UserPoolProps, UserPoolClient, UserPoolClientProps, UserPoolDomain, UserPoolDomainProps, IUserPool, IUserPoolClient, IUserPoolDomain } from 'aws-cdk-lib/aws-cognito';
+ import { UserPoolClient, UserPoolClientProps, IUserPool, UserPool, UserPoolProps, UserPoolDomain, UserPoolDomainProps, IUserPoolClient, IUserPoolDomain } from 'aws-cdk-lib/aws-cognito';
  import { Key, KeyProps, IKey } from 'aws-cdk-lib/aws-kms';
  import { NodejsFunction } from 'aws-cdk-lib/aws-lambda-nodejs';
  import { D as DynamoDbStreamKinesisRecord } from './dynamodb-stream-record-CJtV6a1t.mjs';
@@ -15,6 +15,8 @@ import * as kinesisfirehose from 'aws-cdk-lib/aws-kinesisfirehose';
  import * as s3 from 'aws-cdk-lib/aws-s3';
  import { IBucket, BucketProps } from 'aws-cdk-lib/aws-s3';
  import { Table, TableProps, ITable } from 'aws-cdk-lib/aws-dynamodb';
+ import * as ec2 from 'aws-cdk-lib/aws-ec2';
+ import * as rds from 'aws-cdk-lib/aws-rds';
  import { HostedZone, HostedZoneProps, IHostedZone, HostedZoneAttributes } from 'aws-cdk-lib/aws-route53';
  import { StringParameterProps, StringParameter } from 'aws-cdk-lib/aws-ssm';
  import { Distribution, DistributionProps } from 'aws-cdk-lib/aws-cloudfront';
@@ -387,6 +389,47 @@ declare class RootGraphqlApi extends GraphqlApi {
    constructor(scope: Construct, props?: Omit<RootGraphqlApiProps, "name">);
  }

+ interface CognitoFixtureSeederClientProps extends Partial<Omit<UserPoolClientProps, "userPool" | "generateSecret">> {
+   readonly userPool: IUserPool;
+ }
+ /**
+  * Dedicated Cognito app client for the OpenHI fixture-seeder CLI
+  * (`@openhi/seed-fixtures`).
+  *
+  * Why a dedicated client (vs reusing the SPA client):
+  * - Tightly scoped: only the seeder consumes tokens issued here, so an
+  *   audit trail of seeder activity is cleanly separable.
+  * - Decoupled from the SPA client's OAuth flows — no risk of breaking
+  *   web-app sign-in by tweaking auth-flow settings here.
+  * - Stage-conditional creation upstream (only provisioned in non-prod
+  *   environments) means prod stacks never carry a code path that could
+  *   issue a fixture-seeder token in the first place.
+  *
+  * Why USER_PASSWORD_AUTH (vs M2M client-credentials):
+  * - Cognito's M2M tier has a per-app-client monthly fee plus per-token
+  *   activity charges. For sporadic non-prod fixture runs the per-client
+  *   fee dominates the bill, especially if every dev branch spins up
+  *   its own auth stack.
+  * - USER_PASSWORD_AUTH against a service `fixture-seeder` user keeps
+  *   the cost in MAU territory (free under the 50K MAU tier).
+  * - Tradeoff: passwords need rotation and the service user must be
+  *   provisioned per non-prod environment (manual or scripted post-deploy).
+  *
+  * No client secret (`generateSecret: false`): USER_PASSWORD_AUTH
+  * authenticates with the password directly; a secret would just add
+  * another credential to manage without strengthening anything.
+  */
+ declare class CognitoFixtureSeederClient extends UserPoolClient {
+   /**
+    * SSM parameter name suffix used to publish this client's ID for
+    * cross-stack lookups. Built into a full parameter name via
+    * `buildParameterName` with `serviceType` AUTH (since the auth stack
+    * owns this resource).
+    */
+   static readonly SSM_PARAM_NAME = "COGNITO_FIXTURE_SEEDER_CLIENT";
+   constructor(scope: Construct, props: CognitoFixtureSeederClientProps);
+ }
+
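
For context, a sketch of the CLI side of the flow described above: USER_PASSWORD_AUTH against this client via InitiateAuth. The InitiateAuthCommand call is the standard @aws-sdk/client-cognito-identity-provider API; the environment variable names are assumptions for illustration.

```ts
import {
  CognitoIdentityProviderClient,
  InitiateAuthCommand,
} from "@aws-sdk/client-cognito-identity-provider";

const cognito = new CognitoIdentityProviderClient({});

// Hypothetical env wiring: client ID resolved from the SSM parameter published
// under SSM_PARAM_NAME; credentials belong to the operator-provisioned
// `fixture-seeder` service user.
const out = await cognito.send(
  new InitiateAuthCommand({
    AuthFlow: "USER_PASSWORD_AUTH", // no SECRET_HASH needed: generateSecret is false
    ClientId: process.env.FIXTURE_SEEDER_CLIENT_ID!,
    AuthParameters: {
      USERNAME: process.env.FIXTURE_SEEDER_USERNAME!,
      PASSWORD: process.env.FIXTURE_SEEDER_PASSWORD!,
    },
  }),
);

// Token the seeder presents to the REST API authorizer.
const idToken = out.AuthenticationResult?.IdToken;
```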
  /**
   * @see sites/www-docs/content/packages/@openhi/constructs/components/cognito/cognito-user-pool.md
   */
@@ -524,13 +567,25 @@ interface DynamoDbDataStoreProps extends Omit<TableProps, "tableName" | "removal
    readonly removalPolicy?: RemovalPolicy;
  }
  /**
-  * DynamoDB table implementing the single-table design for app data (e.g. FHIR
-  * resources and configuration).
+  * DynamoDB table implementing the single-table design for app data (FHIR
+  * resources data plane and platform control plane), per planning ADR-011 and
+  * DR-004.
   *
   * @see {@link https://github.com/codedrifters/openhi/blob/main/sites/www-docs/content/architecture/dynamodb-single-table-design.md | DynamoDB Single-Table Design}
   *
   * Primary key: PK (String), SK (String).
-  * GSIs: GSI1 (reverse reference), GSI2 (identifier lookup), GSI3 (facility ops), GSI4 (resource type list).
+  *
+  * GSIs:
+  * - **GSI1 — Unified Sharded List** (`GSI1PK`/`GSI1SK`, INCLUDE projection per
+  *   DR-004). Primary list/lookup index for both data-plane FHIR resources and
+  *   control-plane entities (User, Tenant, Workspace, Membership, Role,
+  *   RoleAssignment, Configuration). PK shape:
+  *   `TID#<tid>#WID#<wid>#RT#<Type>#SHARD#<n>` with 4 shards
+  *   (`n = hash(id) mod 4`). SK shape per `extractSortKey`: labeled types use
+  *   `<normalizedLabel>#<id>`; unlabeled use `<ISO-8601 lastUpdated>#<id>`.
+  * - **GSI2 — Sub-Lookup** (`GSI2PK`/`GSI2SK`, INCLUDE projection). Resolves
+  *   `UserEntity` from a Cognito `sub` for the Pre Token Generation Lambda.
+  *   PK shape: `USER#SUB#<cognitoSub>`. SK shape: `CURRENT`.
   *
   * For historical archive to S3, pass `kinesisStream` and `stream` (e.g.
   * `StreamViewType.NEW_AND_OLD_IMAGES`) on the table props per ADR 2026-03-11-02.
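
For illustration, assembling an item's GSI1 partition key from the documented shape. The diff does not show the package's actual shard hash, so the FNV-1a below is a stand-in assumption; only the key layout and the 4-shard `mod 4` rule come from the comment above.

```ts
// Stand-in 32-bit FNV-1a hash; the real hash function is not shown in this diff.
function fnv1a(s: string): number {
  let h = 0x811c9dc5;
  for (let i = 0; i < s.length; i++) {
    h ^= s.charCodeAt(i);
    h = Math.imul(h, 0x01000193) >>> 0;
  }
  return h;
}

// PK shape per the doc comment: TID#<tid>#WID#<wid>#RT#<Type>#SHARD#<n>
function gsi1PartitionKey(tid: string, wid: string, type: string, id: string): string {
  const shard = fnv1a(id) % 4; // n = hash(id) mod 4
  return `TID#${tid}#WID#${wid}#RT#${type}#SHARD#${shard}`;
}

// gsi1PartitionKey("t1", "w1", "Patient", "p-123")
// => "TID#t1#WID#w1#RT#Patient#SHARD#<0..3>"
```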
@@ -569,6 +624,111 @@ declare class OpsEventBus extends EventBus {
    constructor(scope: Construct, props?: EventBusProps);
  }

+ /**
+  * SSM parameter names that publish the Postgres replica's coordinates so other
+  * stacks (notably the REST API stack) can discover them without a direct CDK
+  * cross-stack reference. The schema name is intentionally NOT published — it
+  * is a deterministic function of `branchHash` and consumers compute it locally
+  * via {@link getPostgresReplicaSchemaName}.
+  */
+ declare const POSTGRES_REPLICA_CLUSTER_ARN_SSM_NAME = "POSTGRES_REPLICA_CLUSTER_ARN";
+ declare const POSTGRES_REPLICA_SECRET_ARN_SSM_NAME = "POSTGRES_REPLICA_SECRET_ARN";
+ declare const POSTGRES_REPLICA_DATABASE_NAME_SSM_NAME = "POSTGRES_REPLICA_DATABASE_NAME";
+ /**
+  * Derive the per-branch Postgres schema name from a branch hash. The `b_`
+  * prefix guarantees a leading letter (Postgres identifier rule). Branch hashes
+  * are 6 hex chars from {@link OpenHiService.branchHash} so the resulting
+  * `b_xxxxxx` is well within the 63-byte identifier limit.
+  */
+ declare function getPostgresReplicaSchemaName(branchHash: string): string;
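
The function body is not part of the declaration file, but the comment above pins the derivation down completely; a sketch consistent with it:

```ts
// `b_` prefix + 6-hex-char branch hash, per the doc comment above.
function getPostgresReplicaSchemaName(branchHash: string): string {
  return `b_${branchHash}`; // e.g. "3fa9c1" -> "b_3fa9c1": leading letter, far under 63 bytes
}
```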
+ interface DataStorePostgresReplicaProps {
+   /**
+    * Kinesis stream that receives DynamoDB item-level changes (the same stream
+    * that backs {@link DataStoreHistoricalArchive}). The replication Lambda is
+    * registered as a parallel consumer.
+    */
+   readonly kinesisStream: kinesis.IStream;
+   /**
+    * Removal policy for the cluster, secret, and dependent resources.
+    */
+   readonly removalPolicy: RemovalPolicy;
+   /**
+    * Short hash unique to the stack — used in the cluster identifier.
+    */
+   readonly stackHash: string;
+   /**
+    * Short hash unique to the branch — used to derive the per-branch schema
+    * name (`b_<branchHash>`) inside the Postgres database.
+    */
+   readonly branchHash: string;
+   /**
+    * Optional VPC override. If absent, the construct creates a minimal isolated
+    * VPC (2 AZs, no NAT gateways) just for the cluster and replication Lambda.
+    */
+   readonly vpc?: ec2.IVpc;
+   /**
+    * Optional database name override.
+    * @default "openhi"
+    */
+   readonly databaseName?: string;
+   /**
+    * Aurora Serverless v2 minimum capacity in ACUs. Defaults to 1 so the
+    * writer stays warm — avoids the ~10–20s scale-up wait that a cold
+    * (0 ACU) cluster imposes on the next request. Set explicitly to 0 to
+    * opt back into scale-to-zero if idle cost becomes the dominant concern.
+    */
+   readonly minCapacity?: number;
+   /**
+    * Aurora Serverless v2 maximum capacity in ACUs. Defaults to 2 — adequate
+    * for the PoC's replication-only workload.
+    */
+   readonly maxCapacity?: number;
+ }
+ /**
+  * DynamoDB change stream → Postgres replication tier (ADR 2026-04-17-01,
+  * phase 1). Provisions an Aurora Serverless v2 PostgreSQL cluster and a
+  * Lambda consumer on the existing change-stream that projects each current
+  * FHIR resource into a JSONB `resources` table under a per-branch schema.
+  *
+  * Phase 1 is replication-only; query routing and SearchParameter-specific
+  * indexes are intentionally deferred. Per-branch *clusters* (rather than the
+  * shared cluster suggested by the ADR) are an explicit PoC simplification —
+  * see the ADR's "Operational notes" section for the long-term direction.
+  *
+  * @see sites/www-docs/content/architecture/adr/2026-04-17-01-ad-hoc-query-support-fhir-api.md
+  */
+ declare class DataStorePostgresReplica extends Construct {
+   /**
+    * Resolve the cluster ARN published by an upstream {@link DataStorePostgresReplica}.
+    * Use from any stack that needs to grant `rds-data:ExecuteStatement` against
+    * the cluster.
+    */
+   static clusterArnFromConstruct(scope: Construct): string;
+   /**
+    * Resolve the credentials secret ARN published by an upstream
+    * {@link DataStorePostgresReplica}. Use from any stack that needs to grant
+    * `secretsmanager:GetSecretValue` against the secret.
+    */
+   static secretArnFromConstruct(scope: Construct): string;
+   /**
+    * Resolve the database name published by an upstream
+    * {@link DataStorePostgresReplica}.
+    */
+   static databaseNameFromConstruct(scope: Construct): string;
+   readonly vpc: ec2.IVpc;
+   readonly cluster: rds.DatabaseCluster;
+   readonly replicationFunction: NodejsFunction;
+   readonly databaseName: string;
+   readonly schemaName: string;
+   constructor(scope: Construct, id: string, props: DataStorePostgresReplicaProps);
+   /**
+    * Publishes the cluster ARN, secret ARN, and database name as discoverable
+    * SSM parameters so the REST API stack (and any future read-side consumer)
+    * can wire RDS Data API access without a direct CDK cross-stack reference.
+    */
+   private publishCoordinatesToSsm;
+ }
+
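
A sketch of the read-side wiring these statics and SSM parameters enable: a consumer resolves the published coordinates, computes the schema name locally, and queries over the RDS Data API. ExecuteStatementCommand and its parameters are the standard @aws-sdk/client-rds-data API; the env var names and SQL are illustrative assumptions (phase 1 ships no read path).

```ts
import { RDSDataClient, ExecuteStatementCommand } from "@aws-sdk/client-rds-data";

const rdsData = new RDSDataClient({});

// Assumed env wiring from clusterArnFromConstruct / secretArnFromConstruct /
// databaseNameFromConstruct plus the branch hash.
const schema = `b_${process.env.BRANCH_HASH!}`; // getPostgresReplicaSchemaName equivalent

const result = await rdsData.send(
  new ExecuteStatementCommand({
    resourceArn: process.env.POSTGRES_REPLICA_CLUSTER_ARN!,
    secretArn: process.env.POSTGRES_REPLICA_SECRET_ARN!,
    database: process.env.POSTGRES_REPLICA_DATABASE_NAME!,
    sql: `select resource from ${schema}.resources limit 10`, // illustrative query
  }),
);
console.log(result.records?.length ?? 0);
```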
  /**
   * @see sites/www-docs/content/packages/@openhi/constructs/components/route-53/child-hosted-zone.md
   */
@@ -747,6 +907,17 @@ declare class OpenHiAuthService extends OpenHiService {
     * Returns an IUserPoolClient by looking up the Auth stack's User Pool Client ID from SSM.
     */
    static userPoolClientFromConstruct(scope: Construct): IUserPoolClient;
+   /**
+    * Returns the dedicated fixture-seeder IUserPoolClient by looking up
+    * its ID from SSM. Only non-prod auth stacks publish this parameter
+    * (per the conditional in {@link createFixtureSeederClient}); calling
+    * this against a prod-deployed stack will fail at lookup time.
+    *
+    * Consumed by `OpenHiRestApiService` (in non-prod) so the authorizer
+    * accepts tokens issued by this client, and by the seed-fixtures CLI
+    * to drive USER_PASSWORD_AUTH against this client's ID.
+    */
+   static fixtureSeederClientFromConstruct(scope: Construct): IUserPoolClient;
    /**
     * Returns an IUserPoolDomain by looking up the Auth stack's User Pool Domain from SSM.
     */
@@ -763,6 +934,12 @@ declare class OpenHiAuthService extends OpenHiService {
    readonly userPool: IUserPool;
    readonly userPoolClient: IUserPoolClient;
    readonly userPoolDomain: IUserPoolDomain;
+   /**
+    * Dedicated USER_PASSWORD_AUTH client for the seed-fixtures CLI.
+    * Only created in non-prod environments (see
+    * {@link createFixtureSeederClient}). `undefined` in prod.
+    */
+   readonly fixtureSeederClient?: IUserPoolClient;
    constructor(ohEnv: OpenHiEnvironment, props?: OpenHiAuthServiceProps);
    /**
     * Creates the KMS key for the Cognito User Pool and exports its ARN to SSM.
@@ -787,6 +964,18 @@ declare class OpenHiAuthService extends OpenHiService {
     * Override to customize.
     */
    protected createUserPoolClient(): IUserPoolClient;
+   /**
+    * Creates the dedicated USER_PASSWORD_AUTH app client for the
+    * `@openhi/seed-fixtures` CLI, **only** in non-prod environments.
+    * Returns `undefined` when this stack is being deployed to a prod
+    * stage so the prod auth stack carries no fixture-seeder code path.
+    *
+    * Operator post-deploy: create a `fixture-seeder` Cognito user with
+    * a service password (manually via console or scripted with
+    * `aws cognito-idp admin-create-user`); the CLI consumes those creds
+    * via env vars to drive `InitiateAuth`.
+    */
+   protected createFixtureSeederClient(): IUserPoolClient | undefined;
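
The operator step described above, scripted with the standard Cognito admin APIs (the user pool ID and password sourcing are assumptions; the two commands are real @aws-sdk/client-cognito-identity-provider calls):

```ts
import {
  CognitoIdentityProviderClient,
  AdminCreateUserCommand,
  AdminSetUserPasswordCommand,
} from "@aws-sdk/client-cognito-identity-provider";

const cognito = new CognitoIdentityProviderClient({});
const UserPoolId = process.env.USER_POOL_ID!; // assumed env wiring
const Username = "fixture-seeder";

// Create the service user without sending an invitation message...
await cognito.send(
  new AdminCreateUserCommand({ UserPoolId, Username, MessageAction: "SUPPRESS" }),
);

// ...then set a permanent password so USER_PASSWORD_AUTH works immediately.
await cognito.send(
  new AdminSetUserPasswordCommand({
    UserPoolId,
    Username,
    Password: process.env.FIXTURE_SEEDER_PASSWORD!,
    Permanent: true,
  }),
);
```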
    /**
     * Creates the User Pool Domain (Cognito hosted UI) and exports domain name to SSM.
     * Look up via {@link OpenHiAuthService.userPoolDomainFromConstruct}.
@@ -983,6 +1172,13 @@ declare class OpenHiDataService extends OpenHiService {
     * notifications for current FHIR resources (ADRs 2026-03-11-02, 2026-03-02-01).
     */
    readonly dataStoreHistoricalArchive: DataStoreHistoricalArchive;
+   /**
+    * Postgres replication tier (ADR 2026-04-17-01, phase 1). A second consumer
+    * on the change stream that projects current FHIR resources into a JSONB
+    * `resources` table on Aurora Serverless v2. Phase 1 is replication-only;
+    * the read path is not wired up yet.
+    */
+   readonly dataStorePostgresReplica: DataStorePostgresReplica;
    constructor(ohEnv: OpenHiEnvironment, props?: OpenHiDataServiceProps);
    /**
     * Creates the data event bus.
@@ -1023,4 +1219,4 @@ declare class OpenHiGraphqlService extends OpenHiService {
    protected createRootGraphqlApi(): RootGraphqlApi;
  }

- export { type BuildParameterNameProps, ChildHostedZone, type ChildHostedZoneProps, CognitoUserPool, CognitoUserPoolClient, CognitoUserPoolDomain, CognitoUserPoolKmsKey, DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES, DATA_STORE_CHANGE_DETAIL_TYPE, DATA_STORE_CHANGE_EVENT_SOURCE, DataEventBus, DataStoreHistoricalArchive, type DataStoreHistoricalArchiveProps, DiscoverableStringParameter, type DiscoverableStringParameterProps, DynamoDbDataStore, type DynamoDbDataStoreProps, type FhirCurrentResourceChangeDetail, OpenHiApp, type OpenHiAppProps, OpenHiAuthService, type OpenHiAuthServiceProps, OpenHiDataService, type OpenHiDataServiceProps, OpenHiEnvironment, type OpenHiEnvironmentProps, OpenHiGlobalService, type OpenHiGlobalServiceProps, OpenHiGraphqlService, type OpenHiGraphqlServiceProps, OpenHiRestApiService, type OpenHiRestApiServiceProps, OpenHiService, type OpenHiServiceProps, type OpenHiServiceType, OpenHiStage, type OpenHiStageProps, OpsEventBus, PreTokenGenerationLambda, REST_API_BASE_URL_SSM_NAME, RootGraphqlApi, type RootGraphqlApiProps, RootHostedZone, RootHttpApi, type RootHttpApiProps, RootWildcardCertificate, STATIC_HOSTING_SERVICE_TYPE, StaticHosting, type StaticHostingProps, buildFhirCurrentResourceChangeDetail, getDynamoDbDataStoreTableName };
+ export { type BuildParameterNameProps, ChildHostedZone, type ChildHostedZoneProps, CognitoFixtureSeederClient, type CognitoFixtureSeederClientProps, CognitoUserPool, CognitoUserPoolClient, CognitoUserPoolDomain, CognitoUserPoolKmsKey, DATA_STORE_CHANGE_DETAIL_MAX_UTF8_BYTES, DATA_STORE_CHANGE_DETAIL_TYPE, DATA_STORE_CHANGE_EVENT_SOURCE, DataEventBus, DataStoreHistoricalArchive, type DataStoreHistoricalArchiveProps, DataStorePostgresReplica, type DataStorePostgresReplicaProps, DiscoverableStringParameter, type DiscoverableStringParameterProps, DynamoDbDataStore, type DynamoDbDataStoreProps, type FhirCurrentResourceChangeDetail, OpenHiApp, type OpenHiAppProps, OpenHiAuthService, type OpenHiAuthServiceProps, OpenHiDataService, type OpenHiDataServiceProps, OpenHiEnvironment, type OpenHiEnvironmentProps, OpenHiGlobalService, type OpenHiGlobalServiceProps, OpenHiGraphqlService, type OpenHiGraphqlServiceProps, OpenHiRestApiService, type OpenHiRestApiServiceProps, OpenHiService, type OpenHiServiceProps, type OpenHiServiceType, OpenHiStage, type OpenHiStageProps, OpsEventBus, POSTGRES_REPLICA_CLUSTER_ARN_SSM_NAME, POSTGRES_REPLICA_DATABASE_NAME_SSM_NAME, POSTGRES_REPLICA_SECRET_ARN_SSM_NAME, PreTokenGenerationLambda, REST_API_BASE_URL_SSM_NAME, RootGraphqlApi, type RootGraphqlApiProps, RootHostedZone, RootHttpApi, type RootHttpApiProps, RootWildcardCertificate, STATIC_HOSTING_SERVICE_TYPE, StaticHosting, type StaticHostingProps, buildFhirCurrentResourceChangeDetail, getDynamoDbDataStoreTableName, getPostgresReplicaSchemaName };