@openhi/constructs 0.0.69 → 0.0.71
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/firehose-archive-transform.handler.d.mts +42 -0
- package/lib/firehose-archive-transform.handler.d.ts +42 -0
- package/lib/firehose-archive-transform.handler.js +222 -0
- package/lib/firehose-archive-transform.handler.js.map +1 -0
- package/lib/firehose-archive-transform.handler.mjs +197 -0
- package/lib/firehose-archive-transform.handler.mjs.map +1 -0
- package/lib/index.d.mts +41 -2
- package/lib/index.d.ts +42 -3
- package/lib/index.js +138 -35
- package/lib/index.js.map +1 -1
- package/lib/index.mjs +146 -44
- package/lib/index.mjs.map +1 -1
- package/package.json +3 -3
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import { AttributeValue } from '@aws-sdk/client-dynamodb';
|
|
2
|
+
import { FirehoseTransformationEvent, FirehoseTransformationResult } from 'aws-lambda';
|
|
3
|
+
|
|
4
|
+
interface DynamoDbStreamKinesisRecord {
|
|
5
|
+
eventName?: string;
|
|
6
|
+
userIdentity?: unknown;
|
|
7
|
+
dynamodb?: {
|
|
8
|
+
Keys?: Record<string, AttributeValue>;
|
|
9
|
+
NewImage?: Record<string, AttributeValue>;
|
|
10
|
+
OldImage?: Record<string, AttributeValue>;
|
|
11
|
+
};
|
|
12
|
+
}
|
|
13
|
+
/**
|
|
14
|
+
* Returns true when this stream/Kinesis record should not be archived because it
|
|
15
|
+
* represents a **replica-side application** of a global-table change (the logical
|
|
16
|
+
* write originated in another Region).
|
|
17
|
+
*
|
|
18
|
+
* - If `aws:rep:updateregion` is present on the item image and differs from
|
|
19
|
+
* `archiveLambdaRegion`, the change was replicated into this Region (archive
|
|
20
|
+
* only in the Region that matches `aws:rep:updateregion`).
|
|
21
|
+
* - Otherwise, if `userIdentity` matches the DynamoDB replication service SLR,
|
|
22
|
+
* treat as replication. **Excluded:** TTL deletes (`type` Service and
|
|
23
|
+
* `principalId` `dynamodb.amazonaws.com`) per AWS stream Identity docs.
|
|
24
|
+
*
|
|
25
|
+
* For MREC global tables version 2019.11.21, AWS documents that stream records
|
|
26
|
+
* may not carry distinguishable metadata; the recommended approach is a custom
|
|
27
|
+
* “write region” attribute on items. If neither that attribute nor
|
|
28
|
+
* `aws:rep:updateregion` nor replication `userIdentity` applies, this function
|
|
29
|
+
* returns false (no drop)—duplicate archives are possible if identical pipelines
|
|
30
|
+
* run in every Region without those signals.
|
|
31
|
+
*/
|
|
32
|
+
declare function shouldDropAsGlobalTableReplicationRecord(record: DynamoDbStreamKinesisRecord, archiveLambdaRegion: string): boolean;
|
|
33
|
+
declare function parseCurrentResourceKeys(record: DynamoDbStreamKinesisRecord): {
|
|
34
|
+
tenantId: string;
|
|
35
|
+
workspaceId: string;
|
|
36
|
+
resourceType: string;
|
|
37
|
+
resourceId: string;
|
|
38
|
+
version: string;
|
|
39
|
+
} | null;
|
|
40
|
+
declare function handler(event: FirehoseTransformationEvent): Promise<FirehoseTransformationResult>;
|
|
41
|
+
|
|
42
|
+
export { type DynamoDbStreamKinesisRecord, handler, parseCurrentResourceKeys, shouldDropAsGlobalTableReplicationRecord };
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import { AttributeValue } from '@aws-sdk/client-dynamodb';
|
|
2
|
+
import { FirehoseTransformationEvent, FirehoseTransformationResult } from 'aws-lambda';
|
|
3
|
+
|
|
4
|
+
interface DynamoDbStreamKinesisRecord {
|
|
5
|
+
eventName?: string;
|
|
6
|
+
userIdentity?: unknown;
|
|
7
|
+
dynamodb?: {
|
|
8
|
+
Keys?: Record<string, AttributeValue>;
|
|
9
|
+
NewImage?: Record<string, AttributeValue>;
|
|
10
|
+
OldImage?: Record<string, AttributeValue>;
|
|
11
|
+
};
|
|
12
|
+
}
|
|
13
|
+
/**
|
|
14
|
+
* Returns true when this stream/Kinesis record should not be archived because it
|
|
15
|
+
* represents a **replica-side application** of a global-table change (the logical
|
|
16
|
+
* write originated in another Region).
|
|
17
|
+
*
|
|
18
|
+
* - If `aws:rep:updateregion` is present on the item image and differs from
|
|
19
|
+
* `archiveLambdaRegion`, the change was replicated into this Region (archive
|
|
20
|
+
* only in the Region that matches `aws:rep:updateregion`).
|
|
21
|
+
* - Otherwise, if `userIdentity` matches the DynamoDB replication service SLR,
|
|
22
|
+
* treat as replication. **Excluded:** TTL deletes (`type` Service and
|
|
23
|
+
* `principalId` `dynamodb.amazonaws.com`) per AWS stream Identity docs.
|
|
24
|
+
*
|
|
25
|
+
* For MREC global tables version 2019.11.21, AWS documents that stream records
|
|
26
|
+
* may not carry distinguishable metadata; the recommended approach is a custom
|
|
27
|
+
* “write region” attribute on items. If neither that attribute nor
|
|
28
|
+
* `aws:rep:updateregion` nor replication `userIdentity` applies, this function
|
|
29
|
+
* returns false (no drop)—duplicate archives are possible if identical pipelines
|
|
30
|
+
* run in every Region without those signals.
|
|
31
|
+
*/
|
|
32
|
+
declare function shouldDropAsGlobalTableReplicationRecord(record: DynamoDbStreamKinesisRecord, archiveLambdaRegion: string): boolean;
|
|
33
|
+
declare function parseCurrentResourceKeys(record: DynamoDbStreamKinesisRecord): {
|
|
34
|
+
tenantId: string;
|
|
35
|
+
workspaceId: string;
|
|
36
|
+
resourceType: string;
|
|
37
|
+
resourceId: string;
|
|
38
|
+
version: string;
|
|
39
|
+
} | null;
|
|
40
|
+
declare function handler(event: FirehoseTransformationEvent): Promise<FirehoseTransformationResult>;
|
|
41
|
+
|
|
42
|
+
export { type DynamoDbStreamKinesisRecord, handler, parseCurrentResourceKeys, shouldDropAsGlobalTableReplicationRecord };
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/components/dynamodb/firehose-archive-transform.handler.ts
|
|
21
|
+
var firehose_archive_transform_handler_exports = {};
|
|
22
|
+
__export(firehose_archive_transform_handler_exports, {
|
|
23
|
+
handler: () => handler,
|
|
24
|
+
parseCurrentResourceKeys: () => parseCurrentResourceKeys,
|
|
25
|
+
shouldDropAsGlobalTableReplicationRecord: () => shouldDropAsGlobalTableReplicationRecord
|
|
26
|
+
});
|
|
27
|
+
module.exports = __toCommonJS(firehose_archive_transform_handler_exports);
|
|
28
|
+
function dynamodbValueToJs(av) {
|
|
29
|
+
if (av.S !== void 0) {
|
|
30
|
+
return av.S;
|
|
31
|
+
}
|
|
32
|
+
if (av.N !== void 0) {
|
|
33
|
+
return av.N.includes(".") ? Number.parseFloat(av.N) : Number.parseInt(av.N, 10);
|
|
34
|
+
}
|
|
35
|
+
if (av.BOOL !== void 0) {
|
|
36
|
+
return av.BOOL;
|
|
37
|
+
}
|
|
38
|
+
if (av.NULL !== void 0) {
|
|
39
|
+
return null;
|
|
40
|
+
}
|
|
41
|
+
if (av.M !== void 0) {
|
|
42
|
+
return dynamodbImageToPlain(av.M);
|
|
43
|
+
}
|
|
44
|
+
if (av.L !== void 0) {
|
|
45
|
+
return av.L.map((x) => dynamodbValueToJs(x));
|
|
46
|
+
}
|
|
47
|
+
if (av.SS !== void 0) {
|
|
48
|
+
return av.SS;
|
|
49
|
+
}
|
|
50
|
+
if (av.NS !== void 0) {
|
|
51
|
+
return av.NS.map(
|
|
52
|
+
(n) => n.includes(".") ? Number.parseFloat(n) : Number.parseInt(n, 10)
|
|
53
|
+
);
|
|
54
|
+
}
|
|
55
|
+
return void 0;
|
|
56
|
+
}
|
|
57
|
+
function dynamodbImageToPlain(image) {
|
|
58
|
+
const out = {};
|
|
59
|
+
for (const [k, v] of Object.entries(image)) {
|
|
60
|
+
out[k] = dynamodbValueToJs(v);
|
|
61
|
+
}
|
|
62
|
+
return out;
|
|
63
|
+
}
|
|
64
|
+
var CURRENT_SK = "CURRENT";
|
|
65
|
+
var PK_PATTERN = /^TID#(?<tenantId>[^#]+)#WID#(?<workspaceId>[^#]+)#RT#(?<resourceType>[^#]+)#ID#(?<resourceId>.+)$/;
|
|
66
|
+
var AWS_REP_UPDATE_REGION = "aws:rep:updateregion";
|
|
67
|
+
function getDynamoDbStringAttr(image, name) {
|
|
68
|
+
if (!image) {
|
|
69
|
+
return void 0;
|
|
70
|
+
}
|
|
71
|
+
const av = image[name];
|
|
72
|
+
if (typeof av?.S === "string" && av.S.trim() !== "") {
|
|
73
|
+
return av.S.trim();
|
|
74
|
+
}
|
|
75
|
+
return void 0;
|
|
76
|
+
}
|
|
77
|
+
function primaryImageForReplicationCheck(record) {
|
|
78
|
+
if (record.eventName === "REMOVE") {
|
|
79
|
+
return record.dynamodb?.OldImage;
|
|
80
|
+
}
|
|
81
|
+
return record.dynamodb?.NewImage;
|
|
82
|
+
}
|
|
83
|
+
function shouldDropAsGlobalTableReplicationRecord(record, archiveLambdaRegion) {
|
|
84
|
+
const image = primaryImageForReplicationCheck(record);
|
|
85
|
+
const updateRegion = getDynamoDbStringAttr(image, AWS_REP_UPDATE_REGION);
|
|
86
|
+
if (updateRegion && archiveLambdaRegion && updateRegion !== archiveLambdaRegion) {
|
|
87
|
+
return true;
|
|
88
|
+
}
|
|
89
|
+
return isDynamoDbReplicationUserIdentity(record.userIdentity);
|
|
90
|
+
}
|
|
91
|
+
function isDynamoDbReplicationUserIdentity(userIdentity) {
|
|
92
|
+
if (!userIdentity || typeof userIdentity !== "object") {
|
|
93
|
+
return false;
|
|
94
|
+
}
|
|
95
|
+
const ui = userIdentity;
|
|
96
|
+
const principalRaw = ui.principalId ?? ui.PrincipalId;
|
|
97
|
+
const typeRaw = ui.type ?? ui.Type;
|
|
98
|
+
const principal = typeof principalRaw === "string" ? principalRaw.toLowerCase() : "";
|
|
99
|
+
const type = typeof typeRaw === "string" ? typeRaw.toLowerCase() : "";
|
|
100
|
+
if (type === "service" && principal === "dynamodb.amazonaws.com") {
|
|
101
|
+
return false;
|
|
102
|
+
}
|
|
103
|
+
const replicationMarkers = [
|
|
104
|
+
"awsservicerolefordynamodbreplication",
|
|
105
|
+
"replication.dynamodb.amazonaws.com"
|
|
106
|
+
];
|
|
107
|
+
return replicationMarkers.some((m) => principal.includes(m));
|
|
108
|
+
}
|
|
109
|
+
function parseCurrentResourceKeys(record) {
|
|
110
|
+
const keys = record.dynamodb?.Keys;
|
|
111
|
+
if (!keys) {
|
|
112
|
+
return null;
|
|
113
|
+
}
|
|
114
|
+
const pkAttr = keys.PK?.S;
|
|
115
|
+
const skAttr = keys.SK?.S;
|
|
116
|
+
if (!pkAttr || skAttr !== CURRENT_SK) {
|
|
117
|
+
return null;
|
|
118
|
+
}
|
|
119
|
+
const m = PK_PATTERN.exec(pkAttr);
|
|
120
|
+
if (!m?.groups) {
|
|
121
|
+
return null;
|
|
122
|
+
}
|
|
123
|
+
const { tenantId, workspaceId, resourceType, resourceId } = m.groups;
|
|
124
|
+
const image = record.eventName === "REMOVE" ? record.dynamodb?.OldImage : record.dynamodb?.NewImage;
|
|
125
|
+
if (!image) {
|
|
126
|
+
return null;
|
|
127
|
+
}
|
|
128
|
+
const plain = dynamodbImageToPlain(image);
|
|
129
|
+
const version = typeof plain.vid === "string" ? plain.vid : null;
|
|
130
|
+
if (!version) {
|
|
131
|
+
return null;
|
|
132
|
+
}
|
|
133
|
+
return { tenantId, workspaceId, resourceType, resourceId, version };
|
|
134
|
+
}
|
|
135
|
+
function partitionToken(value) {
|
|
136
|
+
if (!value || value.trim() === "") {
|
|
137
|
+
return "-";
|
|
138
|
+
}
|
|
139
|
+
return value.replace(/[/\\]/g, "_");
|
|
140
|
+
}
|
|
141
|
+
function buildArchivePayload(record, keys) {
|
|
142
|
+
const newImage = record.dynamodb?.NewImage;
|
|
143
|
+
const oldImage = record.dynamodb?.OldImage;
|
|
144
|
+
const resourceImage = record.eventName === "REMOVE" ? oldImage : newImage;
|
|
145
|
+
const resourcePlain = resourceImage ? dynamodbImageToPlain(resourceImage) : {};
|
|
146
|
+
if (typeof resourcePlain.resource === "string") {
|
|
147
|
+
try {
|
|
148
|
+
resourcePlain.resource = JSON.parse(resourcePlain.resource);
|
|
149
|
+
} catch {
|
|
150
|
+
}
|
|
151
|
+
}
|
|
152
|
+
return {
|
|
153
|
+
eventName: record.eventName,
|
|
154
|
+
archivedAt: (/* @__PURE__ */ new Date()).toISOString(),
|
|
155
|
+
tenantId: keys.tenantId,
|
|
156
|
+
workspaceId: keys.workspaceId,
|
|
157
|
+
resourceType: keys.resourceType,
|
|
158
|
+
resourceId: keys.resourceId,
|
|
159
|
+
version: keys.version,
|
|
160
|
+
resource: resourcePlain
|
|
161
|
+
};
|
|
162
|
+
}
|
|
163
|
+
function handler(event) {
|
|
164
|
+
const records = [];
|
|
165
|
+
const archiveLambdaRegion = process.env.AWS_REGION ?? "";
|
|
166
|
+
for (const rec of event.records) {
|
|
167
|
+
try {
|
|
168
|
+
const payload = Buffer.from(rec.data, "base64").toString("utf8");
|
|
169
|
+
const change = JSON.parse(payload);
|
|
170
|
+
if (shouldDropAsGlobalTableReplicationRecord(change, archiveLambdaRegion)) {
|
|
171
|
+
records.push({
|
|
172
|
+
recordId: rec.recordId,
|
|
173
|
+
result: "Dropped",
|
|
174
|
+
data: rec.data
|
|
175
|
+
});
|
|
176
|
+
continue;
|
|
177
|
+
}
|
|
178
|
+
const keys = parseCurrentResourceKeys(change);
|
|
179
|
+
if (!keys) {
|
|
180
|
+
records.push({
|
|
181
|
+
recordId: rec.recordId,
|
|
182
|
+
result: "Dropped",
|
|
183
|
+
data: rec.data
|
|
184
|
+
});
|
|
185
|
+
continue;
|
|
186
|
+
}
|
|
187
|
+
const archive = buildArchivePayload(change, keys);
|
|
188
|
+
const out = Buffer.from(`${JSON.stringify(archive)}
|
|
189
|
+
`).toString(
|
|
190
|
+
"base64"
|
|
191
|
+
);
|
|
192
|
+
records.push({
|
|
193
|
+
recordId: rec.recordId,
|
|
194
|
+
result: "Ok",
|
|
195
|
+
data: out,
|
|
196
|
+
metadata: {
|
|
197
|
+
partitionKeys: {
|
|
198
|
+
tenantId: partitionToken(keys.tenantId),
|
|
199
|
+
workspaceId: partitionToken(keys.workspaceId),
|
|
200
|
+
resourceType: partitionToken(keys.resourceType),
|
|
201
|
+
resourceId: partitionToken(keys.resourceId),
|
|
202
|
+
version: partitionToken(keys.version)
|
|
203
|
+
}
|
|
204
|
+
}
|
|
205
|
+
});
|
|
206
|
+
} catch {
|
|
207
|
+
records.push({
|
|
208
|
+
recordId: rec.recordId,
|
|
209
|
+
result: "ProcessingFailed",
|
|
210
|
+
data: rec.data
|
|
211
|
+
});
|
|
212
|
+
}
|
|
213
|
+
}
|
|
214
|
+
return Promise.resolve({ records });
|
|
215
|
+
}
|
|
216
|
+
// Annotate the CommonJS export names for ESM import in node:
|
|
217
|
+
0 && (module.exports = {
|
|
218
|
+
handler,
|
|
219
|
+
parseCurrentResourceKeys,
|
|
220
|
+
shouldDropAsGlobalTableReplicationRecord
|
|
221
|
+
});
|
|
222
|
+
//# sourceMappingURL=firehose-archive-transform.handler.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/components/dynamodb/firehose-archive-transform.handler.ts"],"sourcesContent":["import type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\nimport type {\n FirehoseTransformationEvent,\n FirehoseTransformationResult,\n FirehoseTransformationResultRecord,\n} from \"aws-lambda\";\n\nfunction dynamodbValueToJs(av: AttributeValue): unknown {\n if (av.S !== undefined) {\n return av.S;\n }\n if (av.N !== undefined) {\n return av.N.includes(\".\")\n ? Number.parseFloat(av.N)\n : Number.parseInt(av.N, 10);\n }\n if (av.BOOL !== undefined) {\n return av.BOOL;\n }\n if (av.NULL !== undefined) {\n return null;\n }\n if (av.M !== undefined) {\n return dynamodbImageToPlain(av.M);\n }\n if (av.L !== undefined) {\n return av.L.map((x) => dynamodbValueToJs(x));\n }\n if (av.SS !== undefined) {\n return av.SS;\n }\n if (av.NS !== undefined) {\n return av.NS.map((n) =>\n n.includes(\".\") ? Number.parseFloat(n) : Number.parseInt(n, 10),\n );\n }\n return undefined;\n}\n\nfunction dynamodbImageToPlain(\n image: Record<string, AttributeValue>,\n): Record<string, unknown> {\n const out: Record<string, unknown> = {};\n for (const [k, v] of Object.entries(image)) {\n out[k] = dynamodbValueToJs(v);\n }\n return out;\n}\n\n/**\n * Firehose data-transformation handler: filters DynamoDB change records to\n * current FHIR resource items (SK = CURRENT, TID#…#WID#…#RT#…#ID#… PK),\n * writes archive JSON to S3 via Firehose, and sets dynamic partition keys\n * tenantId/workspaceId/resourceType/resourceId/version per ADR 2026-03-11-02.\n */\n\nconst CURRENT_SK = \"CURRENT\";\nconst PK_PATTERN =\n /^TID#(?<tenantId>[^#]+)#WID#(?<workspaceId>[^#]+)#RT#(?<resourceType>[^#]+)#ID#(?<resourceId>.+)$/;\n\nexport interface DynamoDbStreamKinesisRecord {\n eventName?: string;\n userIdentity?: unknown;\n dynamodb?: {\n Keys?: Record<string, AttributeValue>;\n NewImage?: Record<string, AttributeValue>;\n OldImage?: Record<string, AttributeValue>;\n };\n}\n\n/** 
DynamoDB-managed attribute on global table items (see AWS Global Tables legacy / replication docs). */\nconst AWS_REP_UPDATE_REGION = \"aws:rep:updateregion\";\n\nfunction getDynamoDbStringAttr(\n image: Record<string, AttributeValue> | undefined,\n name: string,\n): string | undefined {\n if (!image) {\n return undefined;\n }\n const av = image[name];\n if (typeof av?.S === \"string\" && av.S.trim() !== \"\") {\n return av.S.trim();\n }\n return undefined;\n}\n\nfunction primaryImageForReplicationCheck(\n record: DynamoDbStreamKinesisRecord,\n): Record<string, AttributeValue> | undefined {\n if (record.eventName === \"REMOVE\") {\n return record.dynamodb?.OldImage;\n }\n return record.dynamodb?.NewImage;\n}\n\n/**\n * Returns true when this stream/Kinesis record should not be archived because it\n * represents a **replica-side application** of a global-table change (the logical\n * write originated in another Region).\n *\n * - If `aws:rep:updateregion` is present on the item image and differs from\n * `archiveLambdaRegion`, the change was replicated into this Region (archive\n * only in the Region that matches `aws:rep:updateregion`).\n * - Otherwise, if `userIdentity` matches the DynamoDB replication service SLR,\n * treat as replication. **Excluded:** TTL deletes (`type` Service and\n * `principalId` `dynamodb.amazonaws.com`) per AWS stream Identity docs.\n *\n * For MREC global tables version 2019.11.21, AWS documents that stream records\n * may not carry distinguishable metadata; the recommended approach is a custom\n * “write region” attribute on items. 
If neither that attribute nor\n * `aws:rep:updateregion` nor replication `userIdentity` applies, this function\n * returns false (no drop)—duplicate archives are possible if identical pipelines\n * run in every Region without those signals.\n */\nexport function shouldDropAsGlobalTableReplicationRecord(\n record: DynamoDbStreamKinesisRecord,\n archiveLambdaRegion: string,\n): boolean {\n const image = primaryImageForReplicationCheck(record);\n const updateRegion = getDynamoDbStringAttr(image, AWS_REP_UPDATE_REGION);\n if (\n updateRegion &&\n archiveLambdaRegion &&\n updateRegion !== archiveLambdaRegion\n ) {\n return true;\n }\n\n return isDynamoDbReplicationUserIdentity(record.userIdentity);\n}\n\nfunction isDynamoDbReplicationUserIdentity(userIdentity: unknown): boolean {\n if (!userIdentity || typeof userIdentity !== \"object\") {\n return false;\n }\n const ui = userIdentity as Record<string, unknown>;\n const principalRaw = ui.principalId ?? ui.PrincipalId;\n const typeRaw = ui.type ?? ui.Type;\n const principal =\n typeof principalRaw === \"string\" ? principalRaw.toLowerCase() : \"\";\n const type = typeof typeRaw === \"string\" ? 
typeRaw.toLowerCase() : \"\";\n\n if (type === \"service\" && principal === \"dynamodb.amazonaws.com\") {\n return false;\n }\n\n const replicationMarkers = [\n \"awsservicerolefordynamodbreplication\",\n \"replication.dynamodb.amazonaws.com\",\n ];\n return replicationMarkers.some((m) => principal.includes(m));\n}\n\nexport function parseCurrentResourceKeys(record: DynamoDbStreamKinesisRecord): {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n} | null {\n const keys = record.dynamodb?.Keys;\n if (!keys) {\n return null;\n }\n const pkAttr = keys.PK?.S;\n const skAttr = keys.SK?.S;\n if (!pkAttr || skAttr !== CURRENT_SK) {\n return null;\n }\n const m = PK_PATTERN.exec(pkAttr);\n if (!m?.groups) {\n return null;\n }\n const { tenantId, workspaceId, resourceType, resourceId } = m.groups;\n const image =\n record.eventName === \"REMOVE\"\n ? record.dynamodb?.OldImage\n : record.dynamodb?.NewImage;\n if (!image) {\n return null;\n }\n const plain = dynamodbImageToPlain(image as Record<string, AttributeValue>);\n const version = typeof plain.vid === \"string\" ? plain.vid : null;\n if (!version) {\n return null;\n }\n return { tenantId, workspaceId, resourceType, resourceId, version };\n}\n\nfunction partitionToken(value: string): string {\n if (!value || value.trim() === \"\") {\n return \"-\";\n }\n return value.replace(/[/\\\\]/g, \"_\");\n}\n\nfunction buildArchivePayload(\n record: DynamoDbStreamKinesisRecord,\n keys: ReturnType<typeof parseCurrentResourceKeys>,\n): Record<string, unknown> {\n const newImage = record.dynamodb?.NewImage;\n const oldImage = record.dynamodb?.OldImage;\n const resourceImage = record.eventName === \"REMOVE\" ? oldImage : newImage;\n const resourcePlain = resourceImage\n ? 
dynamodbImageToPlain(resourceImage as Record<string, AttributeValue>)\n : {};\n\n if (typeof resourcePlain.resource === \"string\") {\n try {\n resourcePlain.resource = JSON.parse(resourcePlain.resource) as unknown;\n } catch {\n /* keep raw string if not valid JSON */\n }\n }\n\n return {\n eventName: record.eventName,\n archivedAt: new Date().toISOString(),\n tenantId: keys!.tenantId,\n workspaceId: keys!.workspaceId,\n resourceType: keys!.resourceType,\n resourceId: keys!.resourceId,\n version: keys!.version,\n resource: resourcePlain,\n };\n}\n\nexport function handler(\n event: FirehoseTransformationEvent,\n): Promise<FirehoseTransformationResult> {\n const records: FirehoseTransformationResultRecord[] = [];\n const archiveLambdaRegion = process.env.AWS_REGION ?? \"\";\n\n for (const rec of event.records) {\n try {\n const payload = Buffer.from(rec.data, \"base64\").toString(\"utf8\");\n const change = JSON.parse(payload) as DynamoDbStreamKinesisRecord;\n\n if (\n shouldDropAsGlobalTableReplicationRecord(change, archiveLambdaRegion)\n ) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const keys = parseCurrentResourceKeys(change);\n\n if (!keys) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const archive = buildArchivePayload(change, keys);\n const out = Buffer.from(`${JSON.stringify(archive)}\\n`).toString(\n \"base64\",\n );\n\n records.push({\n recordId: rec.recordId,\n result: \"Ok\",\n data: out,\n metadata: {\n partitionKeys: {\n tenantId: partitionToken(keys.tenantId),\n workspaceId: partitionToken(keys.workspaceId),\n resourceType: partitionToken(keys.resourceType),\n resourceId: partitionToken(keys.resourceId),\n version: partitionToken(keys.version),\n },\n },\n });\n } catch {\n records.push({\n recordId: rec.recordId,\n result: \"ProcessingFailed\",\n data: rec.data,\n });\n }\n }\n\n return Promise.resolve({ records 
});\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAOA,SAAS,kBAAkB,IAA6B;AACtD,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,SAAS,GAAG,IACpB,OAAO,WAAW,GAAG,CAAC,IACtB,OAAO,SAAS,GAAG,GAAG,EAAE;AAAA,EAC9B;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO;AAAA,EACT;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,qBAAqB,GAAG,CAAC;AAAA,EAClC;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,IAAI,CAAC,MAAM,kBAAkB,CAAC,CAAC;AAAA,EAC7C;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG,GAAG;AAAA,MAAI,CAAC,MAChB,EAAE,SAAS,GAAG,IAAI,OAAO,WAAW,CAAC,IAAI,OAAO,SAAS,GAAG,EAAE;AAAA,IAChE;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,qBACP,OACyB;AACzB,QAAM,MAA+B,CAAC;AACtC,aAAW,CAAC,GAAG,CAAC,KAAK,OAAO,QAAQ,KAAK,GAAG;AAC1C,QAAI,CAAC,IAAI,kBAAkB,CAAC;AAAA,EAC9B;AACA,SAAO;AACT;AASA,IAAM,aAAa;AACnB,IAAM,aACJ;AAaF,IAAM,wBAAwB;AAE9B,SAAS,sBACP,OACA,MACoB;AACpB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,KAAK,MAAM,IAAI;AACrB,MAAI,OAAO,IAAI,MAAM,YAAY,GAAG,EAAE,KAAK,MAAM,IAAI;AACnD,WAAO,GAAG,EAAE,KAAK;AAAA,EACnB;AACA,SAAO;AACT;AAEA,SAAS,gCACP,QAC4C;AAC5C,MAAI,OAAO,cAAc,UAAU;AACjC,WAAO,OAAO,UAAU;AAAA,EAC1B;AACA,SAAO,OAAO,UAAU;AAC1B;AAqBO,SAAS,yCACd,QACA,qBACS;AACT,QAAM,QAAQ,gCAAgC,MAAM;AACpD,QAAM,eAAe,sBAAsB,OAAO,qBAAqB;AACvE,MACE,gBACA,uBACA,iBAAiB,qBACjB;AACA,WAAO;AAAA,EACT;AAEA,SAAO,kCAAkC,OAAO,YAAY;AAC9D;AAEA,SAAS,kCAAkC,cAAgC;AACzE,MAAI,CAAC,gBAAgB,OAAO,iBAAiB,UAAU;AACrD,WAAO;AAAA,EACT;AACA,QAAM,KAAK;AACX,QAAM,eAAe,GAAG,eAAe,GAAG;AAC1C,QAAM,UAAU,GAAG,QAAQ,GAAG;AAC9B,QAAM,YACJ,OAAO,iBAAiB,WAAW,aAAa,YAAY,IAAI;AAClE,QAAM,OAAO,OAAO,YAAY,WAAW,QAAQ,YAAY,IAAI;AAEnE,MAAI,SAAS,aAAa,cAAc,0BAA0B;AAChE,WAAO;AAAA,EACT;AAEA,QAAM,qBAAqB;AAAA,IACzB;AAAA,IACA;AAAA,EACF;AACA,SAAO,mBAAmB,KAAK,CAAC,MAAM,UAAU,SAAS,CAAC,CAAC;AAC7D;AAEO,SAAS,yBAAyB,QAMhC;AACP,QAAM,OAAO,OAAO,UAAU;AAC9B,MAAI,CAAC,MAAM;AACT,WAAO;AAAA,EACT;AACA,QAAM,SAAS,KAAK,IAAI;AACxB,QAAM,SAAS,KAAK,IAAI;AACxB,MAAI,CAAC
,UAAU,WAAW,YAAY;AACpC,WAAO;AAAA,EACT;AACA,QAAM,IAAI,WAAW,KAAK,MAAM;AAChC,MAAI,CAAC,GAAG,QAAQ;AACd,WAAO;AAAA,EACT;AACA,QAAM,EAAE,UAAU,aAAa,cAAc,WAAW,IAAI,EAAE;AAC9D,QAAM,QACJ,OAAO,cAAc,WACjB,OAAO,UAAU,WACjB,OAAO,UAAU;AACvB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,qBAAqB,KAAuC;AAC1E,QAAM,UAAU,OAAO,MAAM,QAAQ,WAAW,MAAM,MAAM;AAC5D,MAAI,CAAC,SAAS;AACZ,WAAO;AAAA,EACT;AACA,SAAO,EAAE,UAAU,aAAa,cAAc,YAAY,QAAQ;AACpE;AAEA,SAAS,eAAe,OAAuB;AAC7C,MAAI,CAAC,SAAS,MAAM,KAAK,MAAM,IAAI;AACjC,WAAO;AAAA,EACT;AACA,SAAO,MAAM,QAAQ,UAAU,GAAG;AACpC;AAEA,SAAS,oBACP,QACA,MACyB;AACzB,QAAM,WAAW,OAAO,UAAU;AAClC,QAAM,WAAW,OAAO,UAAU;AAClC,QAAM,gBAAgB,OAAO,cAAc,WAAW,WAAW;AACjE,QAAM,gBAAgB,gBAClB,qBAAqB,aAA+C,IACpE,CAAC;AAEL,MAAI,OAAO,cAAc,aAAa,UAAU;AAC9C,QAAI;AACF,oBAAc,WAAW,KAAK,MAAM,cAAc,QAAQ;AAAA,IAC5D,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,SAAO;AAAA,IACL,WAAW,OAAO;AAAA,IAClB,aAAY,oBAAI,KAAK,GAAE,YAAY;AAAA,IACnC,UAAU,KAAM;AAAA,IAChB,aAAa,KAAM;AAAA,IACnB,cAAc,KAAM;AAAA,IACpB,YAAY,KAAM;AAAA,IAClB,SAAS,KAAM;AAAA,IACf,UAAU;AAAA,EACZ;AACF;AAEO,SAAS,QACd,OACuC;AACvC,QAAM,UAAgD,CAAC;AACvD,QAAM,sBAAsB,QAAQ,IAAI,cAAc;AAEtD,aAAW,OAAO,MAAM,SAAS;AAC/B,QAAI;AACF,YAAM,UAAU,OAAO,KAAK,IAAI,MAAM,QAAQ,EAAE,SAAS,MAAM;AAC/D,YAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,UACE,yCAAyC,QAAQ,mBAAmB,GACpE;AACA,gBAAQ,KAAK;AAAA,UACX,UAAU,IAAI;AAAA,UACd,QAAQ;AAAA,UACR,MAAM,IAAI;AAAA,QACZ,CAAC;AACD;AAAA,MACF;AAEA,YAAM,OAAO,yBAAyB,MAAM;AAE5C,UAAI,CAAC,MAAM;AACT,gBAAQ,KAAK;AAAA,UACX,UAAU,IAAI;AAAA,UACd,QAAQ;AAAA,UACR,MAAM,IAAI;AAAA,QACZ,CAAC;AACD;AAAA,MACF;AAEA,YAAM,UAAU,oBAAoB,QAAQ,IAAI;AAChD,YAAM,MAAM,OAAO,KAAK,GAAG,KAAK,UAAU,OAAO,CAAC;AAAA,CAAI,EAAE;AAAA,QACtD;AAAA,MACF;AAEA,cAAQ,KAAK;AAAA,QACX,UAAU,IAAI;AAAA,QACd,QAAQ;AAAA,QACR,MAAM;AAAA,QACN,UAAU;AAAA,UACR,eAAe;AAAA,YACb,UAAU,eAAe,KAAK,QAAQ;AAAA,YACtC,aAAa,eAAe,KAAK,WAAW;AAAA,YAC5C,cAAc,eAAe,KAAK,YAAY;AAAA,YAC9C,YAAY,eAAe,KAAK,UAAU;AAAA,YAC1C,SAAS,eAAe,KAAK,OAAO;AAAA,UACtC;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH,QAAQ;AACN,cAAQ,KAAK;AAAA,QACX,UAAU,IAAI;AAAA,QACd,QAAQ;AAAA,QACR,MAAM,IAAI;AAAA,MAC
Z,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,QAAQ,QAAQ,EAAE,QAAQ,CAAC;AACpC;","names":[]}
|
|
@@ -0,0 +1,197 @@
|
|
|
1
|
+
import "./chunk-LZOMFHX3.mjs";
|
|
2
|
+
|
|
3
|
+
// src/components/dynamodb/firehose-archive-transform.handler.ts
|
|
4
|
+
function dynamodbValueToJs(av) {
|
|
5
|
+
if (av.S !== void 0) {
|
|
6
|
+
return av.S;
|
|
7
|
+
}
|
|
8
|
+
if (av.N !== void 0) {
|
|
9
|
+
return av.N.includes(".") ? Number.parseFloat(av.N) : Number.parseInt(av.N, 10);
|
|
10
|
+
}
|
|
11
|
+
if (av.BOOL !== void 0) {
|
|
12
|
+
return av.BOOL;
|
|
13
|
+
}
|
|
14
|
+
if (av.NULL !== void 0) {
|
|
15
|
+
return null;
|
|
16
|
+
}
|
|
17
|
+
if (av.M !== void 0) {
|
|
18
|
+
return dynamodbImageToPlain(av.M);
|
|
19
|
+
}
|
|
20
|
+
if (av.L !== void 0) {
|
|
21
|
+
return av.L.map((x) => dynamodbValueToJs(x));
|
|
22
|
+
}
|
|
23
|
+
if (av.SS !== void 0) {
|
|
24
|
+
return av.SS;
|
|
25
|
+
}
|
|
26
|
+
if (av.NS !== void 0) {
|
|
27
|
+
return av.NS.map(
|
|
28
|
+
(n) => n.includes(".") ? Number.parseFloat(n) : Number.parseInt(n, 10)
|
|
29
|
+
);
|
|
30
|
+
}
|
|
31
|
+
return void 0;
|
|
32
|
+
}
|
|
33
|
+
function dynamodbImageToPlain(image) {
|
|
34
|
+
const out = {};
|
|
35
|
+
for (const [k, v] of Object.entries(image)) {
|
|
36
|
+
out[k] = dynamodbValueToJs(v);
|
|
37
|
+
}
|
|
38
|
+
return out;
|
|
39
|
+
}
|
|
40
|
+
var CURRENT_SK = "CURRENT";
|
|
41
|
+
var PK_PATTERN = /^TID#(?<tenantId>[^#]+)#WID#(?<workspaceId>[^#]+)#RT#(?<resourceType>[^#]+)#ID#(?<resourceId>.+)$/;
|
|
42
|
+
var AWS_REP_UPDATE_REGION = "aws:rep:updateregion";
|
|
43
|
+
function getDynamoDbStringAttr(image, name) {
|
|
44
|
+
if (!image) {
|
|
45
|
+
return void 0;
|
|
46
|
+
}
|
|
47
|
+
const av = image[name];
|
|
48
|
+
if (typeof av?.S === "string" && av.S.trim() !== "") {
|
|
49
|
+
return av.S.trim();
|
|
50
|
+
}
|
|
51
|
+
return void 0;
|
|
52
|
+
}
|
|
53
|
+
function primaryImageForReplicationCheck(record) {
|
|
54
|
+
if (record.eventName === "REMOVE") {
|
|
55
|
+
return record.dynamodb?.OldImage;
|
|
56
|
+
}
|
|
57
|
+
return record.dynamodb?.NewImage;
|
|
58
|
+
}
|
|
59
|
+
function shouldDropAsGlobalTableReplicationRecord(record, archiveLambdaRegion) {
|
|
60
|
+
const image = primaryImageForReplicationCheck(record);
|
|
61
|
+
const updateRegion = getDynamoDbStringAttr(image, AWS_REP_UPDATE_REGION);
|
|
62
|
+
if (updateRegion && archiveLambdaRegion && updateRegion !== archiveLambdaRegion) {
|
|
63
|
+
return true;
|
|
64
|
+
}
|
|
65
|
+
return isDynamoDbReplicationUserIdentity(record.userIdentity);
|
|
66
|
+
}
|
|
67
|
+
function isDynamoDbReplicationUserIdentity(userIdentity) {
|
|
68
|
+
if (!userIdentity || typeof userIdentity !== "object") {
|
|
69
|
+
return false;
|
|
70
|
+
}
|
|
71
|
+
const ui = userIdentity;
|
|
72
|
+
const principalRaw = ui.principalId ?? ui.PrincipalId;
|
|
73
|
+
const typeRaw = ui.type ?? ui.Type;
|
|
74
|
+
const principal = typeof principalRaw === "string" ? principalRaw.toLowerCase() : "";
|
|
75
|
+
const type = typeof typeRaw === "string" ? typeRaw.toLowerCase() : "";
|
|
76
|
+
if (type === "service" && principal === "dynamodb.amazonaws.com") {
|
|
77
|
+
return false;
|
|
78
|
+
}
|
|
79
|
+
const replicationMarkers = [
|
|
80
|
+
"awsservicerolefordynamodbreplication",
|
|
81
|
+
"replication.dynamodb.amazonaws.com"
|
|
82
|
+
];
|
|
83
|
+
return replicationMarkers.some((m) => principal.includes(m));
|
|
84
|
+
}
|
|
85
|
+
function parseCurrentResourceKeys(record) {
|
|
86
|
+
const keys = record.dynamodb?.Keys;
|
|
87
|
+
if (!keys) {
|
|
88
|
+
return null;
|
|
89
|
+
}
|
|
90
|
+
const pkAttr = keys.PK?.S;
|
|
91
|
+
const skAttr = keys.SK?.S;
|
|
92
|
+
if (!pkAttr || skAttr !== CURRENT_SK) {
|
|
93
|
+
return null;
|
|
94
|
+
}
|
|
95
|
+
const m = PK_PATTERN.exec(pkAttr);
|
|
96
|
+
if (!m?.groups) {
|
|
97
|
+
return null;
|
|
98
|
+
}
|
|
99
|
+
const { tenantId, workspaceId, resourceType, resourceId } = m.groups;
|
|
100
|
+
const image = record.eventName === "REMOVE" ? record.dynamodb?.OldImage : record.dynamodb?.NewImage;
|
|
101
|
+
if (!image) {
|
|
102
|
+
return null;
|
|
103
|
+
}
|
|
104
|
+
const plain = dynamodbImageToPlain(image);
|
|
105
|
+
const version = typeof plain.vid === "string" ? plain.vid : null;
|
|
106
|
+
if (!version) {
|
|
107
|
+
return null;
|
|
108
|
+
}
|
|
109
|
+
return { tenantId, workspaceId, resourceType, resourceId, version };
|
|
110
|
+
}
|
|
111
|
+
function partitionToken(value) {
|
|
112
|
+
if (!value || value.trim() === "") {
|
|
113
|
+
return "-";
|
|
114
|
+
}
|
|
115
|
+
return value.replace(/[/\\]/g, "_");
|
|
116
|
+
}
|
|
117
|
+
function buildArchivePayload(record, keys) {
|
|
118
|
+
const newImage = record.dynamodb?.NewImage;
|
|
119
|
+
const oldImage = record.dynamodb?.OldImage;
|
|
120
|
+
const resourceImage = record.eventName === "REMOVE" ? oldImage : newImage;
|
|
121
|
+
const resourcePlain = resourceImage ? dynamodbImageToPlain(resourceImage) : {};
|
|
122
|
+
if (typeof resourcePlain.resource === "string") {
|
|
123
|
+
try {
|
|
124
|
+
resourcePlain.resource = JSON.parse(resourcePlain.resource);
|
|
125
|
+
} catch {
|
|
126
|
+
}
|
|
127
|
+
}
|
|
128
|
+
return {
|
|
129
|
+
eventName: record.eventName,
|
|
130
|
+
archivedAt: (/* @__PURE__ */ new Date()).toISOString(),
|
|
131
|
+
tenantId: keys.tenantId,
|
|
132
|
+
workspaceId: keys.workspaceId,
|
|
133
|
+
resourceType: keys.resourceType,
|
|
134
|
+
resourceId: keys.resourceId,
|
|
135
|
+
version: keys.version,
|
|
136
|
+
resource: resourcePlain
|
|
137
|
+
};
|
|
138
|
+
}
|
|
139
|
+
function handler(event) {
|
|
140
|
+
const records = [];
|
|
141
|
+
const archiveLambdaRegion = process.env.AWS_REGION ?? "";
|
|
142
|
+
for (const rec of event.records) {
|
|
143
|
+
try {
|
|
144
|
+
const payload = Buffer.from(rec.data, "base64").toString("utf8");
|
|
145
|
+
const change = JSON.parse(payload);
|
|
146
|
+
if (shouldDropAsGlobalTableReplicationRecord(change, archiveLambdaRegion)) {
|
|
147
|
+
records.push({
|
|
148
|
+
recordId: rec.recordId,
|
|
149
|
+
result: "Dropped",
|
|
150
|
+
data: rec.data
|
|
151
|
+
});
|
|
152
|
+
continue;
|
|
153
|
+
}
|
|
154
|
+
const keys = parseCurrentResourceKeys(change);
|
|
155
|
+
if (!keys) {
|
|
156
|
+
records.push({
|
|
157
|
+
recordId: rec.recordId,
|
|
158
|
+
result: "Dropped",
|
|
159
|
+
data: rec.data
|
|
160
|
+
});
|
|
161
|
+
continue;
|
|
162
|
+
}
|
|
163
|
+
const archive = buildArchivePayload(change, keys);
|
|
164
|
+
const out = Buffer.from(`${JSON.stringify(archive)}
|
|
165
|
+
`).toString(
|
|
166
|
+
"base64"
|
|
167
|
+
);
|
|
168
|
+
records.push({
|
|
169
|
+
recordId: rec.recordId,
|
|
170
|
+
result: "Ok",
|
|
171
|
+
data: out,
|
|
172
|
+
metadata: {
|
|
173
|
+
partitionKeys: {
|
|
174
|
+
tenantId: partitionToken(keys.tenantId),
|
|
175
|
+
workspaceId: partitionToken(keys.workspaceId),
|
|
176
|
+
resourceType: partitionToken(keys.resourceType),
|
|
177
|
+
resourceId: partitionToken(keys.resourceId),
|
|
178
|
+
version: partitionToken(keys.version)
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
});
|
|
182
|
+
} catch {
|
|
183
|
+
records.push({
|
|
184
|
+
recordId: rec.recordId,
|
|
185
|
+
result: "ProcessingFailed",
|
|
186
|
+
data: rec.data
|
|
187
|
+
});
|
|
188
|
+
}
|
|
189
|
+
}
|
|
190
|
+
return Promise.resolve({ records });
|
|
191
|
+
}
|
|
192
|
+
// Public API of the Firehose archive transform module: the Lambda `handler`
// plus two pure helpers exported so they can be unit-tested directly.
export {
  handler,
  parseCurrentResourceKeys,
  shouldDropAsGlobalTableReplicationRecord
};
|
|
197
|
+
//# sourceMappingURL=firehose-archive-transform.handler.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/components/dynamodb/firehose-archive-transform.handler.ts"],"sourcesContent":["import type { AttributeValue } from \"@aws-sdk/client-dynamodb\";\nimport type {\n FirehoseTransformationEvent,\n FirehoseTransformationResult,\n FirehoseTransformationResultRecord,\n} from \"aws-lambda\";\n\nfunction dynamodbValueToJs(av: AttributeValue): unknown {\n if (av.S !== undefined) {\n return av.S;\n }\n if (av.N !== undefined) {\n return av.N.includes(\".\")\n ? Number.parseFloat(av.N)\n : Number.parseInt(av.N, 10);\n }\n if (av.BOOL !== undefined) {\n return av.BOOL;\n }\n if (av.NULL !== undefined) {\n return null;\n }\n if (av.M !== undefined) {\n return dynamodbImageToPlain(av.M);\n }\n if (av.L !== undefined) {\n return av.L.map((x) => dynamodbValueToJs(x));\n }\n if (av.SS !== undefined) {\n return av.SS;\n }\n if (av.NS !== undefined) {\n return av.NS.map((n) =>\n n.includes(\".\") ? Number.parseFloat(n) : Number.parseInt(n, 10),\n );\n }\n return undefined;\n}\n\nfunction dynamodbImageToPlain(\n image: Record<string, AttributeValue>,\n): Record<string, unknown> {\n const out: Record<string, unknown> = {};\n for (const [k, v] of Object.entries(image)) {\n out[k] = dynamodbValueToJs(v);\n }\n return out;\n}\n\n/**\n * Firehose data-transformation handler: filters DynamoDB change records to\n * current FHIR resource items (SK = CURRENT, TID#…#WID#…#RT#…#ID#… PK),\n * writes archive JSON to S3 via Firehose, and sets dynamic partition keys\n * tenantId/workspaceId/resourceType/resourceId/version per ADR 2026-03-11-02.\n */\n\nconst CURRENT_SK = \"CURRENT\";\nconst PK_PATTERN =\n /^TID#(?<tenantId>[^#]+)#WID#(?<workspaceId>[^#]+)#RT#(?<resourceType>[^#]+)#ID#(?<resourceId>.+)$/;\n\nexport interface DynamoDbStreamKinesisRecord {\n eventName?: string;\n userIdentity?: unknown;\n dynamodb?: {\n Keys?: Record<string, AttributeValue>;\n NewImage?: Record<string, AttributeValue>;\n OldImage?: Record<string, AttributeValue>;\n };\n}\n\n/** 
DynamoDB-managed attribute on global table items (see AWS Global Tables legacy / replication docs). */\nconst AWS_REP_UPDATE_REGION = \"aws:rep:updateregion\";\n\nfunction getDynamoDbStringAttr(\n image: Record<string, AttributeValue> | undefined,\n name: string,\n): string | undefined {\n if (!image) {\n return undefined;\n }\n const av = image[name];\n if (typeof av?.S === \"string\" && av.S.trim() !== \"\") {\n return av.S.trim();\n }\n return undefined;\n}\n\nfunction primaryImageForReplicationCheck(\n record: DynamoDbStreamKinesisRecord,\n): Record<string, AttributeValue> | undefined {\n if (record.eventName === \"REMOVE\") {\n return record.dynamodb?.OldImage;\n }\n return record.dynamodb?.NewImage;\n}\n\n/**\n * Returns true when this stream/Kinesis record should not be archived because it\n * represents a **replica-side application** of a global-table change (the logical\n * write originated in another Region).\n *\n * - If `aws:rep:updateregion` is present on the item image and differs from\n * `archiveLambdaRegion`, the change was replicated into this Region (archive\n * only in the Region that matches `aws:rep:updateregion`).\n * - Otherwise, if `userIdentity` matches the DynamoDB replication service SLR,\n * treat as replication. **Excluded:** TTL deletes (`type` Service and\n * `principalId` `dynamodb.amazonaws.com`) per AWS stream Identity docs.\n *\n * For MREC global tables version 2019.11.21, AWS documents that stream records\n * may not carry distinguishable metadata; the recommended approach is a custom\n * “write region” attribute on items. 
If neither that attribute nor\n * `aws:rep:updateregion` nor replication `userIdentity` applies, this function\n * returns false (no drop)—duplicate archives are possible if identical pipelines\n * run in every Region without those signals.\n */\nexport function shouldDropAsGlobalTableReplicationRecord(\n record: DynamoDbStreamKinesisRecord,\n archiveLambdaRegion: string,\n): boolean {\n const image = primaryImageForReplicationCheck(record);\n const updateRegion = getDynamoDbStringAttr(image, AWS_REP_UPDATE_REGION);\n if (\n updateRegion &&\n archiveLambdaRegion &&\n updateRegion !== archiveLambdaRegion\n ) {\n return true;\n }\n\n return isDynamoDbReplicationUserIdentity(record.userIdentity);\n}\n\nfunction isDynamoDbReplicationUserIdentity(userIdentity: unknown): boolean {\n if (!userIdentity || typeof userIdentity !== \"object\") {\n return false;\n }\n const ui = userIdentity as Record<string, unknown>;\n const principalRaw = ui.principalId ?? ui.PrincipalId;\n const typeRaw = ui.type ?? ui.Type;\n const principal =\n typeof principalRaw === \"string\" ? principalRaw.toLowerCase() : \"\";\n const type = typeof typeRaw === \"string\" ? 
typeRaw.toLowerCase() : \"\";\n\n if (type === \"service\" && principal === \"dynamodb.amazonaws.com\") {\n return false;\n }\n\n const replicationMarkers = [\n \"awsservicerolefordynamodbreplication\",\n \"replication.dynamodb.amazonaws.com\",\n ];\n return replicationMarkers.some((m) => principal.includes(m));\n}\n\nexport function parseCurrentResourceKeys(record: DynamoDbStreamKinesisRecord): {\n tenantId: string;\n workspaceId: string;\n resourceType: string;\n resourceId: string;\n version: string;\n} | null {\n const keys = record.dynamodb?.Keys;\n if (!keys) {\n return null;\n }\n const pkAttr = keys.PK?.S;\n const skAttr = keys.SK?.S;\n if (!pkAttr || skAttr !== CURRENT_SK) {\n return null;\n }\n const m = PK_PATTERN.exec(pkAttr);\n if (!m?.groups) {\n return null;\n }\n const { tenantId, workspaceId, resourceType, resourceId } = m.groups;\n const image =\n record.eventName === \"REMOVE\"\n ? record.dynamodb?.OldImage\n : record.dynamodb?.NewImage;\n if (!image) {\n return null;\n }\n const plain = dynamodbImageToPlain(image as Record<string, AttributeValue>);\n const version = typeof plain.vid === \"string\" ? plain.vid : null;\n if (!version) {\n return null;\n }\n return { tenantId, workspaceId, resourceType, resourceId, version };\n}\n\nfunction partitionToken(value: string): string {\n if (!value || value.trim() === \"\") {\n return \"-\";\n }\n return value.replace(/[/\\\\]/g, \"_\");\n}\n\nfunction buildArchivePayload(\n record: DynamoDbStreamKinesisRecord,\n keys: ReturnType<typeof parseCurrentResourceKeys>,\n): Record<string, unknown> {\n const newImage = record.dynamodb?.NewImage;\n const oldImage = record.dynamodb?.OldImage;\n const resourceImage = record.eventName === \"REMOVE\" ? oldImage : newImage;\n const resourcePlain = resourceImage\n ? 
dynamodbImageToPlain(resourceImage as Record<string, AttributeValue>)\n : {};\n\n if (typeof resourcePlain.resource === \"string\") {\n try {\n resourcePlain.resource = JSON.parse(resourcePlain.resource) as unknown;\n } catch {\n /* keep raw string if not valid JSON */\n }\n }\n\n return {\n eventName: record.eventName,\n archivedAt: new Date().toISOString(),\n tenantId: keys!.tenantId,\n workspaceId: keys!.workspaceId,\n resourceType: keys!.resourceType,\n resourceId: keys!.resourceId,\n version: keys!.version,\n resource: resourcePlain,\n };\n}\n\nexport function handler(\n event: FirehoseTransformationEvent,\n): Promise<FirehoseTransformationResult> {\n const records: FirehoseTransformationResultRecord[] = [];\n const archiveLambdaRegion = process.env.AWS_REGION ?? \"\";\n\n for (const rec of event.records) {\n try {\n const payload = Buffer.from(rec.data, \"base64\").toString(\"utf8\");\n const change = JSON.parse(payload) as DynamoDbStreamKinesisRecord;\n\n if (\n shouldDropAsGlobalTableReplicationRecord(change, archiveLambdaRegion)\n ) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const keys = parseCurrentResourceKeys(change);\n\n if (!keys) {\n records.push({\n recordId: rec.recordId,\n result: \"Dropped\",\n data: rec.data,\n });\n continue;\n }\n\n const archive = buildArchivePayload(change, keys);\n const out = Buffer.from(`${JSON.stringify(archive)}\\n`).toString(\n \"base64\",\n );\n\n records.push({\n recordId: rec.recordId,\n result: \"Ok\",\n data: out,\n metadata: {\n partitionKeys: {\n tenantId: partitionToken(keys.tenantId),\n workspaceId: partitionToken(keys.workspaceId),\n resourceType: partitionToken(keys.resourceType),\n resourceId: partitionToken(keys.resourceId),\n version: partitionToken(keys.version),\n },\n },\n });\n } catch {\n records.push({\n recordId: rec.recordId,\n result: \"ProcessingFailed\",\n data: rec.data,\n });\n }\n }\n\n return Promise.resolve({ records 
});\n}\n"],"mappings":";;;AAOA,SAAS,kBAAkB,IAA6B;AACtD,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,SAAS,GAAG,IACpB,OAAO,WAAW,GAAG,CAAC,IACtB,OAAO,SAAS,GAAG,GAAG,EAAE;AAAA,EAC9B;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,SAAS,QAAW;AACzB,WAAO;AAAA,EACT;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,qBAAqB,GAAG,CAAC;AAAA,EAClC;AACA,MAAI,GAAG,MAAM,QAAW;AACtB,WAAO,GAAG,EAAE,IAAI,CAAC,MAAM,kBAAkB,CAAC,CAAC;AAAA,EAC7C;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG;AAAA,EACZ;AACA,MAAI,GAAG,OAAO,QAAW;AACvB,WAAO,GAAG,GAAG;AAAA,MAAI,CAAC,MAChB,EAAE,SAAS,GAAG,IAAI,OAAO,WAAW,CAAC,IAAI,OAAO,SAAS,GAAG,EAAE;AAAA,IAChE;AAAA,EACF;AACA,SAAO;AACT;AAEA,SAAS,qBACP,OACyB;AACzB,QAAM,MAA+B,CAAC;AACtC,aAAW,CAAC,GAAG,CAAC,KAAK,OAAO,QAAQ,KAAK,GAAG;AAC1C,QAAI,CAAC,IAAI,kBAAkB,CAAC;AAAA,EAC9B;AACA,SAAO;AACT;AASA,IAAM,aAAa;AACnB,IAAM,aACJ;AAaF,IAAM,wBAAwB;AAE9B,SAAS,sBACP,OACA,MACoB;AACpB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,KAAK,MAAM,IAAI;AACrB,MAAI,OAAO,IAAI,MAAM,YAAY,GAAG,EAAE,KAAK,MAAM,IAAI;AACnD,WAAO,GAAG,EAAE,KAAK;AAAA,EACnB;AACA,SAAO;AACT;AAEA,SAAS,gCACP,QAC4C;AAC5C,MAAI,OAAO,cAAc,UAAU;AACjC,WAAO,OAAO,UAAU;AAAA,EAC1B;AACA,SAAO,OAAO,UAAU;AAC1B;AAqBO,SAAS,yCACd,QACA,qBACS;AACT,QAAM,QAAQ,gCAAgC,MAAM;AACpD,QAAM,eAAe,sBAAsB,OAAO,qBAAqB;AACvE,MACE,gBACA,uBACA,iBAAiB,qBACjB;AACA,WAAO;AAAA,EACT;AAEA,SAAO,kCAAkC,OAAO,YAAY;AAC9D;AAEA,SAAS,kCAAkC,cAAgC;AACzE,MAAI,CAAC,gBAAgB,OAAO,iBAAiB,UAAU;AACrD,WAAO;AAAA,EACT;AACA,QAAM,KAAK;AACX,QAAM,eAAe,GAAG,eAAe,GAAG;AAC1C,QAAM,UAAU,GAAG,QAAQ,GAAG;AAC9B,QAAM,YACJ,OAAO,iBAAiB,WAAW,aAAa,YAAY,IAAI;AAClE,QAAM,OAAO,OAAO,YAAY,WAAW,QAAQ,YAAY,IAAI;AAEnE,MAAI,SAAS,aAAa,cAAc,0BAA0B;AAChE,WAAO;AAAA,EACT;AAEA,QAAM,qBAAqB;AAAA,IACzB;AAAA,IACA;AAAA,EACF;AACA,SAAO,mBAAmB,KAAK,CAAC,MAAM,UAAU,SAAS,CAAC,CAAC;AAC7D;AAEO,SAAS,yBAAyB,QAMhC;AACP,QAAM,OAAO,OAAO,UAAU;AAC9B,MAAI,CAAC,MAAM;AACT,WAAO;AAAA,EACT;AACA,QAAM,SAAS,KAAK,IAAI;AACxB,QAAM,SAAS,KAAK,IAAI;AACxB,MAAI,CAAC,UAAU,WAAW,YAAY;AACpC,WAAO;AAAA,EACT;AACA,QAAM,IAAI,
WAAW,KAAK,MAAM;AAChC,MAAI,CAAC,GAAG,QAAQ;AACd,WAAO;AAAA,EACT;AACA,QAAM,EAAE,UAAU,aAAa,cAAc,WAAW,IAAI,EAAE;AAC9D,QAAM,QACJ,OAAO,cAAc,WACjB,OAAO,UAAU,WACjB,OAAO,UAAU;AACvB,MAAI,CAAC,OAAO;AACV,WAAO;AAAA,EACT;AACA,QAAM,QAAQ,qBAAqB,KAAuC;AAC1E,QAAM,UAAU,OAAO,MAAM,QAAQ,WAAW,MAAM,MAAM;AAC5D,MAAI,CAAC,SAAS;AACZ,WAAO;AAAA,EACT;AACA,SAAO,EAAE,UAAU,aAAa,cAAc,YAAY,QAAQ;AACpE;AAEA,SAAS,eAAe,OAAuB;AAC7C,MAAI,CAAC,SAAS,MAAM,KAAK,MAAM,IAAI;AACjC,WAAO;AAAA,EACT;AACA,SAAO,MAAM,QAAQ,UAAU,GAAG;AACpC;AAEA,SAAS,oBACP,QACA,MACyB;AACzB,QAAM,WAAW,OAAO,UAAU;AAClC,QAAM,WAAW,OAAO,UAAU;AAClC,QAAM,gBAAgB,OAAO,cAAc,WAAW,WAAW;AACjE,QAAM,gBAAgB,gBAClB,qBAAqB,aAA+C,IACpE,CAAC;AAEL,MAAI,OAAO,cAAc,aAAa,UAAU;AAC9C,QAAI;AACF,oBAAc,WAAW,KAAK,MAAM,cAAc,QAAQ;AAAA,IAC5D,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,SAAO;AAAA,IACL,WAAW,OAAO;AAAA,IAClB,aAAY,oBAAI,KAAK,GAAE,YAAY;AAAA,IACnC,UAAU,KAAM;AAAA,IAChB,aAAa,KAAM;AAAA,IACnB,cAAc,KAAM;AAAA,IACpB,YAAY,KAAM;AAAA,IAClB,SAAS,KAAM;AAAA,IACf,UAAU;AAAA,EACZ;AACF;AAEO,SAAS,QACd,OACuC;AACvC,QAAM,UAAgD,CAAC;AACvD,QAAM,sBAAsB,QAAQ,IAAI,cAAc;AAEtD,aAAW,OAAO,MAAM,SAAS;AAC/B,QAAI;AACF,YAAM,UAAU,OAAO,KAAK,IAAI,MAAM,QAAQ,EAAE,SAAS,MAAM;AAC/D,YAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,UACE,yCAAyC,QAAQ,mBAAmB,GACpE;AACA,gBAAQ,KAAK;AAAA,UACX,UAAU,IAAI;AAAA,UACd,QAAQ;AAAA,UACR,MAAM,IAAI;AAAA,QACZ,CAAC;AACD;AAAA,MACF;AAEA,YAAM,OAAO,yBAAyB,MAAM;AAE5C,UAAI,CAAC,MAAM;AACT,gBAAQ,KAAK;AAAA,UACX,UAAU,IAAI;AAAA,UACd,QAAQ;AAAA,UACR,MAAM,IAAI;AAAA,QACZ,CAAC;AACD;AAAA,MACF;AAEA,YAAM,UAAU,oBAAoB,QAAQ,IAAI;AAChD,YAAM,MAAM,OAAO,KAAK,GAAG,KAAK,UAAU,OAAO,CAAC;AAAA,CAAI,EAAE;AAAA,QACtD;AAAA,MACF;AAEA,cAAQ,KAAK;AAAA,QACX,UAAU,IAAI;AAAA,QACd,QAAQ;AAAA,QACR,MAAM;AAAA,QACN,UAAU;AAAA,UACR,eAAe;AAAA,YACb,UAAU,eAAe,KAAK,QAAQ;AAAA,YACtC,aAAa,eAAe,KAAK,WAAW;AAAA,YAC5C,cAAc,eAAe,KAAK,YAAY;AAAA,YAC9C,YAAY,eAAe,KAAK,UAAU;AAAA,YAC1C,SAAS,eAAe,KAAK,OAAO;AAAA,UACtC;AAAA,QACF;AAAA,MACF,CAAC;AAAA,IACH,QAAQ;AACN,cAAQ,KAAK;AAAA,QACX,UAAU,IAAI;AAAA,QACd,QAAQ;AAAA,QACR,MAAM,IAAI;AAAA,MACZ,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO,QAAQ,QAAQ,EAAE,
QAAQ,CAAC;AACpC;","names":[]}
|
package/lib/index.d.mts
CHANGED
|
@@ -7,12 +7,15 @@ import { GraphqlApi, IGraphqlApi, GraphqlApiProps } from 'aws-cdk-lib/aws-appsyn
|
|
|
7
7
|
import { UserPool, UserPoolProps, UserPoolClient, UserPoolClientProps, UserPoolDomain, UserPoolDomainProps, IUserPool, IUserPoolClient, IUserPoolDomain } from 'aws-cdk-lib/aws-cognito';
|
|
8
8
|
import { Key, KeyProps, IKey } from 'aws-cdk-lib/aws-kms';
|
|
9
9
|
import { NodejsFunction } from 'aws-cdk-lib/aws-lambda-nodejs';
|
|
10
|
+
import * as kinesis from 'aws-cdk-lib/aws-kinesis';
|
|
11
|
+
import * as kinesisfirehose from 'aws-cdk-lib/aws-kinesisfirehose';
|
|
12
|
+
import * as s3 from 'aws-cdk-lib/aws-s3';
|
|
13
|
+
import { IBucket, BucketProps } from 'aws-cdk-lib/aws-s3';
|
|
10
14
|
import { Table, TableProps, ITable } from 'aws-cdk-lib/aws-dynamodb';
|
|
11
15
|
import { EventBus, EventBusProps, IEventBus } from 'aws-cdk-lib/aws-events';
|
|
12
16
|
import { HostedZone, HostedZoneProps, IHostedZone, HostedZoneAttributes } from 'aws-cdk-lib/aws-route53';
|
|
13
17
|
import { StringParameterProps, StringParameter } from 'aws-cdk-lib/aws-ssm';
|
|
14
18
|
import { Distribution, DistributionProps } from 'aws-cdk-lib/aws-cloudfront';
|
|
15
|
-
import { IBucket, BucketProps } from 'aws-cdk-lib/aws-s3';
|
|
16
19
|
import { IFunction } from 'aws-cdk-lib/aws-lambda';
|
|
17
20
|
|
|
18
21
|
/**
|
|
@@ -433,6 +436,31 @@ declare class PreTokenGenerationLambda extends Construct {
|
|
|
433
436
|
constructor(scope: Construct);
|
|
434
437
|
}
|
|
435
438
|
|
|
439
|
+
/**
 * Props for `DataStoreHistoricalArchive`.
 */
interface DataStoreHistoricalArchiveProps {
    /**
     * Kinesis stream that receives DynamoDB item-level changes (table Kinesis destination).
     */
    readonly kinesisStream: kinesis.IStream;
    /**
     * Removal policy for the archive bucket and related resources.
     */
    readonly removalPolicy: RemovalPolicy;
    /**
     * Short hash for unique stream/bucket naming within the deployment.
     */
    readonly stackHash: string;
}
|
|
453
|
+
/**
 * DynamoDB change stream → Kinesis → Firehose → S3 with a transform Lambda for
 * scope filtering and dynamic partitioning (ADR 2026-03-11-02).
 */
declare class DataStoreHistoricalArchive extends Construct {
    /** S3 bucket receiving the archived change documents. */
    readonly archiveBucket: s3.Bucket;
    /** Firehose delivery stream in the Kinesis → Firehose → S3 pipeline. */
    readonly deliveryStream: kinesisfirehose.IDeliveryStream;
    /** Transform Lambda used for scope filtering and dynamic partitioning. */
    readonly transformFunction: NodejsFunction;
    constructor(scope: Construct, id: string, props: DataStoreHistoricalArchiveProps);
}
|
|
463
|
+
|
|
436
464
|
/**
|
|
437
465
|
* @see sites/www-docs/content/packages/@openhi/constructs/components/dynamodb/dynamo-db-data-store.md
|
|
438
466
|
*/
|
|
@@ -456,6 +484,9 @@ interface DynamoDbDataStoreProps extends Omit<TableProps, "tableName" | "removal
|
|
|
456
484
|
*
|
|
457
485
|
* Primary key: PK (String), SK (String).
|
|
458
486
|
* GSIs: GSI1 (reverse reference), GSI2 (identifier lookup), GSI3 (facility ops), GSI4 (resource type list).
|
|
487
|
+
*
|
|
488
|
+
* For historical archive to S3, pass `kinesisStream` and `stream` (e.g.
|
|
489
|
+
* `StreamViewType.NEW_AND_OLD_IMAGES`) on the table props per ADR 2026-03-11-02.
|
|
459
490
|
*/
|
|
460
491
|
declare class DynamoDbDataStore extends Table {
|
|
461
492
|
constructor(scope: Construct, id: string, props?: DynamoDbDataStoreProps);
|
|
@@ -896,6 +927,14 @@ declare class OpenHiDataService extends OpenHiService {
|
|
|
896
927
|
* from other stacks to obtain an ITable reference by name.
|
|
897
928
|
*/
|
|
898
929
|
readonly dataStore: ITable;
|
|
930
|
+
/**
|
|
931
|
+
* Kinesis stream receiving DynamoDB item-level changes for the data store table.
|
|
932
|
+
*/
|
|
933
|
+
readonly dataStoreChangeStream: kinesis.IStream;
|
|
934
|
+
/**
|
|
935
|
+
* S3 historical archive pipeline (Kinesis → Firehose → S3) per ADR 2026-03-11-02.
|
|
936
|
+
*/
|
|
937
|
+
readonly dataStoreHistoricalArchive: DataStoreHistoricalArchive;
|
|
899
938
|
constructor(ohEnv: OpenHiEnvironment, props?: OpenHiDataServiceProps);
|
|
900
939
|
/**
|
|
901
940
|
* Creates the data event bus.
|
|
@@ -936,4 +975,4 @@ declare class OpenHiGraphqlService extends OpenHiService {
|
|
|
936
975
|
protected createRootGraphqlApi(): RootGraphqlApi;
|
|
937
976
|
}
|
|
938
977
|
|
|
939
|
-
export { type BuildParameterNameProps, ChildHostedZone, type ChildHostedZoneProps, CognitoUserPool, CognitoUserPoolClient, CognitoUserPoolDomain, CognitoUserPoolKmsKey, DataEventBus, DiscoverableStringParameter, type DiscoverableStringParameterProps, DynamoDbDataStore, type DynamoDbDataStoreProps, OpenHiApp, type OpenHiAppProps, OpenHiAuthService, type OpenHiAuthServiceProps, OpenHiDataService, type OpenHiDataServiceProps, OpenHiEnvironment, type OpenHiEnvironmentProps, OpenHiGlobalService, type OpenHiGlobalServiceProps, OpenHiGraphqlService, type OpenHiGraphqlServiceProps, OpenHiRestApiService, type OpenHiRestApiServiceProps, OpenHiService, type OpenHiServiceProps, type OpenHiServiceType, OpenHiStage, type OpenHiStageProps, OpsEventBus, PreTokenGenerationLambda, REST_API_BASE_URL_SSM_NAME, RootGraphqlApi, type RootGraphqlApiProps, RootHostedZone, RootHttpApi, type RootHttpApiProps, RootWildcardCertificate, STATIC_HOSTING_SERVICE_TYPE, StaticHosting, type StaticHostingProps, getDynamoDbDataStoreTableName };
|
|
978
|
+
export { type BuildParameterNameProps, ChildHostedZone, type ChildHostedZoneProps, CognitoUserPool, CognitoUserPoolClient, CognitoUserPoolDomain, CognitoUserPoolKmsKey, DataEventBus, DataStoreHistoricalArchive, type DataStoreHistoricalArchiveProps, DiscoverableStringParameter, type DiscoverableStringParameterProps, DynamoDbDataStore, type DynamoDbDataStoreProps, OpenHiApp, type OpenHiAppProps, OpenHiAuthService, type OpenHiAuthServiceProps, OpenHiDataService, type OpenHiDataServiceProps, OpenHiEnvironment, type OpenHiEnvironmentProps, OpenHiGlobalService, type OpenHiGlobalServiceProps, OpenHiGraphqlService, type OpenHiGraphqlServiceProps, OpenHiRestApiService, type OpenHiRestApiServiceProps, OpenHiService, type OpenHiServiceProps, type OpenHiServiceType, OpenHiStage, type OpenHiStageProps, OpsEventBus, PreTokenGenerationLambda, REST_API_BASE_URL_SSM_NAME, RootGraphqlApi, type RootGraphqlApiProps, RootHostedZone, RootHttpApi, type RootHttpApiProps, RootWildcardCertificate, STATIC_HOSTING_SERVICE_TYPE, StaticHosting, type StaticHostingProps, getDynamoDbDataStoreTableName };
|