serverless-plugin-module-registry 1.0.12 → 1.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,371 @@
1
+ import { DynamoDBStreamEvent, DynamoDBRecord } from 'aws-lambda'
2
+ import { SQSClient, SendMessageCommand } from '@aws-sdk/client-sqs'
3
+ import { EventBridgeClient, PutEventsCommand } from '@aws-sdk/client-eventbridge'
4
+ import { createLogger } from './shared/logger'
5
+ import { RoleUpdateMessage, FeatureChangeEvent } from './shared/types'
6
+
7
// Module-scoped singletons: constructed once per Lambda container and
// reused across warm invocations (standard AWS SDK client pattern).
const logger = createLogger('stream-processor')
const sqsClient = new SQSClient({})
const eventBridgeClient = new EventBridgeClient({})
10
+
11
+ /**
12
+ * Parse SK to extract module name and feature ID
13
+ * SK Format: MODULE#{moduleName}#FEATURE#{featureId}
14
+ */
15
+ function parseModuleAndFeature(sk: string): { moduleName: string; featureId: string } | null {
16
+ const parts = sk.split('#')
17
+ if (parts.length !== 4 || parts[0] !== 'MODULE' || parts[2] !== 'FEATURE') {
18
+ return null
19
+ }
20
+ return {
21
+ moduleName: parts[1],
22
+ featureId: parts[3],
23
+ }
24
+ }
25
+
26
+ /**
27
+ * Extract role name from PK
28
+ * PK Format: ROLE#{roleName}
29
+ */
30
+ function extractRoleName(pk: string): string {
31
+ return pk.replace('ROLE#', '')
32
+ }
33
+
34
+ /**
35
+ * Filter DynamoDB Stream records for ROLE# items
36
+ */
37
+ function filterRoleRecords(records: DynamoDBRecord[]): DynamoDBRecord[] {
38
+ return records.filter(record => {
39
+ const pk = record.dynamodb?.Keys?.PK?.S || record.dynamodb?.Keys?.pk?.S
40
+ return pk?.startsWith('ROLE#') ?? false
41
+ })
42
+ }
43
+
44
+ /**
45
+ * Filter DynamoDB Stream records for FEATURE# items
46
+ * PK Format: MODULE#{moduleName}
47
+ * SK Format: Must contain #FEATURE# pattern
48
+ */
49
+ function filterFeatureRecords(records: DynamoDBRecord[]): DynamoDBRecord[] {
50
+ return records.filter(record => {
51
+ const pk = record.dynamodb?.Keys?.PK?.S || record.dynamodb?.Keys?.pk?.S
52
+ const sk = record.dynamodb?.Keys?.SK?.S || record.dynamodb?.Keys?.sk?.S
53
+
54
+ // PK must start with MODULE# (not ROLE#)
55
+ const isModulePK = pk?.startsWith('MODULE#') ?? false
56
+ // SK must contain #FEATURE# pattern
57
+ const isFeatureSK = sk?.includes('#FEATURE#') ?? false
58
+
59
+ return isModulePK && isFeatureSK
60
+ })
61
+ }
62
+
63
+ /**
64
+ * Parse feature event from DynamoDB record
65
+ * PK Format: MODULE#{moduleName}
66
+ * SK Format: MODULE#*#FEATURE#{featureId} or similar patterns containing #FEATURE#
67
+ * Returns null for invalid patterns
68
+ */
69
+ function parseFeatureEvent(record: DynamoDBRecord): FeatureChangeEvent | null {
70
+ try {
71
+ // Extract PK (moduleName)
72
+ const pk = record.dynamodb?.Keys?.PK?.S || record.dynamodb?.Keys?.pk?.S
73
+ if (!pk || !pk.startsWith('MODULE#')) {
74
+ logger.warn('Invalid PK format for feature record', { eventID: record.eventID })
75
+ return null
76
+ }
77
+
78
+ const moduleName = pk.replace('MODULE#', '')
79
+ if (!moduleName) {
80
+ logger.warn('Empty moduleName in feature record', { eventID: record.eventID })
81
+ return null
82
+ }
83
+
84
+ // Extract SK from NewImage (for INSERT/MODIFY) or OldImage (for REMOVE)
85
+ const image = record.dynamodb?.NewImage || record.dynamodb?.OldImage
86
+ const sk = image?.SK?.S || image?.sk?.S
87
+
88
+ if (!sk || !sk.includes('#FEATURE#')) {
89
+ logger.warn('Invalid SK format for feature record', { eventID: record.eventID, sk })
90
+ return null
91
+ }
92
+
93
+ // Extract featureId from SK
94
+ // SK patterns can vary: MODULE#*#FEATURE#{featureId} or other patterns containing #FEATURE#
95
+ const featureMatch = sk.match(/#FEATURE#([^#]+)/)
96
+ if (!featureMatch || !featureMatch[1]) {
97
+ logger.warn('Failed to extract featureId from SK', { eventID: record.eventID, sk })
98
+ return null
99
+ }
100
+
101
+ const featureId = featureMatch[1]
102
+
103
+ // Extract eventType
104
+ const eventType = record.eventName as 'INSERT' | 'MODIFY' | 'REMOVE'
105
+
106
+ return {
107
+ moduleName,
108
+ featureId,
109
+ eventType,
110
+ timestamp: new Date().toISOString(),
111
+ }
112
+ } catch (error) {
113
+ logger.error('Error parsing feature event', error as Error, { eventID: record.eventID })
114
+ return null
115
+ }
116
+ }
117
+
118
+ /**
119
+ * Deduplicate messages by roleName + moduleName
120
+ * Keep only the latest event for each combination
121
+ */
122
+ function deduplicateMessages(messages: RoleUpdateMessage[]): RoleUpdateMessage[] {
123
+ const messageMap = new Map<string, RoleUpdateMessage>()
124
+
125
+ for (const message of messages) {
126
+ const key = `${message.roleName}:${message.moduleName}`
127
+ const existing = messageMap.get(key)
128
+
129
+ // Keep the latest message (comparing timestamps)
130
+ if (!existing || new Date(message.timestamp) > new Date(existing.timestamp)) {
131
+ messageMap.set(key, message)
132
+ }
133
+ }
134
+
135
+ return Array.from(messageMap.values())
136
+ }
137
+
138
+ /**
139
+ * Publish feature change events to EventBridge
140
+ * Wrapped in try/catch to isolate failures and prevent breaking ROLE# processing
141
+ */
142
+ async function publishFeatureChangeEvents(
143
+ events: FeatureChangeEvent[],
144
+ eventBusName: string
145
+ ): Promise<number> {
146
+ if (events.length === 0) {
147
+ return 0
148
+ }
149
+
150
+ let publishedCount = 0
151
+
152
+ try {
153
+ // Batch publish events to EventBridge (max 10 entries per PutEvents call)
154
+ const batchSize = 10
155
+ for (let i = 0; i < events.length; i += batchSize) {
156
+ const batch = events.slice(i, i + batchSize)
157
+
158
+ const putEventsCommand = new PutEventsCommand({
159
+ Entries: batch.map(event => ({
160
+ Source: 'module-registry',
161
+ DetailType: 'FeatureChange',
162
+ Detail: JSON.stringify({
163
+ moduleName: event.moduleName,
164
+ featureId: event.featureId,
165
+ eventType: event.eventType,
166
+ timestamp: event.timestamp,
167
+ }),
168
+ EventBusName: eventBusName,
169
+ })),
170
+ })
171
+
172
+ await eventBridgeClient.send(putEventsCommand)
173
+ publishedCount += batch.length
174
+
175
+ for (const event of batch) {
176
+ logger.info('Published feature change event', {
177
+ moduleName: event.moduleName,
178
+ featureId: event.featureId,
179
+ eventType: event.eventType,
180
+ })
181
+ }
182
+ }
183
+ } catch (error) {
184
+ // Log error but don't throw - feature event publishing failures should not break ROLE# processing
185
+ logger.error('Failed to publish feature change events to EventBridge', error as Error, {
186
+ eventCount: events.length,
187
+ publishedCount,
188
+ })
189
+ }
190
+
191
+ return publishedCount
192
+ }
193
+
194
/**
 * Main Lambda handler for processing DynamoDB Stream events.
 *
 * Runs two independent pipelines per invocation:
 *   1. ROLE# records  -> deduplicated RoleUpdateMessage payloads sent to an
 *      SQS FIFO queue (grouped per role via MessageGroupId).
 *   2. FEATURE# records -> FeatureChangeEvent payloads published to
 *      EventBridge; this pipeline is wrapped so its failures are logged
 *      and never abort pipeline 1.
 *
 * Environment:
 *   QUEUE_URL      - required; the handler throws when missing.
 *   EVENT_BUS_NAME - optional; feature processing is skipped when missing.
 */
export async function handler(event: DynamoDBStreamEvent): Promise<void> {
  const startTime = Date.now()
  const QUEUE_URL = process.env.QUEUE_URL

  logger.info('Processing DynamoDB Stream event', {
    recordCount: event.Records.length,
  })

  if (!QUEUE_URL) {
    logger.error('QUEUE_URL environment variable not set')
    throw new Error('QUEUE_URL environment variable not set')
  }

  // Filter for ROLE# records only
  const roleRecords = filterRoleRecords(event.Records)

  logger.info('Filtered ROLE# records', {
    originalCount: event.Records.length,
    filteredCount: roleRecords.length,
  })

  // Build messages from filtered records; each record is processed in its
  // own try/catch so one bad record cannot poison the batch.
  const messages: RoleUpdateMessage[] = []

  for (const record of roleRecords) {
    try {
      // Extract PK (role name); both PK/pk attribute casings are accepted
      const pk = record.dynamodb?.Keys?.PK?.S || record.dynamodb?.Keys?.pk?.S
      if (!pk) {
        logger.warn('Record missing PK', { eventID: record.eventID })
        continue
      }

      const roleName = extractRoleName(pk)

      // Extract SK from NewImage (for INSERT/MODIFY) or OldImage (for REMOVE)
      const image = record.dynamodb?.NewImage || record.dynamodb?.OldImage
      const sk = image?.SK?.S || image?.sk?.S

      if (!sk) {
        logger.warn('Record missing SK', { eventID: record.eventID, roleName })
        continue
      }

      // Parse module name from SK; malformed SKs are skipped with a warning
      const parsed = parseModuleAndFeature(sk)
      if (!parsed) {
        logger.warn('Failed to parse SK', { eventID: record.eventID, sk })
        continue
      }

      // Build message payload; timestamp is processing time, used later by
      // deduplicateMessages to pick the latest event per role+module
      const message: RoleUpdateMessage = {
        roleName,
        moduleName: parsed.moduleName,
        eventType: record.eventName as 'INSERT' | 'MODIFY' | 'REMOVE',
        timestamp: new Date().toISOString(),
      }

      messages.push(message)

      logger.info('Parsed Stream record', {
        eventID: record.eventID,
        roleName,
        moduleName: parsed.moduleName,
        featureId: parsed.featureId,
        eventType: record.eventName,
      })
    } catch (error) {
      logger.error('Error processing record', error as Error, {
        eventID: record.eventID,
      })
      // Continue processing other records
    }
  }

  // Deduplicate messages: one message per role+module combination
  const originalCount = messages.length
  const dedupedMessages = deduplicateMessages(messages)

  logger.info('Deduplicated messages', {
    originalCount,
    deduplicatedCount: dedupedMessages.length,
  })

  // Publish messages to SQS, sequentially; individual failures are logged
  // and the remaining messages are still attempted
  let sqsPublishedCount = 0
  if (dedupedMessages.length > 0) {
    for (const message of dedupedMessages) {
      try {
        await sqsClient.send(new SendMessageCommand({
          QueueUrl: QUEUE_URL,
          MessageBody: JSON.stringify(message),
          MessageGroupId: message.roleName, // For FIFO queues
          MessageDeduplicationId: `${message.roleName}-${message.moduleName}-${message.timestamp}`,
        }))

        sqsPublishedCount++

        logger.info('Published SQS message', {
          roleName: message.roleName,
          moduleName: message.moduleName,
          eventType: message.eventType,
        })
      } catch (error) {
        logger.error('Failed to publish SQS message', error as Error, {
          roleName: message.roleName,
          moduleName: message.moduleName,
        })
        // Continue trying to publish other messages
      }
    }
  }

  // Process FEATURE# records (separate from ROLE# processing)
  // Wrapped in try/catch to isolate failures and prevent breaking ROLE# processing
  let featurePublishedCount = 0
  try {
    const EVENT_BUS_NAME = process.env.EVENT_BUS_NAME

    if (!EVENT_BUS_NAME) {
      logger.warn('EVENT_BUS_NAME environment variable not set, skipping feature event processing')
    } else {
      const featureStartTime = Date.now()

      // Filter for FEATURE# records
      const featureRecords = filterFeatureRecords(event.Records)

      logger.info('Filtered FEATURE# records', {
        originalCount: event.Records.length,
        filteredCount: featureRecords.length,
      })

      if (featureRecords.length > 0) {
        // Parse feature events; parse failures are logged inside
        // parseFeatureEvent and dropped here
        const featureEvents: FeatureChangeEvent[] = []
        for (const record of featureRecords) {
          const parsedEvent = parseFeatureEvent(record)
          if (parsedEvent) {
            featureEvents.push(parsedEvent)
          }
        }

        logger.info('Parsed feature events', {
          filteredCount: featureRecords.length,
          parsedCount: featureEvents.length,
        })

        // Publish to EventBridge
        if (featureEvents.length > 0) {
          featurePublishedCount = await publishFeatureChangeEvents(featureEvents, EVENT_BUS_NAME)
        }

        const featureDuration = Date.now() - featureStartTime
        logger.info('Completed feature event processing', {
          filteredRecords: featureRecords.length,
          publishedEvents: featurePublishedCount,
          duration: featureDuration,
        })
      }
    }
  } catch (error) {
    // Log error but don't throw - feature processing failures should not break ROLE# processing
    logger.error('Error during feature event processing', error as Error)
  }

  const duration = Date.now() - startTime

  logger.info('Completed Stream processing', {
    processedRecords: event.Records.length,
    roleMessages: sqsPublishedCount,
    featureEvents: featurePublishedCount,
    duration,
  })
}
@@ -0,0 +1,36 @@
1
Resources:
  # CloudWatch alarm that fires as soon as any message lands in the
  # role-update dead-letter queue (>= 1 visible message, averaged over a
  # 5-minute period).
  DLQAlarm:
    Type: AWS::CloudWatch::Alarm
    Properties:
      AlarmName: ${self:provider.stackName}-dlq-messages
      AlarmDescription: Alert when messages appear in the DLQ
      MetricName: ApproximateNumberOfMessagesVisible
      Namespace: AWS/SQS
      Statistic: Average
      Period: 300
      EvaluationPeriods: 1
      Threshold: 1
      ComparisonOperator: GreaterThanOrEqualToThreshold
      Dimensions:
        - Name: QueueName
          Value: !GetAtt RoleUpdateDLQ.QueueName
      # Missing data means no messages were seen — do not alarm on it.
      TreatMissingData: notBreaching

  # CloudWatch alarm on the Role Updater Lambda: fires when more than 5
  # invocation errors occur within a 5-minute period.
  RoleUpdaterErrorAlarm:
    Type: AWS::CloudWatch::Alarm
    Properties:
      AlarmName: ${self:provider.stackName}-role-updater-errors
      AlarmDescription: Alert on Role Updater Lambda errors
      MetricName: Errors
      Namespace: AWS/Lambda
      Statistic: Sum
      Period: 300
      EvaluationPeriods: 1
      Threshold: 5
      ComparisonOperator: GreaterThanThreshold
      Dimensions:
        - Name: FunctionName
          Value: !Ref RoleUpdaterLambdaFunction
      TreatMissingData: notBreaching
@@ -0,0 +1,106 @@
1
Resources:
  # Custom CloudFormation resource that turns on DynamoDB Streams for an
  # existing table. The backing Lambda is idempotent: it is a no-op when
  # streams are already enabled.
  EnableTableStreams:
    Type: Custom::DynamoDBStreams
    Properties:
      ServiceToken: !GetAtt StreamEnablerLambda.Arn
      TableName: ${param:tableName}
      StreamViewType: NEW_AND_OLD_IMAGES

  # Inline Lambda backing the custom resource above. It describes the
  # table, enables streams via UpdateTable when needed, and reports the
  # resulting StreamArn back to CloudFormation through the pre-signed
  # ResponseURL (required custom-resource protocol).
  # NOTE(review): the embedded code is a runtime string (ZipFile) and is
  # kept verbatim.
  StreamEnablerLambda:
    Type: AWS::Lambda::Function
    Properties:
      FunctionName: ${self:provider.stackName}-stream-enabler
      Handler: index.handler
      Runtime: nodejs20.x
      Role: !GetAtt StreamEnablerRole.Arn
      Timeout: 60
      Code:
        ZipFile: |
          const { DynamoDBClient, DescribeTableCommand, UpdateTableCommand } = require('@aws-sdk/client-dynamodb');
          const https = require('https');
          const url = require('url');

          const client = new DynamoDBClient({});

          async function sendResponse(event, context, status, data, physicalResourceId) {
            const responseBody = JSON.stringify({
              Status: status,
              Reason: `See CloudWatch Log Stream: ${context.logStreamName}`,
              PhysicalResourceId: physicalResourceId || context.logStreamName,
              StackId: event.StackId,
              RequestId: event.RequestId,
              LogicalResourceId: event.LogicalResourceId,
              Data: data
            });

            const parsedUrl = url.parse(event.ResponseURL);
            const options = {
              hostname: parsedUrl.hostname,
              port: 443,
              path: parsedUrl.path,
              method: 'PUT',
              headers: {
                'Content-Type': '',
                'Content-Length': responseBody.length
              }
            };

            return new Promise((resolve, reject) => {
              const req = https.request(options, (res) => {
                resolve();
              });
              req.on('error', reject);
              req.write(responseBody);
              req.end();
            });
          }

          exports.handler = async (event, context) => {
            console.log('Event:', JSON.stringify(event, null, 2));

            try {
              const { TableName, StreamViewType } = event.ResourceProperties;
              const requestType = event.RequestType;

              if (requestType === 'Delete') {
                // On delete, we optionally disable streams (or leave enabled for safety)
                // For safety, we'll leave the stream enabled
                await sendResponse(event, context, 'SUCCESS', {}, TableName);
                return;
              }

              // Check if streams are already enabled (idempotent)
              const describeResult = await client.send(new DescribeTableCommand({ TableName }));
              const streamEnabled = describeResult.Table?.StreamSpecification?.StreamEnabled;
              const currentStreamArn = describeResult.Table?.LatestStreamArn;

              if (!streamEnabled) {
                console.log('Enabling streams on table:', TableName);
                // Enable streams
                await client.send(new UpdateTableCommand({
                  TableName,
                  StreamSpecification: {
                    StreamEnabled: true,
                    StreamViewType
                  }
                }));

                // Wait a bit for the stream to be created
                await new Promise(resolve => setTimeout(resolve, 2000));

                // Get updated table info
                const updatedTable = await client.send(new DescribeTableCommand({ TableName }));
                const streamArn = updatedTable.Table?.LatestStreamArn;

                await sendResponse(event, context, 'SUCCESS', { StreamArn: streamArn }, TableName);
              } else {
                console.log('Streams already enabled on table:', TableName);
                await sendResponse(event, context, 'SUCCESS', { StreamArn: currentStreamArn }, TableName);
              }
            } catch (error) {
              console.error('Error:', error);
              await sendResponse(event, context, 'FAILED', {}, event.PhysicalResourceId);
            }
          };
@@ -0,0 +1,74 @@
1
Resources:
  # Single-table DynamoDB store for the module registry: composite pk/sk
  # key plus two generic GSIs for alternate access patterns.
  ModuleRegistryTable:
    Type: AWS::DynamoDB::Table
    # Retention is parameterized so production can Retain while ephemeral
    # stages can Delete.
    DeletionPolicy: ${param:deletionPolicy}
    UpdateReplacePolicy: ${param:deletionPolicy}
    Properties:
      TableName: ${param:tableName}
      BillingMode: PAY_PER_REQUEST
      PointInTimeRecoverySpecification:
        PointInTimeRecoveryEnabled: true
      # Stream both old and new images so consumers can handle REMOVE
      # events as well as INSERT/MODIFY.
      StreamSpecification:
        StreamViewType: NEW_AND_OLD_IMAGES
      AttributeDefinitions:
        - AttributeName: pk
          AttributeType: S
        - AttributeName: sk
          AttributeType: S
        - AttributeName: gsi1pk
          AttributeType: S
        - AttributeName: gsi1sk
          AttributeType: S
        - AttributeName: gsi2pk
          AttributeType: S
        - AttributeName: gsi2sk
          AttributeType: S
      KeySchema:
        - AttributeName: pk
          KeyType: HASH
        - AttributeName: sk
          KeyType: RANGE
      GlobalSecondaryIndexes:
        - IndexName: GSI1
          KeySchema:
            - AttributeName: gsi1pk
              KeyType: HASH
            - AttributeName: gsi1sk
              KeyType: RANGE
          Projection:
            ProjectionType: ALL
        - IndexName: GSI2
          KeySchema:
            - AttributeName: gsi2pk
              KeyType: HASH
            - AttributeName: gsi2sk
              KeyType: RANGE
          Projection:
            ProjectionType: ALL
      Tags:
        - Key: Module
          Value: module-registry
        - Key: Purpose
          Value: StateStore
        - Key: ManagedBy
          Value: Serverless

# Cross-stack exports so sibling stacks can reference the table by name,
# ARN, or stream ARN.
Outputs:
  ModuleRegistryTableName:
    Description: Module Registry DynamoDB Table Name
    Value: !Ref ModuleRegistryTable
    Export:
      Name: ${self:provider.stackName}-TableName

  ModuleRegistryTableArn:
    Description: Module Registry DynamoDB Table ARN
    Value: !GetAtt ModuleRegistryTable.Arn
    Export:
      Name: ${self:provider.stackName}-TableArn

  ModuleRegistryTableStreamArn:
    Description: Module Registry DynamoDB Table Stream ARN
    Value: !GetAtt ModuleRegistryTable.StreamArn
    Export:
      Name: ${self:provider.stackName}-TableStreamArn
@@ -0,0 +1,26 @@
1
Resources:
  # Dedicated EventBridge bus for module-registry feature-change events,
  # named per deployment stage.
  ModuleRegistryEventBus:
    Type: AWS::Events::EventBus
    Properties:
      Name: module-registry-events-${self:provider.stage}
      Tags:
        - Key: Module
          Value: module-registry
        - Key: Purpose
          Value: EventBus
        - Key: ManagedBy
          Value: Serverless

# Cross-stack exports so consumer stacks can attach rules to this bus.
Outputs:
  EventBusArn:
    Description: Module Registry EventBridge EventBus ARN
    Value: !GetAtt ModuleRegistryEventBus.Arn
    Export:
      Name: ${self:provider.stackName}-EventBusArn

  EventBusName:
    Description: Module Registry EventBridge EventBus Name
    Value: !Ref ModuleRegistryEventBus
    Export:
      Name: ${self:provider.stackName}-EventBusName