@friggframework/devtools 2.0.0--canary.461.2ca8c89.0 → 2.0.0--canary.461.4f3c330.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -268,17 +268,234 @@ class AuroraBuilder extends InfrastructureBuilder {
 
             console.log(`  ✅ Using discovered Aurora cluster: ${discoveredResources.auroraClusterEndpoint}`);
 
+            const dbConfig = appDefinition.database.postgres;
+
             // Use discovered cluster details
             result.environment.DATABASE_HOST = discoveredResources.auroraClusterEndpoint;
             result.environment.DATABASE_PORT = String(discoveredResources.auroraPort || 5432);
 
-            if
+            // Check if we should auto-create credentials
+            if (dbConfig.autoCreateCredentials && !discoveredResources.databaseSecretArn) {
+                console.log('  Creating Secrets Manager secret and rotating Aurora password...');
+
+                // Create Secrets Manager secret with auto-generated password
+                result.resources.FriggDBSecret = {
+                    Type: 'AWS::SecretsManager::Secret',
+                    Properties: {
+                        Name: '${self:service}-${self:provider.stage}-db-credentials',
+                        Description: 'Aurora database credentials (auto-created for discovered cluster)',
+                        GenerateSecretString: {
+                            SecretStringTemplate: JSON.stringify({ username: dbConfig.username || 'postgres' }),
+                            GenerateStringKey: 'password',
+                            PasswordLength: 32,
+                            ExcludeCharacters: '"@/\\',
+                        },
+                        Tags: [
+                            { Key: 'Name', Value: '${self:service}-${self:provider.stage}-db-secret' },
+                            { Key: 'ManagedBy', Value: 'Frigg' },
+                            { Key: 'Purpose', Value: 'DiscoveredClusterCredentials' },
+                        ],
+                    },
+                };
+
+                // Get the cluster identifier from the endpoint
+                // Format: cluster-name.cluster-xyz.region.rds.amazonaws.com
+                const clusterIdentifier = discoveredResources.auroraClusterEndpoint.split('.')[0];
+
+                // Create custom resource to rotate the Aurora master password
+                // This uses a Lambda-backed CloudFormation custom resource
+                result.resources.FriggAuroraPasswordRotator = {
+                    Type: 'Custom::AuroraPasswordRotator',
+                    Properties: {
+                        ServiceToken: { 'Fn::GetAtt': ['PasswordRotatorLambda', 'Arn'] },
+                        ClusterIdentifier: clusterIdentifier,
+                        SecretArn: { Ref: 'FriggDBSecret' },
+                        Region: '${self:provider.region}',
+                    },
+                    DependsOn: ['FriggDBSecret', 'PasswordRotatorLambda'],
+                };
+
+                // Lambda function to rotate the password
+                result.resources.PasswordRotatorLambda = {
+                    Type: 'AWS::Lambda::Function',
+                    Properties: {
+                        FunctionName: '${self:service}-${self:provider.stage}-password-rotator',
+                        Runtime: 'nodejs22.x',
+                        Handler: 'index.handler',
+                        Role: { 'Fn::GetAtt': ['PasswordRotatorRole', 'Arn'] },
+                        Timeout: 60,
+                        Code: {
+                            ZipFile: `
+                                const { RDSClient, ModifyDBClusterCommand } = require('@aws-sdk/client-rds');
+                                const { SecretsManagerClient, GetSecretValueCommand } = require('@aws-sdk/client-secrets-manager');
+
+                                exports.handler = async (event, context) => {
+                                    console.log('Event:', JSON.stringify(event, null, 2));
+
+                                    const { RequestType, ResourceProperties } = event;
+                                    const { ClusterIdentifier, SecretArn, Region } = ResourceProperties;
+
+                                    const sendResponse = async (status, data = {}) => {
+                                        const responseBody = JSON.stringify({
+                                            Status: status,
+                                            Reason: data.Reason || 'See CloudWatch logs',
+                                            PhysicalResourceId: context.logStreamName,
+                                            StackId: event.StackId,
+                                            RequestId: event.RequestId,
+                                            LogicalResourceId: event.LogicalResourceId,
+                                            Data: data,
+                                        });
+
+                                        await fetch(event.ResponseURL, {
+                                            method: 'PUT',
+                                            body: responseBody,
+                                            headers: { 'Content-Type': '' },
+                                        });
+                                    };
+
+                                    try {
+                                        if (RequestType === 'Delete') {
+                                            await sendResponse('SUCCESS', { Message: 'Delete not required' });
+                                            return;
+                                        }
+
+                                        // Get the new password from Secrets Manager
+                                        const smClient = new SecretsManagerClient({ region: Region });
+                                        const secretResponse = await smClient.send(
+                                            new GetSecretValueCommand({ SecretId: SecretArn })
+                                        );
+                                        const secret = JSON.parse(secretResponse.SecretString);
+                                        const newPassword = secret.password;
+
+                                        // Rotate the Aurora cluster master password
+                                        const rdsClient = new RDSClient({ region: Region });
+                                        await rdsClient.send(
+                                            new ModifyDBClusterCommand({
+                                                DBClusterIdentifier: ClusterIdentifier,
+                                                MasterUserPassword: newPassword,
+                                                ApplyImmediately: true,
+                                            })
+                                        );
+
+                                        console.log(\`Successfully rotated password for cluster: \${ClusterIdentifier}\`);
+                                        await sendResponse('SUCCESS', {
+                                            Message: 'Password rotated successfully',
+                                            ClusterIdentifier,
+                                        });
+                                    } catch (error) {
+                                        console.error('Error rotating password:', error);
+                                        await sendResponse('FAILED', { Reason: error.message });
+                                    }
+                                };
+                            `,
+                        },
+                    },
+                };
+
+                // IAM role for the password rotator Lambda
+                result.resources.PasswordRotatorRole = {
+                    Type: 'AWS::IAM::Role',
+                    Properties: {
+                        AssumeRolePolicyDocument: {
+                            Version: '2012-10-17',
+                            Statement: [
+                                {
+                                    Effect: 'Allow',
+                                    Principal: { Service: 'lambda.amazonaws.com' },
+                                    Action: 'sts:AssumeRole',
+                                },
+                            ],
+                        },
+                        ManagedPolicyArns: [
+                            'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',
+                        ],
+                        Policies: [
+                            {
+                                PolicyName: 'PasswordRotatorPolicy',
+                                PolicyDocument: {
+                                    Version: '2012-10-17',
+                                    Statement: [
+                                        {
+                                            Effect: 'Allow',
+                                            Action: [
+                                                'rds:ModifyDBCluster',
+                                                'rds:DescribeDBClusters',
+                                            ],
+                                            Resource: '*',
+                                        },
+                                        {
+                                            Effect: 'Allow',
+                                            Action: ['secretsmanager:GetSecretValue'],
+                                            Resource: { Ref: 'FriggDBSecret' },
+                                        },
+                                    ],
+                                },
+                            },
+                        ],
+                    },
+                };
+
+                // Use the secret for DATABASE_URL
+                result.environment.DATABASE_SECRET_ARN = { Ref: 'FriggDBSecret' };
+                result.environment.DATABASE_URL = this.buildDatabaseUrl(
+                    discoveredResources.auroraClusterEndpoint,
+                    discoveredResources.auroraPort || 5432,
+                    dbConfig.database || 'frigg',
+                    { Ref: 'FriggDBSecret' }
+                );
+
+                // Grant Lambda functions permission to read the secret
+                result.iamStatements.push({
+                    Effect: 'Allow',
+                    Action: ['secretsmanager:GetSecretValue'],
+                    Resource: { Ref: 'FriggDBSecret' },
+                });
+
+                console.log('  ✅ Credentials auto-creation configured');
+            } else if (discoveredResources.databaseSecretArn) {
+                // Use existing discovered secret
                 result.environment.DATABASE_SECRET_ARN = discoveredResources.databaseSecretArn;
+                result.environment.DATABASE_URL = this.buildDatabaseUrl(
+                    discoveredResources.auroraClusterEndpoint,
+                    discoveredResources.auroraPort || 5432,
+                    dbConfig.database || 'frigg',
+                    discoveredResources.databaseSecretArn
+                );
+
                 result.iamStatements.push({
                     Effect: 'Allow',
                     Action: ['secretsmanager:GetSecretValue'],
                     Resource: discoveredResources.databaseSecretArn,
                 });
+
+                console.log('  ✅ Using discovered Secrets Manager credentials');
+            } else {
+                // No secret and no auto-create - construct DATABASE_URL using environment variables at runtime
+                const dbName = dbConfig.database || 'frigg';
+
+                // Set individual environment variables for flexible credential management
+                result.environment.DATABASE_HOST = discoveredResources.auroraClusterEndpoint;
+                result.environment.DATABASE_PORT = String(discoveredResources.auroraPort || 5432);
+                result.environment.DATABASE_NAME = dbName;
+
+                // Build DATABASE_URL using CloudFormation intrinsic functions to reference
+                // the environment variables at runtime (not build time)
+                result.environment.DATABASE_URL = {
+                    'Fn::Sub': [
+                        'postgresql://${DatabaseUser}:${DatabasePassword}@${DatabaseHost}:${DatabasePort}/${DatabaseName}',
+                        {
+                            DatabaseUser: '${env:DATABASE_USER, "postgres"}',
+                            DatabasePassword: '${env:DATABASE_PASSWORD}',
+                            DatabaseHost: discoveredResources.auroraClusterEndpoint,
+                            DatabasePort: String(discoveredResources.auroraPort || 5432),
+                            DatabaseName: dbName,
+                        },
+                    ],
+                };
+
+                console.log('  ℹ️ No Secrets Manager secret found - DATABASE_URL will use DATABASE_USER and DATABASE_PASSWORD from environment');
+                console.log('  ℹ️ Set DATABASE_USER and DATABASE_PASSWORD in Lambda environment or via serverless deploy --param');
+                console.log('  ℹ️ Or enable autoCreateCredentials=true to automatically create and rotate credentials');
             }
 
             // Add security group ingress rule to allow Lambda to connect to Aurora
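Note on the new branch above: below is a minimal sketch of the database.postgres block of an app definition that would take the auto-create path. The key names (enable, autoCreateCredentials, username, database) appear in the diff; the surrounding shape and the values are assumptions for illustration only.

// Hypothetical appDefinition fragment (not part of this diff).
const appDefinition = {
    database: {
        postgres: {
            enable: true,                 // also checked by MigrationBuilder.shouldExecute()
            autoCreateCredentials: true,  // takes the FriggDBSecret + password-rotator branch above
            username: 'postgres',         // seeds the secret's SecretStringTemplate username
            database: 'frigg',            // database name used when building DATABASE_URL
        },
    },
};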
@@ -0,0 +1,138 @@
+/**
+ * Migration Infrastructure Builder
+ *
+ * Domain Layer - Hexagonal Architecture
+ *
+ * Responsible for:
+ * - SQS queue for migration jobs
+ * - Migration worker Lambda function (triggered by SQS)
+ * - Migration router Lambda function (HTTP API)
+ * - IAM permissions for SQS
+ *
+ * Only creates infrastructure when PostgreSQL is enabled.
+ * MongoDB uses `db push` which doesn't require migration queue/worker.
+ */
+
+const { InfrastructureBuilder, ValidationResult } = require('../shared/base-builder');
+
+class MigrationBuilder extends InfrastructureBuilder {
+    constructor() {
+        super();
+        this.name = 'MigrationBuilder';
+    }
+
+    shouldExecute(appDefinition) {
+        // Only create migration infrastructure for PostgreSQL
+        // MongoDB uses `db push` which doesn't need queue/worker
+        // Skip in local mode
+        if (process.env.FRIGG_SKIP_AWS_DISCOVERY === 'true') {
+            return false;
+        }
+
+        // Default to true if not explicitly disabled
+        return appDefinition.database?.postgres?.enable !== false;
+    }
+
+    getDependencies() {
+        return []; // No dependencies - migrations can run independently
+    }
+
+    validate(appDefinition) {
+        const result = new ValidationResult();
+
+        // No specific validation needed - PostgreSQL builder handles DB validation
+        // This builder just creates the migration infrastructure
+
+        return result;
+    }
+
+    /**
+     * Build migration infrastructure
+     */
+    async build(appDefinition, discoveredResources) {
+        console.log(`\n[${this.name}] Configuring database migration infrastructure...`);
+
+        const result = {
+            resources: {},
+            functions: {},
+            iamStatements: [],
+            environment: {},
+        };
+
+        // Create SQS queue for migration jobs
+        result.resources.DbMigrationQueue = {
+            Type: 'AWS::SQS::Queue',
+            Properties: {
+                QueueName: '${self:service}-${self:provider.stage}-DbMigrationQueue',
+                VisibilityTimeout: 900, // 15 minutes for long-running migrations
+                MessageRetentionPeriod: 1209600, // 14 days
+                ReceiveMessageWaitTimeSeconds: 20, // Long polling
+            },
+        };
+
+        console.log('  ✓ Created DbMigrationQueue resource');
+
+        // Create migration worker Lambda (triggered by SQS)
+        result.functions.dbMigrationWorker = {
+            handler: 'node_modules/@friggframework/core/handlers/workers/db-migration.handler',
+            layers: [{ Ref: 'PrismaLambdaLayer' }],
+            skipEsbuild: true,
+            timeout: 900, // 15 minutes for long migrations
+            memorySize: 1024, // Extra memory for Prisma operations
+            reservedConcurrency: 1, // Process one migration at a time (critical for safety)
+            description: 'Database migration worker (triggered by SQS queue)',
+            events: [
+                {
+                    sqs: {
+                        arn: { 'Fn::GetAtt': ['DbMigrationQueue', 'Arn'] },
+                        batchSize: 1, // Process one migration at a time
+                    },
+                },
+            ],
+        };
+
+        console.log('  ✓ Created dbMigrationWorker function');
+
+        // Create migration router Lambda (HTTP API)
+        result.functions.dbMigrationRouter = {
+            handler: 'node_modules/@friggframework/core/handlers/routers/db-migration.handler',
+            layers: [{ Ref: 'PrismaLambdaLayer' }],
+            skipEsbuild: true,
+            timeout: 30, // Router just queues jobs, doesn't run migrations
+            memorySize: 512,
+            description: 'Database migration HTTP API (POST to trigger, GET to check status)',
+            events: [
+                { httpApi: { path: '/db-migrate', method: 'POST' } },
+                { httpApi: { path: '/db-migrate/{processId}', method: 'GET' } },
+            ],
+        };
+
+        console.log('  ✓ Created dbMigrationRouter function');
+
+        // Add queue URL to environment
+        result.environment.DB_MIGRATION_QUEUE_URL = { Ref: 'DbMigrationQueue' };
+
+        console.log('  ✓ Added DB_MIGRATION_QUEUE_URL environment variable');
+
+        // Add IAM permissions for SQS
+        result.iamStatements.push({
+            Effect: 'Allow',
+            Action: [
+                'sqs:SendMessage',
+                'sqs:GetQueueUrl',
+                'sqs:GetQueueAttributes',
+            ],
+            Resource: { 'Fn::GetAtt': ['DbMigrationQueue', 'Arn'] },
+        });
+
+        console.log('  ✓ Added SQS IAM permissions');
+
+        console.log(`[${this.name}] ✅ Migration infrastructure configuration completed`);
+        return result;
+    }
+}
+
+module.exports = {
+    MigrationBuilder,
+};
+
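Usage sketch for the two routes defined above. Only the paths and HTTP methods come from the diff; the base URL, the empty request body, and the assumption that the queued-job response carries a processId field are placeholders.

// Hypothetical client calls against the dbMigrationRouter endpoints.
const API_BASE = 'https://your-api-id.execute-api.us-east-1.amazonaws.com'; // placeholder

async function runMigration() {
    // POST /db-migrate asks the router Lambda to queue a migration job on DbMigrationQueue
    const queued = await fetch(`${API_BASE}/db-migrate`, { method: 'POST' }).then((r) => r.json());

    // GET /db-migrate/{processId} polls the status of that job
    const status = await fetch(`${API_BASE}/db-migrate/${queued.processId}`).then((r) => r.json());
    console.log('migration status:', status);
}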
@@ -0,0 +1,250 @@
+/**
+ * Tests for MigrationBuilder
+ */
+
+const { MigrationBuilder } = require('./migration-builder');
+
+describe('MigrationBuilder', () => {
+    let builder;
+    let originalEnv;
+
+    beforeEach(() => {
+        builder = new MigrationBuilder();
+        originalEnv = { ...process.env };
+    });
+
+    afterEach(() => {
+        process.env = originalEnv;
+    });
+
+    describe('shouldExecute', () => {
+        it('should return true for PostgreSQL by default', () => {
+            const appDef = {
+                database: {},
+            };
+
+            expect(builder.shouldExecute(appDef)).toBe(true);
+        });
+
+        it('should return true when PostgreSQL is explicitly enabled', () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            expect(builder.shouldExecute(appDef)).toBe(true);
+        });
+
+        it('should return false when PostgreSQL is explicitly disabled', () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: false,
+                    },
+                },
+            };
+
+            expect(builder.shouldExecute(appDef)).toBe(false);
+        });
+
+        it('should return false in local mode', () => {
+            process.env.FRIGG_SKIP_AWS_DISCOVERY = 'true';
+
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            expect(builder.shouldExecute(appDef)).toBe(false);
+        });
+
+        it('should return true for MongoDB-only (defaults to PostgreSQL)', () => {
+            const appDef = {
+                database: {
+                    mongodb: {
+                        enable: true,
+                    },
+                },
+            };
+
+            expect(builder.shouldExecute(appDef)).toBe(true);
+        });
+    });
+
+    describe('getDependencies', () => {
+        it('should have no dependencies', () => {
+            expect(builder.getDependencies()).toEqual([]);
+        });
+    });
+
+    describe('validate', () => {
+        it('should always return valid', () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = builder.validate(appDef);
+
+            expect(result.valid).toBe(true);
+            expect(result.errors).toEqual([]);
+        });
+    });
+
+    describe('build', () => {
+        it('should create SQS queue resource', async () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = await builder.build(appDef, {});
+
+            expect(result.resources.DbMigrationQueue).toBeDefined();
+            expect(result.resources.DbMigrationQueue.Type).toBe('AWS::SQS::Queue');
+            expect(result.resources.DbMigrationQueue.Properties.QueueName).toBe(
+                '${self:service}-${self:provider.stage}-DbMigrationQueue'
+            );
+            expect(result.resources.DbMigrationQueue.Properties.VisibilityTimeout).toBe(900);
+        });
+
+        it('should create migration worker function', async () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = await builder.build(appDef, {});
+
+            expect(result.functions.dbMigrationWorker).toBeDefined();
+            expect(result.functions.dbMigrationWorker.handler).toBe(
+                'node_modules/@friggframework/core/handlers/workers/db-migration.handler'
+            );
+            expect(result.functions.dbMigrationWorker.timeout).toBe(900);
+            expect(result.functions.dbMigrationWorker.memorySize).toBe(1024);
+            expect(result.functions.dbMigrationWorker.reservedConcurrency).toBe(1);
+            expect(result.functions.dbMigrationWorker.events).toEqual([
+                {
+                    sqs: {
+                        arn: { 'Fn::GetAtt': ['DbMigrationQueue', 'Arn'] },
+                        batchSize: 1,
+                    },
+                },
+            ]);
+        });
+
+        it('should create migration router function', async () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = await builder.build(appDef, {});
+
+            expect(result.functions.dbMigrationRouter).toBeDefined();
+            expect(result.functions.dbMigrationRouter.handler).toBe(
+                'node_modules/@friggframework/core/handlers/routers/db-migration.handler'
+            );
+            expect(result.functions.dbMigrationRouter.timeout).toBe(30);
+            expect(result.functions.dbMigrationRouter.events).toContainEqual({
+                httpApi: { path: '/db-migrate', method: 'POST' },
+            });
+            expect(result.functions.dbMigrationRouter.events).toContainEqual({
+                httpApi: { path: '/db-migrate/{processId}', method: 'GET' },
+            });
+        });
+
+        it('should add queue URL to environment', async () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = await builder.build(appDef, {});
+
+            expect(result.environment.DB_MIGRATION_QUEUE_URL).toEqual({
+                Ref: 'DbMigrationQueue',
+            });
+        });
+
+        it('should add SQS IAM permissions', async () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = await builder.build(appDef, {});
+
+            expect(result.iamStatements).toContainEqual({
+                Effect: 'Allow',
+                Action: [
+                    'sqs:SendMessage',
+                    'sqs:GetQueueUrl',
+                    'sqs:GetQueueAttributes',
+                ],
+                Resource: { 'Fn::GetAtt': ['DbMigrationQueue', 'Arn'] },
+            });
+        });
+
+        it('should include Prisma layer in both functions', async () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = await builder.build(appDef, {});
+
+            expect(result.functions.dbMigrationWorker.layers).toEqual([{ Ref: 'PrismaLambdaLayer' }]);
+            expect(result.functions.dbMigrationRouter.layers).toEqual([{ Ref: 'PrismaLambdaLayer' }]);
+        });
+
+        it('should set skipEsbuild for both functions', async () => {
+            const appDef = {
+                database: {
+                    postgres: {
+                        enable: true,
+                    },
+                },
+            };
+
+            const result = await builder.build(appDef, {});
+
+            expect(result.functions.dbMigrationWorker.skipEsbuild).toBe(true);
+            expect(result.functions.dbMigrationRouter.skipEsbuild).toBe(true);
+        });
+    });
+
+    describe('getName', () => {
+        it('should return MigrationBuilder', () => {
+            expect(builder.getName()).toBe('MigrationBuilder');
+        });
+    });
+});
+
@@ -12,6 +12,7 @@ const { BuilderOrchestrator } = require('./domains/shared/builder-orchestrator')
 const { VpcBuilder } = require('./domains/networking/vpc-builder');
 const { KmsBuilder } = require('./domains/security/kms-builder');
 const { AuroraBuilder } = require('./domains/database/aurora-builder');
+const { MigrationBuilder } = require('./domains/database/migration-builder');
 const { SsmBuilder } = require('./domains/parameters/ssm-builder');
 const { WebsocketBuilder } = require('./domains/integration/websocket-builder');
 const { IntegrationBuilder } = require('./domains/integration/integration-builder');

@@ -38,6 +39,7 @@ const composeServerlessDefinition = async (AppDefinition) => {
         new VpcBuilder(),
         new KmsBuilder(),
         new AuroraBuilder(),
+        new MigrationBuilder(), // Add migration infrastructure after Aurora
         new SsmBuilder(),
         new WebsocketBuilder(),
         new IntegrationBuilder(),
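For orientation, every builder registered in the list above returns the same result shape from build(); the four keys are visible in both AuroraBuilder and MigrationBuilder earlier in this diff. How BuilderOrchestrator merges these results is not shown here, so the values below are illustrative placeholders only.

// Assumed illustration of the shared builder result contract (values are placeholders).
const exampleBuilderResult = {
    resources: { DbMigrationQueue: { Type: 'AWS::SQS::Queue', Properties: {} } },    // CloudFormation resources
    functions: { dbMigrationWorker: { handler: 'path/to/handler', events: [] } },    // serverless function definitions
    iamStatements: [{ Effect: 'Allow', Action: ['sqs:SendMessage'], Resource: '*' }], // provider IAM statements
    environment: { DB_MIGRATION_QUEUE_URL: { Ref: 'DbMigrationQueue' } },             // Lambda environment variables
};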
package/package.json CHANGED

@@ -1,7 +1,7 @@
 {
   "name": "@friggframework/devtools",
   "prettier": "@friggframework/prettier-config",
-  "version": "2.0.0--canary.461.2ca8c89.0",
+  "version": "2.0.0--canary.461.4f3c330.0",
   "dependencies": {
     "@aws-sdk/client-ec2": "^3.835.0",
     "@aws-sdk/client-kms": "^3.835.0",

@@ -11,8 +11,8 @@
     "@babel/eslint-parser": "^7.18.9",
     "@babel/parser": "^7.25.3",
     "@babel/traverse": "^7.25.3",
-    "@friggframework/schemas": "2.0.0--canary.461.
-    "@friggframework/test": "2.0.0--canary.461.
+    "@friggframework/schemas": "2.0.0--canary.461.4f3c330.0",
+    "@friggframework/test": "2.0.0--canary.461.4f3c330.0",
     "@hapi/boom": "^10.0.1",
     "@inquirer/prompts": "^5.3.8",
     "axios": "^1.7.2",

@@ -34,8 +34,8 @@
     "serverless-http": "^2.7.0"
   },
   "devDependencies": {
-    "@friggframework/eslint-config": "2.0.0--canary.461.
-    "@friggframework/prettier-config": "2.0.0--canary.461.
+    "@friggframework/eslint-config": "2.0.0--canary.461.4f3c330.0",
+    "@friggframework/prettier-config": "2.0.0--canary.461.4f3c330.0",
     "aws-sdk-client-mock": "^4.1.0",
     "aws-sdk-client-mock-jest": "^4.1.0",
     "jest": "^30.1.3",

@@ -70,5 +70,5 @@
   "publishConfig": {
     "access": "public"
   },
-  "gitHead": "
+  "gitHead": "4f3c33010ab48c11f66a630ea72f75afc2e21762"
 }