spense-core 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/infra.ts ADDED
@@ -0,0 +1,315 @@
+ import { execSync } from 'child_process';
+ import * as fs from 'fs';
+ import * as path from 'path';
+ import { ProjectInfo, DeployConfig } from './types';
+ import { S3Client, PutObjectCommand, CreateBucketCommand, HeadBucketCommand } from '@aws-sdk/client-s3';
+ import archiver from 'archiver';
+
+ export interface DeployOutputs {
+ url: string;
+ dbEndpoint?: string;
+ }
+
+ export async function deployInfra(project: ProjectInfo, config: DeployConfig): Promise<DeployOutputs> {
+ const stackDir = path.join(process.cwd(), '.spense');
+ fs.mkdirSync(stackDir, { recursive: true });
+
+ // Set AWS credentials as env vars for CDK
+ process.env.AWS_ACCESS_KEY_ID = config.credentials.accessKeyId;
+ process.env.AWS_SECRET_ACCESS_KEY = config.credentials.secretAccessKey;
+ process.env.AWS_REGION = config.credentials.region;
+
+ // Step 1: Package and upload app to S3
+ const bucketName = `spense-${project.name}-${config.credentials.region}`;
+ const s3Key = `app-${Date.now()}.zip`;
+
+ console.log('📦 Packaging application...');
+ await packageAndUpload(project, config, bucketName, s3Key);
+
+ // Step 2: Generate and deploy CDK stack
+ const stackCode = generateStack(project, config, bucketName, s3Key);
+ fs.writeFileSync(path.join(stackDir, 'stack.ts'), stackCode);
+ fs.writeFileSync(path.join(stackDir, 'cdk.json'), '{"app":"npx ts-node stack.ts"}');
+ fs.writeFileSync(path.join(stackDir, 'package.json'), JSON.stringify({
+ dependencies: {
+ 'aws-cdk-lib': '^2.170.0',
+ 'constructs': '^10.4.2',
+ 'ts-node': '^10.9.2',
+ 'typescript': '^5.3.0'
+ }
+ }));
+
+ console.log('☁️ Deploying infrastructure...');
+ execSync('npm install', { cwd: stackDir, stdio: 'inherit' });
+ execSync('npx cdk bootstrap --require-approval never', { cwd: stackDir, stdio: 'inherit' });
+ execSync('npx cdk deploy --require-approval never --outputs-file outputs.json', { cwd: stackDir, stdio: 'inherit' });
+
+ const outputs = JSON.parse(fs.readFileSync(path.join(stackDir, 'outputs.json'), 'utf-8'));
+ const stackOutputs = outputs[`${project.name}-stack`] || {};
+
+ return {
+ url: stackOutputs.URL || stackOutputs.LoadBalancerDNS,
+ dbEndpoint: stackOutputs.DatabaseEndpoint
+ };
+ }
+
+ async function packageAndUpload(project: ProjectInfo, config: DeployConfig, bucket: string, key: string): Promise<void> {
+ const s3 = new S3Client({
+ region: config.credentials.region,
+ credentials: {
+ accessKeyId: config.credentials.accessKeyId,
+ secretAccessKey: config.credentials.secretAccessKey
+ }
+ });
+
+ // Create bucket if not exists
+ try {
+ await s3.send(new HeadBucketCommand({ Bucket: bucket }));
+ } catch {
+ await s3.send(new CreateBucketCommand({ Bucket: bucket }));
+ }
+
+ // Create zip of project
+ const zipPath = path.join(process.cwd(), '.spense', 'app.zip');
+ await createZip(process.cwd(), zipPath, project.type);
+
+ // Upload to S3
+ const zipContent = fs.readFileSync(zipPath);
+ await s3.send(new PutObjectCommand({
+ Bucket: bucket,
+ Key: key,
+ Body: zipContent
+ }));
+
+ console.log(`✓ Uploaded to s3://${bucket}/${key}`);
+ }
+
+ function createZip(sourceDir: string, outPath: string, projectType: string): Promise<void> {
+ return new Promise((resolve, reject) => {
+ const output = fs.createWriteStream(outPath);
+ const archive = archiver('zip', { zlib: { level: 9 } });
+
+ output.on('close', () => resolve());
+ archive.on('error', reject);
+
+ archive.pipe(output);
+
+ // Exclude common non-essential directories
+ const ignore = ['node_modules', '.git', '.spense', 'dist', 'build', 'target', '.idea', '.vscode'];
+
+ archive.glob('**/*', {
+ cwd: sourceDir,
+ ignore: ignore.map(i => `**/${i}/**`)
+ });
+
+ archive.finalize();
+ });
+ }
+
+ function generateStack(project: ProjectInfo, config: DeployConfig, bucket: string, s3Key: string): string {
+ const stackName = `${project.name}-stack`;
+ const region = config.credentials.region;
+
+ return `import * as cdk from 'aws-cdk-lib';
+ import * as ec2 from 'aws-cdk-lib/aws-ec2';
+ import * as elbv2 from 'aws-cdk-lib/aws-elasticloadbalancingv2';
+ import * as asg from 'aws-cdk-lib/aws-autoscaling';
+ import * as rds from 'aws-cdk-lib/aws-rds';
+ import * as iam from 'aws-cdk-lib/aws-iam';
+ import * as s3 from 'aws-cdk-lib/aws-s3';
+ ${config.useHttps ? "import * as acm from 'aws-cdk-lib/aws-certificatemanager';" : ''}
+ ${config.domain ? "import * as route53 from 'aws-cdk-lib/aws-route53';\nimport * as targets from 'aws-cdk-lib/aws-route53-targets';" : ''}
+
+ const app = new cdk.App();
+ const stack = new cdk.Stack(app, '${stackName}', { env: { region: '${region}' }});
+
+ const vpc = new ec2.Vpc(stack, 'Vpc', { maxAzs: 2, natGateways: 1 });
+
+ const appBucket = s3.Bucket.fromBucketName(stack, 'AppBucket', '${bucket}');
+
+ ${generateDbCode(config)}
+
+ const role = new iam.Role(stack, 'InstanceRole', {
+ assumedBy: new iam.ServicePrincipal('ec2.amazonaws.com'),
+ managedPolicies: [
+ iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonSSMManagedInstanceCore'),
+ ]
+ });
+ appBucket.grantRead(role);
+ ${config.database !== 'none' ? 'db.secret?.grantRead(role);' : ''}
+
+ ${generateAsgCode(project, config, bucket, s3Key)}
+
+ const alb = new elbv2.ApplicationLoadBalancer(stack, 'ALB', { vpc, internetFacing: true });
+
+ ${generateListenerCode(config)}
+
+ listener.addTargets('App', {
+ port: ${project.port},
+ targets: [scaling],
+ healthCheck: { path: '/health', healthyHttpCodes: '200-399' }
+ });
+
+ ${generateDomainCode(config)}
+
+ new cdk.CfnOutput(stack, 'URL', { value: '${config.useHttps ? 'https' : 'http'}://' + ${config.domain ? `'${config.domain}'` : 'alb.loadBalancerDnsName'} });
+ ${config.database !== 'none' ? "new cdk.CfnOutput(stack, 'DatabaseEndpoint', { value: db.instanceEndpoint.hostname });" : ''}
+
+ app.synth();
+ `;
+ }
+
+ function generateDbCode(config: DeployConfig): string {
+ if (config.database === 'none') return '';
+
+ const engine = config.database === 'postgresql'
+ ? 'rds.DatabaseInstanceEngine.postgres({ version: rds.PostgresEngineVersion.VER_15 })'
+ : 'rds.DatabaseInstanceEngine.mysql({ version: rds.MysqlEngineVersion.VER_8_0 })';
+
+ return `const db = new rds.DatabaseInstance(stack, 'Database', {
+ vpc,
+ engine: ${engine},
+ instanceType: ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.${config.dbInstanceClass?.split('.')[2].toUpperCase() || 'MICRO'}),
+ credentials: rds.Credentials.fromGeneratedSecret('dbadmin'),
+ vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS },
+ removalPolicy: cdk.RemovalPolicy.SNAPSHOT
+ });`;
+ }
+
+ function generateAsgCode(project: ProjectInfo, config: DeployConfig, bucket: string, s3Key: string): string {
+ if (config.ec2.useExisting) {
+ return `// Using existing instances: ${config.ec2.existingInstanceIds?.join(', ')}`;
+ }
+
+ const instanceSize = config.ec2.instanceType?.split('.')[1].toUpperCase() || 'SMALL';
+
+ return `const scaling = new asg.AutoScalingGroup(stack, 'ASG', {
+ vpc,
+ instanceType: ec2.InstanceType.of(ec2.InstanceClass.T3, ec2.InstanceSize.${instanceSize}),
+ machineImage: ec2.MachineImage.latestAmazonLinux2023(),
+ role,
+ minCapacity: ${config.ec2.minInstances || 1},
+ maxCapacity: ${config.ec2.maxInstances || 3},
+ vpcSubnets: { subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS },
+ ${config.useElasticIp ? 'associatePublicIpAddress: true,' : ''}
+ });
+
+ scaling.addUserData(\`${generateUserData(project, bucket, s3Key, config)}\`);
+
+ scaling.scaleOnCpuUtilization('CpuScaling', { targetUtilizationPercent: 70 });`;
+ }
+
+ function generateUserData(project: ProjectInfo, bucket: string, s3Key: string, config: DeployConfig): string {
+ const dbEnvVars = config.database !== 'none' ? `
+ # Get database credentials from Secrets Manager
+ DB_SECRET=$(aws secretsmanager get-secret-value --secret-id \${DB_SECRET_ARN} --query SecretString --output text)
+ export DATABASE_HOST=$(echo $DB_SECRET | jq -r '.host')
+ export DATABASE_PORT=$(echo $DB_SECRET | jq -r '.port')
+ export DATABASE_NAME=$(echo $DB_SECRET | jq -r '.dbname // "postgres"')
+ export DATABASE_USER=$(echo $DB_SECRET | jq -r '.username')
+ export DATABASE_PASSWORD=$(echo $DB_SECRET | jq -r '.password')
+ export DATABASE_URL="${config.database === 'postgresql' ? 'postgresql' : 'mysql'}://\${DATABASE_USER}:\${DATABASE_PASSWORD}@\${DATABASE_HOST}:\${DATABASE_PORT}/\${DATABASE_NAME}"
+ ` : '';
+
+ if (project.type === 'nodejs') {
+ return `#!/bin/bash
+ set -e
+
+ # Install dependencies
+ yum install -y nodejs npm aws-cli jq unzip
+
+ # Download app from S3
+ mkdir -p /app
+ cd /app
+ aws s3 cp s3://${bucket}/${s3Key} app.zip
+ unzip -o app.zip
+ rm app.zip
+
+ # Install node dependencies
+ npm install --production
+ ${project.buildCommand ? `npm run build` : ''}
+ ${dbEnvVars}
+ # Set environment
+ export NODE_ENV=production
+ export PORT=${project.port}
+
+ # Start app with pm2 for process management
+ npm install -g pm2
+ pm2 start ${project.startCommand.replace('node ', '')} --name app
+ pm2 startup
+ pm2 save
+ `;
+ }
+
+ return `#!/bin/bash
+ set -e
+
+ # Install dependencies
+ yum install -y java-17-amazon-corretto aws-cli jq unzip
+
+ # Download app from S3
+ mkdir -p /app
+ cd /app
+ aws s3 cp s3://${bucket}/${s3Key} app.zip
+ unzip -o app.zip
+ rm app.zip
+ ${dbEnvVars}
+ # Find and run the JAR
+ JAR_FILE=$(find . -name "*.jar" -type f | head -1)
+ export SERVER_PORT=${project.port}
+
+ # Create systemd service
+ cat > /etc/systemd/system/app.service << EOF
+ [Unit]
+ Description=Spring Boot App
+ After=network.target
+
+ [Service]
+ Type=simple
+ User=root
+ WorkingDirectory=/app
+ ExecStart=/usr/bin/java -jar \${JAR_FILE}
+ Restart=always
+ Environment=SERVER_PORT=${project.port}
+ ${config.database !== 'none' ? 'Environment=SPRING_DATASOURCE_URL=${DATABASE_URL}' : ''}
+
+ [Install]
+ WantedBy=multi-user.target
+ EOF
+
+ systemctl daemon-reload
+ systemctl enable app
+ systemctl start app
+ `;
+ }
+
+ function generateListenerCode(config: DeployConfig): string {
+ if (config.useHttps && config.domain) {
+ return `const cert = new acm.Certificate(stack, 'Cert', {
+ domainName: '${config.domain}',
+ validation: acm.CertificateValidation.fromDns()
+ });
+
+ const listener = alb.addListener('HTTPS', {
+ port: 443,
+ certificates: [cert]
+ });
+
+ alb.addListener('HTTP', { port: 80 }).addAction('Redirect', {
+ action: elbv2.ListenerAction.redirect({ protocol: 'HTTPS', port: '443', permanent: true })
+ });`;
+ }
+ return `const listener = alb.addListener('HTTP', { port: 80 });`;
+ }
+
+ function generateDomainCode(config: DeployConfig): string {
+ if (!config.domain) return '';
+
+ const baseDomain = config.domain.split('.').slice(-2).join('.');
+ return `const zone = route53.HostedZone.fromLookup(stack, 'Zone', { domainName: '${baseDomain}' });
+ new route53.ARecord(stack, 'DNS', {
+ zone,
+ recordName: '${config.domain}',
+ target: route53.RecordTarget.fromAlias(new targets.LoadBalancerTarget(alb))
+ });`;
+ }
package/src/types.ts ADDED
@@ -0,0 +1,59 @@
+ export type ProjectType = 'nodejs' | 'springboot';
+ export type DatabaseType = 'postgresql' | 'mysql' | 'none';
+
+ export interface ProjectInfo {
+ type: ProjectType;
+ name: string;
+ port: number;
+ buildCommand: string;
+ startCommand: string;
+ artifactPath: string;
+ }
+
+ export interface AwsCredentials {
+ accessKeyId: string;
+ secretAccessKey: string;
+ region: string;
+ }
+
+ export interface Ec2Choice {
+ useExisting: boolean;
+ existingInstanceIds?: string[];
+ instanceType?: string;
+ minInstances?: number;
+ maxInstances?: number;
+ }
+
+ export interface DeployConfig {
+ credentials: AwsCredentials;
+ ec2: Ec2Choice;
+ useElasticIp: boolean;
+ database: DatabaseType;
+ dbInstanceClass?: string;
+ domain?: string;
+ useHttps: boolean;
+ }
+
+ export const PRICING = {
+ ec2: {
+ 't3.micro': 0.0104,
+ 't3.small': 0.0208,
+ 't3.medium': 0.0416,
+ 't3.large': 0.0832,
+ } as Record<string, number>,
+ elasticIp: {
+ attached: 0,
+ detached: 0.005,
+ perHour: 0.005,
+ note: 'Free when attached to running instance, $0.005/hr when detached'
+ },
+ rds: {
+ 'db.t3.micro': 0.017,
+ 'db.t3.small': 0.034,
+ 'db.t3.medium': 0.068,
+ } as Record<string, number>,
+ alb: {
+ perHour: 0.0225,
+ perLcu: 0.008,
+ }
+ };
package/tsconfig.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "compilerOptions": {
+ "target": "ES2020",
+ "module": "commonjs",
+ "outDir": "./dist",
+ "rootDir": "./src",
+ "strict": true,
+ "esModuleInterop": true,
+ "skipLibCheck": true,
+ "declaration": true
+ },
+ "include": ["src/**/*"]
+ }
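
For context, a minimal usage sketch of the deployInfra entry point added in src/infra.ts, wired against the ProjectInfo and DeployConfig shapes from src/types.ts. The import path, project values, credentials source, and region below are illustrative placeholders only and are not part of the published package.

// Illustrative sketch — placeholder values, not part of spense-core itself.
import { deployInfra } from './src/infra';            // path as added in this diff; the package's real entry point may differ
import { DeployConfig, ProjectInfo } from './src/types';

// Hypothetical Node.js project listening on port 3000.
const project: ProjectInfo = {
  type: 'nodejs',
  name: 'demo-api',
  port: 3000,
  buildCommand: 'npm run build',
  startCommand: 'node dist/server.js',
  artifactPath: 'dist',
};

// Hypothetical deployment config: new t3.small auto scaling group, PostgreSQL on db.t3.micro, plain HTTP.
const config: DeployConfig = {
  credentials: {
    accessKeyId: process.env.AWS_ACCESS_KEY_ID ?? '',
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY ?? '',
    region: 'us-east-1',
  },
  ec2: { useExisting: false, instanceType: 't3.small', minInstances: 1, maxInstances: 3 },
  useElasticIp: false,
  database: 'postgresql',
  dbInstanceClass: 'db.t3.micro',
  useHttps: false,
};

// Deploy and print the outputs returned by the CDK stack.
deployInfra(project, config).then(({ url, dbEndpoint }) => {
  console.log(`App URL: ${url}`);
  if (dbEndpoint) console.log(`Database endpoint: ${dbEndpoint}`);
});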