@vibecodemax/cli 0.1.2 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,630 @@
1
+ import * as fs from "node:fs";
2
+ import * as path from "node:path";
3
+ import * as crypto from "node:crypto";
4
+ import { S3Client, HeadBucketCommand, CreateBucketCommand, PutPublicAccessBlockCommand, PutBucketEncryptionCommand, PutBucketOwnershipControlsCommand, PutBucketPolicyCommand, PutBucketCorsCommand, GetBucketLocationCommand, GetPublicAccessBlockCommand, GetBucketEncryptionCommand, GetBucketOwnershipControlsCommand, GetBucketPolicyCommand, GetBucketCorsCommand, PutObjectCommand, ListObjectsV2Command, DeleteObjectCommand, GetObjectCommand, } from "@aws-sdk/client-s3";
5
+ import { STSClient, GetCallerIdentityCommand } from "@aws-sdk/client-sts";
6
// Project-relative path of the persisted setup configuration file.
const SETUP_CONFIG_PATH = path.join(".vibecodemax", "setup-config.json");
// Postfixes appended to the deterministic per-project bucket prefix.
const DEFAULT_BUCKETS = {
    public: "public-assets",
    private: "private-uploads",
};
// Fallback slug when nothing usable can be derived from flags/config/cwd.
const DEFAULT_PROJECT_SLUG = "vibecodemax";
// Fallback AWS region when none is supplied via flags or env files.
const DEFAULT_REGION = "us-east-1";
// Key prefix under which smoke-test probe objects are written.
const HEALTHCHECK_PREFIX = "_vibecodemax/healthcheck/default";
// Public-access-block settings for the private bucket: everything blocked.
const PUBLIC_ACCESS_BLOCK_PRIVATE = {
    BlockPublicAcls: true,
    IgnorePublicAcls: true,
    BlockPublicPolicy: true,
    RestrictPublicBuckets: true,
};
// Public bucket: ACLs stay blocked, but public bucket policies are allowed
// so the AllowPublicReadObjects policy can take effect.
const PUBLIC_ACCESS_BLOCK_PUBLIC = {
    BlockPublicAcls: true,
    IgnorePublicAcls: true,
    BlockPublicPolicy: false,
    RestrictPublicBuckets: false,
};
// Permissive CORS applied to both buckets (any origin; browser upload/read).
const BUCKET_CORS_CONFIGURATION = {
    CORSRules: [
        {
            AllowedOrigins: ["*"],
            AllowedMethods: ["PUT", "POST", "GET", "HEAD"],
            AllowedHeaders: ["*"],
            ExposeHeaders: ["ETag", "x-amz-request-id", "x-amz-id-2"],
            MaxAgeSeconds: 3000,
        },
    ],
};
// Guards derived names against S3 naming rules: 3-63 chars, lowercase
// alphanumeric/dot/hyphen, no leading "xn--", no "..", no trailing dot,
// and not shaped like an IPv4 address.
const BUCKET_NAME_REGEX = /^(?!xn--)(?!.*\.\.)(?!.*\.$)(?!\d+\.\d+\.\d+\.\d+$)[a-z0-9][a-z0-9.-]{1,61}[a-z0-9]$/;
38
/**
 * Write a value to stdout as a single line of compact JSON.
 * @param {unknown} value - Any JSON-serializable value.
 */
function printJson(value) {
    const serialized = JSON.stringify(value);
    process.stdout.write(`${serialized}\n`);
}
41
/**
 * Emit a machine-readable failure payload on stdout and terminate.
 * Never returns.
 * @param {string} code - Stable error code for callers to branch on.
 * @param {string} message - Human-readable description.
 * @param {number} [exitCode=1] - Process exit status.
 * @param {object} [extra={}] - Additional fields merged into the payload.
 */
function fail(code, message, exitCode = 1, extra = {}) {
    const payload = { ok: false, code, message, ...extra };
    printJson(payload);
    process.exit(exitCode);
}
45
/**
 * Read a UTF-8 text file, returning "" when it cannot be read
 * (missing file, permissions, …) instead of throwing.
 * @param {string} filePath
 * @returns {string} File contents, or "" on any read error.
 */
function readFileIfExists(filePath) {
    let content = "";
    try {
        content = fs.readFileSync(filePath, "utf8");
    }
    catch {
        // Best-effort read: absence is treated the same as empty.
    }
    return content;
}
53
/**
 * Parse dotenv-style text into a plain key/value map.
 * Blank lines and "#" comments are skipped; lines without "=" or with an
 * empty key are ignored. Keys and values are trimmed, and one pair of
 * matching surrounding quotes ('…' or "…") is stripped from values so
 * credentials pasted as KEY="value" are usable as-is.
 * @param {string} content - Raw dotenv file contents.
 * @returns {Record<string, string>}
 */
function parseDotEnv(content) {
    const env = {};
    for (const rawLine of content.split(/\r?\n/)) {
        const line = rawLine.trim();
        if (!line || line.startsWith("#"))
            continue;
        const eq = line.indexOf("=");
        if (eq === -1)
            continue;
        const key = line.slice(0, eq).trim();
        let value = line.slice(eq + 1).trim();
        if (!key)
            continue;
        // Unwrap exactly one layer of matching quotes, if present.
        const first = value[0];
        if (value.length >= 2 && (first === '"' || first === "'") && value.endsWith(first)) {
            value = value.slice(1, -1);
        }
        env[key] = value;
    }
    return env;
}
70
/**
 * Load env values from .env.local and .env.bootstrap.local in cwd.
 * Bootstrap values take precedence because they are spread last.
 * @param {string} [cwd=process.cwd()]
 * @returns {{envLocalPath: string, envBootstrapPath: string, values: Record<string, string>}}
 */
function loadLocalEnv(cwd = process.cwd()) {
    const envLocalPath = path.join(cwd, ".env.local");
    const envBootstrapPath = path.join(cwd, ".env.bootstrap.local");
    const values = {
        ...parseDotEnv(readFileIfExists(envLocalPath)),
        ...parseDotEnv(readFileIfExists(envBootstrapPath)),
    };
    return { envLocalPath, envBootstrapPath, values };
}
82
/**
 * Read and parse .vibecodemax/setup-config.json from cwd.
 * @param {string} [cwd=process.cwd()]
 * @returns {object} The parsed config, or {} when the file is missing,
 * unparseable, or not a plain JSON object.
 */
function readSetupConfig(cwd = process.cwd()) {
    const raw = readFileIfExists(path.join(cwd, SETUP_CONFIG_PATH)).trim();
    if (!raw) {
        return {};
    }
    let parsed;
    try {
        parsed = JSON.parse(raw);
    }
    catch {
        return {};
    }
    const isPlainObject = parsed !== null && typeof parsed === "object" && !Array.isArray(parsed);
    return isPlainObject ? parsed : {};
}
94
/**
 * True when value is a string containing at least one non-whitespace char.
 * @param {unknown} value
 * @returns {boolean}
 */
function isNonEmptyString(value) {
    if (typeof value !== "string") {
        return false;
    }
    return value.trim() !== "";
}
97
/**
 * Read a CLI flag value, returning its trimmed string form or "" when the
 * flag is absent or not a string.
 * @param {Record<string, unknown>} flags
 * @param {string} key
 * @returns {string}
 */
function readStringFlag(flags, key) {
    const value = flags[key];
    if (typeof value !== "string") {
        return "";
    }
    return value.trim();
}
101
/**
 * Merge key/value pairs into a dotenv file on disk.
 * Existing keys are preserved unless overridden by nextValues; output is
 * rewritten sorted by key with a trailing newline.
 * @param {string} filePath
 * @param {Record<string, string>} nextValues
 */
function mergeEnvFile(filePath, nextValues) {
    const merged = { ...parseDotEnv(readFileIfExists(filePath)), ...nextValues };
    const lines = Object.keys(merged)
        .sort()
        .map((key) => `${key}=${merged[key]}`);
    fs.writeFileSync(filePath, `${lines.join("\n")}\n`, "utf8");
}
108
/**
 * Normalize arbitrary input into a bucket-safe slug: lowercase, runs of
 * invalid characters collapsed to "-", edge hyphens stripped, capped at
 * 24 characters.
 * @param {unknown} input
 * @returns {string|null} The slug, or null for non-strings / empty results.
 */
function normalizeProjectSlug(input) {
    if (typeof input !== "string") {
        return null;
    }
    const slug = input
        .trim()
        .toLowerCase()
        .replace(/[^a-z0-9-]+/g, "-")
        .replace(/^-+|-+$/g, "")
        .replace(/-+/g, "-");
    if (!slug) {
        return null;
    }
    // Truncating to 24 chars can reintroduce a trailing hyphen, which would
    // make the derived bucket-name prefix invalid — strip it again.
    const truncated = slug.slice(0, 24).replace(/-+$/g, "");
    return truncated || null;
}
121
/**
 * Derive a normalized project slug, trying candidates in priority order:
 * the --project-slug flag, several setup-config name fields, the storage
 * section's projectSlug, the cwd's basename, then DEFAULT_PROJECT_SLUG.
 * @returns {string} The first candidate that normalizes successfully.
 */
function deriveProjectSlug(flags, setupConfig, cwd) {
    // Tolerate a missing or non-object "storage" section.
    const storageConfig = setupConfig.storage && typeof setupConfig.storage === "object" ? setupConfig.storage : {};
    const candidates = [
        readStringFlag(flags, "project-slug"),
        setupConfig.projectSlug,
        setupConfig.projectName,
        setupConfig.appName,
        setupConfig.name,
        storageConfig.projectSlug,
        path.basename(cwd),
        DEFAULT_PROJECT_SLUG,
    ];
    for (const candidate of candidates) {
        const normalized = normalizeProjectSlug(candidate);
        if (normalized)
            return normalized;
    }
    // Unreachable in practice (DEFAULT_PROJECT_SLUG always normalizes),
    // kept as a defensive fallback.
    return DEFAULT_PROJECT_SLUG;
}
140
/**
 * Validate a candidate AWS region identifier.
 * Accepts standard ids like "us-east-1" as well as multi-segment partition
 * regions such as "us-gov-west-1" (the previous pattern rejected those).
 * @param {unknown} value
 * @returns {string|null} The trimmed region id, or null when invalid.
 */
function normalizeRegion(value) {
    if (!isNonEmptyString(value))
        return null;
    const trimmed = value.trim();
    return /^[a-z]{2}(-[a-z]+)+-\d+$/.test(trimmed) ? trimmed : null;
}
146
/**
 * Resolve the AWS region with precedence: --region flag, AWS_REGION, then
 * AWS_DEFAULT_REGION from the local env files, then setup-config's
 * storage.region, finally DEFAULT_REGION. Invalid-looking values are
 * skipped rather than treated as errors.
 */
function resolveRegion(flags, envValues, setupConfig) {
    const storageConfig = setupConfig.storage && typeof setupConfig.storage === "object" ? setupConfig.storage : {};
    return (normalizeRegion(readStringFlag(flags, "region"))
        || normalizeRegion(envValues.AWS_REGION)
        || normalizeRegion(envValues.AWS_DEFAULT_REGION)
        || normalizeRegion(storageConfig.region)
        || DEFAULT_REGION);
}
154
/**
 * Temporary STS credentials have access key ids beginning with "ASIA"
 * and must be accompanied by a session token.
 * @param {string} accessKeyId
 * @returns {boolean}
 */
function requiresSessionToken(accessKeyId) {
    return accessKeyId.slice(0, 4) === "ASIA";
}
157
/**
 * Gather everything the storage commands need from disk: setup config,
 * local env files, resolved region and project slug, trimmed credentials,
 * plus bookkeeping of which required env keys are present vs missing.
 * Does not talk to AWS.
 * @param {Record<string, unknown>} flags - Parsed CLI flags.
 * @param {string} [cwd=process.cwd()]
 * @returns {object} Context with missingKeys/presentKeys for callers to
 * decide whether setup can proceed.
 */
function readAwsContext(flags, cwd = process.cwd()) {
    const setupConfig = readSetupConfig(cwd);
    const localEnv = loadLocalEnv(cwd);
    const accessKeyId = localEnv.values.AWS_ACCESS_KEY_ID || "";
    const secretAccessKey = localEnv.values.AWS_SECRET_ACCESS_KEY || "";
    const sessionToken = localEnv.values.AWS_SESSION_TOKEN || "";
    const region = resolveRegion(flags, localEnv.values, setupConfig);
    const projectSlug = deriveProjectSlug(flags, setupConfig, cwd);
    const missingKeys = [];
    const presentKeys = [];
    if (isNonEmptyString(accessKeyId))
        presentKeys.push("AWS_ACCESS_KEY_ID");
    else
        missingKeys.push("AWS_ACCESS_KEY_ID");
    if (isNonEmptyString(secretAccessKey))
        presentKeys.push("AWS_SECRET_ACCESS_KEY");
    else
        missingKeys.push("AWS_SECRET_ACCESS_KEY");
    if (isNonEmptyString(localEnv.values.AWS_REGION))
        presentKeys.push("AWS_REGION");
    else if (normalizeRegion(setupConfig.storage?.region)) {
        // region is supplied structurally; no missing key required
    }
    else {
        missingKeys.push("AWS_REGION");
    }
    // A session token is only mandatory for temporary ("ASIA…") keys; for
    // long-lived keys it is recorded as present when supplied.
    if (isNonEmptyString(accessKeyId) && requiresSessionToken(accessKeyId)) {
        if (isNonEmptyString(sessionToken))
            presentKeys.push("AWS_SESSION_TOKEN");
        else
            missingKeys.push("AWS_SESSION_TOKEN");
    }
    else if (isNonEmptyString(sessionToken)) {
        presentKeys.push("AWS_SESSION_TOKEN");
    }
    return {
        cwd,
        setupConfig,
        localEnv,
        region,
        projectSlug,
        accessKeyId: accessKeyId.trim(),
        secretAccessKey: secretAccessKey.trim(),
        sessionToken: sessionToken.trim(),
        missingKeys,
        presentKeys,
    };
}
205
/**
 * Derive stable, account-scoped bucket names of the form
 * "<slug>-<sha1-8>-<postfix>", where the 8-hex suffix hashes
 * "<slug>:<accountId>" so the same project+account always maps to the
 * same buckets. Names over S3's 63-char limit are shortened by
 * truncating the slug first. Exits (via fail) if a derived name is
 * still invalid per BUCKET_NAME_REGEX.
 * @param {{accountId: string|null, projectSlug: string}} param0
 * @returns {{public: string, private: string}}
 */
function deterministicBuckets({ accountId, projectSlug }) {
    const slug = normalizeProjectSlug(projectSlug) || DEFAULT_PROJECT_SLUG;
    // "acct" placeholder keeps the hash deterministic when accountId is null.
    const suffixSeed = `${slug}:${accountId || "acct"}`;
    const suffix = crypto.createHash("sha1").update(suffixSeed).digest("hex").slice(0, 8);
    const prefixBase = `${slug}-${suffix}`;
    const makeName = (postfix) => {
        let candidate = `${prefixBase}-${postfix}`.replace(/-+/g, "-");
        if (candidate.length <= 63)
            return candidate;
        // Too long: shave the overflow off the slug (keeping at least 3
        // chars), then hard-cap at 63 and drop trailing hyphens.
        const overflow = candidate.length - 63;
        const shortenedSlug = slug.slice(0, Math.max(3, slug.length - overflow));
        candidate = `${shortenedSlug}-${suffix}-${postfix}`.replace(/-+/g, "-");
        return candidate.slice(0, 63).replace(/-+$/g, "");
    };
    const buckets = {
        public: makeName(DEFAULT_BUCKETS.public),
        private: makeName(DEFAULT_BUCKETS.private),
    };
    for (const [key, value] of Object.entries(buckets)) {
        if (!BUCKET_NAME_REGEX.test(value)) {
            fail("INVALID_BUCKET_NAME", `Derived ${key} bucket name is invalid.`, 1, { bucket: value });
        }
    }
    return buckets;
}
230
/**
 * Build S3 and STS clients that share one region + credentials config.
 * sessionToken is only included when truthy.
 * @param {string} region
 * @param {{accessKeyId: string, secretAccessKey: string, sessionToken?: string}} credentials
 * @returns {{s3Client: S3Client, stsClient: STSClient}}
 */
function createAwsClients(region, credentials) {
    const credentialConfig = {
        accessKeyId: credentials.accessKeyId,
        secretAccessKey: credentials.secretAccessKey,
    };
    if (credentials.sessionToken) {
        credentialConfig.sessionToken = credentials.sessionToken;
    }
    const config = { region, credentials: credentialConfig };
    return {
        s3Client: new S3Client(config),
        stsClient: new STSClient(config),
    };
}
244
/**
 * Verify the supplied credentials by calling STS GetCallerIdentity.
 * @returns {Promise<{accountId: string|null, arn: string|null, userId: string|null}>}
 * Exits the process with AWS_AUTH_FAILED on any error, so callers may
 * assume a resolved identity on return.
 */
async function validateCredentials(region, credentials) {
    try {
        const { stsClient } = createAwsClients(region, credentials);
        const identity = await stsClient.send(new GetCallerIdentityCommand({}));
        return {
            accountId: identity.Account || null,
            arn: identity.Arn || null,
            userId: identity.UserId || null,
        };
    }
    catch (error) {
        const message = error instanceof Error ? error.message : "AWS credential validation failed.";
        // fail() exits the process; this path never returns a value.
        fail("AWS_AUTH_FAILED", message, 1, { region });
    }
}
259
/**
 * Resolve the region a bucket actually lives in, normalizing S3's legacy
 * GetBucketLocation responses: an empty/"None" LocationConstraint means
 * us-east-1 and the legacy "EU" constraint means eu-west-1.
 * @param {S3Client} s3Client
 * @param {string} bucket
 * @returns {Promise<string|null>} The region, or null when lookup fails.
 */
async function resolveBucketRegion(s3Client, bucket) {
    try {
        const response = await s3Client.send(new GetBucketLocationCommand({ Bucket: bucket }));
        const location = response.LocationConstraint ? String(response.LocationConstraint) : "";
        // Strict equality (was loose ==); behavior unchanged.
        if (!location || location === "None")
            return "us-east-1";
        if (location === "EU")
            return "eu-west-1";
        return location;
    }
    catch {
        // Best-effort lookup: callers fall back to their own region hint.
        return null;
    }
}
273
/**
 * Extract an HTTP status code from an AWS SDK error, checking the v3
 * $metadata shape first, then a bare statusCode property.
 * @param {unknown} error
 * @returns {number|null}
 */
function getAwsStatus(error) {
    const metadataStatus = error?.$metadata?.httpStatusCode;
    if (metadataStatus)
        return metadataStatus;
    return error?.statusCode || null;
}
277
/**
 * Extract an error code from an AWS SDK error, preferring the v3 name,
 * then legacy Code/code properties.
 * @param {unknown} error
 * @returns {string} The code, or "UnknownError" when none is found.
 */
function getAwsCode(error) {
    for (const candidate of [error?.name, error?.Code, error?.code]) {
        if (candidate)
            return candidate;
    }
    return "UnknownError";
}
281
/**
 * True when an error indicates the bucket does not exist
 * (HTTP 404, NotFound, or NoSuchBucket).
 */
function isNotFoundBucketError(error) {
    if (getAwsStatus(error) === 404)
        return true;
    const code = getAwsCode(error);
    return code === "NotFound" || code === "NoSuchBucket";
}
286
/**
 * True when an error indicates the bucket exists but in another region
 * (HTTP 301 or one of S3's redirect/location error codes).
 */
function isRegionMismatchError(error) {
    if (getAwsStatus(error) === 301)
        return true;
    const mismatchCodes = [
        "PermanentRedirect",
        "AuthorizationHeaderMalformed",
        "IncorrectEndpoint",
        "IllegalLocationConstraintException",
    ];
    return mismatchCodes.includes(getAwsCode(error));
}
291
/**
 * Ensure one bucket exists in the selected region, creating it if absent.
 * Exits (via fail) on a region mismatch, an unexpected inspection error,
 * or a creation failure other than BucketAlreadyOwnedByYou.
 * @returns {Promise<{bucket: string, created: boolean, updated: boolean, verified: boolean, region: string}>}
 */
async function ensureSingleBucket(s3Client, bucket, region) {
    let created = false;
    try {
        await s3Client.send(new HeadBucketCommand({ Bucket: bucket }));
    }
    catch (error) {
        // Bucket exists elsewhere: report the real region and abort.
        if (isRegionMismatchError(error)) {
            const actualRegion = await resolveBucketRegion(s3Client, bucket);
            fail("AWS_REGION_MISMATCH", `Bucket ${bucket} exists in a different region.`, 1, {
                bucket,
                selectedRegion: region,
                actualRegion,
            });
        }
        // Anything other than "not found" is a hard error.
        if (!isNotFoundBucketError(error)) {
            const message = error instanceof Error ? error.message : `Failed to inspect bucket ${bucket}.`;
            fail("AWS_BUCKET_CREATE_FAILED", message, 1, { bucket, awsCode: getAwsCode(error), awsStatus: getAwsStatus(error) });
        }
        // us-east-1 must NOT receive a LocationConstraint per the S3 API.
        const input = region === "us-east-1"
            ? { Bucket: bucket }
            : { Bucket: bucket, CreateBucketConfiguration: { LocationConstraint: region } };
        try {
            await s3Client.send(new CreateBucketCommand(input));
            created = true;
        }
        catch (createError) {
            // Losing a create race to ourselves is fine; anything else aborts.
            const code = getAwsCode(createError);
            if (code !== "BucketAlreadyOwnedByYou") {
                const message = createError instanceof Error ? createError.message : `Failed to create bucket ${bucket}.`;
                fail("AWS_BUCKET_CREATE_FAILED", message, 1, { bucket, awsCode: code, awsStatus: getAwsStatus(createError) });
            }
        }
    }
    return {
        bucket,
        created,
        updated: !created,
        verified: true,
        region: (await resolveBucketRegion(s3Client, bucket)) || region,
    };
}
332
/**
 * Ensure both project buckets exist, sequentially (public first).
 * @returns {Promise<{public: object, private: object}>} Per-bucket results
 * from ensureSingleBucket.
 */
async function ensureTwoBuckets(region, credentials, buckets) {
    const { s3Client } = createAwsClients(region, credentials);
    const publicResult = await ensureSingleBucket(s3Client, buckets.public, region);
    const privateResult = await ensureSingleBucket(s3Client, buckets.private, region);
    return { public: publicResult, private: privateResult };
}
339
/**
 * Apply baseline safety settings to both buckets: enforced bucket-owner
 * object ownership (disables ACLs), AES256 default encryption, and the
 * appropriate public-access-block (permissive policy flags on the public
 * bucket, fully blocked on the private one). Exits on the first failure.
 */
async function applyBucketSafety(region, credentials, buckets) {
    const { s3Client } = createAwsClients(region, credentials);
    const targets = [
        { name: buckets.public, block: PUBLIC_ACCESS_BLOCK_PUBLIC },
        { name: buckets.private, block: PUBLIC_ACCESS_BLOCK_PRIVATE },
    ];
    for (const target of targets) {
        try {
            await s3Client.send(new PutBucketOwnershipControlsCommand({
                Bucket: target.name,
                OwnershipControls: { Rules: [{ ObjectOwnership: "BucketOwnerEnforced" }] },
            }));
            await s3Client.send(new PutBucketEncryptionCommand({
                Bucket: target.name,
                ServerSideEncryptionConfiguration: {
                    Rules: [{ ApplyServerSideEncryptionByDefault: { SSEAlgorithm: "AES256" } }],
                },
            }));
            await s3Client.send(new PutPublicAccessBlockCommand({
                Bucket: target.name,
                PublicAccessBlockConfiguration: target.block,
            }));
        }
        catch (error) {
            const message = error instanceof Error ? error.message : `Failed to apply safety defaults to ${target.name}.`;
            fail("AWS_POLICY_APPLY_FAILED", message, 1, { bucket: target.name, awsCode: getAwsCode(error), awsStatus: getAwsStatus(error) });
        }
    }
}
368
/**
 * Apply the shared permissive CORS configuration to both buckets.
 * Exits (via fail) on the first bucket that rejects the update.
 */
async function applyBucketCors(region, credentials, buckets) {
    const { s3Client } = createAwsClients(region, credentials);
    for (const bucket of [buckets.public, buckets.private]) {
        try {
            await s3Client.send(new PutBucketCorsCommand({
                Bucket: bucket,
                CORSConfiguration: BUCKET_CORS_CONFIGURATION,
            }));
        }
        catch (error) {
            const message = error instanceof Error ? error.message : `Failed to apply CORS to ${bucket}.`;
            fail("AWS_POLICY_APPLY_FAILED", message, 1, { bucket, awsCode: getAwsCode(error), awsStatus: getAwsStatus(error) });
        }
    }
}
383
/**
 * Bucket policy for the public bucket: deny all non-TLS access and allow
 * anonymous s3:GetObject on every object.
 * @param {string} bucket - Bucket name (not an ARN).
 * @returns {object} Policy document ready for JSON.stringify.
 */
function buildPublicPolicy(bucket) {
    const bucketArn = `arn:aws:s3:::${bucket}`;
    const objectArn = `${bucketArn}/*`;
    const denyInsecureTransport = {
        Sid: "DenyInsecureTransport",
        Effect: "Deny",
        Principal: "*",
        Action: "s3:*",
        Resource: [bucketArn, objectArn],
        Condition: { Bool: { "aws:SecureTransport": "false" } },
    };
    const allowPublicRead = {
        Sid: "AllowPublicReadObjects",
        Effect: "Allow",
        Principal: "*",
        Action: "s3:GetObject",
        Resource: objectArn,
    };
    return {
        Version: "2012-10-17",
        Statement: [denyInsecureTransport, allowPublicRead],
    };
}
407
/**
 * Bucket policy for the private bucket: a single statement denying all
 * non-TLS access. No public read is granted.
 * @param {string} bucket - Bucket name (not an ARN).
 * @returns {object} Policy document ready for JSON.stringify.
 */
function buildPrivatePolicy(bucket) {
    const bucketArn = `arn:aws:s3:::${bucket}`;
    const objectArn = `${bucketArn}/*`;
    const denyInsecureTransport = {
        Sid: "DenyInsecureTransport",
        Effect: "Deny",
        Principal: "*",
        Action: "s3:*",
        Resource: [bucketArn, objectArn],
        Condition: { Bool: { "aws:SecureTransport": "false" } },
    };
    return {
        Version: "2012-10-17",
        Statement: [denyInsecureTransport],
    };
}
424
/**
 * Attach the public-read policy to the public bucket and the TLS-only
 * policy to the private bucket. Exits (via fail) if either put fails.
 */
async function applyBucketPolicies(region, credentials, buckets) {
    const { s3Client } = createAwsClients(region, credentials);
    try {
        await s3Client.send(new PutBucketPolicyCommand({ Bucket: buckets.public, Policy: JSON.stringify(buildPublicPolicy(buckets.public)) }));
        await s3Client.send(new PutBucketPolicyCommand({ Bucket: buckets.private, Policy: JSON.stringify(buildPrivatePolicy(buckets.private)) }));
    }
    catch (error) {
        const message = error instanceof Error ? error.message : "Failed to apply bucket policies.";
        fail("AWS_POLICY_APPLY_FAILED", message, 1, { awsCode: getAwsCode(error), awsStatus: getAwsStatus(error) });
    }
}
435
/**
 * Collect a bucket's current configuration for verification: region,
 * public-access-block, default encryption algorithm, object ownership,
 * whether a policy exists, and CORS rules.
 * NOTE(review): the Get* calls are not wrapped, so a bucket with no
 * policy or CORS configured will throw out of this function rather than
 * return partial evidence — callers rely on setup having applied both.
 * @returns {Promise<object>} Evidence consumed by verifyTwoBuckets.
 */
async function getBucketEvidence(s3Client, bucket, regionHint) {
    await s3Client.send(new HeadBucketCommand({ Bucket: bucket }));
    const bucketRegion = (await resolveBucketRegion(s3Client, bucket)) || regionHint;
    const publicAccessBlockResponse = await s3Client.send(new GetPublicAccessBlockCommand({ Bucket: bucket }));
    const encryptionResponse = await s3Client.send(new GetBucketEncryptionCommand({ Bucket: bucket }));
    const ownershipResponse = await s3Client.send(new GetBucketOwnershipControlsCommand({ Bucket: bucket }));
    const policyResponse = await s3Client.send(new GetBucketPolicyCommand({ Bucket: bucket }));
    const corsResponse = await s3Client.send(new GetBucketCorsCommand({ Bucket: bucket }));
    const policyText = typeof policyResponse.Policy === "string" ? policyResponse.Policy : "";
    return {
        bucketRegion,
        publicAccessBlock: publicAccessBlockResponse.PublicAccessBlockConfiguration || null,
        encryption: encryptionResponse.ServerSideEncryptionConfiguration?.Rules?.[0]?.ApplyServerSideEncryptionByDefault?.SSEAlgorithm || null,
        ownershipControls: ownershipResponse.OwnershipControls?.Rules?.[0]?.ObjectOwnership || null,
        policyApplied: Boolean(policyText),
        // String match against the exact JSON.stringify output of
        // buildPublicPolicy (and an array-form variant).
        hasPublicReadPolicy: policyText.includes("\"Action\":\"s3:GetObject\"") || policyText.includes("\"Action\":[\"s3:GetObject\"]"),
        corsRules: corsResponse.CORSRules || [],
    };
}
454
/**
 * Verify both buckets match the intended end state: correct region,
 * BucketOwnerEnforced ownership, AES256 encryption, the expected
 * public-access-block shape, a policy present, public-read only on the
 * public bucket, and at least one CORS rule. Exits (via fail) with
 * AWS_VERIFY_FAILED when either bucket deviates.
 */
async function verifyTwoBuckets(region, credentials, buckets) {
    const { s3Client } = createAwsClients(region, credentials);
    const publicEvidence = await getBucketEvidence(s3Client, buckets.public, region);
    const privateEvidence = await getBucketEvidence(s3Client, buckets.private, region);
    // Public bucket: policy flags must be explicitly false so the
    // public-read policy can apply.
    const publicOk = publicEvidence.bucketRegion === region &&
        publicEvidence.ownershipControls === "BucketOwnerEnforced" &&
        publicEvidence.encryption === "AES256" &&
        Boolean(publicEvidence.publicAccessBlock?.BlockPublicAcls) &&
        Boolean(publicEvidence.publicAccessBlock?.IgnorePublicAcls) &&
        publicEvidence.publicAccessBlock?.BlockPublicPolicy === false &&
        publicEvidence.publicAccessBlock?.RestrictPublicBuckets === false &&
        publicEvidence.policyApplied === true &&
        publicEvidence.hasPublicReadPolicy === true &&
        Array.isArray(publicEvidence.corsRules) && publicEvidence.corsRules.length > 0;
    // Private bucket: all four public-access blocks on, no public read.
    const privateOk = privateEvidence.bucketRegion === region &&
        privateEvidence.ownershipControls === "BucketOwnerEnforced" &&
        privateEvidence.encryption === "AES256" &&
        Boolean(privateEvidence.publicAccessBlock?.BlockPublicAcls) &&
        Boolean(privateEvidence.publicAccessBlock?.IgnorePublicAcls) &&
        Boolean(privateEvidence.publicAccessBlock?.BlockPublicPolicy) &&
        Boolean(privateEvidence.publicAccessBlock?.RestrictPublicBuckets) &&
        privateEvidence.policyApplied === true &&
        privateEvidence.hasPublicReadPolicy === false &&
        Array.isArray(privateEvidence.corsRules) && privateEvidence.corsRules.length > 0;
    if (!publicOk || !privateOk) {
        fail("AWS_VERIFY_FAILED", "AWS S3 bucket configuration verification failed.", 1, {
            publicBucket: buckets.public,
            privateBucket: buckets.private,
        });
    }
}
485
/**
 * Build a virtual-hosted-style HTTPS URL for an S3 object, percent-encoding
 * each key segment while preserving "/" separators. us-east-1 uses the
 * legacy global endpoint, all other regions the regional endpoint.
 * @param {string} bucket
 * @param {string} region
 * @param {string} key
 * @returns {string}
 */
function buildObjectUrl(bucket, region, key) {
    const encodedKey = key
        .split("/")
        .map((segment) => encodeURIComponent(segment))
        .join("/");
    const host = region === "us-east-1"
        ? `${bucket}.s3.amazonaws.com`
        : `${bucket}.s3.${region}.amazonaws.com`;
    return `https://${host}/${encodedKey}`;
}
491
/**
 * End-to-end S3 smoke test: upload a probe object to each bucket, list
 * them back, confirm the public probe is readable over anonymous HTTPS
 * while the private probe is not, read the private probe via the SDK,
 * then delete both probes.
 * NOTE(review): fail() exits before the delete step, so probe objects are
 * left behind under HEALTHCHECK_PREFIX when a check fails — confirm this
 * is acceptable.
 * @returns {Promise<object>} Structured success payload for printJson.
 */
async function smokeTestBuckets(region, credentials, buckets) {
    const { s3Client } = createAwsClients(region, credentials);
    // Timestamp-based run id keeps concurrent/repeated runs separate.
    const runId = `run_${Date.now()}`;
    const prefix = `${HEALTHCHECK_PREFIX}/${runId}`;
    const publicKey = `${prefix}/public-probe.txt`;
    const privateKey = `${prefix}/private-probe.txt`;
    await s3Client.send(new PutObjectCommand({ Bucket: buckets.public, Key: publicKey, Body: "public healthcheck probe", ContentType: "text/plain" }));
    await s3Client.send(new PutObjectCommand({ Bucket: buckets.private, Key: privateKey, Body: "private healthcheck probe", ContentType: "text/plain" }));
    const publicList = await s3Client.send(new ListObjectsV2Command({ Bucket: buckets.public, Prefix: prefix }));
    const privateList = await s3Client.send(new ListObjectsV2Command({ Bucket: buckets.private, Prefix: prefix }));
    const listPassed = Number(publicList.KeyCount || 0) > 0 && Number(privateList.KeyCount || 0) > 0;
    if (!listPassed) {
        fail("AWS_SMOKE_TEST_FAILED", "AWS S3 smoke-test list operation did not return the uploaded healthcheck objects.", 1, { prefix });
    }
    // Anonymous HTTPS reads: public must succeed, private must be blocked.
    const publicUrl = buildObjectUrl(buckets.public, region, publicKey);
    const privateUrl = buildObjectUrl(buckets.private, region, privateKey);
    const publicReadResponse = await fetch(publicUrl);
    const privateReadResponse = await fetch(privateUrl);
    const publicRead = publicReadResponse.ok;
    const privateReadBlocked = !privateReadResponse.ok;
    if (!publicRead || !privateReadBlocked) {
        fail("AWS_SMOKE_TEST_FAILED", "AWS S3 smoke-test public/private read checks failed.", 1, {
            publicReadStatus: publicReadResponse.status,
            privateReadStatus: privateReadResponse.status,
        });
    }
    // Authenticated SDK read of the private probe (throws on failure).
    await s3Client.send(new GetObjectCommand({ Bucket: buckets.private, Key: privateKey }));
    // Clean up both probe objects.
    await s3Client.send(new DeleteObjectCommand({ Bucket: buckets.public, Key: publicKey }));
    await s3Client.send(new DeleteObjectCommand({ Bucket: buckets.private, Key: privateKey }));
    return {
        ok: true,
        command: "storage smoke-test-s3",
        runId,
        prefix,
        buckets,
        upload: true,
        list: true,
        publicRead: true,
        privateRead: true,
        delete: true,
        publicObjectUrl: publicUrl,
    };
}
534
/**
 * CLI entry: report whether the local environment is ready for S3 setup.
 * Prints a not-ready payload (still ok: true) listing missing env keys,
 * or — when all keys are present — validates the credentials against STS
 * and prints the deterministic bucket plan.
 * @param {Record<string, unknown>} flags - Parsed CLI flags.
 */
export async function checkS3Context(flags) {
    const context = readAwsContext(flags);
    if (context.missingKeys.length > 0) {
        printJson({
            ok: true,
            command: "storage check-s3-context",
            ready: false,
            missingKeys: context.missingKeys,
            presentKeys: context.presentKeys,
            region: context.region,
            expectedExisting: false,
            checkedFiles: [path.basename(context.localEnv.envBootstrapPath), path.basename(context.localEnv.envLocalPath)],
        });
        return;
    }
    // Exits with AWS_AUTH_FAILED if the credentials are rejected.
    const identity = await validateCredentials(context.region, {
        accessKeyId: context.accessKeyId,
        secretAccessKey: context.secretAccessKey,
        ...(context.sessionToken ? { sessionToken: context.sessionToken } : {}),
    });
    const buckets = deterministicBuckets({ accountId: identity.accountId, projectSlug: context.projectSlug });
    printJson({
        ok: true,
        command: "storage check-s3-context",
        ready: true,
        missingKeys: [],
        presentKeys: context.presentKeys,
        region: context.region,
        expectedExisting: false,
        identity,
        bucketPlan: buckets,
        bucketSource: "generated_deterministic",
        checkedFiles: [path.basename(context.localEnv.envBootstrapPath), path.basename(context.localEnv.envLocalPath)],
    });
}
569
/**
 * CLI entry: full S3 provisioning pipeline. Validates env + credentials,
 * derives deterministic bucket names, creates/verifies both buckets,
 * applies safety defaults, CORS and policies, re-verifies the end state,
 * then persists region and bucket names to .env.local and prints a
 * success payload. Any step failure exits via fail().
 * @param {Record<string, unknown>} flags - Parsed CLI flags.
 */
export async function setupS3Storage(flags) {
    const context = readAwsContext(flags);
    if (context.missingKeys.length > 0) {
        fail("MISSING_ENV", `Missing required AWS values: ${context.missingKeys.join(", ")}. Add them to .env.bootstrap.local.`);
    }
    const credentials = {
        accessKeyId: context.accessKeyId,
        secretAccessKey: context.secretAccessKey,
        ...(context.sessionToken ? { sessionToken: context.sessionToken } : {}),
    };
    const identity = await validateCredentials(context.region, credentials);
    const buckets = deterministicBuckets({ accountId: identity.accountId, projectSlug: context.projectSlug });
    const bucketResults = await ensureTwoBuckets(context.region, credentials, buckets);
    // Order matters: ownership/encryption/PAB before CORS and policies,
    // since the public policy requires the relaxed public-access-block.
    await applyBucketSafety(context.region, credentials, buckets);
    await applyBucketCors(context.region, credentials, buckets);
    await applyBucketPolicies(context.region, credentials, buckets);
    await verifyTwoBuckets(context.region, credentials, buckets);
    // Persist outputs so later commands (e.g. smoke test) can find them.
    mergeEnvFile(context.localEnv.envLocalPath, {
        AWS_REGION: context.region,
        AWS_S3_PUBLIC_BUCKET: buckets.public,
        AWS_S3_PRIVATE_BUCKET: buckets.private,
    });
    printJson({
        ok: true,
        command: "storage setup-s3",
        region: context.region,
        identity,
        bucketSource: "generated_deterministic",
        buckets,
        bucketsConfigured: [bucketResults.public, bucketResults.private],
        safetyDefaultsApplied: true,
        corsApplied: true,
        policiesApplied: true,
        verified: true,
        envWritten: ["AWS_REGION", "AWS_S3_PUBLIC_BUCKET", "AWS_S3_PRIVATE_BUCKET"],
    });
}
606
/**
 * CLI entry: run the S3 smoke test against the buckets recorded in
 * .env.local by a prior setup run. Exits with MISSING_ENV when
 * credentials or bucket names are absent, AWS_AUTH_FAILED when the
 * credentials are rejected, or AWS_SMOKE_TEST_FAILED on probe failures.
 * @param {Record<string, unknown>} flags - Parsed CLI flags.
 */
export async function smokeTestS3(flags) {
    const context = readAwsContext(flags);
    const publicBucket = context.localEnv.values.AWS_S3_PUBLIC_BUCKET || "";
    const privateBucket = context.localEnv.values.AWS_S3_PRIVATE_BUCKET || "";
    if (context.missingKeys.length > 0) {
        fail("MISSING_ENV", `Missing required AWS values: ${context.missingKeys.join(", ")}. Add them to .env.bootstrap.local.`);
    }
    if (!isNonEmptyString(publicBucket) || !isNonEmptyString(privateBucket)) {
        fail("MISSING_ENV", "AWS_S3_PUBLIC_BUCKET or AWS_S3_PRIVATE_BUCKET is missing. Run storage setup first so .env.local is populated.");
    }
    const credentials = {
        accessKeyId: context.accessKeyId,
        secretAccessKey: context.secretAccessKey,
        ...(context.sessionToken ? { sessionToken: context.sessionToken } : {}),
    };
    // Identity result unused here; the call exits on invalid credentials.
    await validateCredentials(context.region, credentials);
    const result = await smokeTestBuckets(context.region, credentials, {
        public: publicBucket.trim(),
        private: privateBucket.trim(),
    });
    printJson(result);
}
628
/**
 * Test-only wrapper exposing deterministicBuckets with positional
 * arguments. Not part of the public CLI surface.
 * @param {string} projectSlug
 * @param {string|null} accountId
 * @returns {{public: string, private: string}}
 */
export function __testOnlyDeterministicBuckets(projectSlug, accountId) {
    return deterministicBuckets({ projectSlug, accountId });
}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vibecodemax/cli",
3
- "version": "0.1.2",
3
+ "version": "0.1.3",
4
4
  "description": "VibeCodeMax CLI — local provider setup for bootstrap and project configuration",
5
5
  "type": "module",
6
6
  "bin": {
@@ -13,7 +13,8 @@
13
13
  ],
14
14
  "scripts": {
15
15
  "build": "tsc -p tsconfig.json",
16
- "typecheck": "tsc -p tsconfig.json --noEmit"
16
+ "typecheck": "tsc -p tsconfig.json --noEmit",
17
+ "test": "npm run build && node --test test/*.test.mjs"
17
18
  },
18
19
  "engines": {
19
20
  "node": ">=20"
@@ -31,5 +32,9 @@
31
32
  "homepage": "https://github.com/VibeCodeMax/cli#readme",
32
33
  "bugs": {
33
34
  "url": "https://github.com/VibeCodeMax/cli/issues"
35
+ },
36
+ "dependencies": {
37
+ "@aws-sdk/client-s3": "^3.1029.0",
38
+ "@aws-sdk/client-sts": "^3.1029.0"
34
39
  }
35
40
  }