hatchkit 0.1.42 → 0.1.43
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/adopt.d.ts +77 -0
- package/dist/adopt.d.ts.map +1 -1
- package/dist/adopt.js +395 -157
- package/dist/adopt.js.map +1 -1
- package/dist/provision/s3-buckets.d.ts.map +1 -1
- package/dist/provision/s3-buckets.js +44 -24
- package/dist/provision/s3-buckets.js.map +1 -1
- package/dist/provision/write-env.d.ts +6 -0
- package/dist/provision/write-env.d.ts.map +1 -1
- package/dist/provision/write-env.js +17 -0
- package/dist/provision/write-env.js.map +1 -1
- package/dist/scaffold/build-pipeline.d.ts +26 -2
- package/dist/scaffold/build-pipeline.d.ts.map +1 -1
- package/dist/scaffold/build-pipeline.js +159 -6
- package/dist/scaffold/build-pipeline.js.map +1 -1
- package/dist/templates/build-pipeline/Dockerfile.nextjs-monorepo.hbs +107 -0
- package/dist/templates/build-pipeline/docker-compose.yml.hbs +14 -2
- package/package.json +1 -1
package/dist/adopt.js
CHANGED
@@ -33,7 +33,7 @@
  * a second run on the same dir notices the existing manifest and
  * exits early with a "use `hatchkit update` instead" hint.
  */
-import { existsSync, readFileSync, readdirSync } from "node:fs";
+import { existsSync, readFileSync, readdirSync, statSync } from "node:fs";
 import { join, relative } from "node:path";
 import { Separator, confirm, input, select } from "@inquirer/prompts";
 import chalk from "chalk";
@@ -43,6 +43,7 @@ import { pushInitialBranch } from "./deploy/github.js";
 import { pushProjectKeyToCoolify, pushProjectKeyToGh } from "./deploy/keys.js";
 import { handleAdoptFailure } from "./deploy/rollback.js";
 import { runProvision } from "./provision/index.js";
+import { readEnvKeys } from "./provision/write-env.js";
 import { detectBuildPipeline, scaffoldBuildPipeline } from "./scaffold/build-pipeline.js";
 import { MANIFEST_FILENAME, readManifest, writeManifest, } from "./scaffold/manifest.js";
 import { installCancelHandler, isCancelInProgress, uninstallCancelHandler, } from "./utils/cancel-handler.js";
@@ -143,7 +144,13 @@ export async function runAdopt(cwd, opts = {}) {
         // private one and produces "permission denied" on deploy.
         isPrivate: state.gitRemoteIsPrivate ?? true,
         appPort: "3000",
-
+        // Pipeline scaffold defaults OFF when the layout is unrecognised.
+        // The Dockerfile templates assume a single-package project rooted
+        // at /; for a workspace with no standard server/client dirs the
+        // generated files build the wrong thing (or nothing). User can
+        // still flip this on in the review loop — buildAdoptGroups parks
+        // the cursor on the row in this case so the choice is explicit.
+        scaffoldBuildPipeline: !state.unknownWorkspaceLayout,
         // Provisioning is opt-in. Each service mints real resources on a
         // third-party (GlitchTip project, OpenPanel project, Resend API
         // key) and cleaning those up after the fact is a chore — better
@@ -209,7 +216,7 @@ export async function runAdopt(cwd, opts = {}) {
 // ---------------------------------------------------------------------------
 // Detection
 // ---------------------------------------------------------------------------
-async function detectProject(projectDir) {
+export async function detectProject(projectDir) {
     const hasManifest = existsSync(join(projectDir, MANIFEST_FILENAME));
     const existingManifest = hasManifest ? (readManifest(projectDir) ?? undefined) : undefined;
     let packageName;
@@ -228,6 +235,31 @@ async function detectProject(projectDir) {
     catch {
         // No package.json at root — that's fine for a non-Node project.
     }
+    // Workspace markers — pnpm, yarn/npm, turbo, lerna, rush. When a
+    // marker is present but the standard server/client dir scan below
+    // turns up nothing, we're looking at a layout the scaffolder's
+    // surface-based Dockerfile templates can't handle. Flagged below as
+    // `unknownWorkspaceLayout`.
+    const workspaceMarkers = [
+        "pnpm-workspace.yaml",
+        "pnpm-workspace.yml",
+        "turbo.json",
+        "lerna.json",
+        "rush.json",
+    ];
+    let hasWorkspaceMarker = workspaceMarkers.some((f) => existsSync(join(projectDir, f)));
+    if (!hasWorkspaceMarker) {
+        // npm/yarn workspaces live as a `workspaces` field inside the root
+        // package.json (string array or { packages: [...] } object).
+        try {
+            const rootPkg = JSON.parse(readFileSync(join(projectDir, "package.json"), "utf-8"));
+            if (rootPkg.workspaces)
+                hasWorkspaceMarker = true;
+        }
+        catch {
+            // No / unreadable root package.json — leave hasWorkspaceMarker false.
+        }
+    }
     // Walk a generous set of common monorepo layouts.
     const serverDir = firstExisting(projectDir, [
         "packages/server",
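Note: the `rootPkg.workspaces` truthiness check above accepts both shapes npm and yarn allow for that field — the glob patterns below are illustrative, not taken from this package:

    // Array form (npm, yarn classic):
    //   { "workspaces": ["packages/*"] }
    // Object form ({ packages: [...] }):
    //   { "workspaces": { "packages": ["apps/*", "libs/*"] } }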
@@ -349,6 +381,10 @@ async function detectProject(projectDir) {
             // undefined and let the stepper fall back to a sensible default.
         }
     }
+    const unknownWorkspaceLayout = hasWorkspaceMarker && !serverDir && !clientDir;
+    const standaloneBuildCandidates = unknownWorkspaceLayout
+        ? findStandaloneBuildCandidates(projectDir)
+        : [];
     return {
         projectDir,
         packageName,
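Note: a worked trace of the new flag, under an assumed layout not taken from this diff:

    // root turbo.json present, but packages live in dirs that miss every
    // scanned server/client candidate:
    //   hasWorkspaceMarker = true, serverDir = undefined, clientDir = undefined
    //   → unknownWorkspaceLayout = true → findStandaloneBuildCandidates() runs
    // plain single-package repo: no marker → flag false, candidate scan skipped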
@@ -356,6 +392,8 @@ async function detectProject(projectDir) {
         hasManifest,
         serverDir,
         clientDir,
+        unknownWorkspaceLayout,
+        standaloneBuildCandidates,
         features,
         prodEnvIsEncrypted,
         hasEnvKeys,
@@ -368,6 +406,57 @@ async function detectProject(projectDir) {
         existingManifest,
     };
 }
+/** Scan first-level subdirs for a standalone-buildable project — own
+ * `package.json` AND its own lockfile (the marker that pnpm/npm/yarn
+ * would install it independently of the parent workspace). `.npmrc`
+ * with `ignore-workspace=true` is a stronger signal: that's the
+ * explicit "treat me as standalone" toggle Docusaurus / marketing
+ * sites use when they live next to a CLI workspace.
+ *
+ * Returns first-level matches only; we don't recurse because the
+ * intent is "show the user a starting point", not enumerate every
+ * buildable subtree. */
+function findStandaloneBuildCandidates(projectDir) {
+    const out = [];
+    let entries;
+    try {
+        entries = readdirSync(projectDir);
+    }
+    catch {
+        return out;
+    }
+    for (const name of entries) {
+        if (name.startsWith(".") || name === "node_modules")
+            continue;
+        const dir = join(projectDir, name);
+        let isDir = false;
+        try {
+            isDir = statSync(dir).isDirectory();
+        }
+        catch {
+            continue;
+        }
+        if (!isDir)
+            continue;
+        if (!existsSync(join(dir, "package.json")))
+            continue;
+        const hasOwnLockfile = existsSync(join(dir, "pnpm-lock.yaml")) ||
+            existsSync(join(dir, "package-lock.json")) ||
+            existsSync(join(dir, "yarn.lock"));
+        if (!hasOwnLockfile)
+            continue;
+        let hasIgnoreWorkspace = false;
+        try {
+            const npmrc = readFileSync(join(dir, ".npmrc"), "utf-8");
+            hasIgnoreWorkspace = /^\s*ignore-workspace\s*=\s*true\s*$/m.test(npmrc);
+        }
+        catch {
+            // No .npmrc — still a candidate (own lockfile is the main signal).
+        }
+        out.push({ dir, hasIgnoreWorkspace });
+    }
+    return out;
+}
 function firstExisting(root, candidates) {
     for (const c of candidates) {
         const full = join(root, c);
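Note: a hypothetical layout the new scanner would flag — the `docs/` name is invented for illustration:

    // repo/
    // ├── pnpm-workspace.yaml            ← workspace marker at the root
    // └── docs/
    //     ├── package.json
    //     ├── pnpm-lock.yaml             ← own lockfile: qualifies as a candidate
    //     └── .npmrc                     ← contains: ignore-workspace=true
    //
    // findStandaloneBuildCandidates("/repo")
    //   → [{ dir: "/repo/docs", hasIgnoreWorkspace: true }]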
@@ -500,6 +589,22 @@ function printDetected(state) {
         ? chalk.yellow("repo present, no `origin` set")
         : chalk.dim("not a git repo yet")));
     lines.push(row("features (guess)", state.features.length > 0 ? state.features.join(", ") : chalk.dim("none detected")));
+    if (state.unknownWorkspaceLayout) {
+        lines.push("");
+        lines.push(chalk.yellow(" ! Workspace marker detected (pnpm-workspace.yaml / workspaces / turbo / lerna)"));
+        lines.push(chalk.yellow(" but no standard server/client dir matched. Adopt will skip the Docker"));
+        lines.push(chalk.yellow(" + GH Actions pipeline scaffold — the templates assume a single-package"));
+        lines.push(chalk.yellow(" project and would build the wrong thing here."));
+        if (state.standaloneBuildCandidates.length > 0) {
+            lines.push("");
+            lines.push(chalk.dim(" Standalone-buildable subdirs (own lockfile):"));
+            for (const c of state.standaloneBuildCandidates) {
+                const tag = c.hasIgnoreWorkspace ? chalk.green(" ignore-workspace=true") : "";
+                lines.push(chalk.dim(` · ${relativeTo(c.dir)}${tag}`));
+            }
+            lines.push(chalk.dim(" Point a hand-authored Dockerfile at one of those and re-run adopt."));
+        }
+    }
     for (const l of lines)
         console.log(l);
     console.log();
@@ -677,7 +782,10 @@ function buildAdoptGroups(state, plan) {
             {
                 key: "scaffoldBuildPipeline",
                 label: "Docker + GH Actions",
-
+                // Park cursor here when the layout is unrecognised — the
+                // user should make an explicit yes/no rather than walk
+                // past a defaulted-off row without seeing it.
+                set: !state.unknownWorkspaceLayout,
                 summary: renderBuildPipelineSummary(state, plan),
             },
         ],
@@ -1048,6 +1156,47 @@ async function editAdoptStep(state, plan, step) {
     }
     return plan;
 }
+/** Canonical env key per service — used by `filterServicesForResume`
+ * to decide whether a service's credentials are already wired into
+ * the project's env files. If the key is present, re-minting on a
+ * resume would orphan whatever's there (Resend mints a fresh API
+ * key each call; OpenPanel mints a fresh project; Stripe re-creates
+ * the webhook endpoint). `email` is intentionally absent — Email
+ * Routing is zone-state with no env footprint, and its provisioner
+ * is already 409-idempotent. */
+const RESUME_SERVICE_ENV_KEY = {
+    glitchtip: { server: "GLITCHTIP_DSN", client: "PUBLIC_GLITCHTIP_DSN" },
+    openpanel: { server: "OPENPANEL_CLIENT_ID", client: "PUBLIC_OPENPANEL_CLIENT_ID" },
+    resend: { server: "RESEND_API_KEY" },
+    s3: { server: "R2_ENDPOINT" },
+    email: {},
+};
+/** Filter the services list for `runProvision` on `--resume`: drop
+ * every service whose canonical env keys are already in the target
+ * env files. A non-resume run returns the list unchanged. */
+function filterServicesForResume(args) {
+    if (!args.resume)
+        return args.services;
+    const serverKeys = args.serverEnvPath ? readEnvKeys(args.serverEnvPath) : new Set();
+    const clientKeys = args.clientEnvPath ? readEnvKeys(args.clientEnvPath) : new Set();
+    const kept = [];
+    for (const svc of args.services) {
+        const want = RESUME_SERVICE_ENV_KEY[svc];
+        if (!want || (!want.server && !want.client)) {
+            kept.push(svc);
+            continue;
+        }
+        const serverOk = !want.server || serverKeys.has(want.server);
+        const clientOk = !want.client || clientKeys.has(want.client);
+        if (serverOk && clientOk) {
+            const which = [want.server, want.client].filter(Boolean).join(" + ");
+            console.log(chalk.dim(` · Skipping ${svc} on --resume — ${which} already in .env.production.`));
+            continue;
+        }
+        kept.push(svc);
+    }
+    return kept;
+}
 async function executePlan(state, plan, opts = { resume: false }) {
     console.log(chalk.bold("\n ── Adopting ──────────────────────────────────────────────\n"));
     const caveats = [];
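Note: a worked trace of `filterServicesForResume` — the paths and service mix are hypothetical:

    // server/.env.production already holds RESEND_API_KEY; there is no
    // client env file, so glitchtip's client key can't be confirmed:
    filterServicesForResume({
        services: ["resend", "glitchtip", "email"],
        resume: true,
        serverEnvPath: "server/.env.production", // contains RESEND_API_KEY
        clientEnvPath: null,                     // clientKeys stays an empty Set
    });
    // → ["glitchtip", "email"]
    //   resend:    RESEND_API_KEY present, no client key wanted → skipped
    //   glitchtip: PUBLIC_GLITCHTIP_DSN unconfirmed → kept (re-provisioned)
    //   email:     no canonical env keys → always kept (409-idempotent anyway)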
@@ -1272,11 +1421,35 @@ async function executePlan(state, plan, opts = { resume: false }) {
     if (plan.scaffoldBuildPipeline && plan.deploymentMode === "coolify" && appUuidForSecrets) {
         const slug = repoSlugFromRemote(remoteUrl);
         if (slug) {
-
-
-
-
-
+            // --resume gate: if every secret this step would push is
+            // already present on the repo, skip the push. The values
+            // themselves aren't readable through `gh secret list` (write-
+            // only), so we trust name-presence as the signal — same
+            // contract `ghSecretExists` uses for the ledger-record gate
+            // below. The user's recourse for a rotated Coolify token is
+            // re-running adopt *without* --resume.
+            const coolifySecretNames = [
+                "COOLIFY_BASE_URL",
+                "COOLIFY_API_TOKEN",
+                "COOLIFY_TOKEN",
+                "COOLIFY_WEBHOOK_URL",
+                "COOLIFY_RESOURCE_UUID",
+            ];
+            let skipCoolifySecrets = false;
+            if (opts.resume) {
+                const checks = await Promise.all(coolifySecretNames.map((n) => ghSecretExists(state.projectDir, slug, n)));
+                if (checks.every(Boolean)) {
+                    skipCoolifySecrets = true;
+                    console.log(chalk.dim(` · Skipping Coolify GH Actions secrets on --resume — all ${coolifySecretNames.length} already on ${slug}.`));
+                }
+            }
+            if (!skipCoolifySecrets) {
+                await setCoolifyDeploySecrets({
+                    projectDir: state.projectDir,
+                    repoSlug: slug,
+                    apps: [{ uuid: appUuidForSecrets }],
+                });
+            }
         }
         else {
             console.log(chalk.dim(" · Couldn't resolve owner/repo from git remote — set the deploy secrets manually."));
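Note: `ghSecretExists` isn't part of this diff (it's imported from the deploy helpers); a minimal sketch of the name-presence contract the gate leans on could look like this — assumptions: the `gh` CLI is installed, and `gh secret list` emits tab-separated rows when piped:

    import { execFile } from "node:child_process";
    import { promisify } from "node:util";
    const run = promisify(execFile);

    // Name-presence only: GitHub Actions secret values are write-only,
    // so presence of the name is the strongest signal available.
    async function ghSecretExistsSketch(cwd, repoSlug, name) {
        const { stdout } = await run("gh", ["secret", "list", "--repo", repoSlug], { cwd });
        return stdout.split("\n").some((line) => line.split("\t")[0] === name);
    }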
@@ -1305,21 +1478,26 @@ async function executePlan(state, plan, opts = { resume: false }) {
             // we're the creator preserves the "destroy never deletes
             // pre-existing user data" invariant — see LedgerStep doc.
             const preExisted = await ghSecretExists(state.projectDir, slug, secretName);
-
-
-            if (!preExisted) {
-                ledger.record({ kind: "ghActionsSecret", repo: slug, name: secretName });
-            }
+            if (opts.resume && preExisted) {
+                console.log(chalk.dim(` · Skipping ${secretName} push on --resume — secret already on ${slug}.`));
             }
-
-
-
-
-
-
-
-
-
+            else {
+                try {
+                    await pushProjectKeyToGh(plan.name, slug);
+                    if (!preExisted) {
+                        ledger.record({ kind: "ghActionsSecret", repo: slug, name: secretName });
+                    }
+                }
+                catch (err) {
+                    caveats.push({
+                        title: `${secretName} not set on GitHub Actions`,
+                        reason: err.message,
+                        recovery: [
+                            `hatchkit keys push ${plan.name} --target gh --repo ${slug}`,
+                            `(or copy from \`hatchkit keys show ${plan.name}\` and run \`gh secret set ${secretName} --repo ${slug} --body <key>\`)`,
+                        ],
+                    });
+                }
+            }
         }
     }
     else if (remoteUrl) {
@@ -1513,7 +1691,23 @@ async function executePlan(state, plan, opts = { resume: false }) {
     // to a normal `hatchkit add`. Forward the surface choice — runProvision
     // uses the same vocabulary, so a client-only adopt produces a
     // client-only `add`.
-
+    //
+    // --resume contract: filter out services whose canonical env keys
+    // are already present in the target env files. Re-minting Resend
+    // keys / OpenPanel projects / Stripe webhooks on every resume
+    // orphans live credentials and rotates secrets the user didn't
+    // ask to rotate. The keychain caches some of these per-service,
+    // but those caches don't survive a fresh machine — the env file
+    // is the durable signal, so we trust it. A service is re-included
+    // if it's newly in `plan.services` (added since the last attempt)
+    // or its canonical env key is missing.
+    const resumeServices = filterServicesForResume({
+        services: plan.services,
+        resume: opts.resume === true,
+        serverEnvPath: plan.serverDir ? join(plan.serverDir, ".env.production") : null,
+        clientEnvPath: plan.clientDir ? join(plan.clientDir, ".env.production") : null,
+    });
+    if (resumeServices.length > 0) {
         console.log();
         const provisionMode = plan.surfaces === "both"
             ? "shared"
@@ -1592,96 +1786,117 @@ async function executePlan(state, plan, opts = { resume: false }) {
    // (globally via `hatchkit config add s3 r2`, which re-pastes +
    // verifies it) and re-run `hatchkit provision s3` to finish.
    if (plan.features.includes("s3")) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        // --resume gate: when the manifest already records the assets
+        // bucket AND .env.production has a working access/secret pair,
+        // there's nothing to provision. Skip the whole step rather than
+        // re-attaching custom domains / re-reconciling CORS / re-probing
+        // tokens, all of which are network round-trips with no payoff
+        // when nothing has changed since the last attempt.
+        const s3ManifestSnapshot = readManifest(state.projectDir);
+        const s3EnvPath = join(state.projectDir, ".env.production");
+        const s3EnvKeys = readEnvKeys(s3EnvPath);
+        const s3HasEnvCreds = (s3EnvKeys.has("R2_ACCESS_KEY_ID") && s3EnvKeys.has("R2_SECRET_ACCESS_KEY")) ||
+            (s3EnvKeys.has("S3_ACCESS_KEY_ID") && s3EnvKeys.has("S3_SECRET_ACCESS_KEY")) ||
+            (s3EnvKeys.has("AWS_ACCESS_KEY_ID") && s3EnvKeys.has("AWS_SECRET_ACCESS_KEY"));
+        const s3ManifestComplete = !!s3ManifestSnapshot?.s3Buckets?.assets?.name;
+        const s3AlreadyWired = opts.resume && s3HasEnvCreds && s3ManifestComplete;
+        if (s3AlreadyWired) {
+            console.log(chalk.dim(` · Skipping S3 on --resume — manifest records ${s3ManifestSnapshot?.s3Buckets?.assets?.name} and .env.production has access/secret keys.`));
+        }
+        else {
+            try {
+                const { provisionS3ForProject, defaultBucketHostname, existingCustomHostname } = await import("./provision/s3-buckets.js");
+                // Resolve the public assets-bucket custom domain. If a previous
+                // run already attached one, the manifest records it — reuse
+                // that without re-prompting. Only ask on first adopt (or when
+                // the manifest has no hostname yet, e.g. a previous run picked
+                // the managed r2.dev URL or never got that far). Blank answer →
+                // managed r2.dev.
+                let publicHostname;
+                const existingManifest = readManifest(state.projectDir);
+                const recordedHostname = existingManifest
+                    ? existingCustomHostname(existingManifest)
+                    : null;
+                if (recordedHostname) {
+                    publicHostname = recordedHostname;
+                }
+                else if (process.stdin.isTTY) {
+                    const answer = (await input({
+                        message: "Custom domain for the public assets bucket (leave empty to use the managed r2.dev URL):",
+                        default: defaultBucketHostname(plan.domain),
+                    })).trim();
+                    publicHostname = answer === "" ? null : answer;
+                }
+                // Only create the public assets bucket here. The private "state"
+                // bucket is an explicit opt-in even when the project has a
+                // server — most don't need it, and adding one silently means
+                // an extra R2 bucket + env var the user has to clean up later.
+                // Users who want one re-run `hatchkit provision s3 --with-state-bucket`.
+                const r = await provisionS3ForProject({
+                    projectDir: state.projectDir,
+                    publicHostname,
                 });
+                // Ledger: record any *fresh* bucket creations + a fresh token
+                // mint so destroy can revoke them. Reused buckets/tokens (from
+                // a prior adopt run) stay out — those are already in the
+                // earlier run's ledger or pre-existed before hatchkit ran.
+                if (r.assets.created) {
+                    ledger.record({
+                        kind: "r2Bucket",
+                        bucketName: r.assets.name,
+                        accountId: r.accountId,
+                    });
+                }
+                if (r.state?.created) {
+                    ledger.record({
+                        kind: "r2Bucket",
+                        bucketName: r.state.name,
+                        accountId: r.accountId,
+                    });
+                }
+                if (r.tokenCreated) {
+                    ledger.record({
+                        kind: "r2Token",
+                        tokenId: r.tokenCreated.tokenId,
+                        accountId: r.accountId,
+                        audience: r.tokenCreated.audience,
+                    });
+                }
+                console.log(chalk.green(` ✓ S3 assets bucket ready — ${r.assets.publicUrl}`));
+                console.log(chalk.dim(` Wrote ${r.envWritten.length} encrypted entries. ` +
+                    "(Need a private server-side bucket too? Run `hatchkit provision s3 --with-state-bucket`.)"));
+                // The fresh bucket is empty. Existing projects almost always
+                // have assets sitting in some other store — surface the one
+                // command that copies them in. Cheap line to print, easy to
+                // miss without it.
+                console.log(chalk.dim(` Have existing assets to bring over? hatchkit assets migrate \\\n` +
+                    ` --from-endpoint=<old-s3-endpoint> --from-bucket=<name> \\\n` +
+                    ` --from-key=<access-key> --from-secret=<secret>`));
             }
-
-
-
-
-
-
+            catch (err) {
+                console.log(chalk.yellow(`\n ✗ S3 bucket provisioning failed: ${err.message.split("\n")[0]}`));
+                // Two kinds of recovery — pick based on whether the underlying
+                // error looks like an admin-token problem (global) vs. a
+                // bucket-side problem (per-project). Admin-token failures point
+                // the user at the global config command (which validates the
+                // token); everything else points at the per-project re-runner.
+                const msg = err.message;
+                const isAdminTokenIssue = /admin token|invalid api token|9109|10000|10001|HTTP 401|HTTP 403/i.test(msg);
+                caveats.push({
+                    title: "S3 buckets not provisioned",
+                    reason: msg,
+                    recovery: isAdminTokenIssue
+                        ? [
+                            "Looks like an R2 admin-token problem.",
+                            "Fix globally with: hatchkit config add s3 r2 (re-paste + verify perms)",
+                            `Then re-run from the project dir: cd ${plan.name} && hatchkit provision s3`,
+                        ]
+                        : [
+                            "Once fixed, finish with: hatchkit provision s3",
+                            "(safe to re-run — bucket creation and env writes are idempotent)",
+                        ],
                });
            }
-            console.log(chalk.green(` ✓ S3 assets bucket ready — ${r.assets.publicUrl}`));
-            console.log(chalk.dim(` Wrote ${r.envWritten.length} encrypted entries. ` +
-                "(Need a private server-side bucket too? Run `hatchkit provision s3 --with-state-bucket`.)"));
-            // The fresh bucket is empty. Existing projects almost always
-            // have assets sitting in some other store — surface the one
-            // command that copies them in. Cheap line to print, easy to
-            // miss without it.
-            console.log(chalk.dim(` Have existing assets to bring over? hatchkit assets migrate \\\n` +
-                ` --from-endpoint=<old-s3-endpoint> --from-bucket=<name> \\\n` +
-                ` --from-key=<access-key> --from-secret=<secret>`));
-        }
-        catch (err) {
-            console.log(chalk.yellow(`\n ✗ S3 bucket provisioning failed: ${err.message.split("\n")[0]}`));
-            // Two kinds of recovery — pick based on whether the underlying
-            // error looks like an admin-token problem (global) vs. a
-            // bucket-side problem (per-project). Admin-token failures point
-            // the user at the global config command (which validates the
-            // token); everything else points at the per-project re-runner.
-            const msg = err.message;
-            const isAdminTokenIssue = /admin token|invalid api token|9109|10000|10001|HTTP 401|HTTP 403/i.test(msg);
-            caveats.push({
-                title: "S3 buckets not provisioned",
-                reason: msg,
-                recovery: isAdminTokenIssue
-                    ? [
-                        "Looks like an R2 admin-token problem.",
-                        "Fix globally with: hatchkit config add s3 r2 (re-paste + verify perms)",
-                        `Then re-run from the project dir: cd ${plan.name} && hatchkit provision s3`,
-                    ]
-                    : [
-                        "Once fixed, finish with: hatchkit provision s3",
-                        "(safe to re-run — bucket creation and env writes are idempotent)",
-                    ],
-            });
        }
    }
    // Step 4c: Stripe — strictly separate from `create`'s Stripe block
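Note: `readEnvKeys` comes from package/dist/provision/write-env.js (+17 lines in this release, hunk not shown). A minimal sketch of the contract the S3 and Stripe gates rely on — key names only, as a Set, values never decrypted — assuming the usual KEY=value env-file grammar:

    import { existsSync, readFileSync } from "node:fs";

    function readEnvKeysSketch(envPath) {
        if (!existsSync(envPath))
            return new Set();
        const keys = new Set();
        for (const line of readFileSync(envPath, "utf-8").split("\n")) {
            // Capture the key of `KEY=...` / `export KEY=...`; skips comments.
            const m = /^\s*(?:export\s+)?([A-Za-z_][A-Za-z0-9_]*)\s*=/.exec(line);
            if (m)
                keys.add(m[1]);
        }
        return keys;
    }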
@@ -1708,55 +1923,69 @@
             });
         }
         else {
-
-
-
-
+            // --resume gate: if Stripe keys are already encrypted in
+            // .env.production AND set in .env.development, the env is
+            // wired — skip the provisioner entirely. Re-running it on
+            // a cache miss (e.g. fresh machine) would reprompt for the
+            // sk/pk and re-create the webhook endpoint, leaving the
+            // old endpoint orphaned in the user's Stripe dashboard.
             const devEnvPath = join(plan.serverDir, ".env.development");
             const prodEnvPath = join(plan.serverDir, ".env.production");
-            const
-
-
-
-
-            }
-            const pairs = parseEnvLines(renderStripeEnv(result.test));
-            writeDevEnv(devEnvPath, pairs);
-            if (result.test.kind === "configured") {
-                ledger.record({
-                    kind: "keychain",
-                    account: SECRET_KEYS.stripeProjectWebhookId(plan.name, "test"),
-                });
-            }
-            console.log(chalk.green(result.test.kind === "skipped"
-                ? ` ✓ Stripe sandbox placeholders → ${devLabel} (fill in later)`
-                : ` ✓ Stripe sandbox creds → ${devLabel} (${pairs.length} keys)`));
+            const stripeAlreadyWired = opts.resume &&
+                readEnvKeys(prodEnvPath).has("STRIPE_SECRET_KEY") &&
+                readEnvKeys(devEnvPath).has("STRIPE_SECRET_KEY");
+            if (stripeAlreadyWired) {
+                console.log(chalk.dim(` · Skipping Stripe on --resume — STRIPE_SECRET_KEY present in both .env.production and .env.development.`));
             }
-
-
-
+            else {
+                const result = await provisionStripeProject({
+                    projectName: plan.name,
+                    domain: plan.domain,
+                });
+                const devLabel = relative(state.projectDir, devEnvPath);
+                const prodLabel = relative(state.projectDir, prodEnvPath);
+                if (result.test) {
+                    if (result.test.kind === "skipped") {
+                        appendCommentBlock(devEnvPath, renderStripeSkipComment("test", devLabel));
+                    }
+                    const pairs = parseEnvLines(renderStripeEnv(result.test));
+                    writeDevEnv(devEnvPath, pairs);
+                    if (result.test.kind === "configured") {
+                        ledger.record({
+                            kind: "keychain",
+                            account: SECRET_KEYS.stripeProjectWebhookId(plan.name, "test"),
+                        });
+                    }
+                    console.log(chalk.green(result.test.kind === "skipped"
+                        ? ` ✓ Stripe sandbox placeholders → ${devLabel} (fill in later)`
+                        : ` ✓ Stripe sandbox creds → ${devLabel} (${pairs.length} keys)`));
                 }
-
-
-
-
-
-
+                if (result.live) {
+                    if (result.live.kind === "skipped") {
+                        appendCommentBlock(prodEnvPath, renderStripeSkipComment("live", prodLabel));
+                    }
+                    const pairs = parseEnvLines(renderStripeEnv(result.live));
+                    writeProdEnv(prodEnvPath, pairs);
+                    if (result.live.kind === "configured") {
+                        ledger.record({
+                            kind: "keychain",
+                            account: SECRET_KEYS.stripeProjectWebhookId(plan.name, "live"),
+                        });
+                    }
+                    console.log(chalk.green(result.live.kind === "skipped"
+                        ? ` ✓ Stripe live placeholders → ${prodLabel} (encrypted CHANGE_ME values, fill in later)`
+                        : ` ✓ Stripe live creds → ${prodLabel} (encrypted, ${pairs.length} keys)`));
+                }
+                if (!result.test && !result.live) {
+                    caveats.push({
+                        title: "Stripe wiring skipped",
+                        reason: "No Stripe master key configured — neither test nor live mode could be wired.",
+                        recovery: [
+                            "Run `hatchkit config add stripe` to add at least one master key,",
+                            `then re-run \`hatchkit adopt --resume\` from ${state.projectDir}.`,
+                        ],
                    });
                }
-            console.log(chalk.green(result.live.kind === "skipped"
-                ? ` ✓ Stripe live placeholders → ${prodLabel} (encrypted CHANGE_ME values, fill in later)`
-                : ` ✓ Stripe live creds → ${prodLabel} (encrypted, ${pairs.length} keys)`));
-            }
-            if (!result.test && !result.live) {
-                caveats.push({
-                    title: "Stripe wiring skipped",
-                    reason: "No Stripe master key configured — neither test nor live mode could be wired.",
-                    recovery: [
-                        "Run `hatchkit config add stripe` to add at least one master key,",
-                        `then re-run \`hatchkit adopt --resume\` from ${state.projectDir}.`,
-                    ],
-                });
            }
        }
    }
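Note: tracing the Stripe gate above on a hypothetical --resume run:

    // server/.env.production  → STRIPE_SECRET_KEY=<encrypted>    ✓ present
    // server/.env.development → STRIPE_SECRET_KEY=sk_test_...    ✓ present
    // → stripeAlreadyWired === true: provisionStripeProject() never runs,
    //   so no duplicate webhook endpoint appears in the Stripe dashboard
    //   and the keys already on disk aren't rotated.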
@@ -2441,8 +2670,11 @@ async function listStagedFiles(cwd) {
  * noting which files already exist (will be left alone) vs which
  * will be scaffolded. */
 function renderBuildPipelineSummary(state, plan) {
-    if (!plan.scaffoldBuildPipeline)
-        return
+    if (!plan.scaffoldBuildPipeline) {
+        return state.unknownWorkspaceLayout
+            ? chalk.dim("no — unrecognised workspace layout, hand-author your own")
+            : chalk.dim("no — leave files as-is");
+    }
     const pipe = detectBuildPipeline(state.projectDir);
     const willWrite = [];
     const kept = [];
@@ -2462,7 +2694,13 @@ function renderBuildPipelineSummary(state, plan) {
         return chalk.dim("all files already present — nothing to write");
     const writePart = `write ${willWrite.join(", ")}`;
     const keepPart = kept.length > 0 ? chalk.dim(` · keep ${kept.join(", ")}`) : "";
-
+    // Strong warning when the user has overridden the unknown-layout
+    // default. We still write the files (their call), but flag that the
+    // templates' single-package assumption probably doesn't fit this repo.
+    const layoutWarn = state.unknownWorkspaceLayout
+        ? ` ${chalk.yellow("(unrecognised workspace — templates may build the wrong thing)")}`
+        : "";
+    return `${writePart}${keepPart}${layoutWarn}`;
 }
 function detectDockerComposeDomainServiceName(projectDir, surfaces) {
     const pipe = detectBuildPipeline(projectDir);
|