@bensandee/tooling 0.16.0 → 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -3
- package/dist/bin.mjs +176 -11
- package/dist/docker-verify/index.mjs +1 -217
- package/dist/verify-BaWlzdPh.mjs +223 -0
- package/package.json +3 -4
- package/CHANGELOG.md +0 -242
- package/dist/exec-CC49vrkM.mjs +0 -7
package/README.md
CHANGED
|
@@ -55,7 +55,7 @@ The tool auto-detects project structure, CI platform, project type, and Docker p
|
|
|
55
55
|
|
|
56
56
|
Docker packages are discovered automatically. Any package with a `Dockerfile` or `docker/Dockerfile` is a Docker package. Image names are derived as `{root-package-name}-{package-name}`, build context defaults to `.` (project root). For single-package repos, `Dockerfile` or `docker/Dockerfile` at the project root is checked.
|
|
57
57
|
|
|
58
|
-
When Docker packages are present, `repo:sync` generates a deploy workflow (`.forgejo/workflows/
|
|
58
|
+
When Docker packages are present, `repo:sync` generates a deploy workflow (`.forgejo/workflows/publish.yml` or `.github/workflows/publish.yml`) triggered on version tags (`v*.*.*`) that runs `pnpm exec tooling docker:publish`.
|
|
59
59
|
|
|
60
60
|
#### Overrides
|
|
61
61
|
|
|
@@ -219,5 +219,3 @@ if (!result.success) {
|
|
|
219
219
|
| `VerifyResult` | Result: `{ success: true, elapsedMs }` or `{ success: false, reason, message, elapsedMs }` |
|
|
220
220
|
| `DockerVerifyExecutor` | Side-effect abstraction (exec, fetch, timers) for testability |
|
|
221
221
|
| `ContainerInfo` | Container status info from `composePs` |
|
|
222
|
-
|
|
223
|
-
## [Changelog](./CHANGELOG.md)
|
package/dist/bin.mjs
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
#!/usr/bin/env node
|
|
2
|
-
import { t as isExecSyncError } from "./
|
|
2
|
+
import { l as createRealExecutor$1, t as runVerification, u as isExecSyncError } from "./verify-BaWlzdPh.mjs";
|
|
3
3
|
import { defineCommand, runMain } from "citty";
|
|
4
4
|
import * as p from "@clack/prompts";
|
|
5
5
|
import path from "node:path";
|
|
@@ -7,7 +7,7 @@ import { existsSync, mkdirSync, readFileSync, readdirSync, rmSync, writeFileSync
|
|
|
7
7
|
import JSON5 from "json5";
|
|
8
8
|
import { parse } from "jsonc-parser";
|
|
9
9
|
import { z } from "zod";
|
|
10
|
-
import { isMap, isSeq, parseDocument } from "yaml";
|
|
10
|
+
import { isMap, isSeq, parse as parse$1, parseDocument } from "yaml";
|
|
11
11
|
import { execSync } from "node:child_process";
|
|
12
12
|
import { FatalError, TransientError, UnexpectedError } from "@bensandee/common";
|
|
13
13
|
//#region src/types.ts
|
|
@@ -451,6 +451,21 @@ function createDryRunContext(config) {
|
|
|
451
451
|
//#endregion
|
|
452
452
|
//#region src/utils/tooling-config.ts
|
|
453
453
|
const CONFIG_FILE = ".tooling.json";
|
|
454
|
+
const DeclarativeHealthCheckSchema = z.object({
|
|
455
|
+
name: z.string(),
|
|
456
|
+
url: z.string(),
|
|
457
|
+
status: z.number().int().optional()
|
|
458
|
+
});
|
|
459
|
+
const DockerVerifyConfigSchema = z.object({
|
|
460
|
+
composeFiles: z.array(z.string()).optional(),
|
|
461
|
+
envFile: z.string().optional(),
|
|
462
|
+
services: z.array(z.string()).optional(),
|
|
463
|
+
healthChecks: z.array(DeclarativeHealthCheckSchema).optional(),
|
|
464
|
+
buildCommand: z.string().optional(),
|
|
465
|
+
buildCwd: z.string().optional(),
|
|
466
|
+
timeoutMs: z.number().int().positive().optional(),
|
|
467
|
+
pollIntervalMs: z.number().int().positive().optional()
|
|
468
|
+
});
|
|
454
469
|
const ToolingConfigSchema = z.object({
|
|
455
470
|
structure: z.enum(["single", "monorepo"]).optional(),
|
|
456
471
|
useEslintPlugin: z.boolean().optional(),
|
|
@@ -479,7 +494,8 @@ const ToolingConfigSchema = z.object({
|
|
|
479
494
|
docker: z.record(z.string(), z.object({
|
|
480
495
|
dockerfile: z.string(),
|
|
481
496
|
context: z.string().default(".")
|
|
482
|
-
})).optional()
|
|
497
|
+
})).optional(),
|
|
498
|
+
dockerVerify: DockerVerifyConfigSchema.optional()
|
|
483
499
|
});
|
|
484
500
|
/** Load saved tooling config from the target directory. Returns undefined if missing or invalid. */
|
|
485
501
|
function loadToolingConfig(targetDir) {
|
|
@@ -639,8 +655,8 @@ function addReleaseDeps(deps, config) {
|
|
|
639
655
|
function getAddedDevDepNames(config) {
|
|
640
656
|
const deps = { ...ROOT_DEV_DEPS };
|
|
641
657
|
if (config.structure !== "monorepo") Object.assign(deps, PER_PACKAGE_DEV_DEPS);
|
|
642
|
-
deps["@bensandee/config"] = "0.8.
|
|
643
|
-
deps["@bensandee/tooling"] = "0.
|
|
658
|
+
deps["@bensandee/config"] = "0.8.2";
|
|
659
|
+
deps["@bensandee/tooling"] = "0.18.0";
|
|
644
660
|
if (config.formatter === "oxfmt") deps["oxfmt"] = "0.35.0";
|
|
645
661
|
if (config.formatter === "prettier") deps["prettier"] = "3.8.1";
|
|
646
662
|
addReleaseDeps(deps, config);
|
|
@@ -660,8 +676,8 @@ async function generatePackageJson(ctx) {
|
|
|
660
676
|
if (ctx.config.releaseStrategy !== "none" && ctx.config.releaseStrategy !== "changesets") allScripts["trigger-release"] = "pnpm exec tooling release:trigger";
|
|
661
677
|
const devDeps = { ...ROOT_DEV_DEPS };
|
|
662
678
|
if (!isMonorepo) Object.assign(devDeps, PER_PACKAGE_DEV_DEPS);
|
|
663
|
-
devDeps["@bensandee/config"] = isWorkspacePackage(ctx, "@bensandee/config") ? "workspace:*" : "0.8.
|
|
664
|
-
devDeps["@bensandee/tooling"] = isWorkspacePackage(ctx, "@bensandee/tooling") ? "workspace:*" : "0.
|
|
679
|
+
devDeps["@bensandee/config"] = isWorkspacePackage(ctx, "@bensandee/config") ? "workspace:*" : "0.8.2";
|
|
680
|
+
devDeps["@bensandee/tooling"] = isWorkspacePackage(ctx, "@bensandee/tooling") ? "workspace:*" : "0.18.0";
|
|
665
681
|
if (ctx.config.useEslintPlugin) devDeps["@bensandee/eslint-plugin"] = isWorkspacePackage(ctx, "@bensandee/eslint-plugin") ? "workspace:*" : "0.9.2";
|
|
666
682
|
if (ctx.config.formatter === "oxfmt") devDeps["oxfmt"] = "0.35.0";
|
|
667
683
|
if (ctx.config.formatter === "prettier") devDeps["prettier"] = "3.8.1";
|
|
@@ -2479,7 +2495,7 @@ async function generateDeployCi(ctx) {
|
|
|
2479
2495
|
description: "Deploy CI workflow not applicable"
|
|
2480
2496
|
};
|
|
2481
2497
|
const isGitHub = ctx.config.ci === "github";
|
|
2482
|
-
const workflowPath = isGitHub ? ".github/workflows/
|
|
2498
|
+
const workflowPath = isGitHub ? ".github/workflows/publish.yml" : ".forgejo/workflows/publish.yml";
|
|
2483
2499
|
const nodeVersionYaml = hasEnginesNode(ctx) ? "node-version-file: package.json" : "node-version: \"24\"";
|
|
2484
2500
|
const content = deployWorkflow(ctx.config.ci, nodeVersionYaml);
|
|
2485
2501
|
if (ctx.exists(workflowPath)) {
|
|
@@ -4241,11 +4257,159 @@ const dockerBuildCommand = defineCommand({
|
|
|
4241
4257
|
}
|
|
4242
4258
|
});
|
|
4243
4259
|
//#endregion
|
|
4260
|
+
//#region src/docker-verify/detect.ts
|
|
4261
|
+
/** Compose file names to scan, in priority order. */
|
|
4262
|
+
const COMPOSE_FILE_CANDIDATES = [
|
|
4263
|
+
"docker-compose.yaml",
|
|
4264
|
+
"docker-compose.yml",
|
|
4265
|
+
"compose.yaml",
|
|
4266
|
+
"compose.yml"
|
|
4267
|
+
];
|
|
4268
|
+
/** Zod schema for the subset of compose YAML we care about. */
|
|
4269
|
+
const ComposePortSchema = z.union([z.string(), z.object({
|
|
4270
|
+
published: z.union([z.string(), z.number()]),
|
|
4271
|
+
target: z.union([z.string(), z.number()]).optional()
|
|
4272
|
+
}).passthrough()]);
|
|
4273
|
+
const ComposeServiceSchema = z.object({
|
|
4274
|
+
ports: z.array(ComposePortSchema).optional(),
|
|
4275
|
+
healthcheck: z.unknown().optional()
|
|
4276
|
+
}).passthrough();
|
|
4277
|
+
const ComposeFileSchema = z.object({ services: z.record(z.string(), ComposeServiceSchema).optional() }).passthrough();
|
|
4278
|
+
/** Detect which compose files exist at conventional paths. */
|
|
4279
|
+
function detectComposeFiles(cwd) {
|
|
4280
|
+
return COMPOSE_FILE_CANDIDATES.filter((name) => existsSync(path.join(cwd, name)));
|
|
4281
|
+
}
|
|
4282
|
+
/** Parse a single port mapping string and extract the host port. */
|
|
4283
|
+
function parsePortString(port) {
|
|
4284
|
+
const withoutProtocol = port.split("/")[0];
|
|
4285
|
+
if (!withoutProtocol) return void 0;
|
|
4286
|
+
const parts = withoutProtocol.split(":");
|
|
4287
|
+
if (parts.length === 2) {
|
|
4288
|
+
const host = parts[0];
|
|
4289
|
+
if (!host) return void 0;
|
|
4290
|
+
const parsed = Number.parseInt(host, 10);
|
|
4291
|
+
return Number.isNaN(parsed) ? void 0 : parsed;
|
|
4292
|
+
}
|
|
4293
|
+
if (parts.length === 3) {
|
|
4294
|
+
const host = parts[1];
|
|
4295
|
+
if (!host) return void 0;
|
|
4296
|
+
const parsed = Number.parseInt(host, 10);
|
|
4297
|
+
return Number.isNaN(parsed) ? void 0 : parsed;
|
|
4298
|
+
}
|
|
4299
|
+
}
|
|
4300
|
+
/** Extract the host port from a compose port definition. */
|
|
4301
|
+
function extractHostPort(port) {
|
|
4302
|
+
if (typeof port === "string") return parsePortString(port);
|
|
4303
|
+
const published = port.published;
|
|
4304
|
+
if (typeof published === "number") return published;
|
|
4305
|
+
if (typeof published === "string") {
|
|
4306
|
+
const parsed = Number.parseInt(published, 10);
|
|
4307
|
+
return Number.isNaN(parsed) ? void 0 : parsed;
|
|
4308
|
+
}
|
|
4309
|
+
}
|
|
4310
|
+
/** Parse compose files and return service info (names, ports, healthcheck presence). */
|
|
4311
|
+
function parseComposeServices(cwd, composeFiles) {
|
|
4312
|
+
const serviceMap = /* @__PURE__ */ new Map();
|
|
4313
|
+
for (const file of composeFiles) {
|
|
4314
|
+
let parsed;
|
|
4315
|
+
try {
|
|
4316
|
+
const content = readFileSync(path.join(cwd, file), "utf-8");
|
|
4317
|
+
parsed = ComposeFileSchema.safeParse(parse$1(content));
|
|
4318
|
+
} catch (_error) {
|
|
4319
|
+
continue;
|
|
4320
|
+
}
|
|
4321
|
+
if (!parsed.success || !parsed.data.services) continue;
|
|
4322
|
+
for (const [name, service] of Object.entries(parsed.data.services)) {
|
|
4323
|
+
const existing = serviceMap.get(name);
|
|
4324
|
+
let hostPort = existing?.hostPort;
|
|
4325
|
+
if (hostPort === void 0 && service.ports) for (const port of service.ports) {
|
|
4326
|
+
const extracted = extractHostPort(port);
|
|
4327
|
+
if (extracted !== void 0) {
|
|
4328
|
+
hostPort = extracted;
|
|
4329
|
+
break;
|
|
4330
|
+
}
|
|
4331
|
+
}
|
|
4332
|
+
serviceMap.set(name, {
|
|
4333
|
+
name,
|
|
4334
|
+
hostPort,
|
|
4335
|
+
hasHealthcheck: existing?.hasHealthcheck ?? service.healthcheck !== void 0
|
|
4336
|
+
});
|
|
4337
|
+
}
|
|
4338
|
+
}
|
|
4339
|
+
return [...serviceMap.values()];
|
|
4340
|
+
}
|
|
4341
|
+
/** Generate health checks from parsed services: services with exposed ports get HTTP checks, unless they define a compose-level healthcheck. */
|
|
4342
|
+
function deriveHealthChecks(services) {
|
|
4343
|
+
return services.filter((s) => s.hostPort !== void 0 && !s.hasHealthcheck).map((s) => ({
|
|
4344
|
+
name: s.name,
|
|
4345
|
+
url: `http://localhost:${String(s.hostPort)}/`
|
|
4346
|
+
}));
|
|
4347
|
+
}
|
|
4348
|
+
/** Auto-detect compose config from conventional file locations. */
|
|
4349
|
+
function computeVerifyDefaults(cwd) {
|
|
4350
|
+
const composeFiles = detectComposeFiles(cwd);
|
|
4351
|
+
if (composeFiles.length === 0) return {};
|
|
4352
|
+
const services = parseComposeServices(cwd, composeFiles);
|
|
4353
|
+
const healthChecks = deriveHealthChecks(services);
|
|
4354
|
+
return {
|
|
4355
|
+
composeFiles,
|
|
4356
|
+
services: services.map((s) => s.name),
|
|
4357
|
+
healthChecks: healthChecks.length > 0 ? healthChecks : void 0
|
|
4358
|
+
};
|
|
4359
|
+
}
|
|
4360
|
+
//#endregion
|
|
4361
|
+
//#region src/commands/docker-verify.ts
|
|
4362
|
+
/** Convert declarative health checks to functional ones. */
|
|
4363
|
+
function toHttpHealthChecks(checks) {
|
|
4364
|
+
return checks.map((check) => ({
|
|
4365
|
+
name: check.name,
|
|
4366
|
+
url: check.url,
|
|
4367
|
+
validate: async (res) => check.status ? res.status === check.status : res.ok
|
|
4368
|
+
}));
|
|
4369
|
+
}
|
|
4370
|
+
const dockerVerifyCommand = defineCommand({
|
|
4371
|
+
meta: {
|
|
4372
|
+
name: "docker:verify",
|
|
4373
|
+
description: "Verify Docker Compose stack health by auto-detecting services from compose files"
|
|
4374
|
+
},
|
|
4375
|
+
args: {
|
|
4376
|
+
timeout: {
|
|
4377
|
+
type: "string",
|
|
4378
|
+
description: "Maximum time to wait for health checks, in ms (default: 120000)"
|
|
4379
|
+
},
|
|
4380
|
+
"poll-interval": {
|
|
4381
|
+
type: "string",
|
|
4382
|
+
description: "Interval between polling attempts, in ms (default: 5000)"
|
|
4383
|
+
}
|
|
4384
|
+
},
|
|
4385
|
+
async run({ args }) {
|
|
4386
|
+
const cwd = process.cwd();
|
|
4387
|
+
const defaults = computeVerifyDefaults(cwd);
|
|
4388
|
+
if (!defaults.composeFiles || defaults.composeFiles.length === 0) throw new FatalError("No compose files found. Expected docker-compose.yaml or compose.yaml.");
|
|
4389
|
+
if (!defaults.services || defaults.services.length === 0) throw new FatalError("No services found in compose files.");
|
|
4390
|
+
const config = {
|
|
4391
|
+
compose: {
|
|
4392
|
+
cwd,
|
|
4393
|
+
composeFiles: defaults.composeFiles,
|
|
4394
|
+
envFile: defaults.envFile,
|
|
4395
|
+
services: defaults.services
|
|
4396
|
+
},
|
|
4397
|
+
buildCommand: defaults.buildCommand,
|
|
4398
|
+
buildCwd: defaults.buildCwd,
|
|
4399
|
+
healthChecks: defaults.healthChecks ? toHttpHealthChecks(defaults.healthChecks) : [],
|
|
4400
|
+
timeoutMs: args.timeout ? Number.parseInt(args.timeout, 10) : defaults.timeoutMs,
|
|
4401
|
+
pollIntervalMs: args["poll-interval"] ? Number.parseInt(args["poll-interval"], 10) : defaults.pollIntervalMs
|
|
4402
|
+
};
|
|
4403
|
+
const result = await runVerification(createRealExecutor$1(), config);
|
|
4404
|
+
if (!result.success) throw new FatalError(`Verification failed (${result.reason}): ${result.message}`);
|
|
4405
|
+
}
|
|
4406
|
+
});
|
|
4407
|
+
//#endregion
|
|
4244
4408
|
//#region src/bin.ts
|
|
4245
4409
|
const main = defineCommand({
|
|
4246
4410
|
meta: {
|
|
4247
4411
|
name: "tooling",
|
|
4248
|
-
version: "0.
|
|
4412
|
+
version: "0.18.0",
|
|
4249
4413
|
description: "Bootstrap and maintain standardized TypeScript project tooling"
|
|
4250
4414
|
},
|
|
4251
4415
|
subCommands: {
|
|
@@ -4257,10 +4421,11 @@ const main = defineCommand({
|
|
|
4257
4421
|
"changesets:merge": releaseMergeCommand,
|
|
4258
4422
|
"release:simple": releaseSimpleCommand,
|
|
4259
4423
|
"docker:publish": publishDockerCommand,
|
|
4260
|
-
"docker:build": dockerBuildCommand
|
|
4424
|
+
"docker:build": dockerBuildCommand,
|
|
4425
|
+
"docker:verify": dockerVerifyCommand
|
|
4261
4426
|
}
|
|
4262
4427
|
});
|
|
4263
|
-
console.log(`@bensandee/tooling v0.
|
|
4428
|
+
console.log(`@bensandee/tooling v0.18.0`);
|
|
4264
4429
|
runMain(main);
|
|
4265
4430
|
//#endregion
|
|
4266
4431
|
export {};
|
|
@@ -1,218 +1,2 @@
|
|
|
1
|
-
import { t as
|
|
2
|
-
import { z } from "zod";
|
|
3
|
-
import { execSync } from "node:child_process";
|
|
4
|
-
//#region src/docker-verify/executor.ts
|
|
5
|
-
/** Create an executor that runs real commands, fetches, and manages process signals. */
|
|
6
|
-
function createRealExecutor() {
|
|
7
|
-
return {
|
|
8
|
-
exec(command, options) {
|
|
9
|
-
try {
|
|
10
|
-
return {
|
|
11
|
-
stdout: execSync(command, {
|
|
12
|
-
cwd: options?.cwd,
|
|
13
|
-
env: options?.env ? {
|
|
14
|
-
...process.env,
|
|
15
|
-
...options.env
|
|
16
|
-
} : void 0,
|
|
17
|
-
encoding: "utf-8",
|
|
18
|
-
stdio: [
|
|
19
|
-
"pipe",
|
|
20
|
-
"pipe",
|
|
21
|
-
"pipe"
|
|
22
|
-
]
|
|
23
|
-
}),
|
|
24
|
-
stderr: "",
|
|
25
|
-
exitCode: 0
|
|
26
|
-
};
|
|
27
|
-
} catch (err) {
|
|
28
|
-
if (isExecSyncError(err)) return {
|
|
29
|
-
stdout: err.stdout,
|
|
30
|
-
stderr: err.stderr,
|
|
31
|
-
exitCode: err.status
|
|
32
|
-
};
|
|
33
|
-
return {
|
|
34
|
-
stdout: "",
|
|
35
|
-
stderr: "",
|
|
36
|
-
exitCode: 1
|
|
37
|
-
};
|
|
38
|
-
}
|
|
39
|
-
},
|
|
40
|
-
execInherit(command, options) {
|
|
41
|
-
execSync(command, {
|
|
42
|
-
cwd: options?.cwd,
|
|
43
|
-
env: options?.env ? {
|
|
44
|
-
...process.env,
|
|
45
|
-
...options.env
|
|
46
|
-
} : void 0,
|
|
47
|
-
stdio: "inherit"
|
|
48
|
-
});
|
|
49
|
-
},
|
|
50
|
-
fetch: globalThis.fetch,
|
|
51
|
-
now: () => Date.now(),
|
|
52
|
-
sleep: (ms) => new Promise((resolve) => setTimeout(resolve, ms)),
|
|
53
|
-
onSignal(signal, handler) {
|
|
54
|
-
process.on(signal, handler);
|
|
55
|
-
return () => {
|
|
56
|
-
process.removeListener(signal, handler);
|
|
57
|
-
};
|
|
58
|
-
},
|
|
59
|
-
log: (msg) => console.log(msg),
|
|
60
|
-
logError: (msg) => console.error(msg)
|
|
61
|
-
};
|
|
62
|
-
}
|
|
63
|
-
//#endregion
|
|
64
|
-
//#region src/docker-verify/compose.ts
|
|
65
|
-
/** Zod schema for a single container entry from `docker compose ps --format json`. */
|
|
66
|
-
const ContainerInfoSchema = z.object({
|
|
67
|
-
Service: z.string(),
|
|
68
|
-
Health: z.string()
|
|
69
|
-
});
|
|
70
|
-
/** Build the `docker compose` base command string from config. */
|
|
71
|
-
function composeCommand(config) {
|
|
72
|
-
return `docker compose ${config.composeFiles.map((f) => `-f ${f}`).join(" ")}${config.envFile ? ` --env-file ${config.envFile}` : ""}`;
|
|
73
|
-
}
|
|
74
|
-
/** Run the build command if configured. */
|
|
75
|
-
function buildImages(executor, config) {
|
|
76
|
-
if (!config.buildCommand) return;
|
|
77
|
-
executor.execInherit(config.buildCommand, { cwd: config.buildCwd ?? config.compose.cwd });
|
|
78
|
-
}
|
|
79
|
-
/** Start the compose stack in detached mode. */
|
|
80
|
-
function composeUp(executor, config) {
|
|
81
|
-
executor.execInherit(`${composeCommand(config)} up -d`, { cwd: config.cwd });
|
|
82
|
-
}
|
|
83
|
-
/** Tear down the compose stack, removing volumes and orphans. Swallows errors. */
|
|
84
|
-
function composeDown(executor, config) {
|
|
85
|
-
try {
|
|
86
|
-
executor.execInherit(`${composeCommand(config)} down -v --remove-orphans`, { cwd: config.cwd });
|
|
87
|
-
} catch (_error) {}
|
|
88
|
-
}
|
|
89
|
-
/** Show logs for a specific service (or all services if not specified). Swallows errors. */
|
|
90
|
-
function composeLogs(executor, config, service) {
|
|
91
|
-
try {
|
|
92
|
-
const suffix = service ? ` ${service}` : "";
|
|
93
|
-
executor.execInherit(`${composeCommand(config)} logs${suffix}`, { cwd: config.cwd });
|
|
94
|
-
} catch (_error) {}
|
|
95
|
-
}
|
|
96
|
-
/**
|
|
97
|
-
* Query container status via `docker compose ps --format json`.
|
|
98
|
-
* Handles both JSON array and newline-delimited JSON (varies by docker compose version).
|
|
99
|
-
*/
|
|
100
|
-
function composePs(executor, config) {
|
|
101
|
-
const output = executor.exec(`${composeCommand(config)} ps --format json`, { cwd: config.cwd }).stdout.trim();
|
|
102
|
-
if (!output) return [];
|
|
103
|
-
const ArraySchema = z.array(ContainerInfoSchema);
|
|
104
|
-
try {
|
|
105
|
-
const direct = ArraySchema.safeParse(JSON.parse(output));
|
|
106
|
-
if (direct.success) return direct.data;
|
|
107
|
-
const single = ContainerInfoSchema.safeParse(JSON.parse(output));
|
|
108
|
-
if (single.success) return [single.data];
|
|
109
|
-
} catch (_error) {}
|
|
110
|
-
try {
|
|
111
|
-
const joined = `[${output.split("\n").join(",")}]`;
|
|
112
|
-
const delimited = ArraySchema.safeParse(JSON.parse(joined));
|
|
113
|
-
return delimited.success ? delimited.data : [];
|
|
114
|
-
} catch (_error) {
|
|
115
|
-
return [];
|
|
116
|
-
}
|
|
117
|
-
}
|
|
118
|
-
//#endregion
|
|
119
|
-
//#region src/docker-verify/health.ts
|
|
120
|
-
/** Look up the health status of a specific service from container info. */
|
|
121
|
-
function getContainerHealth(containers, serviceName) {
|
|
122
|
-
return containers.find((c) => c.Service === serviceName)?.Health ?? "unknown";
|
|
123
|
-
}
|
|
124
|
-
/** Run a single HTTP health check, returning true if the validator passes. */
|
|
125
|
-
async function checkHttpHealth(executor, check) {
|
|
126
|
-
try {
|
|
127
|
-
const response = await executor.fetch(check.url);
|
|
128
|
-
return await check.validate(response);
|
|
129
|
-
} catch (_error) {
|
|
130
|
-
return false;
|
|
131
|
-
}
|
|
132
|
-
}
|
|
133
|
-
//#endregion
|
|
134
|
-
//#region src/docker-verify/verify.ts
|
|
135
|
-
const DEFAULT_TIMEOUT_MS = 12e4;
|
|
136
|
-
const DEFAULT_POLL_INTERVAL_MS = 5e3;
|
|
137
|
-
/** Run the full Docker image verification lifecycle. */
|
|
138
|
-
async function runVerification(executor, config) {
|
|
139
|
-
const timeoutMs = config.timeoutMs ?? DEFAULT_TIMEOUT_MS;
|
|
140
|
-
const pollIntervalMs = config.pollIntervalMs ?? DEFAULT_POLL_INTERVAL_MS;
|
|
141
|
-
const { compose } = config;
|
|
142
|
-
const cleanup = () => composeDown(executor, compose);
|
|
143
|
-
const disposeInt = executor.onSignal("SIGINT", () => {
|
|
144
|
-
cleanup();
|
|
145
|
-
process.exit(1);
|
|
146
|
-
});
|
|
147
|
-
const disposeTerm = executor.onSignal("SIGTERM", () => {
|
|
148
|
-
cleanup();
|
|
149
|
-
process.exit(1);
|
|
150
|
-
});
|
|
151
|
-
try {
|
|
152
|
-
if (config.buildCommand) {
|
|
153
|
-
executor.log("Building images...");
|
|
154
|
-
buildImages(executor, config);
|
|
155
|
-
}
|
|
156
|
-
executor.log("Starting compose stack...");
|
|
157
|
-
composeUp(executor, compose);
|
|
158
|
-
executor.log(`Waiting for stack to be healthy (max ${timeoutMs / 1e3}s)...`);
|
|
159
|
-
const startTime = executor.now();
|
|
160
|
-
const healthStatus = new Map(config.healthChecks.map((c) => [c.name, false]));
|
|
161
|
-
while (executor.now() - startTime < timeoutMs) {
|
|
162
|
-
const containers = composePs(executor, compose);
|
|
163
|
-
for (const service of compose.services) if (getContainerHealth(containers, service) === "unhealthy") {
|
|
164
|
-
executor.logError(`Container ${service} is unhealthy`);
|
|
165
|
-
composeLogs(executor, compose, service);
|
|
166
|
-
cleanup();
|
|
167
|
-
return {
|
|
168
|
-
success: false,
|
|
169
|
-
reason: "unhealthy-container",
|
|
170
|
-
message: service,
|
|
171
|
-
elapsedMs: executor.now() - startTime
|
|
172
|
-
};
|
|
173
|
-
}
|
|
174
|
-
for (const check of config.healthChecks) if (!healthStatus.get(check.name)) {
|
|
175
|
-
if (await checkHttpHealth(executor, check)) {
|
|
176
|
-
healthStatus.set(check.name, true);
|
|
177
|
-
executor.log(`${check.name} is healthy!`);
|
|
178
|
-
}
|
|
179
|
-
}
|
|
180
|
-
if ([...healthStatus.values()].every(Boolean)) {
|
|
181
|
-
executor.log("Verification successful! All systems operational.");
|
|
182
|
-
cleanup();
|
|
183
|
-
return {
|
|
184
|
-
success: true,
|
|
185
|
-
elapsedMs: executor.now() - startTime
|
|
186
|
-
};
|
|
187
|
-
}
|
|
188
|
-
const elapsed = Math.floor((executor.now() - startTime) / 1e3);
|
|
189
|
-
if (elapsed > 0 && elapsed % 5 === 0) {
|
|
190
|
-
const statuses = [...healthStatus.entries()].map(([name, ok]) => `${name}=${ok ? "OK" : "Pending"}`).join(", ");
|
|
191
|
-
executor.log(`Waiting... (${elapsed}s elapsed). ${statuses}`);
|
|
192
|
-
}
|
|
193
|
-
await executor.sleep(pollIntervalMs);
|
|
194
|
-
}
|
|
195
|
-
executor.logError("Timeout waiting for stack to become healthy");
|
|
196
|
-
for (const service of compose.services) composeLogs(executor, compose, service);
|
|
197
|
-
cleanup();
|
|
198
|
-
return {
|
|
199
|
-
success: false,
|
|
200
|
-
reason: "timeout",
|
|
201
|
-
message: "Exceeded timeout",
|
|
202
|
-
elapsedMs: executor.now() - startTime
|
|
203
|
-
};
|
|
204
|
-
} catch (error) {
|
|
205
|
-
cleanup();
|
|
206
|
-
return {
|
|
207
|
-
success: false,
|
|
208
|
-
reason: "error",
|
|
209
|
-
message: error instanceof Error ? error.message : String(error),
|
|
210
|
-
elapsedMs: 0
|
|
211
|
-
};
|
|
212
|
-
} finally {
|
|
213
|
-
disposeInt();
|
|
214
|
-
disposeTerm();
|
|
215
|
-
}
|
|
216
|
-
}
|
|
217
|
-
//#endregion
|
|
1
|
+
import { a as composeDown, c as composeUp, i as composeCommand, l as createRealExecutor, n as checkHttpHealth, o as composeLogs, r as getContainerHealth, s as composePs, t as runVerification } from "../verify-BaWlzdPh.mjs";
|
|
218
2
|
export { checkHttpHealth, composeCommand, composeDown, composeLogs, composePs, composeUp, createRealExecutor, getContainerHealth, runVerification };
|
|
@@ -0,0 +1,223 @@
|
|
|
1
|
+
import { z } from "zod";
|
|
2
|
+
import { execSync } from "node:child_process";
|
|
3
|
+
//#region src/utils/exec.ts
|
|
4
|
+
/** Type guard for `execSync` errors that carry stdout/stderr/status. */
|
|
5
|
+
function isExecSyncError(err) {
|
|
6
|
+
return err instanceof Error && "stdout" in err && typeof err.stdout === "string" && "stderr" in err && typeof err.stderr === "string" && "status" in err && typeof err.status === "number";
|
|
7
|
+
}
|
|
8
|
+
//#endregion
|
|
9
|
+
//#region src/docker-verify/executor.ts
|
|
10
|
+
/** Create an executor that runs real commands, fetches, and manages process signals. */
|
|
11
|
+
function createRealExecutor() {
|
|
12
|
+
return {
|
|
13
|
+
exec(command, options) {
|
|
14
|
+
try {
|
|
15
|
+
return {
|
|
16
|
+
stdout: execSync(command, {
|
|
17
|
+
cwd: options?.cwd,
|
|
18
|
+
env: options?.env ? {
|
|
19
|
+
...process.env,
|
|
20
|
+
...options.env
|
|
21
|
+
} : void 0,
|
|
22
|
+
encoding: "utf-8",
|
|
23
|
+
stdio: [
|
|
24
|
+
"pipe",
|
|
25
|
+
"pipe",
|
|
26
|
+
"pipe"
|
|
27
|
+
]
|
|
28
|
+
}),
|
|
29
|
+
stderr: "",
|
|
30
|
+
exitCode: 0
|
|
31
|
+
};
|
|
32
|
+
} catch (err) {
|
|
33
|
+
if (isExecSyncError(err)) return {
|
|
34
|
+
stdout: err.stdout,
|
|
35
|
+
stderr: err.stderr,
|
|
36
|
+
exitCode: err.status
|
|
37
|
+
};
|
|
38
|
+
return {
|
|
39
|
+
stdout: "",
|
|
40
|
+
stderr: "",
|
|
41
|
+
exitCode: 1
|
|
42
|
+
};
|
|
43
|
+
}
|
|
44
|
+
},
|
|
45
|
+
execInherit(command, options) {
|
|
46
|
+
execSync(command, {
|
|
47
|
+
cwd: options?.cwd,
|
|
48
|
+
env: options?.env ? {
|
|
49
|
+
...process.env,
|
|
50
|
+
...options.env
|
|
51
|
+
} : void 0,
|
|
52
|
+
stdio: "inherit"
|
|
53
|
+
});
|
|
54
|
+
},
|
|
55
|
+
fetch: globalThis.fetch,
|
|
56
|
+
now: () => Date.now(),
|
|
57
|
+
sleep: (ms) => new Promise((resolve) => setTimeout(resolve, ms)),
|
|
58
|
+
onSignal(signal, handler) {
|
|
59
|
+
process.on(signal, handler);
|
|
60
|
+
return () => {
|
|
61
|
+
process.removeListener(signal, handler);
|
|
62
|
+
};
|
|
63
|
+
},
|
|
64
|
+
log: (msg) => console.log(msg),
|
|
65
|
+
logError: (msg) => console.error(msg)
|
|
66
|
+
};
|
|
67
|
+
}
|
|
68
|
+
//#endregion
|
|
69
|
+
//#region src/docker-verify/compose.ts
|
|
70
|
+
/** Zod schema for a single container entry from `docker compose ps --format json`. */
|
|
71
|
+
const ContainerInfoSchema = z.object({
|
|
72
|
+
Service: z.string(),
|
|
73
|
+
Health: z.string()
|
|
74
|
+
});
|
|
75
|
+
/** Build the `docker compose` base command string from config. */
|
|
76
|
+
function composeCommand(config) {
|
|
77
|
+
return `docker compose ${config.composeFiles.map((f) => `-f ${f}`).join(" ")}${config.envFile ? ` --env-file ${config.envFile}` : ""}`;
|
|
78
|
+
}
|
|
79
|
+
/** Run the build command if configured. */
|
|
80
|
+
function buildImages(executor, config) {
|
|
81
|
+
if (!config.buildCommand) return;
|
|
82
|
+
executor.execInherit(config.buildCommand, { cwd: config.buildCwd ?? config.compose.cwd });
|
|
83
|
+
}
|
|
84
|
+
/** Start the compose stack in detached mode. */
|
|
85
|
+
function composeUp(executor, config) {
|
|
86
|
+
executor.execInherit(`${composeCommand(config)} up -d`, { cwd: config.cwd });
|
|
87
|
+
}
|
|
88
|
+
/** Tear down the compose stack, removing volumes and orphans. Swallows errors. */
|
|
89
|
+
function composeDown(executor, config) {
|
|
90
|
+
try {
|
|
91
|
+
executor.execInherit(`${composeCommand(config)} down -v --remove-orphans`, { cwd: config.cwd });
|
|
92
|
+
} catch (_error) {}
|
|
93
|
+
}
|
|
94
|
+
/** Show logs for a specific service (or all services if not specified). Swallows errors. */
|
|
95
|
+
function composeLogs(executor, config, service) {
|
|
96
|
+
try {
|
|
97
|
+
const suffix = service ? ` ${service}` : "";
|
|
98
|
+
executor.execInherit(`${composeCommand(config)} logs${suffix}`, { cwd: config.cwd });
|
|
99
|
+
} catch (_error) {}
|
|
100
|
+
}
|
|
101
|
+
/**
|
|
102
|
+
* Query container status via `docker compose ps --format json`.
|
|
103
|
+
* Handles both JSON array and newline-delimited JSON (varies by docker compose version).
|
|
104
|
+
*/
|
|
105
|
+
function composePs(executor, config) {
|
|
106
|
+
const output = executor.exec(`${composeCommand(config)} ps --format json`, { cwd: config.cwd }).stdout.trim();
|
|
107
|
+
if (!output) return [];
|
|
108
|
+
const ArraySchema = z.array(ContainerInfoSchema);
|
|
109
|
+
try {
|
|
110
|
+
const direct = ArraySchema.safeParse(JSON.parse(output));
|
|
111
|
+
if (direct.success) return direct.data;
|
|
112
|
+
const single = ContainerInfoSchema.safeParse(JSON.parse(output));
|
|
113
|
+
if (single.success) return [single.data];
|
|
114
|
+
} catch (_error) {}
|
|
115
|
+
try {
|
|
116
|
+
const joined = `[${output.split("\n").join(",")}]`;
|
|
117
|
+
const delimited = ArraySchema.safeParse(JSON.parse(joined));
|
|
118
|
+
return delimited.success ? delimited.data : [];
|
|
119
|
+
} catch (_error) {
|
|
120
|
+
return [];
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
//#endregion
|
|
124
|
+
//#region src/docker-verify/health.ts
|
|
125
|
+
/** Look up the health status of a specific service from container info. */
|
|
126
|
+
function getContainerHealth(containers, serviceName) {
|
|
127
|
+
return containers.find((c) => c.Service === serviceName)?.Health ?? "unknown";
|
|
128
|
+
}
|
|
129
|
+
/** Run a single HTTP health check, returning true if the validator passes. */
|
|
130
|
+
async function checkHttpHealth(executor, check) {
|
|
131
|
+
try {
|
|
132
|
+
const response = await executor.fetch(check.url);
|
|
133
|
+
return await check.validate(response);
|
|
134
|
+
} catch (_error) {
|
|
135
|
+
return false;
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
//#endregion
//#region src/docker-verify/verify.ts
// Polling defaults; both are overridable per run via config.timeoutMs /
// config.pollIntervalMs.
const DEFAULT_TIMEOUT_MS = 12e4;
const DEFAULT_POLL_INTERVAL_MS = 5e3;
/**
 * Run the full Docker image verification lifecycle.
 *
 * Lifecycle: optionally build images, bring the compose stack up, then poll
 * until either every configured HTTP health check has passed, a container
 * reports "unhealthy", or the timeout elapses. The stack is torn down via
 * composeDown on every exit path, and SIGINT/SIGTERM handlers tear it down
 * before exiting the process with status 1.
 *
 * @param executor side-effect abstraction: exec, fetch, now, sleep, onSignal,
 *                 log/logError — injected for testability
 * @param config   { compose, healthChecks, buildCommand?, timeoutMs?, pollIntervalMs? }
 * @returns { success: true, elapsedMs } on success, otherwise
 *          { success: false, reason, message, elapsedMs }; never throws —
 *          unexpected errors are converted into a reason: "error" result.
 */
async function runVerification(executor, config) {
	const timeoutMs = config.timeoutMs ?? DEFAULT_TIMEOUT_MS;
	const pollIntervalMs = config.pollIntervalMs ?? DEFAULT_POLL_INTERVAL_MS;
	const { compose } = config;
	// Tear down the compose stack; invoked on every terminal path below.
	// NOTE(review): cleanup may run more than once when a signal arrives after
	// a normal exit path already ran it — presumably composeDown is idempotent.
	const cleanup = () => composeDown(executor, compose);
	// Signal-safe cleanup: on Ctrl-C / termination, stop the stack before
	// exiting non-zero. Each onSignal call returns a disposer.
	const disposeInt = executor.onSignal("SIGINT", () => {
		cleanup();
		process.exit(1);
	});
	const disposeTerm = executor.onSignal("SIGTERM", () => {
		cleanup();
		process.exit(1);
	});
	try {
		// Build step is optional; skipped when no buildCommand is configured.
		if (config.buildCommand) {
			executor.log("Building images...");
			buildImages(executor, config);
		}
		executor.log("Starting compose stack...");
		composeUp(executor, compose);
		executor.log(`Waiting for stack to be healthy (max ${timeoutMs / 1e3}s)...`);
		const startTime = executor.now();
		// Track which HTTP checks have already passed so each URL stops being
		// polled once it succeeds (checks latch true; they are never re-verified).
		const healthStatus = new Map(config.healthChecks.map((c) => [c.name, false]));
		while (executor.now() - startTime < timeoutMs) {
			const containers = composePs(executor, compose);
			// Fail fast: any container Docker itself reports as unhealthy aborts
			// the run immediately, dumping only that service's logs.
			for (const service of compose.services) if (getContainerHealth(containers, service) === "unhealthy") {
				executor.logError(`Container ${service} is unhealthy`);
				composeLogs(executor, compose, service);
				cleanup();
				return {
					success: false,
					reason: "unhealthy-container",
					message: service,
					elapsedMs: executor.now() - startTime
				};
			}
			// Poll only the checks that have not yet passed this run.
			for (const check of config.healthChecks) if (!healthStatus.get(check.name)) {
				if (await checkHttpHealth(executor, check)) {
					healthStatus.set(check.name, true);
					executor.log(`${check.name} is healthy!`);
				}
			}
			// All checks latched true -> success.
			if ([...healthStatus.values()].every(Boolean)) {
				executor.log("Verification successful! All systems operational.");
				cleanup();
				return {
					success: true,
					elapsedMs: executor.now() - startTime
				};
			}
			// Progress line roughly every 5s of wall time. NOTE(review): with a
			// poll interval that does not land on multiples of 5s, some rounds
			// will skip this log; with the 5s default it prints every round.
			const elapsed = Math.floor((executor.now() - startTime) / 1e3);
			if (elapsed > 0 && elapsed % 5 === 0) {
				const statuses = [...healthStatus.entries()].map(([name, ok]) => `${name}=${ok ? "OK" : "Pending"}`).join(", ");
				executor.log(`Waiting... (${elapsed}s elapsed). ${statuses}`);
			}
			await executor.sleep(pollIntervalMs);
		}
		// Deadline exceeded: dump logs for every service to aid diagnosis.
		executor.logError("Timeout waiting for stack to become healthy");
		for (const service of compose.services) composeLogs(executor, compose, service);
		cleanup();
		return {
			success: false,
			reason: "timeout",
			message: "Exceeded timeout",
			elapsedMs: executor.now() - startTime
		};
	} catch (error) {
		// Unexpected failure (e.g. build or compose command threw). startTime is
		// scoped to the try block and may not even have been assigned yet, so no
		// elapsed time is reported on this path.
		cleanup();
		return {
			success: false,
			reason: "error",
			message: error instanceof Error ? error.message : String(error),
			elapsedMs: 0
		};
	} finally {
		// Always detach the signal handlers, whatever the outcome.
		disposeInt();
		disposeTerm();
	}
}
|
|
222
|
+
//#endregion
|
|
223
|
+
export { composeDown as a, composeUp as c, composeCommand as i, createRealExecutor as l, checkHttpHealth as n, composeLogs as o, getContainerHealth as r, composePs as s, runVerification as t, isExecSyncError as u };
|
package/package.json
CHANGED
|
@@ -1,13 +1,12 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@bensandee/tooling",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.18.0",
|
|
4
4
|
"description": "CLI tool to bootstrap and maintain standardized TypeScript project tooling",
|
|
5
5
|
"bin": {
|
|
6
6
|
"tooling": "./dist/bin.mjs"
|
|
7
7
|
},
|
|
8
8
|
"files": [
|
|
9
|
-
"dist"
|
|
10
|
-
"CHANGELOG.md"
|
|
9
|
+
"dist"
|
|
11
10
|
],
|
|
12
11
|
"type": "module",
|
|
13
12
|
"imports": {
|
|
@@ -42,7 +41,7 @@
|
|
|
42
41
|
"tsdown": "0.21.2",
|
|
43
42
|
"typescript": "5.9.3",
|
|
44
43
|
"vitest": "4.0.18",
|
|
45
|
-
"@bensandee/config": "0.8.
|
|
44
|
+
"@bensandee/config": "0.8.2"
|
|
46
45
|
},
|
|
47
46
|
"optionalDependencies": {
|
|
48
47
|
"@changesets/cli": "^2.29.4",
|
package/CHANGELOG.md
DELETED
|
@@ -1,242 +0,0 @@
|
|
|
1
|
-
# @bensandee/tooling
|
|
2
|
-
|
|
3
|
-
## 0.16.0
|
|
4
|
-
|
|
5
|
-
### Minor Changes
|
|
6
|
-
|
|
7
|
-
- b3e6d83: Redesign as conventions-first tool: auto-detect project structure, CI platform, project type, and Docker packages from the filesystem. `.tooling.json` now stores overrides only. Replace `repo:init`/`repo:update`/`repo:check` with idempotent `repo:sync` (and `repo:sync --check`). Docker packages are discovered by convention (`Dockerfile` or `docker/Dockerfile` in package dirs) instead of requiring explicit config. First-run prompts reduced to release strategy, CI platform (if not detected), and formatter (if Prettier found).
|
|
8
|
-
|
|
9
|
-
## 0.15.0
|
|
10
|
-
|
|
11
|
-
### Minor Changes
|
|
12
|
-
|
|
13
|
-
- 2ef37e2: Add `docker:build` and `docker:publish` CLI commands. Packages declare a `docker` field in their `package.json` with `dockerfile` and `context`, and the tooling handles `docker build` with the correct image name (`{repo}-{package}`). `docker:build --package .` enables a per-package `image:build` script for local testing. `docker:publish` builds all images, then tags/pushes them with semver variants (latest, vX.Y.Z, vX.Y, vX) from each package's own version. Also adds a deploy workflow generator (`setupDocker` config option) that emits a CI workflow triggered on version tags.
|
|
14
|
-
- c09d233: Combine CI and release workflows for changesets strategy into a single check.yml with release job gated on check success
|
|
15
|
-
|
|
16
|
-
### Patch Changes
|
|
17
|
-
|
|
18
|
-
- 6cae944: Prompt to overwrite outdated release workflows during repo:update instead of only merging missing steps
|
|
19
|
-
|
|
20
|
-
## 0.14.1
|
|
21
|
-
|
|
22
|
-
### Patch Changes
|
|
23
|
-
|
|
24
|
-
- caeebd8: Add d.ts declaration file output and types export conditions
|
|
25
|
-
- caeebd8: Bump tsdown from 0.21.0 to 0.21.2
|
|
26
|
-
- Updated dependencies [caeebd8]
|
|
27
|
-
- Updated dependencies [caeebd8]
|
|
28
|
-
- @bensandee/common@0.1.2
|
|
29
|
-
|
|
30
|
-
## 0.14.0
|
|
31
|
-
|
|
32
|
-
### Minor Changes
|
|
33
|
-
|
|
34
|
-
- e95d449: Add `--fail-fast` / `--no-fail-fast` flag to `checks:run` to control whether execution stops on the first failure. Defaults to fail-fast in dev and continue-on-error in CI.
|
|
35
|
-
- 715a4ea: Add `@bensandee/tooling/docker-verify` subpath export: a TypeScript framework for Docker image verification with compose lifecycle management, HTTP health polling, container health monitoring, and signal-safe cleanup. Consumers import building blocks and compose them with custom validators instead of writing boilerplate.
|
|
36
|
-
- 27c3480: Add `release:simple` command and rename CLI subcommands
|
|
37
|
-
|
|
38
|
-
**Breaking changes:**
|
|
39
|
-
|
|
40
|
-
- `release:create-forgejo-release` renamed to `forgejo:create-release`
|
|
41
|
-
- `release:merge` renamed to `changesets:merge`
|
|
42
|
-
- `releaseStrategy: "commit-and-tag-version"` renamed to `"simple"` in `.tooling.json` config
|
|
43
|
-
- Generated CI workflow for commit-and-tag-version now uses `pnpm exec tooling release:simple` instead of inline shell commands
|
|
44
|
-
|
|
45
|
-
**New feature:**
|
|
46
|
-
|
|
47
|
-
`release:simple` — a CLI command that handles the full release lifecycle for projects using commit-and-tag-version:
|
|
48
|
-
|
|
49
|
-
- Runs `commit-and-tag-version` to bump version, update CHANGELOG, and create a git tag
|
|
50
|
-
- Pushes to origin with `--follow-tags`
|
|
51
|
-
- Creates sliding version tags (vX, vX.Y) for flexible deployment pinning
|
|
52
|
-
- Creates Forgejo or GitHub releases automatically
|
|
53
|
-
|
|
54
|
-
### Patch Changes
|
|
55
|
-
|
|
56
|
-
- 715a4ea: Add README files to all published packages for npm registry documentation
|
|
57
|
-
- 27c3480: Pre-populate `repo:init` prompts from saved `.tooling.json` config
|
|
58
|
-
|
|
59
|
-
When re-running `repo:init` on a project with an existing `.tooling.json`, each prompt now defaults to the previously saved choice instead of the detection-based default. Press Enter to keep existing settings or change only what you need.
|
|
60
|
-
|
|
61
|
-
- d448ec6: Update node tsconfig base to use `nodenext` module resolution with `allowImportingTsExtensions`, enabling `.ts` extensions in imports for projects running TypeScript natively on Node 24+. Migrate all tooling-cli imports to use `.ts` extensions and switch `#src` subpath mapping to `#src/*.ts`. Use extensionless imports for library packages.
|
|
62
|
-
- c49593f: Add `commit-and-tag-version` and `@changesets/cli` as optional dependencies
|
|
63
|
-
|
|
64
|
-
These tools are only needed when using their respective release strategies, so they're optional rather than required. Target projects already install them as devDependencies via the package-json generator.
|
|
65
|
-
|
|
66
|
-
- Updated dependencies [715a4ea]
|
|
67
|
-
- Updated dependencies [d448ec6]
|
|
68
|
-
- @bensandee/common@0.1.1
|
|
69
|
-
|
|
70
|
-
## 0.13.0
|
|
71
|
-
|
|
72
|
-
### Minor Changes
|
|
73
|
-
|
|
74
|
-
- bbe3634: Add `checks:run` command (renamed from `repo:run-checks`). Add `ci:check`, `tooling:check`, and `tooling:update` as generated package.json scripts. CI workflows now run `pnpm ci:check`. Managed scripts are updated on `repo:update`/`repo:check` if they don't reference the expected command.
|
|
75
|
-
|
|
76
|
-
### Patch Changes
|
|
77
|
-
|
|
78
|
-
- f20b25d: `checks:run` now reads package.json to detect which scripts are defined. Undefined scripts show "(not defined)" instead of silently passing. Commands use `pnpm run` instead of `pnpm run --if-present`.
|
|
79
|
-
|
|
80
|
-
## 0.12.0
|
|
81
|
-
|
|
82
|
-
### Minor Changes
|
|
83
|
-
|
|
84
|
-
- 5de6090: Add `repo:run-checks` command that runs all standard checks (build, typecheck, lint, test, format, knip, tooling:check, image:check) without short-circuiting, reporting a summary of failures at the end. Supports `--skip` to skip specific checks and `--add` to append custom checks. Generated CI workflows now use `pnpm check`, and the package.json generator produces `check` and `tooling:check` scripts pointing to this command. Managed scripts (`check`, `tooling:check`) are updated on `repo:update`/`repo:check` if they don't already reference the expected command.
|
|
85
|
-
|
|
86
|
-
## 0.11.0
|
|
87
|
-
|
|
88
|
-
### Minor Changes
|
|
89
|
-
|
|
90
|
-
- 493ae65: Add `repo:run-checks` command that runs all standard checks (build, typecheck, lint, test, format, knip, repo:check) without short-circuiting, reporting a summary of failures at the end. Generated CI workflows and package.json `check` scripts now use this command. Skip `trigger-release` script for changesets release strategy.
|
|
91
|
-
|
|
92
|
-
### Patch Changes
|
|
93
|
-
|
|
94
|
-
- ae18571: Add .pnpm-store to gitignore file
|
|
95
|
-
- 916c1ee: Ensure `yaml-language-server` schema comment is added to existing Forgejo workflow files during update/merge
|
|
96
|
-
|
|
97
|
-
## 0.10.1
|
|
98
|
-
|
|
99
|
-
### Patch Changes
|
|
100
|
-
|
|
101
|
-
- f131a3d: Add `pnpm why` to the allowed Bash commands in generated Claude settings
|
|
102
|
-
- 1cb2ce8: Add yaml-language-server schema comments to generated Forgejo workflow files and update schema glob to match both .yml and .yaml extensions
|
|
103
|
-
|
|
104
|
-
## 0.10.0
|
|
105
|
-
|
|
106
|
-
### Minor Changes
|
|
107
|
-
|
|
108
|
-
- 34a0e1e: feat: merge missing config into existing lefthook and CI workflow files instead of skipping
|
|
109
|
-
|
|
110
|
-
Generators for `lefthook.yml`, CI check workflows, and release workflows now merge required
|
|
111
|
-
entries into existing files rather than silently skipping them. This means `repo:update` can
|
|
112
|
-
add new steps (e.g. a newly required CI check) to repos that were initialized before the step
|
|
113
|
-
existed.
|
|
114
|
-
|
|
115
|
-
Add `# @bensandee/tooling:ignore` in the first 10 lines of any YAML file to opt out of
|
|
116
|
-
automatic merging.
|
|
117
|
-
|
|
118
|
-
### Patch Changes
|
|
119
|
-
|
|
120
|
-
- 330cc2c: fix: use semantic JSON comparison in repo:check and repo:update to ignore formatting-only differences
|
|
121
|
-
|
|
122
|
-
## 0.9.0
|
|
123
|
-
|
|
124
|
-
### Minor Changes
|
|
125
|
-
|
|
126
|
-
- 88f2a93: Require `.tooling.json` for `repo:update` and `repo:check` commands. Previously these commands would warn and continue with detected defaults when `.tooling.json` was missing, which could cause unexpected overwrites without proper archiving. Now they exit with an error directing the user to run `tooling repo:init` first.
|
|
127
|
-
|
|
128
|
-
Write Forgejo workflow schema mapping to `.code-workspace` file when present, falling back to `.vscode/settings.json`. The `yaml.schemas` setting in `.vscode/settings.json` doesn't apply in VS Code multi-root workspaces.
|
|
129
|
-
|
|
130
|
-
Improve post-init guidance: suggest a Claude Code prompt ("Execute the steps in .tooling-migrate.md") instead of "paste contents".
|
|
131
|
-
|
|
132
|
-
## 0.8.1
|
|
133
|
-
|
|
134
|
-
### Patch Changes
|
|
135
|
-
|
|
136
|
-
- efcfdcc: Fix findOpenPr to filter PRs client-side by head.ref instead of relying on Forgejo's inconsistent head query parameter, which could match the wrong PR
|
|
137
|
-
- 88aac23: Add forgejo workflow schema additions
|
|
138
|
-
- e4c41d6: Fix wrong agent name in settings.json for claude
|
|
139
|
-
- 43509b8: Pin @bensandee/\* package versions in generated package.json instead of using "latest". Versions are read from sibling package.json files at build time via tsdown's define feature, so they auto-update with each release.
|
|
140
|
-
- 5e65e50: enhance ciWorkflow to support Forgejo email notifications
|
|
141
|
-
- 60a5502: refactor generateClaudeSettings to handle monorepo structure and update tests for plugin integration
|
|
142
|
-
|
|
143
|
-
## 0.8.0
|
|
144
|
-
|
|
145
|
-
### Minor Changes
|
|
146
|
-
|
|
147
|
-
- 375f7fd: Add claude skills to settings.json
|
|
148
|
-
|
|
149
|
-
### Patch Changes
|
|
150
|
-
|
|
151
|
-
- 375098b: Add more safety restrictions to settings.json
|
|
152
|
-
- b330adf: Fix bad update to tsconfig when not needed
|
|
153
|
-
|
|
154
|
-
## 0.7.3
|
|
155
|
-
|
|
156
|
-
### Patch Changes
|
|
157
|
-
|
|
158
|
-
- 3257e04: Fix no-unsafe-json-parse rule and fix new lint errors
|
|
159
|
-
- ca61fa7: Don't overwrite existing oxfmt config
|
|
160
|
-
- 1bdf858: More intelligent addition of src folder to tsconfig
|
|
161
|
-
- 8de49b9: Add line about adding packages when necessary to resolve errors
|
|
162
|
-
|
|
163
|
-
## 0.7.2
|
|
164
|
-
|
|
165
|
-
### Patch Changes
|
|
166
|
-
|
|
167
|
-
- e48bc27: Fix bug where tsconfigs in packages would be force-updated even if solutions-style
|
|
168
|
-
|
|
169
|
-
## 0.7.1
|
|
170
|
-
|
|
171
|
-
### Patch Changes
|
|
172
|
-
|
|
173
|
-
- 6ef4ea9: Fix tsconfig build/update issues
|
|
174
|
-
- 3608a1a: Run pnpm update after repo:update
|
|
175
|
-
|
|
176
|
-
## 0.7.0
|
|
177
|
-
|
|
178
|
-
### Minor Changes
|
|
179
|
-
|
|
180
|
-
- 912013d: Add repo:check command
|
|
181
|
-
- 2545262: Add common package + error subclasses
|
|
182
|
-
|
|
183
|
-
### Patch Changes
|
|
184
|
-
|
|
185
|
-
- Updated dependencies [2545262]
|
|
186
|
-
- @bensandee/common@0.1.0
|
|
187
|
-
|
|
188
|
-
## 0.6.2
|
|
189
|
-
|
|
190
|
-
### Patch Changes
|
|
191
|
-
|
|
192
|
-
- caa1270: Fix hang migrating repo:init
|
|
193
|
-
|
|
194
|
-
## 0.6.1
|
|
195
|
-
|
|
196
|
-
### Patch Changes
|
|
197
|
-
|
|
198
|
-
- 2182ab3: fix bug where renovate.json5 wasn't cleaned up to use our preset
|
|
199
|
-
- d811a96: Lefthook doesn't need an install step in package.json prepare
|
|
200
|
-
|
|
201
|
-
## 0.6.0
|
|
202
|
-
|
|
203
|
-
### Minor Changes
|
|
204
|
-
|
|
205
|
-
- 94cd161: Updated default oxlint config to include more default rules.
|
|
206
|
-
|
|
207
|
-
## 0.5.1
|
|
208
|
-
|
|
209
|
-
### Patch Changes
|
|
210
|
-
|
|
211
|
-
- e0bc32e: Improve migration for tsconfig and husky/lint-staged
|
|
212
|
-
- 02c1a1b: Include version when running tooling cli
|
|
213
|
-
|
|
214
|
-
## 0.5.0
|
|
215
|
-
|
|
216
|
-
### Minor Changes
|
|
217
|
-
|
|
218
|
-
- 58fc8a3: Add lefthook support in place of husky, lint-staged
|
|
219
|
-
|
|
220
|
-
## 0.4.0
|
|
221
|
-
|
|
222
|
-
### Minor Changes
|
|
223
|
-
|
|
224
|
-
- e02953a: Bug fixing, move renovate config to standard location
|
|
225
|
-
- 451908d: Restructure package names and exports.
|
|
226
|
-
|
|
227
|
-
## 0.3.0
|
|
228
|
-
|
|
229
|
-
### Minor Changes
|
|
230
|
-
|
|
231
|
-
- 5e9719f: Many bug fixes
|
|
232
|
-
|
|
233
|
-
## 0.2.0
|
|
234
|
-
|
|
235
|
-
### Minor Changes
|
|
236
|
-
|
|
237
|
-
- c376981: Initial release
|
|
238
|
-
|
|
239
|
-
### Patch Changes
|
|
240
|
-
|
|
241
|
-
- 3fc9fe3: Support multiple release architectures (release-it, commit-and-tag-version and changesets)
|
|
242
|
-
- 4004530: Add release-forgejo command to perform final steps of release creation in forgejo.
|
package/dist/exec-CC49vrkM.mjs
DELETED
|
@@ -1,7 +0,0 @@
|
|
|
1
|
-
//#region src/utils/exec.ts
|
|
2
|
-
/** Type guard for `execSync` errors that carry stdout/stderr/status. */
|
|
3
|
-
function isExecSyncError(err) {
|
|
4
|
-
return err instanceof Error && "stdout" in err && typeof err.stdout === "string" && "stderr" in err && typeof err.stderr === "string" && "status" in err && typeof err.status === "number";
|
|
5
|
-
}
|
|
6
|
-
//#endregion
|
|
7
|
-
export { isExecSyncError as t };
|