ai-spec-dev 0.30.1 → 0.31.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +29 -1
- package/RELEASE_LOG.md +33 -0
- package/cli/index.ts +24 -1
- package/core/prompt-hasher.ts +42 -0
- package/core/run-logger.ts +21 -0
- package/core/self-evaluator.ts +172 -0
- package/dist/cli/index.js +444 -323
- package/dist/cli/index.js.map +1 -1
- package/dist/cli/index.mjs +444 -323
- package/dist/cli/index.mjs.map +1 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs.map +1 -1
- package/package.json +1 -1
- package/purpose.md +189 -2
package/README.md
CHANGED
|
@@ -93,7 +93,7 @@ ai-spec create "给用户模块增加登录功能"
|
|
|
93
93
|
[cycle 2/2] Running tests: npm test
|
|
94
94
|
✔ Tests passed.
|
|
95
95
|
✔ All checks passed after 2 cycle(s).
|
|
96
|
-
[9/
|
|
96
|
+
[9/10] Automated code review (3-pass: architecture + implementation + impact/complexity)...
|
|
97
97
|
Pass 1/3: Architecture review...
|
|
98
98
|
Pass 2/3: Implementation review...
|
|
99
99
|
Pass 3/3: Impact & complexity assessment...
|
|
@@ -1259,6 +1259,32 @@ Attempting auto-fix (3 error(s))...
|
|
|
1259
1259
|
|
|
1260
1260
|
---
|
|
1261
1261
|
|
|
1262
|
+
### Step 10 — Harness Self-Eval
|
|
1263
|
+
|
|
1264
|
+
代码审查完成后自动执行,**零 AI 调用**,纯确定性评分:
|
|
1265
|
+
|
|
1266
|
+
| 维度 | 评分逻辑 |
|
|
1267
|
+
|------|---------|
|
|
1268
|
+
| DSL Coverage (0-10) | 生成文件是否覆盖 DSL 声明的 endpoint 层和 model 层 |
|
|
1269
|
+
| Compile Score (0-10) | error feedback 全通过或显式跳过 → 10;未通过 → 5 |
|
|
1270
|
+
| Review Score (0-10) | 从 3-pass review 文本提取 `Score: X/10` |
|
|
1271
|
+
|
|
1272
|
+
**Harness Score** = 加权平均(DSL 40% + Compile 30% + Review 30%)
|
|
1273
|
+
|
|
1274
|
+
```
|
|
1275
|
+
─── Harness Self-Eval ───────────────────────────
|
|
1276
|
+
Score : [████████░░] 7.8/10
|
|
1277
|
+
DSL : 8/10 Compile: pass Review: 7.2/10
|
|
1278
|
+
Prompt : a3f2c1d8
|
|
1279
|
+
─────────────────────────────────────────────────
|
|
1280
|
+
```
|
|
1281
|
+
|
|
1282
|
+
- `harnessScore` 和 `promptHash` 写入 RunLog(`.ai-spec-logs/<runId>.json`)
|
|
1283
|
+
- 每次改动 prompt 文件后,`promptHash` 自动变化,结合 `harnessScore` 可量化 prompt 改动的效果
|
|
1284
|
+
- 后续可通过脚本聚合多个 RunLog,绘制 harnessScore × promptHash 的趋势图
|
|
1285
|
+
|
|
1286
|
+
---
|
|
1287
|
+
|
|
1262
1288
|
## 多 Repo 工作区模式
|
|
1263
1289
|
|
|
1264
1290
|
当父目录中存在 `.ai-spec-workspace.json` 时,`ai-spec create` 自动切换为**多 Repo 联动模式**,一句需求驱动前后端全链路实现。
|
|
@@ -1525,6 +1551,8 @@ ai-spec-dev-poc/
|
|
|
1525
1551
|
│ ├── reviewer.ts # AI 代码审查(git diff / 文件内容双模式)
|
|
1526
1552
|
│ ├── test-generator.ts # 测试骨架生成器(DSL → Jest/Vitest 骨架)
|
|
1527
1553
|
│ ├── error-feedback.ts # 错误反馈自动修复(测试+lint检测 · 依赖图排序修复 · AI修复循环)
|
|
1554
|
+
│ ├── prompt-hasher.ts # [v0.31.0] Prompt Hash:6 个核心 prompt 的 SHA-256 短 hash
|
|
1555
|
+
│ ├── self-evaluator.ts # [v0.31.0] Harness Self-Eval:零 AI 调用,DSL覆盖+编译+review加权评分
|
|
1528
1556
|
│ ├── knowledge-memory.ts # 经验积累:审查 issue → 宪法§9
|
|
1529
1557
|
│ ├── workspace-loader.ts # [Phase 4] 工作区配置加载 + repo 类型自动检测
|
|
1530
1558
|
│ ├── requirement-decomposer.ts # [Phase 4] 需求跨 repo 拆分 + UX 决策生成
|
package/RELEASE_LOG.md
CHANGED
|
@@ -2,6 +2,39 @@
|
|
|
2
2
|
|
|
3
3
|
---
|
|
4
4
|
|
|
5
|
+
## [0.31.0] 2026-03-29 — Harness Engineer:Prompt Hash + Create 内联 Self-Eval
|
|
6
|
+
|
|
7
|
+
### 新增内容
|
|
8
|
+
|
|
9
|
+
**Feature #1 — Prompt Hash 关联(`core/prompt-hasher.ts`、`core/run-logger.ts`)**
|
|
10
|
+
|
|
11
|
+
- 新增 `computePromptHash()` — 对 6 个核心 prompt 字符串(codegen、DSL extractor、spec generator、review 三 pass)计算 SHA-256 并取前 8 位,返回形如 `a3f2c1d8` 的短 hex 字符串
|
|
12
|
+
- `RunLog` 新增 `promptHash?: string` 字段;`RunLogger` 新增 `setPromptHash()` + `setHarnessScore()` 方法
|
|
13
|
+
- `ai-spec create` 运行开始时立即调用 `computePromptHash()` 写入 RunLog,任何 prompt 文件改动都会产生不同的 hash
|
|
14
|
+
- **目的**:跨多次运行对比 `harnessScore` 时,可以精确知道「这两次用的 prompt 版本是否相同」,将 prompt 改动的效果从模型随机性中解耦
|
|
15
|
+
|
|
16
|
+
**Feature #2 — Create 内联 Harness Self-Eval(`core/self-evaluator.ts`、`cli/index.ts`)**
|
|
17
|
+
|
|
18
|
+
- 新增 `core/self-evaluator.ts` — 零 AI 调用的确定性评分模块:
|
|
19
|
+
- **DSL Coverage Score (0-10)**:检查 `generatedFiles` 中是否存在 endpoint 层文件(`src/api*`、`src/routes*`、`src/controller*`…)和 model 层文件(`src/model*`、`prisma/`、`src/db*`…),与 DSL 中声明的 endpoint / model 数量对照
|
|
20
|
+
- **Compile Score (0-10)**:`runErrorFeedback()` 返回 true 或显式跳过(视为通过)→ 10,未通过 → 5
|
|
21
|
+
- **Review Score (0-10)**:从 3-pass review 文本中提取 `Score: X/10`(与 `reviewer.ts` 同规则),review 跳过时为 null
|
|
22
|
+
- **Harness Score**:加权平均(有 review:DSL×40% + Compile×30% + Review×30%;无 review:DSL×55% + Compile×45%)
|
|
23
|
+
- `runErrorFeedback()` 的返回值(`boolean`)现在被接住赋给 `compilePassed`,传入 self-eval
|
|
24
|
+
- `ai-spec create` Step 9(code review)之后新增 **Step 10: Harness Self-Eval**,完成后打印:
|
|
25
|
+
|
|
26
|
+
```
|
|
27
|
+
─── Harness Self-Eval ───────────────────────────
|
|
28
|
+
Score : [████████░░] 7.8/10
|
|
29
|
+
DSL : 8/10 Compile: pass Review: 7.2/10
|
|
30
|
+
Prompt : a3f2c1d8
|
|
31
|
+
─────────────────────────────────────────────────
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
- `harnessScore` 和所有维度分数写入 RunLog 的 `self_eval:done` 事件 + 根级 `harnessScore` 字段,便于后续脚本聚合分析
|
|
35
|
+
|
|
36
|
+
---
|
|
37
|
+
|
|
5
38
|
## [0.30.0] 2026-03-29 — 错误修复依赖图排序 + 前端 Import 多行感知解析
|
|
6
39
|
|
|
7
40
|
### 改进内容
|
package/cli/index.ts
CHANGED
|
@@ -73,6 +73,8 @@ import { SpecUpdater } from "../core/spec-updater";
|
|
|
73
73
|
import { exportOpenApi } from "../core/openapi-exporter";
|
|
74
74
|
import { generateRunId, RunLogger, setActiveLogger } from "../core/run-logger";
|
|
75
75
|
import { RunSnapshot, setActiveSnapshot } from "../core/run-snapshot";
|
|
76
|
+
import { computePromptHash } from "../core/prompt-hasher";
|
|
77
|
+
import { runSelfEval, printSelfEval } from "../core/self-evaluator";
|
|
76
78
|
|
|
77
79
|
// ─── Config File ──────────────────────────────────────────────────────────────
|
|
78
80
|
|
|
@@ -305,6 +307,11 @@ program
|
|
|
305
307
|
});
|
|
306
308
|
setActiveLogger(runLogger);
|
|
307
309
|
|
|
310
|
+
// Record prompt hash immediately — links this RunLog to the prompt version
|
|
311
|
+
// in use, enabling cross-run harnessScore comparisons (Harness Engineering).
|
|
312
|
+
const promptHash = computePromptHash();
|
|
313
|
+
runLogger.setPromptHash(promptHash);
|
|
314
|
+
|
|
308
315
|
// ── Step 1: Context ───────────────────────────────────────────────────────
|
|
309
316
|
console.log(chalk.blue("[1/6] Loading project context..."));
|
|
310
317
|
runLogger.stageStart("context_load");
|
|
@@ -625,14 +632,16 @@ program
|
|
|
625
632
|
// ── Step 8: Error Feedback Loop ───────────────────────────────────────────
|
|
626
633
|
// In TDD mode, the error feedback loop is the primary driver:
|
|
627
634
|
// it runs tests, collects failures, and fixes implementation until tests pass.
|
|
635
|
+
let compilePassed = false;
|
|
628
636
|
if (opts.skipErrorFeedback) {
|
|
629
637
|
console.log(chalk.gray("[8/9] Skipping error feedback (--skip-error-feedback)."));
|
|
638
|
+
compilePassed = true; // treat skip as neutral pass for self-eval
|
|
630
639
|
} else {
|
|
631
640
|
if (opts.tdd) {
|
|
632
641
|
console.log(chalk.cyan("[8/9] TDD mode — error feedback loop driving implementation to pass tests..."));
|
|
633
642
|
}
|
|
634
643
|
runLogger.stageStart("error_feedback");
|
|
635
|
-
await runErrorFeedback(codegenProvider, workingDir, extractedDsl, {
|
|
644
|
+
compilePassed = await runErrorFeedback(codegenProvider, workingDir, extractedDsl, {
|
|
636
645
|
maxCycles: opts.tdd ? 3 : 2, // TDD gets one extra cycle
|
|
637
646
|
});
|
|
638
647
|
runLogger.stageEnd("error_feedback");
|
|
@@ -665,6 +674,20 @@ program
|
|
|
665
674
|
await accumulateReviewKnowledge(specProvider, currentDir, reviewResult);
|
|
666
675
|
}
|
|
667
676
|
|
|
677
|
+
// ── Step 10: Harness Self-Evaluation ──────────────────────────────────────
|
|
678
|
+
// Zero AI calls — deterministic scoring from file-system state + review text.
|
|
679
|
+
// Records harnessScore + promptHash in RunLog for cross-run trend analysis.
|
|
680
|
+
runLogger.stageStart("self_eval");
|
|
681
|
+
const selfEvalResult = runSelfEval({
|
|
682
|
+
dsl: extractedDsl,
|
|
683
|
+
generatedFiles,
|
|
684
|
+
compilePassed,
|
|
685
|
+
reviewText: reviewResult,
|
|
686
|
+
promptHash,
|
|
687
|
+
logger: runLogger,
|
|
688
|
+
});
|
|
689
|
+
printSelfEval(selfEvalResult);
|
|
690
|
+
|
|
668
691
|
// ── Done ──────────────────────────────────────────────────────────────────
|
|
669
692
|
runLogger.finish();
|
|
670
693
|
console.log(chalk.bold.green("\n✔ All done!"));
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import { createHash } from "crypto";
|
|
2
|
+
|
|
3
|
+
import { codeGenSystemPrompt } from "../prompts/codegen.prompt";
|
|
4
|
+
import {
|
|
5
|
+
reviewArchitectureSystemPrompt,
|
|
6
|
+
reviewImplementationSystemPrompt,
|
|
7
|
+
reviewImpactComplexitySystemPrompt,
|
|
8
|
+
} from "../prompts/codegen.prompt";
|
|
9
|
+
import { dslSystemPrompt } from "../prompts/dsl.prompt";
|
|
10
|
+
import { specPrompt } from "../prompts/spec.prompt";
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Compute a short deterministic hash of the key prompt strings used in a run.
|
|
14
|
+
*
|
|
15
|
+
* Why this matters (Harness Engineering):
|
|
16
|
+
* When you change a prompt and re-run `ai-spec create`, the resulting RunLog
|
|
17
|
+
* will have a different promptHash. Cross-referencing RunLogs by promptHash
|
|
18
|
+
* lets you quantify whether a prompt change improved or degraded harnessScore
|
|
19
|
+
* without keeping a separate changelog.
|
|
20
|
+
*
|
|
21
|
+
* Coverage: codegen system prompt (TS), DSL extractor, spec generator, and all
|
|
22
|
+
* three review-pass prompts — these drive the vast majority of token spend and
|
|
23
|
+
* output variance.
|
|
24
|
+
*
|
|
25
|
+
* Returns: 8-char lowercase hex (e.g. "a3f2c1d8"). Collision probability for
|
|
26
|
+
* practical prompt-tweak scenarios is negligible.
|
|
27
|
+
*/
|
|
28
|
+
export function computePromptHash(): string {
|
|
29
|
+
const segments = [
|
|
30
|
+
codeGenSystemPrompt,
|
|
31
|
+
dslSystemPrompt,
|
|
32
|
+
specPrompt,
|
|
33
|
+
reviewArchitectureSystemPrompt,
|
|
34
|
+
reviewImplementationSystemPrompt,
|
|
35
|
+
reviewImpactComplexitySystemPrompt,
|
|
36
|
+
];
|
|
37
|
+
|
|
38
|
+
return createHash("sha256")
|
|
39
|
+
.update(segments.join("\x00")) // \x00 separator prevents segment-boundary collisions
|
|
40
|
+
.digest("hex")
|
|
41
|
+
.slice(0, 8);
|
|
42
|
+
}
|
package/core/run-logger.ts
CHANGED
|
@@ -20,6 +20,15 @@ export interface RunLog {
|
|
|
20
20
|
provider?: string;
|
|
21
21
|
model?: string;
|
|
22
22
|
specPath?: string;
|
|
23
|
+
/**
|
|
24
|
+
* 8-char hex hash of the key prompt strings used in this run.
|
|
25
|
+
* Changes whenever any of: codegen, DSL, spec, or review prompts are edited.
|
|
26
|
+
* Use this to correlate RunLogs across runs and measure whether a prompt
|
|
27
|
+
* change improved or degraded harnessScore (Harness Engineering observability).
|
|
28
|
+
*/
|
|
29
|
+
promptHash?: string;
|
|
30
|
+
/** Harness self-evaluation score recorded at end of `create` (0-10). */
|
|
31
|
+
harnessScore?: number;
|
|
23
32
|
entries: LogEntry[];
|
|
24
33
|
filesWritten: string[];
|
|
25
34
|
errors: string[];
|
|
@@ -73,6 +82,18 @@ export class RunLogger {
|
|
|
73
82
|
this.flush();
|
|
74
83
|
}
|
|
75
84
|
|
|
85
|
+
/** Record the prompt hash for this run (call once at run start). */
|
|
86
|
+
setPromptHash(hash: string): void {
|
|
87
|
+
this.log.promptHash = hash;
|
|
88
|
+
this.flush();
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
/** Record the harness self-eval score (call once at run end). */
|
|
92
|
+
setHarnessScore(score: number): void {
|
|
93
|
+
this.log.harnessScore = score;
|
|
94
|
+
this.flush();
|
|
95
|
+
}
|
|
96
|
+
|
|
76
97
|
fileWritten(filePath: string): void {
|
|
77
98
|
if (!this.log.filesWritten.includes(filePath)) {
|
|
78
99
|
this.log.filesWritten.push(filePath);
|
|
@@ -0,0 +1,172 @@
|
|
|
1
|
+
import chalk from "chalk";
|
|
2
|
+
import { SpecDSL } from "./dsl-types";
|
|
3
|
+
import { RunLogger } from "./run-logger";
|
|
4
|
+
|
|
5
|
+
// ─── Types ────────────────────────────────────────────────────────────────────
|
|
6
|
+
|
|
7
|
+
export interface SelfEvalResult {
|
|
8
|
+
/** 0-10: did generated files cover the endpoint + model layers declared in DSL? */
|
|
9
|
+
dslCoverageScore: number;
|
|
10
|
+
/** 0-10: 10 = error feedback passed cleanly, 5 = partial / skipped */
|
|
11
|
+
compileScore: number;
|
|
12
|
+
/** 0-10 extracted from 3-pass review text, or null when review was skipped */
|
|
13
|
+
reviewScore: number | null;
|
|
14
|
+
/** 0-10 weighted overall — the "Harness Score" recorded in RunLog */
|
|
15
|
+
harnessScore: number;
|
|
16
|
+
/** Prompt hash at the time this run executed */
|
|
17
|
+
promptHash: string;
|
|
18
|
+
detail: {
|
|
19
|
+
endpointsTotal: number;
|
|
20
|
+
endpointLayerCovered: boolean;
|
|
21
|
+
modelsTotal: number;
|
|
22
|
+
modelLayerCovered: boolean;
|
|
23
|
+
filesWritten: number;
|
|
24
|
+
};
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
|
28
|
+
|
|
29
|
+
/** File-path patterns that indicate an API / controller / route layer file. */
|
|
30
|
+
const ENDPOINT_LAYER_PATTERNS = [
|
|
31
|
+
/src\/api/,
|
|
32
|
+
/src\/routes?/,
|
|
33
|
+
/src\/controller/,
|
|
34
|
+
/src\/handler/,
|
|
35
|
+
/src\/endpoints?/,
|
|
36
|
+
];
|
|
37
|
+
|
|
38
|
+
/** File-path patterns that indicate a data / model / schema layer file. */
|
|
39
|
+
const MODEL_LAYER_PATTERNS = [
|
|
40
|
+
/src\/model/,
|
|
41
|
+
/src\/schema/,
|
|
42
|
+
/src\/entit/,
|
|
43
|
+
/src\/db/,
|
|
44
|
+
/prisma/,
|
|
45
|
+
/src\/data/,
|
|
46
|
+
/src\/domain/,
|
|
47
|
+
];
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Extract a numeric score from review text.
|
|
51
|
+
* Matches the same "Score: X/10" pattern as `reviewer.ts → extractScore()`.
|
|
52
|
+
*/
|
|
53
|
+
function extractReviewScore(reviewText: string): number | null {
|
|
54
|
+
const match = reviewText.match(/Score:\s*(\d+(?:\.\d+)?)\s*\/\s*10/i);
|
|
55
|
+
return match ? parseFloat(match[1]) : null;
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
// ─── Main ─────────────────────────────────────────────────────────────────────
|
|
59
|
+
|
|
60
|
+
/**
|
|
61
|
+
* Run a lightweight self-evaluation at the end of `ai-spec create`.
|
|
62
|
+
*
|
|
63
|
+
* Design goals (Harness Engineering):
|
|
64
|
+
* - Zero AI calls: all scoring is deterministic file-system + text checks
|
|
65
|
+
* - Produces a single `harnessScore` (0-10) stored in RunLog alongside `promptHash`
|
|
66
|
+
* - Lets you compare runs across prompt versions: did harnessScore go up or down?
|
|
67
|
+
*
|
|
68
|
+
* Scoring weights:
|
|
69
|
+
* | Dimension | Weight (with review) | Weight (review skipped) |
|
|
70
|
+
* |-----------------|----------------------|-------------------------|
|
|
71
|
+
* | DSL Coverage | 40 % | 55 % |
|
|
72
|
+
* | Compile/Error | 30 % | 45 % |
|
|
73
|
+
* | Review Score | 30 % | — |
|
|
74
|
+
*/
|
|
75
|
+
export function runSelfEval(opts: {
|
|
76
|
+
dsl: SpecDSL | null;
|
|
77
|
+
generatedFiles: string[];
|
|
78
|
+
/** true = error-feedback loop ended with all checks passing */
|
|
79
|
+
compilePassed: boolean;
|
|
80
|
+
/** Full text of the 3-pass review output; empty string if review was skipped */
|
|
81
|
+
reviewText: string;
|
|
82
|
+
promptHash: string;
|
|
83
|
+
logger: RunLogger;
|
|
84
|
+
}): SelfEvalResult {
|
|
85
|
+
const { dsl, generatedFiles, compilePassed, reviewText, promptHash, logger } = opts;
|
|
86
|
+
|
|
87
|
+
// ── DSL Coverage Score ────────────────────────────────────────────────────
|
|
88
|
+
const endpointsTotal = dsl?.endpoints?.length ?? 0;
|
|
89
|
+
const modelsTotal = dsl?.models?.length ?? 0;
|
|
90
|
+
|
|
91
|
+
const endpointLayerCovered = generatedFiles.some((f) =>
|
|
92
|
+
ENDPOINT_LAYER_PATTERNS.some((p) => p.test(f))
|
|
93
|
+
);
|
|
94
|
+
const modelLayerCovered = generatedFiles.some((f) =>
|
|
95
|
+
MODEL_LAYER_PATTERNS.some((p) => p.test(f))
|
|
96
|
+
);
|
|
97
|
+
|
|
98
|
+
let dslCoverageScore = 10;
|
|
99
|
+
if (generatedFiles.length === 0) {
|
|
100
|
+
dslCoverageScore = 0;
|
|
101
|
+
} else {
|
|
102
|
+
if (endpointsTotal > 0 && !endpointLayerCovered) dslCoverageScore -= 4;
|
|
103
|
+
if (modelsTotal > 0 && !modelLayerCovered) dslCoverageScore -= 3;
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
// ── Compile Score ─────────────────────────────────────────────────────────
|
|
107
|
+
// 10 = clean pass, 5 = error feedback ran but didn't fully clear / was skipped
|
|
108
|
+
const compileScore = compilePassed ? 10 : 5;
|
|
109
|
+
|
|
110
|
+
// ── Review Score ──────────────────────────────────────────────────────────
|
|
111
|
+
const reviewScore = reviewText ? extractReviewScore(reviewText) : null;
|
|
112
|
+
|
|
113
|
+
// ── Harness Score (weighted average) ──────────────────────────────────────
|
|
114
|
+
const harnessScore = reviewScore !== null
|
|
115
|
+
? Math.round((dslCoverageScore * 0.4 + compileScore * 0.3 + reviewScore * 0.3) * 10) / 10
|
|
116
|
+
: Math.round((dslCoverageScore * 0.55 + compileScore * 0.45) * 10) / 10;
|
|
117
|
+
|
|
118
|
+
const result: SelfEvalResult = {
|
|
119
|
+
dslCoverageScore,
|
|
120
|
+
compileScore,
|
|
121
|
+
reviewScore,
|
|
122
|
+
harnessScore,
|
|
123
|
+
promptHash,
|
|
124
|
+
detail: {
|
|
125
|
+
endpointsTotal,
|
|
126
|
+
endpointLayerCovered,
|
|
127
|
+
modelsTotal,
|
|
128
|
+
modelLayerCovered,
|
|
129
|
+
filesWritten: generatedFiles.length,
|
|
130
|
+
},
|
|
131
|
+
};
|
|
132
|
+
|
|
133
|
+
// Persist to RunLog
|
|
134
|
+
logger.setHarnessScore(harnessScore);
|
|
135
|
+
logger.stageEnd("self_eval", {
|
|
136
|
+
harnessScore,
|
|
137
|
+
dslCoverageScore,
|
|
138
|
+
compileScore,
|
|
139
|
+
reviewScore: reviewScore ?? undefined,
|
|
140
|
+
promptHash,
|
|
141
|
+
});
|
|
142
|
+
|
|
143
|
+
return result;
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
// ─── Display ──────────────────────────────────────────────────────────────────
|
|
147
|
+
|
|
148
|
+
export function printSelfEval(result: SelfEvalResult): void {
|
|
149
|
+
const scoreColor =
|
|
150
|
+
result.harnessScore >= 8 ? chalk.green :
|
|
151
|
+
result.harnessScore >= 6 ? chalk.yellow :
|
|
152
|
+
chalk.red;
|
|
153
|
+
|
|
154
|
+
const filled = Math.round(result.harnessScore);
|
|
155
|
+
const bar = "█".repeat(filled) + "░".repeat(10 - filled);
|
|
156
|
+
|
|
157
|
+
const compileTag = result.compileScore === 10
|
|
158
|
+
? chalk.green("pass")
|
|
159
|
+
: chalk.yellow("partial");
|
|
160
|
+
const reviewTag = result.reviewScore !== null
|
|
161
|
+
? `Review: ${result.reviewScore}/10`
|
|
162
|
+
: chalk.gray("Review: skipped");
|
|
163
|
+
|
|
164
|
+
console.log(chalk.cyan("\n─── Harness Self-Eval ───────────────────────────"));
|
|
165
|
+
console.log(` Score : ${scoreColor(`[${bar}] ${result.harnessScore}/10`)}`);
|
|
166
|
+
console.log(
|
|
167
|
+
` DSL : ${scoreColor(result.dslCoverageScore + "/10")} ` +
|
|
168
|
+
`Compile: ${compileTag} ${reviewTag}`
|
|
169
|
+
);
|
|
170
|
+
console.log(chalk.gray(` Prompt : ${result.promptHash}`));
|
|
171
|
+
console.log(chalk.gray("─".repeat(49)));
|
|
172
|
+
}
|