@kodax-ai/kodax-cli 0.7.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. package/CHANGELOG.md +1304 -0
  2. package/LICENSE +191 -0
  3. package/README.md +1167 -0
  4. package/README_CN.md +631 -0
  5. package/dist/builtin/code-review/SKILL.md +63 -0
  6. package/dist/builtin/git-workflow/SKILL.md +84 -0
  7. package/dist/builtin/skill-creator/SKILL.md +122 -0
  8. package/dist/builtin/skill-creator/agents/analyzer.md +12 -0
  9. package/dist/builtin/skill-creator/agents/comparator.md +13 -0
  10. package/dist/builtin/skill-creator/agents/grader.md +13 -0
  11. package/dist/builtin/skill-creator/references/schemas.md +227 -0
  12. package/dist/builtin/skill-creator/scripts/aggregate-benchmark.d.ts +46 -0
  13. package/dist/builtin/skill-creator/scripts/aggregate-benchmark.js +209 -0
  14. package/dist/builtin/skill-creator/scripts/analyze-benchmark.d.ts +46 -0
  15. package/dist/builtin/skill-creator/scripts/analyze-benchmark.js +289 -0
  16. package/dist/builtin/skill-creator/scripts/compare-runs.d.ts +62 -0
  17. package/dist/builtin/skill-creator/scripts/compare-runs.js +333 -0
  18. package/dist/builtin/skill-creator/scripts/generate-review.d.ts +33 -0
  19. package/dist/builtin/skill-creator/scripts/generate-review.js +415 -0
  20. package/dist/builtin/skill-creator/scripts/grade-evals.d.ts +73 -0
  21. package/dist/builtin/skill-creator/scripts/grade-evals.js +405 -0
  22. package/dist/builtin/skill-creator/scripts/improve-description.d.ts +23 -0
  23. package/dist/builtin/skill-creator/scripts/improve-description.js +161 -0
  24. package/dist/builtin/skill-creator/scripts/init-skill.d.ts +14 -0
  25. package/dist/builtin/skill-creator/scripts/init-skill.js +153 -0
  26. package/dist/builtin/skill-creator/scripts/install-skill.d.ts +29 -0
  27. package/dist/builtin/skill-creator/scripts/install-skill.js +176 -0
  28. package/dist/builtin/skill-creator/scripts/package-skill.d.ts +38 -0
  29. package/dist/builtin/skill-creator/scripts/package-skill.js +124 -0
  30. package/dist/builtin/skill-creator/scripts/quick-validate.d.ts +8 -0
  31. package/dist/builtin/skill-creator/scripts/quick-validate.js +166 -0
  32. package/dist/builtin/skill-creator/scripts/run-eval.d.ts +66 -0
  33. package/dist/builtin/skill-creator/scripts/run-eval.js +356 -0
  34. package/dist/builtin/skill-creator/scripts/run-loop.d.ts +49 -0
  35. package/dist/builtin/skill-creator/scripts/run-loop.js +243 -0
  36. package/dist/builtin/skill-creator/scripts/run-trigger-eval.d.ts +58 -0
  37. package/dist/builtin/skill-creator/scripts/run-trigger-eval.js +225 -0
  38. package/dist/builtin/skill-creator/scripts/utils.js +278 -0
  39. package/dist/builtin/tdd/SKILL.md +56 -0
  40. package/dist/index.js +1717 -0
  41. package/dist/kodax_cli.js +1870 -0
  42. package/package.json +122 -0
  43. package/scripts/kodax-bin.cjs +27 -0
  44. package/scripts/production-env.cjs +16 -0
@@ -0,0 +1,333 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { readdir, readFile, writeFile } from 'node:fs/promises';
4
+ import path from 'node:path';
5
+ import { fileURLToPath } from 'node:url';
6
+ import {
7
+ extractJsonObject,
8
+ loadKodaXSDK,
9
+ loadRelativeText,
10
+ readJsonFile,
11
+ truncateText,
12
+ } from './utils.js';
13
+
14
// Lists the immediate subdirectories of `dirPath` as absolute/joined paths,
// sorted lexicographically (locale-aware). A missing or unreadable directory
// yields an empty list instead of throwing.
async function listDirectories(dirPath) {
  let dirents;
  try {
    dirents = await readdir(dirPath, { withFileTypes: true });
  } catch {
    return [];
  }

  const subdirectories = [];
  for (const dirent of dirents) {
    if (dirent.isDirectory()) {
      subdirectories.push(path.join(dirPath, dirent.name));
    }
  }
  return subdirectories.sort((a, b) => a.localeCompare(b));
}
21
+
22
// Coerces an arbitrary value into an array of non-empty trimmed strings.
// Non-array inputs (including null/undefined) normalize to []; nullish
// items become '' and are dropped, everything else is stringified.
function normalizeStringArray(value) {
  if (!Array.isArray(value)) {
    return [];
  }

  const normalized = [];
  for (const item of value) {
    const text = String(item ?? '').trim();
    if (text) {
      normalized.push(text);
    }
  }
  return normalized;
}
30
+
31
// Parses the judge model's raw text response into a well-formed comparison
// record. Unrecognized winner labels fall back to 'inconclusive'; confidence
// is rounded to 4 decimals and clamped to [0, 1] (0 when non-numeric).
function normalizeComparisonResult(rawText) {
  const parsed = extractJsonObject(rawText) ?? {};

  let winner = 'inconclusive';
  if (parsed.winner === 'A' || parsed.winner === 'B' || parsed.winner === 'tie' || parsed.winner === 'inconclusive') {
    winner = parsed.winner;
  }

  let confidence = 0;
  const rawConfidence = Number(parsed.confidence);
  if (Number.isFinite(rawConfidence)) {
    // Round first, then clamp — mirrors the documented [0, 1] contract.
    confidence = Math.max(0, Math.min(1, Number(rawConfidence.toFixed(4))));
  }

  return {
    winner,
    confidence,
    rationale: String(parsed.rationale ?? '').trim(),
    strengths_a: normalizeStringArray(parsed.strengths_a),
    strengths_b: normalizeStringArray(parsed.strengths_b),
    risks: normalizeStringArray(parsed.risks),
  };
}
49
+
50
// Loads the pairIndex-th run from each config under `evalDir`, together with
// the eval's metadata and each run's primary markdown output. Pairing is
// positional (run i of config A vs run i of config B); returns null when
// either side has no run at that index.
async function loadComparisonPair(evalDir, configA, configB, pairIndex) {
  const runDirsFor = async (configName) => {
    const candidates = await listDirectories(path.join(evalDir, configName));
    return candidates.filter((candidate) => path.basename(candidate).startsWith('run-'));
  };

  const runsA = await runDirsFor(configA);
  const runsB = await runDirsFor(configB);
  const dirA = runsA[pairIndex];
  const dirB = runsB[pairIndex];
  if (!dirA || !dirB) {
    return null;
  }

  const evalMetadata = await readJsonFile(path.join(evalDir, 'eval_metadata.json'), {});
  // Missing result files degrade to empty output rather than failing the pair.
  const readOutput = (runDir) =>
    readFile(path.join(runDir, 'outputs', 'result.md'), 'utf8').catch(() => '');
  const outputA = await readOutput(dirA);
  const outputB = await readOutput(dirB);

  const describeRun = (runDir, configName, output) => ({
    runDir,
    runId: path.basename(runDir),
    configName,
    output,
  });

  return {
    evalDir,
    evalMetadata,
    runA: describeRun(dirA, configA, outputA),
    runB: describeRun(dirB, configB, outputB),
  };
}
83
+
84
/**
 * Builds the blind-comparison prompt shown to the judge model.
 *
 * The two runs are presented as anonymous "Candidate A" / "Candidate B";
 * `input.presentPrimaryFirst` controls which run lands in slot A so callers
 * can alternate slot assignment and counteract position bias. All embedded
 * sections are truncated to keep the prompt within a bounded size.
 */
export function buildComparisonPrompt(input) {
  // Swap slots when the primary run should NOT be shown first.
  const pair = input.presentPrimaryFirst
    ? { A: input.runA, B: input.runB }
    : { A: input.runB, B: input.runA };

  return `${input.agentInstructions.trim()}

Return JSON with this shape:
{
  "winner": "A | B | tie | inconclusive",
  "confidence": 0.0,
  "rationale": "short explanation",
  "strengths_a": [],
  "strengths_b": [],
  "risks": []
}

Judge only the visible outputs. Do not mention hidden config names in the rationale.

## Eval Prompt
${truncateText(input.evalMetadata.prompt ?? '', 4000)}

## Expected Outcome
${truncateText(input.evalMetadata.expected_output ?? '', 2000)}

## Assertions
${truncateText(JSON.stringify(input.evalMetadata.assertions ?? [], null, 2), 4000)}

## Candidate A
${truncateText(pair.A.output, 12000)}

## Candidate B
${truncateText(pair.B.output, 12000)}
`;
}
119
+
120
// Default judge runner: sends the comparison prompt through the KodaX SDK
// and returns the model's final text response.
async function defaultRunComparator(prompt, options) {
  const sdk = await loadKodaXSDK();
  const workingDir = options.cwd ?? options.workspaceDir ?? process.cwd();

  const runConfig = {
    provider: options.provider ?? 'anthropic',
    model: options.model,
    maxIter: options.maxIter ?? 20,
    reasoningMode: options.reasoningMode ?? 'balanced',
    // Thinking defaults to on; an explicit reasoningMode of 'off' disables it.
    thinking: options.reasoningMode ? options.reasoningMode !== 'off' : true,
    context: {
      gitRoot: path.resolve(workingDir),
    },
  };

  const result = await sdk.runKodaX(runConfig, prompt);
  return result.lastText;
}
137
+
138
// Translates the judge's blind label ('A'/'B') back to the real config name,
// accounting for which config occupied slot A in this pairing. Any other
// label ('tie', 'inconclusive') passes through unchanged.
function mapWinnerToConfig(winner, presentPrimaryFirst, configA, configB) {
  switch (winner) {
    case 'A':
      return presentPrimaryFirst ? configA : configB;
    case 'B':
      return presentPrimaryFirst ? configB : configA;
    default:
      return winner;
  }
}
147
+
148
// Renders the comparison document as a human-readable markdown report:
// a summary header followed by one short section per judged pair.
export function renderComparisonMarkdown(document) {
  const header = [
    `# Blind Comparison: ${document.config_a} vs ${document.config_b}`,
    '',
    `Generated: ${document.generated_at}`,
    '',
    `- ${document.config_a} wins: ${document.summary.config_a_wins}`,
    `- ${document.config_b} wins: ${document.summary.config_b_wins}`,
    `- ties: ${document.summary.ties}`,
    `- inconclusive: ${document.summary.inconclusive}`,
    '',
  ];

  const sections = document.comparisons.flatMap((comparison) => [
    // Prefer a stable eval identifier; fall back to the 1-based pair index.
    `## Eval ${comparison.eval_id ?? comparison.eval_name ?? comparison.index}`,
    '',
    `- Winner: ${comparison.winner_config}`,
    `- Confidence: ${comparison.confidence}`,
    `- Rationale: ${comparison.rationale || 'n/a'}`,
    '',
  ]);

  return `${header.concat(sections).join('\n')}\n`;
}
172
+
173
/**
 * Runs a blind A/B comparison over every eval in a benchmark workspace and
 * writes the results to JSON and markdown files.
 *
 * For each `eval-*` directory under `options.workspaceDir`, run i of
 * `configA` is paired positionally with run i of `configB` (up to
 * `options.maxPairs` pairs). Each pair is judged via `runner` with slot
 * assignment alternating between pairs to counteract position bias.
 *
 * @param options  workspaceDir (required); configA/configB (default
 *   'with_skill'/'without_skill'); maxPairs; outputPath/markdownPath
 *   (default comparison.json / comparison.md in the workspace); plus any
 *   runner options (provider, model, maxIter, reasoningMode, cwd).
 * @param runner   async (prompt, options) => raw model text; defaults to
 *   defaultRunComparator. Injectable for testing.
 * @returns {document, outputPath, markdownPath}
 * @throws Error when no comparable run pairs are found at all.
 */
export async function compareWorkspace(
  options,
  runner = defaultRunComparator
) {
  const workspaceDir = path.resolve(options.workspaceDir);
  const configA = options.configA ?? 'with_skill';
  const configB = options.configB ?? 'without_skill';
  // Non-finite / non-positive maxPairs means "no limit".
  const maxPairs = Number.isFinite(options.maxPairs) && options.maxPairs > 0
    ? Math.floor(options.maxPairs)
    : Number.POSITIVE_INFINITY;
  const agentInstructions = await loadRelativeText(import.meta.url, '../agents/comparator.md');
  const comparisons = [];

  for (const evalDir of await listDirectories(workspaceDir)) {
    // Only eval-* directories hold benchmark runs; skip everything else.
    if (!path.basename(evalDir).startsWith('eval-')) {
      continue;
    }

    for (let pairIndex = 0; pairIndex < maxPairs; pairIndex += 1) {
      const pair = await loadComparisonPair(evalDir, configA, configB, pairIndex);
      if (!pair) {
        break;
      }

      // Alternate which config is shown as Candidate A across the whole
      // comparison sequence to balance out position bias in the judge.
      const presentPrimaryFirst = comparisons.length % 2 === 0;
      const prompt = buildComparisonPrompt({
        agentInstructions,
        evalMetadata: pair.evalMetadata,
        runA: pair.runA,
        runB: pair.runB,
        presentPrimaryFirst,
      });
      const rawResponse = await runner(prompt, {
        ...options,
        workspaceDir,
        evalDir,
        pairIndex,
        configA,
        configB,
      });
      const parsed = normalizeComparisonResult(rawResponse);
      // Map the blind 'A'/'B' verdict back to the real config name.
      const winnerConfig = mapWinnerToConfig(parsed.winner, presentPrimaryFirst, configA, configB);

      comparisons.push({
        index: comparisons.length + 1,
        eval_id: pair.evalMetadata.eval_id ?? null,
        eval_name: pair.evalMetadata.eval_name ?? null,
        // Normalize path separators so output is stable across platforms.
        run_a: path.relative(workspaceDir, pair.runA.runDir).replace(/\\/g, '/'),
        run_b: path.relative(workspaceDir, pair.runB.runDir).replace(/\\/g, '/'),
        presented_as: presentPrimaryFirst
          ? { A: configA, B: configB }
          : { A: configB, B: configA },
        winner_label: parsed.winner,
        winner_config: winnerConfig,
        confidence: parsed.confidence,
        rationale: parsed.rationale,
        strengths_a: parsed.strengths_a,
        strengths_b: parsed.strengths_b,
        risks: parsed.risks,
      });
    }
  }

  if (comparisons.length === 0) {
    throw new Error(`No comparable run pairs found for ${configA} vs ${configB} in ${workspaceDir}`);
  }

  const document = {
    workspace: workspaceDir,
    generated_at: new Date().toISOString(),
    config_a: configA,
    config_b: configB,
    summary: {
      total_pairs: comparisons.length,
      config_a_wins: comparisons.filter((item) => item.winner_config === configA).length,
      config_b_wins: comparisons.filter((item) => item.winner_config === configB).length,
      ties: comparisons.filter((item) => item.winner_config === 'tie').length,
      inconclusive: comparisons.filter((item) => item.winner_config === 'inconclusive').length,
    },
    comparisons,
  };

  const outputPath = path.resolve(options.outputPath ?? path.join(workspaceDir, 'comparison.json'));
  const markdownPath = path.resolve(options.markdownPath ?? path.join(workspaceDir, 'comparison.md'));
  await writeFile(outputPath, `${JSON.stringify(document, null, 2)}\n`, 'utf8');
  await writeFile(markdownPath, renderComparisonMarkdown(document), 'utf8');

  return {
    document,
    outputPath,
    markdownPath,
  };
}
266
+
267
/**
 * Parses CLI arguments for the compare-runs script.
 *
 * argv[2] is the workspace directory; remaining tokens are `--flag value`
 * pairs. Unknown flags and flags missing a value are ignored.
 *
 * Fix: numeric flags (`--max-iter`, `--max-pairs`) that do not parse to a
 * finite number now keep their defaults. Previously `Number('abc')` produced
 * NaN, which is not nullish and therefore bypassed the `?? 20` fallback in
 * defaultRunComparator.
 */
function parseArgs(argv) {
  const args = {
    workspaceDir: argv[2] ?? '',
    configA: 'with_skill',
    configB: 'without_skill',
    outputPath: undefined,
    markdownPath: undefined,
    provider: 'anthropic',
    model: undefined,
    reasoningMode: 'balanced',
    maxIter: 20,
    maxPairs: undefined,
    cwd: process.cwd(),
  };

  // Returns the finite number parsed from `raw`, or `fallback` otherwise.
  const toFiniteNumber = (raw, fallback) => {
    const value = Number(raw);
    return Number.isFinite(value) ? value : fallback;
  };

  for (let index = 3; index < argv.length; index += 1) {
    const token = argv[index];
    if (token === '--config-a' && argv[index + 1]) {
      args.configA = argv[++index];
    } else if (token === '--config-b' && argv[index + 1]) {
      args.configB = argv[++index];
    } else if (token === '--output' && argv[index + 1]) {
      args.outputPath = argv[++index];
    } else if (token === '--markdown' && argv[index + 1]) {
      args.markdownPath = argv[++index];
    } else if (token === '--provider' && argv[index + 1]) {
      args.provider = argv[++index];
    } else if (token === '--model' && argv[index + 1]) {
      args.model = argv[++index];
    } else if (token === '--reasoning' && argv[index + 1]) {
      args.reasoningMode = argv[++index];
    } else if (token === '--max-iter' && argv[index + 1]) {
      args.maxIter = toFiniteNumber(argv[++index], args.maxIter);
    } else if (token === '--max-pairs' && argv[index + 1]) {
      args.maxPairs = toFiniteNumber(argv[++index], args.maxPairs);
    } else if (token === '--cwd' && argv[index + 1]) {
      args.cwd = argv[++index];
    }
  }

  return args;
}
309
+
310
// CLI entry point: parse argv, run the workspace comparison, and print a
// compact JSON summary (totals plus the paths of the written reports).
async function main() {
  const args = parseArgs(process.argv);

  // A workspace directory is mandatory; bail out with usage text otherwise.
  if (!args.workspaceDir) {
    console.error('Usage: node scripts/compare-runs.js <workspace> [--config-a with_skill] [--config-b without_skill] [--output comparison.json]');
    process.exit(1);
  }

  const result = await compareWorkspace(args);
  const report = {
    comparison: result.document.summary,
    output: result.outputPath,
    markdown: result.markdownPath,
  };
  process.stdout.write(`${JSON.stringify(report, null, 2)}\n`);
}
324
+
325
// Execute main() only when this module is run directly (node compare-runs.js),
// not when imported — the ESM counterpart of `require.main === module`.
const invokedScript = process.argv[1];
const isDirectRun = Boolean(
  invokedScript && fileURLToPath(import.meta.url) === path.resolve(invokedScript)
);

if (isDirectRun) {
  main().catch((error) => {
    const message = error instanceof Error ? error.message : String(error);
    console.error(message);
    process.exit(1);
  });
}
@@ -0,0 +1,33 @@
1
/** A single artifact produced by an eval run, rendered in the review UI. */
export interface ReviewOutput {
  /** Display name of the artifact (typically its file name). */
  name: string;
  /** Artifact category — NOTE(review): allowed values not visible in this declaration; confirm against generate-review.js. */
  kind: string;
  /** Inline textual content, when the artifact is text. */
  content?: string;
  /** Data-URI payload, presumably used for binary artifacts — confirm against generate-review.js. */
  dataUri?: string;
}
7
+
8
/** One eval run collected for review: its prompt, grading, and outputs. */
export interface ReviewRun {
  /** Unique identifier for this run. */
  id: string;
  /** Identifier of the eval this run belongs to; null when unknown. */
  evalId: string | number | null;
  /** The prompt the run was executed with. */
  prompt: string;
  /** Grading result as a loose object; null when the run is ungraded. */
  grading: Record<string, unknown> | null;
  /** Artifacts produced by the run. */
  outputs: ReviewOutput[];
}
15
+
16
/** Complete data bundle handed to the review renderer. */
export interface ReviewPayload {
  /** Name of the skill under review. */
  skillName: string;
  /** Path of the benchmark workspace the runs were collected from. */
  workspace: string;
  /** Aggregated benchmark results; null when no benchmark data is present. */
  benchmark: Record<string, unknown> | null;
  /** Free-form feedback entries — presumably keyed by run or eval id; confirm against generate-review.js. */
  feedback: Record<string, string>;
  /** All runs included in this review. */
  runs: ReviewRun[];
}
23
+
24
/**
 * Collects review runs under `workspaceRoot`.
 *
 * `currentDir` and `runs` appear to be internal recursion accumulators —
 * callers normally pass only the root (presumed from the signature; confirm
 * in generate-review.js).
 */
export function findRuns(
  workspaceRoot: string,
  currentDir?: string,
  runs?: ReviewRun[]
): Promise<ReviewRun[]>;
/** Builds the full review payload for a workspace and skill, optionally including a benchmark file. */
export function buildPayload(
  workspace: string,
  args: { skillName: string; benchmark?: string | null }
): Promise<ReviewPayload>;
/** Renders the payload as an HTML review page; `staticMode` presumably toggles a self-contained static rendering — confirm in generate-review.js. */
export function renderHtml(payload: ReviewPayload, staticMode: boolean): string;