@kodax-ai/kodax-cli 0.7.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. package/CHANGELOG.md +1304 -0
  2. package/LICENSE +191 -0
  3. package/README.md +1167 -0
  4. package/README_CN.md +631 -0
  5. package/dist/builtin/code-review/SKILL.md +63 -0
  6. package/dist/builtin/git-workflow/SKILL.md +84 -0
  7. package/dist/builtin/skill-creator/SKILL.md +122 -0
  8. package/dist/builtin/skill-creator/agents/analyzer.md +12 -0
  9. package/dist/builtin/skill-creator/agents/comparator.md +13 -0
  10. package/dist/builtin/skill-creator/agents/grader.md +13 -0
  11. package/dist/builtin/skill-creator/references/schemas.md +227 -0
  12. package/dist/builtin/skill-creator/scripts/aggregate-benchmark.d.ts +46 -0
  13. package/dist/builtin/skill-creator/scripts/aggregate-benchmark.js +209 -0
  14. package/dist/builtin/skill-creator/scripts/analyze-benchmark.d.ts +46 -0
  15. package/dist/builtin/skill-creator/scripts/analyze-benchmark.js +289 -0
  16. package/dist/builtin/skill-creator/scripts/compare-runs.d.ts +62 -0
  17. package/dist/builtin/skill-creator/scripts/compare-runs.js +333 -0
  18. package/dist/builtin/skill-creator/scripts/generate-review.d.ts +33 -0
  19. package/dist/builtin/skill-creator/scripts/generate-review.js +415 -0
  20. package/dist/builtin/skill-creator/scripts/grade-evals.d.ts +73 -0
  21. package/dist/builtin/skill-creator/scripts/grade-evals.js +405 -0
  22. package/dist/builtin/skill-creator/scripts/improve-description.d.ts +23 -0
  23. package/dist/builtin/skill-creator/scripts/improve-description.js +161 -0
  24. package/dist/builtin/skill-creator/scripts/init-skill.d.ts +14 -0
  25. package/dist/builtin/skill-creator/scripts/init-skill.js +153 -0
  26. package/dist/builtin/skill-creator/scripts/install-skill.d.ts +29 -0
  27. package/dist/builtin/skill-creator/scripts/install-skill.js +176 -0
  28. package/dist/builtin/skill-creator/scripts/package-skill.d.ts +38 -0
  29. package/dist/builtin/skill-creator/scripts/package-skill.js +124 -0
  30. package/dist/builtin/skill-creator/scripts/quick-validate.d.ts +8 -0
  31. package/dist/builtin/skill-creator/scripts/quick-validate.js +166 -0
  32. package/dist/builtin/skill-creator/scripts/run-eval.d.ts +66 -0
  33. package/dist/builtin/skill-creator/scripts/run-eval.js +356 -0
  34. package/dist/builtin/skill-creator/scripts/run-loop.d.ts +49 -0
  35. package/dist/builtin/skill-creator/scripts/run-loop.js +243 -0
  36. package/dist/builtin/skill-creator/scripts/run-trigger-eval.d.ts +58 -0
  37. package/dist/builtin/skill-creator/scripts/run-trigger-eval.js +225 -0
  38. package/dist/builtin/skill-creator/scripts/utils.js +278 -0
  39. package/dist/builtin/tdd/SKILL.md +56 -0
  40. package/dist/index.js +1717 -0
  41. package/dist/kodax_cli.js +1870 -0
  42. package/package.json +122 -0
  43. package/scripts/kodax-bin.cjs +27 -0
  44. package/scripts/production-env.cjs +16 -0
@@ -0,0 +1,405 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { readdir, readFile, writeFile } from 'node:fs/promises';
4
+ import path from 'node:path';
5
+ import { fileURLToPath } from 'node:url';
6
+ import {
7
+ computePassSummary,
8
+ ensureDirectory,
9
+ extractJsonObject,
10
+ loadKodaXSDK,
11
+ loadRelativeText,
12
+ readJsonFile,
13
+ truncateText,
14
+ } from './utils.js';
15
+
16
// Coerce any value to a trimmed string; null and undefined become ''.
function normalizeExpectationText(value) {
  if (value === null || value === undefined) {
    return '';
  }
  return String(value).trim();
}
19
+
20
// Build the grading rubric for an eval: prefer explicit `assertions`
// (strings or { text } objects), falling back to a single expectation
// derived from `expected_output`. Empty entries are dropped.
function normalizeExpectedRubric(evalMetadata) {
  const rawAssertions = evalMetadata?.assertions;
  const items = [];

  if (Array.isArray(rawAssertions)) {
    for (const assertion of rawAssertions) {
      let text = '';
      if (typeof assertion === 'string') {
        text = normalizeExpectationText(assertion);
      } else if (assertion && typeof assertion === 'object' && typeof assertion.text === 'string') {
        text = normalizeExpectationText(assertion.text);
      }
      if (text) {
        items.push(text);
      }
    }
  }

  if (items.length > 0) {
    return items;
  }

  // No usable assertions: fall back to expected_output, if present.
  const fallback = normalizeExpectationText(evalMetadata?.expected_output);
  return fallback ? [fallback] : [];
}
41
+
42
// Normalize an arbitrary value into an array of non-empty trimmed strings.
// Non-array inputs yield an empty array.
function normalizeStringArray(value) {
  if (!Array.isArray(value)) {
    return [];
  }
  const result = [];
  for (const item of value) {
    const text = String(item ?? '').trim();
    if (text) {
      result.push(text);
    }
  }
  return result;
}
50
+
51
// Shape a judge-returned expectation into { text, passed, evidence },
// using fallbackText when the entry carries no text of its own.
function normalizeExpectationEntry(entry, fallbackText) {
  const resolvedText = normalizeExpectationText(entry?.text ?? fallbackText);
  const resolvedEvidence = normalizeExpectationText(entry?.evidence);
  return {
    text: resolvedText,
    // Only an explicit boolean true counts as a pass.
    passed: entry?.passed === true,
    evidence: resolvedEvidence,
  };
}
59
+
60
// Reconcile the judge's raw JSON response against the expected rubric.
// Each rubric item is matched case-insensitively, at most once, against the
// judge's returned expectations; rubric items the judge did not address are
// recorded as failed. When the rubric is empty, all non-empty returned
// expectations are kept as-is.
function normalizeParsedGrading(rawText, rubric) {
  const parsed = extractJsonObject(rawText) ?? {};
  const returnedExpectations = Array.isArray(parsed.expectations) ? parsed.expectations : [];
  const normalizedExpectations = [];
  // Indices of returned expectations already matched to a rubric item,
  // so one judge entry cannot satisfy two rubric items.
  const consumed = new Set();

  for (const rubricText of rubric) {
    const matchIndex = returnedExpectations.findIndex((entry, index) => {
      if (consumed.has(index)) {
        return false;
      }

      const candidate = normalizeExpectationText(entry?.text);
      return candidate.toLowerCase() === rubricText.toLowerCase();
    });

    if (matchIndex === -1) {
      // Judge did not address this rubric item: count it as a failure.
      normalizedExpectations.push({
        text: rubricText,
        passed: false,
        evidence: '',
      });
      continue;
    }

    consumed.add(matchIndex);
    normalizedExpectations.push(normalizeExpectationEntry(returnedExpectations[matchIndex], rubricText));
  }

  if (rubric.length === 0) {
    // No rubric to match against: keep every returned expectation with text.
    for (const entry of returnedExpectations) {
      const normalized = normalizeExpectationEntry(entry, '');
      if (normalized.text) {
        normalizedExpectations.push(normalized);
      }
    }
  }

  return {
    overall_summary: normalizeExpectationText(parsed.overall_summary),
    expectations: normalizedExpectations,
    user_notes_summary: {
      uncertainties: normalizeStringArray(parsed.user_notes_summary?.uncertainties),
      needs_review: normalizeStringArray(parsed.user_notes_summary?.needs_review),
      workarounds: normalizeStringArray(parsed.user_notes_summary?.workarounds),
    },
  };
}
108
+
109
// List the absolute paths of the immediate subdirectories of dirPath,
// sorted lexicographically. Missing or unreadable directories yield [].
async function listDirectories(dirPath) {
  let entries;
  try {
    entries = await readdir(dirPath, { withFileTypes: true });
  } catch {
    entries = [];
  }

  const dirs = [];
  for (const entry of entries) {
    if (entry.isDirectory()) {
      dirs.push(path.join(dirPath, entry.name));
    }
  }
  return dirs.sort((left, right) => left.localeCompare(right));
}
116
+
117
// Gather everything known about a single eval run from the directory layout
//   <workspace>/<eval-*>/<config>/<run-*>/
// Missing files degrade to '' (text files) or {} (JSON files) instead of
// throwing, so a partially-populated run can still be graded.
async function loadRunContext(runDir) {
  // Derive config and eval names purely from the path structure.
  const configDir = path.dirname(runDir);
  const evalDir = path.dirname(configDir);
  const configName = path.basename(configDir);
  const runId = path.basename(runDir);
  const outputsDir = path.join(runDir, 'outputs');
  const evalMetadata = await readJsonFile(path.join(evalDir, 'eval_metadata.json'), {});
  const resultText = await readFile(path.join(outputsDir, 'result.md'), 'utf8').catch(() => '');
  const promptText = await readFile(path.join(outputsDir, 'prompt.txt'), 'utf8').catch(() => '');
  const transcript = await readFile(path.join(runDir, 'transcript.md'), 'utf8').catch(() => '');
  const metrics = await readJsonFile(path.join(outputsDir, 'metrics.json'), {});
  const result = await readJsonFile(path.join(runDir, 'result.json'), {});
  const timing = await readJsonFile(path.join(runDir, 'timing.json'), {});

  return {
    evalDir,
    runDir,
    runId,
    configName,
    evalMetadata,
    outputsDir,
    resultText,
    promptText,
    transcript,
    metrics,
    result,
    timing,
  };
}
146
+
147
// Assemble the full grading prompt for one run: the judge instructions, the
// required JSON response shape, run metadata, the eval prompt and expected
// outcome, numbered assertions, the run's final output, result flags,
// execution metrics, and a transcript excerpt. Long sections are truncated
// via truncateText to keep the prompt within budget.
export function buildGradingPrompt(input) {
  const rubric = normalizeExpectedRubric(input.evalMetadata);
  // Numbered assertion list, or a hint telling the judge to infer one.
  const rubricText = rubric.length > 0
    ? rubric.map((item, index) => `${index + 1}. ${item}`).join('\n')
    : '(No explicit assertions; infer one expectation from expected_output if present.)';

  return `${input.agentInstructions.trim()}

Return JSON with this shape:
{
"overall_summary": "short summary",
"expectations": [
{
"text": "expectation text",
"passed": true,
"evidence": "short evidence grounded in the output"
}
],
"user_notes_summary": {
"uncertainties": [],
"needs_review": [],
"workarounds": []
}
}

## Run Metadata
- eval_id: ${input.evalMetadata.eval_id ?? 'unknown'}
- eval_name: ${input.evalMetadata.eval_name ?? 'unknown'}
- config: ${input.configName}
- run_id: ${input.runId}

## Eval Prompt
${truncateText(input.evalMetadata.prompt ?? input.promptText, 4000)}

## Expected Outcome
${truncateText(input.evalMetadata.expected_output ?? '', 2000)}

## Assertions
${rubricText}

## Final Output
${truncateText(input.resultText, 12000)}

## Result Flags
${JSON.stringify({
success: input.result.success ?? null,
signal: input.result.signal ?? null,
interrupted: input.result.interrupted ?? false,
limit_reached: input.result.limit_reached ?? input.result.limitReached ?? false,
}, null, 2)}

## Execution Metrics
${JSON.stringify({
total_tool_calls: input.metrics.total_tool_calls ?? input.result.execution_metrics?.total_tool_calls ?? 0,
errors_encountered: input.metrics.errors_encountered ?? input.result.execution_metrics?.errors_encountered ?? 0,
output_chars: input.metrics.output_chars ?? input.result.execution_metrics?.output_chars ?? input.resultText.length,
total_tokens: input.timing.total_tokens ?? 0,
total_duration_seconds: input.timing.total_duration_seconds ?? 0,
}, null, 2)}

## Transcript Excerpt
${truncateText(input.transcript, 5000)}
`;
}
211
+
212
// Execute the grading prompt through the KodaX SDK and return the judge's
// final text response.
async function defaultRunJudge(prompt, options) {
  const { runKodaX } = await loadKodaXSDK();
  const workingDir = options.cwd ?? options.workspaceDir ?? process.cwd();
  const sdkConfig = {
    provider: options.provider ?? 'anthropic',
    model: options.model,
    maxIter: options.maxIter ?? 20,
    reasoningMode: options.reasoningMode ?? 'balanced',
    // Thinking stays on unless the caller explicitly set reasoning to 'off'.
    thinking: options.reasoningMode ? options.reasoningMode !== 'off' : true,
    context: {
      gitRoot: path.resolve(workingDir),
    },
  };
  const result = await runKodaX(sdkConfig, prompt);
  return result.lastText;
}
229
+
230
// Grade a single run directory: load the run context, build the judge
// prompt, invoke the runner (KodaX SDK judge by default), normalize the
// judge's JSON against the rubric, and persist grading.json inside the run
// directory. Returns { runDir, grading, prompt, rawResponse }.
export async function gradeRun(runDir, options, runner = defaultRunJudge) {
  const run = await loadRunContext(path.resolve(runDir));
  const rubric = normalizeExpectedRubric(run.evalMetadata);
  // Judge instructions live next to this script in ../agents/grader.md.
  const agentInstructions = await loadRelativeText(import.meta.url, '../agents/grader.md');
  const prompt = buildGradingPrompt({
    ...run,
    agentInstructions,
  });
  const rawResponse = await runner(prompt, {
    ...options,
    runDir: run.runDir,
    configName: run.configName,
    evalMetadata: run.evalMetadata,
    runId: run.runId,
  });
  const parsed = normalizeParsedGrading(rawResponse, rubric);
  // Metric precedence: outputs/metrics.json, then result.json's
  // execution_metrics, then a zero / derived default.
  const executionMetrics = {
    total_tool_calls: run.metrics.total_tool_calls ?? run.result.execution_metrics?.total_tool_calls ?? 0,
    errors_encountered: run.metrics.errors_encountered ?? run.result.execution_metrics?.errors_encountered ?? 0,
    output_chars: run.metrics.output_chars ?? run.result.execution_metrics?.output_chars ?? run.resultText.length,
  };

  const grading = {
    summary: computePassSummary(parsed.expectations),
    expectations: parsed.expectations,
    execution_metrics: executionMetrics,
    user_notes_summary: parsed.user_notes_summary,
    overall_summary: parsed.overall_summary,
    timing: {
      total_tokens: run.timing.total_tokens ?? 0,
      total_duration_seconds: run.timing.total_duration_seconds ?? 0,
    },
    meta: {
      generated_at: new Date().toISOString(),
      eval_id: run.evalMetadata.eval_id ?? null,
      eval_name: run.evalMetadata.eval_name ?? null,
      config: run.configName,
      run_id: run.runId,
    },
  };

  // Persist alongside the run so reruns can detect existing gradings.
  await writeFile(
    path.join(run.runDir, 'grading.json'),
    `${JSON.stringify(grading, null, 2)}\n`,
    'utf8'
  );

  return {
    runDir: run.runDir,
    grading,
    prompt,
    rawResponse,
  };
}
284
+
285
// Walk <workspace>/eval-*/<config>/run-* and grade every run that does not
// already have a grading.json (unless options.overwrite is true). When
// options.configs is non-empty it restricts grading to those config names.
// Writes grading-summary.json at the workspace root and returns the summary.
export async function gradeWorkspace(options, runner = defaultRunJudge) {
  const workspaceDir = path.resolve(options.workspaceDir);
  await ensureDirectory(workspaceDir);

  const configFilter = new Set(
    (options.configs ?? [])
      .map((item) => String(item).trim())
      .filter(Boolean)
  );
  const processedRuns = [];
  const skippedRuns = [];

  for (const evalDir of await listDirectories(workspaceDir)) {
    // Only directories named eval-* are eval roots.
    if (!path.basename(evalDir).startsWith('eval-')) {
      continue;
    }

    for (const configDir of await listDirectories(evalDir)) {
      const configName = path.basename(configDir);
      if (configFilter.size > 0 && !configFilter.has(configName)) {
        continue;
      }

      for (const runDir of await listDirectories(configDir)) {
        if (!path.basename(runDir).startsWith('run-')) {
          continue;
        }

        const gradingPath = path.join(runDir, 'grading.json');
        const existing = await readJsonFile(gradingPath, null);
        if (existing && options.overwrite !== true) {
          // Already graded; record a forward-slash relative path and move on.
          skippedRuns.push(path.relative(workspaceDir, runDir).replace(/\\/g, '/'));
          continue;
        }

        const graded = await gradeRun(runDir, options, runner);
        processedRuns.push({
          run: path.relative(workspaceDir, runDir).replace(/\\/g, '/'),
          summary: graded.grading.summary,
          config: graded.grading.meta.config,
          eval_id: graded.grading.meta.eval_id,
        });
      }
    }
  }

  const summary = {
    workspace: workspaceDir,
    generated_at: new Date().toISOString(),
    processed: processedRuns.length,
    skipped: skippedRuns.length,
    processed_runs: processedRuns,
    skipped_runs: skippedRuns,
  };

  await writeFile(
    path.join(workspaceDir, 'grading-summary.json'),
    `${JSON.stringify(summary, null, 2)}\n`,
    'utf8'
  );

  return summary;
}
348
+
349
// Parse CLI arguments for the grading entry point.
// Usage: node grade-evals.js <workspace> [--provider p] [--model m]
//        [--reasoning mode] [--max-iter n] [--cwd dir] [--overwrite]
//        [--configs a,b,c]
// Unknown tokens are ignored; a flag expecting a value is skipped when the
// value is missing. Non-numeric --max-iter keeps the default instead of NaN.
function parseArgs(argv) {
  const args = {
    workspaceDir: argv[2] ?? '',
    provider: 'anthropic',
    model: undefined,
    reasoningMode: 'balanced',
    maxIter: 20,
    cwd: process.cwd(),
    overwrite: false,
    configs: [],
  };

  for (let index = 3; index < argv.length; index += 1) {
    const token = argv[index];
    if (token === '--provider' && argv[index + 1]) {
      args.provider = argv[++index];
    } else if (token === '--model' && argv[index + 1]) {
      args.model = argv[++index];
    } else if (token === '--reasoning' && argv[index + 1]) {
      args.reasoningMode = argv[++index];
    } else if (token === '--max-iter' && argv[index + 1]) {
      // Guard against non-numeric input: previously Number('abc') produced
      // NaN and was passed straight through to the SDK as maxIter.
      const parsed = Number(argv[++index]);
      if (Number.isFinite(parsed)) {
        args.maxIter = parsed;
      }
    } else if (token === '--cwd' && argv[index + 1]) {
      args.cwd = argv[++index];
    } else if (token === '--overwrite') {
      args.overwrite = true;
    } else if (token === '--configs' && argv[index + 1]) {
      args.configs = argv[++index]
        .split(',')
        .map((item) => item.trim())
        .filter(Boolean);
    }
  }

  return args;
}
385
+
386
// CLI entry point: grade every run under the given workspace and print the
// summary JSON to stdout. Exits with code 1 when no workspace is supplied.
async function main() {
  const args = parseArgs(process.argv);
  if (!args.workspaceDir) {
    console.error('Usage: node scripts/grade-evals.js <workspace> [--provider anthropic] [--configs with_skill,without_skill] [--overwrite]');
    process.exit(1);
  }

  const summary = await gradeWorkspace(args);
  process.stdout.write(`${JSON.stringify(summary, null, 2)}\n`);
}
396
+
397
// Only run main() when this file is executed directly (node path/to/file),
// not when it is imported as a module. Errors are reported to stderr and
// mapped to exit code 1.
const isDirectRun = process.argv[1]
  && fileURLToPath(import.meta.url) === path.resolve(process.argv[1]);

if (isDirectRun) {
  main().catch((error) => {
    console.error(error instanceof Error ? error.message : String(error));
    process.exit(1);
  });
}
@@ -0,0 +1,23 @@
1
/** Options accepted by {@link improveDescription}. */
export interface ImproveDescriptionOptions {
  /** Path to the skill directory whose description is being improved. */
  skillPath: string;
  /** Path to the eval-results JSON file driving the improvement. */
  evalResultsPath: string;
  /** Model provider name (defaults to 'anthropic' in the CLI). */
  provider?: string;
  model?: string;
  /** When set, the CLI writes the result JSON to this path instead of stdout. */
  output?: string;
  /** When true, the CLI writes the new description back into the skill frontmatter. */
  write?: boolean;
  /** Optional path to a JSON file holding previous improvement attempts. */
  historyPath?: string;
  maxIter?: number;
  reasoningMode?: string;
}

/** Result returned by {@link improveDescription}. */
export interface ImproveDescriptionResult {
  /** The extracted description candidate. */
  description: string;
  /** Raw model response the description was extracted from. */
  rawResponse: string;
  /** The full prompt that was sent to the model. */
  prompt: string;
}

/** Extract the description candidate from a raw model response. */
export function extractDescriptionCandidate(text: string): string;
/**
 * Generate an improved skill trigger description from eval results.
 * @param generator Optional override for the text-generation backend.
 * @throws When no candidate is produced or it exceeds 1024 characters.
 */
export function improveDescription(
  options: ImproveDescriptionOptions,
  generator?: (prompt: string, options: ImproveDescriptionOptions) => Promise<string>
): Promise<ImproveDescriptionResult>;
@@ -0,0 +1,161 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { readFile, writeFile } from 'node:fs/promises';
4
+ import path from 'node:path';
5
+ import { fileURLToPath } from 'node:url';
6
+ import {
7
+ extractTaggedText,
8
+ loadKodaXSDK,
9
+ loadSkill,
10
+ pathExists,
11
+ writeSkill,
12
+ } from './utils.js';
13
+
14
// Build the prompt asking the model for an improved skill trigger
// description. Robust to eval results that lack a `results` array: such
// input is treated as "no failed cases" instead of throwing a TypeError
// (previously `evalResults.results.filter` crashed on undefined).
function buildImprovePrompt({ skillName, body, currentDescription, evalResults, history }) {
  const results = Array.isArray(evalResults?.results) ? evalResults.results : [];
  const failed = results.filter((item) => !item.pass);
  const historyText = history.length === 0
    ? 'None.'
    : history.map((item, index) => `${index + 1}. ${item.description} (${item.score})`).join('\n');

  return [
    `You are improving the trigger description for a KodaX skill named "${skillName}".`,
    'Write a better description that stays under 1024 characters and focuses on user intent.',
    'Respond with only the new description inside <new_description> tags.',
    '',
    `Current description: ${currentDescription}`,
    '',
    'Skill body for context:',
    body,
    '',
    'Failed eval cases:',
    failed.length === 0 ? 'None.' : JSON.stringify(failed, null, 2),
    '',
    'Previous attempts:',
    historyText,
  ].join('\n');
}
37
+
38
// Pull the description candidate out of a model response: prefer the body of
// the <new_description> tag, falling back to the whole response; then strip
// surrounding whitespace and a single pair of wrapping double quotes.
// The final trim fixes the case where the model returns `" desc "` — the old
// code stripped only the quotes and left inner padding (` desc `), which
// inflated the 1024-character length check and leaked into the frontmatter.
export function extractDescriptionCandidate(text) {
  const tagged = extractTaggedText(text, 'new_description');
  return (tagged ?? text).trim().replace(/^"|"$/g, '').trim();
}
42
+
43
// Run the description-improvement prompt through the KodaX SDK and return
// the model's final text response.
async function defaultGenerate(prompt, options) {
  const { runKodaX } = await loadKodaXSDK();
  const sdkConfig = {
    provider: options.provider,
    model: options.model,
    maxIter: options.maxIter ?? 18,
    reasoningMode: options.reasoningMode ?? 'off',
    // Thinking is enabled only when the caller chose a non-'off' mode.
    thinking: options.reasoningMode ? options.reasoningMode !== 'off' : false,
  };
  const response = await runKodaX(sdkConfig, prompt);
  return response.lastText;
}
58
+
59
// Generate an improved skill description from eval results.
// Loads the skill and any prior attempts, builds the improvement prompt,
// invokes the generator (KodaX SDK by default), and validates the candidate.
// Throws when no candidate is produced or it exceeds the 1024-char limit.
export async function improveDescription(options, generator = defaultGenerate) {
  const skill = await loadSkill(options.skillPath);
  const evalResults = JSON.parse(await readFile(options.evalResultsPath, 'utf8'));
  // History is optional: a missing file simply means no previous attempts.
  const history = options.historyPath && await pathExists(options.historyPath)
    ? JSON.parse(await readFile(options.historyPath, 'utf8')).history ?? []
    : [];

  const prompt = buildImprovePrompt({
    skillName: skill.frontmatter.name,
    body: skill.body,
    // Prefer the description recorded with the eval results over the
    // skill's current frontmatter description.
    currentDescription: String(evalResults.description ?? skill.frontmatter.description ?? ''),
    evalResults,
    history,
  });

  const rawResponse = await generator(prompt, options);
  const description = extractDescriptionCandidate(rawResponse);
  if (!description) {
    throw new Error('No description candidate was produced.');
  }
  // Enforce the skill-frontmatter description length limit.
  if (description.length > 1024) {
    throw new Error(`Generated description is too long (${description.length}/1024).`);
  }

  return {
    description,
    rawResponse,
    prompt,
  };
}
89
+
90
// Parse CLI arguments for the improve-description entry point.
// Usage: node improve-description.js --skill-path <dir> --eval-results <json>
//        [--provider p] [--model m] [--output f] [--write] [--history f]
//        [--max-iter n] [--reasoning mode]
// Unknown tokens are ignored; a flag expecting a value is skipped when the
// value is missing. Non-numeric --max-iter keeps the default instead of NaN.
function parseArgs(argv) {
  const args = {
    skillPath: '',
    evalResultsPath: '',
    provider: 'anthropic',
    model: undefined,
    output: undefined,
    write: false,
    historyPath: undefined,
    maxIter: 18,
    reasoningMode: 'off',
  };

  for (let index = 2; index < argv.length; index += 1) {
    const token = argv[index];
    if (token === '--skill-path' && argv[index + 1]) {
      args.skillPath = argv[++index];
    } else if (token === '--eval-results' && argv[index + 1]) {
      args.evalResultsPath = argv[++index];
    } else if (token === '--provider' && argv[index + 1]) {
      args.provider = argv[++index];
    } else if (token === '--model' && argv[index + 1]) {
      args.model = argv[++index];
    } else if (token === '--output' && argv[index + 1]) {
      args.output = argv[++index];
    } else if (token === '--write') {
      args.write = true;
    } else if (token === '--history' && argv[index + 1]) {
      args.historyPath = argv[++index];
    } else if (token === '--max-iter' && argv[index + 1]) {
      // Guard against non-numeric input: previously Number('abc') produced
      // NaN and was passed straight through as maxIter (same fix as the
      // grade-evals argument parser, for consistency).
      const parsed = Number(argv[++index]);
      if (Number.isFinite(parsed)) {
        args.maxIter = parsed;
      }
    } else if (token === '--reasoning' && argv[index + 1]) {
      args.reasoningMode = argv[++index];
    }
  }

  return args;
}
128
+
129
// CLI entry point: improve the description, optionally persist it back into
// the skill frontmatter (--write), and emit the result JSON either to a file
// (--output) or to stdout. Exits 1 when required arguments are missing.
async function main() {
  const args = parseArgs(process.argv);
  if (!args.skillPath || !args.evalResultsPath) {
    console.error('Usage: node scripts/improve-description.js --skill-path <dir> --eval-results <results.json> [--write] [--output result.json]');
    process.exit(1);
  }

  const result = await improveDescription(args);
  const output = `${JSON.stringify(result, null, 2)}\n`;

  if (args.write) {
    // Persist the new description into the skill's frontmatter on disk.
    const skill = await loadSkill(args.skillPath);
    skill.frontmatter.description = result.description;
    await writeSkill(args.skillPath, skill.frontmatter, skill.body);
  }

  if (args.output) {
    await writeFile(args.output, output, 'utf8');
    console.log(`Wrote ${path.resolve(args.output)}`);
  } else {
    process.stdout.write(output);
  }
}
152
+
153
// Only run main() when this file is executed directly (node path/to/file),
// not when it is imported as a module. Errors are reported to stderr and
// mapped to exit code 1.
const isDirectRun = process.argv[1]
  && fileURLToPath(import.meta.url) === path.resolve(process.argv[1]);

if (isDirectRun) {
  main().catch((error) => {
    console.error(error instanceof Error ? error.message : String(error));
    process.exit(1);
  });
}
@@ -0,0 +1,14 @@
1
/** Options accepted by {@link initSkill}. */
export interface InitSkillOptions {
  /** Name of the skill to scaffold. */
  name: string;
  /** Directory in which to create the skill — default not visible here; confirm against implementation. */
  baseDir?: string;
  /** Initial trigger description for the skill frontmatter. */
  description?: string;
  /** Presumably overwrites existing files when true — TODO confirm against init-skill.js. */
  force?: boolean;
  /** Presumably also scaffolds eval templates when true — TODO confirm against init-skill.js. */
  includeEvals?: boolean;
}

/** Render the SKILL.md template for a new skill. */
export function renderSkillTemplate(name: string, description: string): string;
/** Render the eval template for a new skill. */
export function renderEvalTemplate(name: string): string;
/**
 * Scaffold a new skill directory.
 * @returns The skill directory path and the list of created file paths.
 */
export function initSkill(options: InitSkillOptions): Promise<{
  skillDir: string;
  created: string[];
}>;