@skillrecordings/cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/.env.encrypted +0 -0
  2. package/CHANGELOG.md +35 -0
  3. package/README.md +214 -0
  4. package/bin/skill.ts +3 -0
  5. package/data/tt-archive-dataset.json +1 -0
  6. package/data/validate-test-dataset.json +97 -0
  7. package/docs/CLI-AUTH.md +504 -0
  8. package/package.json +38 -0
  9. package/preload.ts +18 -0
  10. package/src/__tests__/init.test.ts +74 -0
  11. package/src/alignment-test.ts +64 -0
  12. package/src/check-apps.ts +16 -0
  13. package/src/commands/auth/decrypt.ts +123 -0
  14. package/src/commands/auth/encrypt.ts +81 -0
  15. package/src/commands/auth/index.ts +50 -0
  16. package/src/commands/auth/keygen.ts +41 -0
  17. package/src/commands/auth/status.ts +164 -0
  18. package/src/commands/axiom/forensic.ts +868 -0
  19. package/src/commands/axiom/index.ts +697 -0
  20. package/src/commands/build-dataset.ts +311 -0
  21. package/src/commands/db-status.ts +47 -0
  22. package/src/commands/deploys.ts +219 -0
  23. package/src/commands/eval-local/compare.ts +171 -0
  24. package/src/commands/eval-local/health.ts +212 -0
  25. package/src/commands/eval-local/index.ts +76 -0
  26. package/src/commands/eval-local/real-tools.ts +416 -0
  27. package/src/commands/eval-local/run.ts +1168 -0
  28. package/src/commands/eval-local/score-production.ts +256 -0
  29. package/src/commands/eval-local/seed.ts +276 -0
  30. package/src/commands/eval-pipeline/index.ts +53 -0
  31. package/src/commands/eval-pipeline/real-tools.ts +492 -0
  32. package/src/commands/eval-pipeline/run.ts +1316 -0
  33. package/src/commands/eval-pipeline/seed.ts +395 -0
  34. package/src/commands/eval-prompt.ts +496 -0
  35. package/src/commands/eval.test.ts +253 -0
  36. package/src/commands/eval.ts +108 -0
  37. package/src/commands/faq-classify.ts +460 -0
  38. package/src/commands/faq-cluster.ts +135 -0
  39. package/src/commands/faq-extract.ts +249 -0
  40. package/src/commands/faq-mine.ts +432 -0
  41. package/src/commands/faq-review.ts +426 -0
  42. package/src/commands/front/index.ts +351 -0
  43. package/src/commands/front/pull-conversations.ts +275 -0
  44. package/src/commands/front/tags.ts +825 -0
  45. package/src/commands/front-cache.ts +1277 -0
  46. package/src/commands/front-stats.ts +75 -0
  47. package/src/commands/health.test.ts +82 -0
  48. package/src/commands/health.ts +362 -0
  49. package/src/commands/init.test.ts +89 -0
  50. package/src/commands/init.ts +106 -0
  51. package/src/commands/inngest/client.ts +294 -0
  52. package/src/commands/inngest/events.ts +296 -0
  53. package/src/commands/inngest/investigate.ts +382 -0
  54. package/src/commands/inngest/runs.ts +149 -0
  55. package/src/commands/inngest/signal.ts +143 -0
  56. package/src/commands/kb-sync.ts +498 -0
  57. package/src/commands/memory/find.ts +135 -0
  58. package/src/commands/memory/get.ts +87 -0
  59. package/src/commands/memory/index.ts +97 -0
  60. package/src/commands/memory/stats.ts +163 -0
  61. package/src/commands/memory/store.ts +49 -0
  62. package/src/commands/memory/vote.ts +159 -0
  63. package/src/commands/pipeline.ts +127 -0
  64. package/src/commands/responses.ts +856 -0
  65. package/src/commands/tools.ts +293 -0
  66. package/src/commands/wizard.ts +319 -0
  67. package/src/index.ts +172 -0
  68. package/src/lib/crypto.ts +56 -0
  69. package/src/lib/env-loader.ts +206 -0
  70. package/src/lib/onepassword.ts +137 -0
  71. package/src/test-agent-local.ts +115 -0
  72. package/tsconfig.json +11 -0
  73. package/vitest.config.ts +10 -0
@@ -0,0 +1,253 @@
1
+ import * as fs from 'node:fs/promises'
2
+ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
3
+ import { runEval } from './eval'
4
+
5
+ // Mock process.exit to prevent test termination
6
+ const mockExit = vi.spyOn(process, 'exit').mockImplementation((code) => {
7
+ throw new Error(`process.exit(${code})`)
8
+ })
9
+
10
+ // Mock fs module
11
+ vi.mock('node:fs/promises')
12
+
13
+ // Mock core evals module
14
+ vi.mock('@skillrecordings/core/evals/routing', () => ({
15
+ evalRouting: vi.fn(),
16
+ }))
17
+
18
+ describe('eval command', () => {
19
+ beforeEach(() => {
20
+ vi.clearAllMocks()
21
+ mockExit.mockClear()
22
+ })
23
+
24
+ afterEach(() => {
25
+ mockExit.mockClear()
26
+ })
27
+
28
+ it('should require dataset path', async () => {
29
+ const consoleErrorSpy = vi.spyOn(console, 'error')
30
+
31
+ await expect(runEval('routing', undefined)).rejects.toThrow(
32
+ 'process.exit(1)'
33
+ )
34
+
35
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
36
+ expect.stringContaining('Dataset path is required')
37
+ )
38
+ })
39
+
40
+ it('should fail if dataset file does not exist', async () => {
41
+ const consoleErrorSpy = vi.spyOn(console, 'error')
42
+ vi.mocked(fs.access).mockRejectedValue(new Error('File not found'))
43
+
44
+ await expect(
45
+ runEval('routing', '/path/to/nonexistent.json')
46
+ ).rejects.toThrow('process.exit(1)')
47
+
48
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
49
+ expect.stringContaining('Dataset file not found')
50
+ )
51
+ })
52
+
53
+ it('should fail if dataset is invalid JSON', async () => {
54
+ const consoleErrorSpy = vi.spyOn(console, 'error')
55
+ vi.mocked(fs.access).mockResolvedValue(undefined)
56
+ vi.mocked(fs.readFile).mockResolvedValue('invalid json')
57
+
58
+ await expect(runEval('routing', '/path/to/invalid.json')).rejects.toThrow(
59
+ 'process.exit(1)'
60
+ )
61
+
62
+ expect(consoleErrorSpy).toHaveBeenCalledWith(
63
+ expect.stringContaining('Invalid JSON')
64
+ )
65
+ })
66
+
67
+ it('should print pretty results table by default', async () => {
68
+ const consoleSpy = vi.spyOn(console, 'log')
69
+ const mockDataset = [
70
+ {
71
+ message: 'Test message',
72
+ expectedCategory: 'needs_response',
73
+ expectedRoute: 'classifier' as const,
74
+ },
75
+ ]
76
+ const mockReport = {
77
+ precision: 0.92,
78
+ recall: 0.95,
79
+ fpRate: 0.03,
80
+ fnRate: 0.02,
81
+ byCategory: {
82
+ needs_response: {
83
+ tp: 10,
84
+ fp: 1,
85
+ fn: 1,
86
+ tn: 5,
87
+ precision: 0.95,
88
+ recall: 0.93,
89
+ f1: 0.94,
90
+ count: 17,
91
+ },
92
+ },
93
+ cost: {
94
+ tokens: 5000,
95
+ estimatedUsd: 0.00125,
96
+ },
97
+ latency: {
98
+ p50: 120,
99
+ p95: 250,
100
+ p99: 350,
101
+ },
102
+ passed: true,
103
+ }
104
+
105
+ vi.mocked(fs.access).mockResolvedValue(undefined)
106
+ vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(mockDataset))
107
+
108
+ const { evalRouting } = await import('@skillrecordings/core/evals/routing')
109
+ vi.mocked(evalRouting).mockResolvedValue(mockReport)
110
+
111
+ await expect(runEval('routing', '/path/to/dataset.json')).rejects.toThrow(
112
+ 'process.exit(0)'
113
+ )
114
+
115
+ const output = consoleSpy.mock.calls.flat().join('\n')
116
+ expect(output).toContain('Precision')
117
+ expect(output).toContain('92.0%')
118
+ expect(output).toContain('Recall')
119
+ expect(output).toContain('95.0%')
120
+ expect(output).toContain('Latency')
121
+ expect(output).toContain('120ms')
122
+ })
123
+
124
+ it('should output JSON when --json flag is used', async () => {
125
+ const consoleSpy = vi.spyOn(console, 'log')
126
+ const mockDataset = [
127
+ {
128
+ message: 'Test message',
129
+ expectedCategory: 'needs_response',
130
+ expectedRoute: 'classifier' as const,
131
+ },
132
+ ]
133
+ const mockReport = {
134
+ precision: 0.92,
135
+ recall: 0.95,
136
+ fpRate: 0.03,
137
+ fnRate: 0.02,
138
+ byCategory: {},
139
+ cost: {
140
+ tokens: 5000,
141
+ estimatedUsd: 0.00125,
142
+ },
143
+ latency: {
144
+ p50: 120,
145
+ p95: 250,
146
+ p99: 350,
147
+ },
148
+ passed: true,
149
+ }
150
+
151
+ vi.mocked(fs.access).mockResolvedValue(undefined)
152
+ vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(mockDataset))
153
+
154
+ const { evalRouting } = await import('@skillrecordings/core/evals/routing')
155
+ vi.mocked(evalRouting).mockResolvedValue(mockReport)
156
+
157
+ await expect(
158
+ runEval('routing', '/path/to/dataset.json', { json: true })
159
+ ).rejects.toThrow('process.exit(0)')
160
+
161
+ const output = consoleSpy.mock.calls.flat().join('\n')
162
+ const parsed = JSON.parse(output)
163
+
164
+ expect(parsed.precision).toBe(0.92)
165
+ expect(parsed.recall).toBe(0.95)
166
+ expect(parsed.passed).toBe(true)
167
+ })
168
+
169
+ it('should exit with code 1 when gates fail', async () => {
170
+ const mockDataset = [
171
+ {
172
+ message: 'Test message',
173
+ expectedCategory: 'needs_response',
174
+ expectedRoute: 'classifier' as const,
175
+ },
176
+ ]
177
+ const mockReport = {
178
+ precision: 0.85, // Below threshold
179
+ recall: 0.88, // Below threshold
180
+ fpRate: 0.05,
181
+ fnRate: 0.04,
182
+ byCategory: {},
183
+ cost: {
184
+ tokens: 5000,
185
+ estimatedUsd: 0.00125,
186
+ },
187
+ latency: {
188
+ p50: 120,
189
+ p95: 250,
190
+ p99: 350,
191
+ },
192
+ passed: false,
193
+ }
194
+
195
+ vi.mocked(fs.access).mockResolvedValue(undefined)
196
+ vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(mockDataset))
197
+
198
+ const { evalRouting } = await import('@skillrecordings/core/evals/routing')
199
+ vi.mocked(evalRouting).mockResolvedValue(mockReport)
200
+
201
+ await expect(
202
+ runEval('routing', '/path/to/dataset.json', {
203
+ gates: { minPrecision: 0.92, minRecall: 0.95 },
204
+ })
205
+ ).rejects.toThrow('process.exit(1)')
206
+ })
207
+
208
+ it('should accept custom gates', async () => {
209
+ const mockDataset = [
210
+ {
211
+ message: 'Test message',
212
+ expectedCategory: 'needs_response',
213
+ expectedRoute: 'classifier' as const,
214
+ },
215
+ ]
216
+ const mockReport = {
217
+ precision: 0.92,
218
+ recall: 0.95,
219
+ fpRate: 0.03,
220
+ fnRate: 0.02,
221
+ byCategory: {},
222
+ cost: {
223
+ tokens: 5000,
224
+ estimatedUsd: 0.00125,
225
+ },
226
+ latency: {
227
+ p50: 120,
228
+ p95: 250,
229
+ p99: 350,
230
+ },
231
+ passed: true,
232
+ }
233
+
234
+ vi.mocked(fs.access).mockResolvedValue(undefined)
235
+ vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(mockDataset))
236
+
237
+ const { evalRouting } = await import('@skillrecordings/core/evals/routing')
238
+ vi.mocked(evalRouting).mockResolvedValue(mockReport)
239
+
240
+ // Test with custom gates
241
+ await expect(
242
+ runEval('routing', '/path/to/dataset.json', {
243
+ gates: { minPrecision: 0.9, minRecall: 0.93, maxFpRate: 0.05 },
244
+ })
245
+ ).rejects.toThrow('process.exit(0)')
246
+
247
+ expect(evalRouting).toHaveBeenCalledWith(mockDataset, {
248
+ minPrecision: 0.9,
249
+ minRecall: 0.93,
250
+ maxFpRate: 0.05,
251
+ })
252
+ })
253
+ })
@@ -0,0 +1,108 @@
1
+ import { access, readFile } from 'node:fs/promises'
2
+ import type {
3
+ EvalDatapoint,
4
+ EvalGates,
5
+ EvalReport,
6
+ } from '@skillrecordings/core/evals/routing'
7
+
/**
 * Options accepted by `runEval`.
 */
export interface EvalOptions {
  // When true, emit the raw EvalReport as JSON instead of the pretty table.
  json?: boolean
  // Optional metric thresholds forwarded to evalRouting; when a gate is
  // missed the report's `passed` flag drives a non-zero exit.
  gates?: EvalGates
}
12
+
13
+ /**
14
+ * Run evals against a dataset
15
+ *
16
+ * Usage: skill eval routing --dataset path/to/dataset.json [--gates strict|relaxed] [--json]
17
+ *
18
+ * @param evalType - Type of eval to run (currently only 'routing' supported)
19
+ * @param datasetPath - Path to JSON dataset file
20
+ * @param options - Command options
21
+ */
22
+ export async function runEval(
23
+ evalType: string,
24
+ datasetPath: string | undefined,
25
+ options: EvalOptions = {}
26
+ ): Promise<void> {
27
+ const { json = false, gates } = options
28
+
29
+ // Validate inputs
30
+ if (!datasetPath) {
31
+ console.error(
32
+ 'Error: Dataset path is required. Usage: skill eval routing --dataset <path>'
33
+ )
34
+ process.exit(1)
35
+ }
36
+
37
+ // Check if file exists
38
+ try {
39
+ await access(datasetPath)
40
+ } catch {
41
+ console.error(`Error: Dataset file not found: ${datasetPath}`)
42
+ process.exit(1)
43
+ }
44
+
45
+ // Read and parse dataset
46
+ let dataset: EvalDatapoint[]
47
+ try {
48
+ const content = await readFile(datasetPath, 'utf-8')
49
+ dataset = JSON.parse(content)
50
+ } catch (error) {
51
+ console.error(
52
+ `Error: Invalid JSON in dataset file: ${error instanceof Error ? error.message : 'Unknown error'}`
53
+ )
54
+ process.exit(1)
55
+ }
56
+
57
+ // Import evalRouting (only when needed to avoid circular deps)
58
+ const { evalRouting } = await import('@skillrecordings/core/evals/routing')
59
+
60
+ // Run eval with optional gates
61
+ const report: EvalReport = await evalRouting(dataset, gates)
62
+
63
+ // Output results
64
+ if (json) {
65
+ console.log(JSON.stringify(report, null, 2))
66
+ } else {
67
+ printPrettyResults(report)
68
+ }
69
+
70
+ // Exit with appropriate code
71
+ process.exit(report.passed ? 0 : 1)
72
+ }
73
+
74
+ /**
75
+ * Print pretty-formatted results table
76
+ */
77
+ function printPrettyResults(report: EvalReport): void {
78
+ console.log('\nšŸ“Š Evaluation Results\n')
79
+ console.log('Overall Metrics:')
80
+ console.log(` Precision: ${(report.precision * 100).toFixed(1)}%`)
81
+ console.log(` Recall: ${(report.recall * 100).toFixed(1)}%`)
82
+ console.log(` False Positive Rate: ${(report.fpRate * 100).toFixed(1)}%`)
83
+ console.log(` False Negative Rate: ${(report.fnRate * 100).toFixed(1)}%`)
84
+
85
+ console.log('\nPerformance:')
86
+ console.log(` Latency (p50): ${report.latency.p50.toFixed(0)}ms`)
87
+ console.log(` Latency (p95): ${report.latency.p95.toFixed(0)}ms`)
88
+ console.log(` Latency (p99): ${report.latency.p99.toFixed(0)}ms`)
89
+ console.log(` Total Tokens: ${report.cost.tokens.toLocaleString()}`)
90
+ console.log(` Estimated Cost: $${report.cost.estimatedUsd.toFixed(4)}`)
91
+
92
+ if (Object.keys(report.byCategory).length > 0) {
93
+ console.log('\nCategory Breakdown:')
94
+ for (const [category, metrics] of Object.entries(report.byCategory)) {
95
+ console.log(`\n ${category}:`)
96
+ console.log(` Precision: ${(metrics.precision * 100).toFixed(1)}%`)
97
+ console.log(` Recall: ${(metrics.recall * 100).toFixed(1)}%`)
98
+ console.log(` F1: ${(metrics.f1 * 100).toFixed(1)}%`)
99
+ console.log(` Count: ${metrics.count}`)
100
+ }
101
+ }
102
+
103
+ console.log(`\n${report.passed ? 'āœ… PASSED' : 'āŒ FAILED'}`)
104
+
105
+ if (!report.passed) {
106
+ console.log('\nOne or more metrics fell below threshold gates.')
107
+ }
108
+ }