xtrm-tools 2.1.5 → 2.1.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -3
- package/cli/dist/index.cjs +991 -908
- package/cli/dist/index.cjs.map +1 -1
- package/cli/package.json +4 -1
- package/config/hooks.json +5 -0
- package/hooks/README.md +19 -0
- package/hooks/beads-stop-gate.mjs +1 -0
- package/hooks/main-guard-post-push.mjs +71 -0
- package/hooks/main-guard.mjs +10 -1
- package/package.json +17 -5
- package/project-skills/{ts-quality-gate → quality-gates}/.claude/hooks/quality-check.cjs +36 -1
- package/project-skills/{py-quality-gate → quality-gates}/.claude/hooks/quality-check.py +15 -2
- package/project-skills/{py-quality-gate → quality-gates}/.claude/settings.json +10 -0
- package/project-skills/quality-gates/.claude/skills/using-quality-gates/SKILL.md +254 -0
- package/project-skills/quality-gates/README.md +109 -0
- package/project-skills/quality-gates/evals/evals.json +181 -0
- package/project-skills/quality-gates/workspace/iteration-1/FINAL-EVAL-SUMMARY.md +75 -0
- package/project-skills/quality-gates/workspace/iteration-1/edge-case-auto-fix-verification/with_skill/outputs/response.md +59 -0
- package/project-skills/quality-gates/workspace/iteration-1/edge-case-mixed-language-project/with_skill/outputs/response.md +60 -0
- package/project-skills/quality-gates/workspace/iteration-1/eval-summary.md +105 -0
- package/project-skills/quality-gates/workspace/iteration-1/partial-install-python-only/with_skill/outputs/response.md +93 -0
- package/project-skills/quality-gates/workspace/iteration-1/python-refactor-request/with_skill/outputs/response.md +104 -0
- package/project-skills/quality-gates/workspace/iteration-1/quality-gate-error-fix/with_skill/outputs/response.md +74 -0
- package/project-skills/quality-gates/workspace/iteration-1/should-not-trigger-general-chat/with_skill/outputs/response.md +18 -0
- package/project-skills/quality-gates/workspace/iteration-1/should-not-trigger-math-question/with_skill/outputs/response.md +18 -0
- package/project-skills/quality-gates/workspace/iteration-1/should-not-trigger-unrelated-coding/with_skill/outputs/response.md +56 -0
- package/project-skills/quality-gates/workspace/iteration-1/tdd-guard-blocking-confusion/with_skill/outputs/response.md +67 -0
- package/project-skills/quality-gates/workspace/iteration-1/typescript-feature-with-tests/with_skill/outputs/response.md +97 -0
- package/project-skills/service-skills-set/install-service-skills.py +41 -11
- package/project-skills/tdd-guard/.claude/hooks/tdd-guard-pretool-bridge.cjs +0 -1
- package/project-skills/tdd-guard/reporters/jest/src/JestReporter.test-data.ts +199 -0
- package/project-skills/tdd-guard/reporters/jest/src/JestReporter.test.ts +302 -0
- package/project-skills/tdd-guard/reporters/jest/src/JestReporter.ts +201 -0
- package/project-skills/tdd-guard/reporters/jest/src/index.ts +4 -0
- package/project-skills/tdd-guard/reporters/jest/src/types.ts +42 -0
- package/project-skills/tdd-guard/reporters/jest/tsconfig.json +11 -0
- package/project-skills/tdd-guard/reporters/vitest/src/VitestReporter.test-data.ts +85 -0
- package/project-skills/tdd-guard/reporters/vitest/src/VitestReporter.test.ts +446 -0
- package/project-skills/tdd-guard/reporters/vitest/src/VitestReporter.ts +110 -0
- package/project-skills/tdd-guard/reporters/vitest/src/index.ts +4 -0
- package/project-skills/tdd-guard/reporters/vitest/src/types.ts +39 -0
- package/project-skills/tdd-guard/reporters/vitest/tsconfig.json +11 -0
- package/hooks/__pycache__/agent_context.cpython-314.pyc +0 -0
- package/project-skills/py-quality-gate/.claude/skills/using-py-quality-gate/SKILL.md +0 -112
- package/project-skills/py-quality-gate/README.md +0 -147
- package/project-skills/service-skills-set/.claude/git-hooks/__pycache__/doc_reminder.cpython-314.pyc +0 -0
- package/project-skills/service-skills-set/.claude/git-hooks/__pycache__/skill_staleness.cpython-314.pyc +0 -0
- package/project-skills/service-skills-set/.claude/skills/creating-service-skills/scripts/__pycache__/bootstrap.cpython-314.pyc +0 -0
- package/project-skills/service-skills-set/.claude/skills/updating-service-skills/scripts/__pycache__/drift_detector.cpython-314.pyc +0 -0
- package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/__pycache__/cataloger.cpython-314.pyc +0 -0
- package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/__pycache__/skill_activator.cpython-314.pyc +0 -0
- package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/__pycache__/test_skill_activator.cpython-314-pytest-9.0.2.pyc +0 -0
- package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/test_skill_activator.py +0 -58
- package/project-skills/service-skills-set/__pycache__/install-service-skills.cpython-314.pyc +0 -0
- package/project-skills/ts-quality-gate/.claude/settings.json +0 -16
- package/project-skills/ts-quality-gate/.claude/skills/using-ts-quality-gate/SKILL.md +0 -81
- package/project-skills/ts-quality-gate/README.md +0 -115
- package/skills/documenting/scripts/__pycache__/drift_detector.cpython-314.pyc +0 -0
- package/skills/documenting/scripts/__pycache__/orchestrator.cpython-314.pyc +0 -0
- package/skills/documenting/scripts/__pycache__/validate_metadata.cpython-314.pyc +0 -0
- package/skills/documenting/scripts/changelog/__pycache__/__init__.cpython-314.pyc +0 -0
- package/skills/documenting/scripts/changelog/__pycache__/add_entry.cpython-314.pyc +0 -0
- package/skills/documenting/scripts/changelog/__pycache__/bump_release.cpython-314.pyc +0 -0
- package/skills/documenting/scripts/changelog/__pycache__/validate_changelog.cpython-314.pyc +0 -0
- package/skills/documenting/tests/__pycache__/test_changelog.cpython-314-pytest-9.0.2.pyc +0 -0
- package/skills/documenting/tests/__pycache__/test_drift_detector.cpython-314-pytest-9.0.2.pyc +0 -0
- package/skills/documenting/tests/__pycache__/test_orchestrator.cpython-314-pytest-9.0.2.pyc +0 -0
- package/skills/documenting/tests/__pycache__/test_validate_metadata.cpython-314-pytest-9.0.2.pyc +0 -0
- package/skills/documenting/tests/integration_test.sh +0 -70
- package/skills/documenting/tests/test_changelog.py +0 -201
- package/skills/documenting/tests/test_drift_detector.py +0 -80
- package/skills/documenting/tests/test_orchestrator.py +0 -52
- package/skills/documenting/tests/test_validate_metadata.py +0 -64
- /package/project-skills/{ts-quality-gate → quality-gates}/.claude/hooks/hook-config.json +0 -0
|
@@ -0,0 +1,446 @@
|
|
|
1
|
+
import { describe, it, expect, beforeEach, afterEach } from 'vitest'
|
|
2
|
+
import type { TestModule, TestCase } from 'vitest/node'
|
|
3
|
+
import { VitestReporter } from './VitestReporter'
|
|
4
|
+
import {
|
|
5
|
+
MemoryStorage,
|
|
6
|
+
FileStorage,
|
|
7
|
+
Storage,
|
|
8
|
+
Config,
|
|
9
|
+
DEFAULT_DATA_DIR,
|
|
10
|
+
isFailingTest,
|
|
11
|
+
isPassingTest,
|
|
12
|
+
TestResult,
|
|
13
|
+
Test,
|
|
14
|
+
} from 'tdd-guard'
|
|
15
|
+
import {
|
|
16
|
+
testModule,
|
|
17
|
+
failedTestCase,
|
|
18
|
+
createTestCase,
|
|
19
|
+
createUnhandledError,
|
|
20
|
+
createTestResult,
|
|
21
|
+
} from './VitestReporter.test-data'
|
|
22
|
+
import type { FormattedError } from './types'
|
|
23
|
+
import { rmSync, mkdtempSync } from 'node:fs'
|
|
24
|
+
import { tmpdir } from 'node:os'
|
|
25
|
+
import { join } from 'node:path'
|
|
26
|
+
|
|
27
|
+
// Test suite for VitestReporter. Covers: storage selection in the
// constructor, collection of modules/test cases via reporter callbacks,
// state mapping, error formatting (expected/actual), synthetic tests for
// modules that failed to load, and onTestRunEnd's unhandledErrors/reason
// handling. Helpers (setupVitestReporter, createTestStorage, ...) are
// defined below the suite.
describe('VitestReporter', () => {
  // Fresh SUT per test (built in beforeEach); fixture objects are shared.
  let sut: Awaited<ReturnType<typeof setupVitestReporter>>
  const module = testModule()
  const passedTest = createTestCase()
  const failedTest = failedTestCase()

  beforeEach(() => {
    sut = setupVitestReporter()
  })

  afterEach(() => {
    sut.cleanup()
  })

  it('uses FileStorage by default', () => {
    const reporter = new VitestReporter()
    expect(reporter['storage']).toBeInstanceOf(FileStorage)
  })

  it('uses FileStorage when no storage provided', async () => {
    const localSut = setupVitestReporter({ type: 'file' })

    expect(localSut.reporter['storage']).toBeInstanceOf(FileStorage)

    const result = await localSut.collectAndGetSaved([
      testModule(),
      createTestCase(),
    ])

    expect(result).toBeTruthy()
    expect(result).toContain('testModules')

    // Local SUT uses a real temp directory; clean it up explicitly.
    localSut.cleanup()
  })

  it('accepts Storage instance in constructor', () => {
    const storage = new MemoryStorage()
    const reporter = new VitestReporter(storage)
    expect(reporter['storage']).toBe(storage)
  })

  it('accepts root path string in constructor', () => {
    const rootPath = '/some/project/root'
    const reporter = new VitestReporter(rootPath)
    expect(reporter['storage']).toBeInstanceOf(FileStorage)
    // Verify the storage is configured with the correct path
    const fileStorage = reporter['storage'] as FileStorage
    const config = fileStorage['config'] as Config
    const expectedDataDir = join(rootPath, ...DEFAULT_DATA_DIR.split('/'))
    expect(config.dataDir).toBe(expectedDataDir)
  })

  describe('when collecting test data', () => {
    // One module with one passing and one failing test, run to completion.
    beforeEach(async () => {
      sut.reporter.onTestModuleCollected(module)
      sut.reporter.onTestCaseResult(passedTest)
      sut.reporter.onTestCaseResult(failedTest)
      await sut.reporter.onTestRunEnd()
    })

    it('saves output as valid JSON', async () => {
      const parsed = await sut.getParsedData()
      expect(parsed).toBeDefined()
    })

    it('includes test modules', async () => {
      const parsed = await sut.getParsedData()

      expect(parsed).not.toBeNull()
      expect(parsed?.testModules).toHaveLength(1)
      expect(parsed?.testModules[0].moduleId).toBe(module.moduleId)
    })

    it('includes test cases', async () => {
      const tests = await sut.getTests()
      expect(tests).toHaveLength(2)
    })

    it('captures test states', async () => {
      const passedTests = await sut.getPassedTests()
      const failedTests = await sut.getFailedTests()

      expect(passedTests).toHaveLength(1)
      expect(failedTests).toHaveLength(1)
    })

    it('includes error information for failed tests', async () => {
      const failedTests = await sut.getFailedTests()
      const failedTestData = failedTests[0]

      expect(failedTestData).toBeDefined()
      expect(failedTestData.state).toBe('failed')
      expect(failedTestData.errors).toBeDefined()
      expect(failedTestData.errors?.length).toBeGreaterThan(0)
    })
  })

  describe('test state mapping', () => {
    it.each([
      ['passed', 'passed'],
      ['failed', 'failed'],
      ['skipped', 'skipped'],
      ['pending', 'skipped'], // pending gets mapped to skipped
    ] as const)('maps %s to %s', async (vitestState, expected) => {
      // Given a test with the specified state
      const testCase = createTestCase({
        result: () => createTestResult(vitestState),
      })

      // When we process the test
      sut.reporter.onTestModuleCollected(module)
      sut.reporter.onTestCaseResult(testCase)
      await sut.reporter.onTestRunEnd()

      // Then it should be mapped correctly
      const tests = await sut.getTests()
      expect(tests[0]?.state).toBe(expected)
    })
  })

  describe('error expected and actual values', () => {
    let error: FormattedError | undefined

    beforeEach(async () => {
      // Given a test with an assertion error
      sut.reporter.onTestModuleCollected(module)
      sut.reporter.onTestCaseResult(failedTest)
      await sut.reporter.onTestRunEnd()

      // When we get the failed test errors
      const failedTests = await sut.getFailedTests()
      error = failedTests[0]?.errors?.[0]
    })

    // Fixture values '3'/'2' come from failedTestCase() in the test-data file.
    it('includes expected value in error when available', () => {
      expect(error).toHaveProperty('expected')
      expect(error?.expected).toBe('3')
    })

    it('includes actual value in error when available', () => {
      expect(error).toHaveProperty('actual')
      expect(error?.actual).toBe('2')
    })
  })

  it('handles empty test runs', async () => {
    // When no tests are collected
    await sut.reporter.onTestRunEnd()

    // Then output should be valid JSON with empty modules
    const parsed = await sut.getParsedData()

    expect(parsed).not.toBeNull()
    expect(parsed).toEqual({ testModules: [], unhandledErrors: [] })
  })

  describe('storage integration', () => {
    it('saves test output to storage', async () => {
      const result = await sut.collectAndGetSaved([
        testModule(),
        createTestCase(),
      ])

      expect(result).toBeTruthy()
      expect(result).toContain('testModules')
      expect(result).toContain('passed')
    })

    it('accumulates multiple test results in storage', async () => {
      const result = await sut.collectAndGetSaved([
        module,
        passedTest,
        failedTest,
      ])

      const parsed = JSON.parse(result!)
      expect(parsed.testModules[0].tests).toHaveLength(2)
    })
  })

  describe('stores import errors as unhandled errors', () => {
    let parsed: TestResult | null

    beforeEach(async () => {
      // Given a module that was collected but has no tests due to import error
      const moduleWithImportError = testModule({
        moduleId: '/src/example.test.ts',
        errors: () => [createUnhandledError()],
      })

      // When the test run ends
      sut.reporter.onTestModuleCollected(moduleWithImportError)
      await sut.reporter.onTestRunEnd()

      parsed = await sut.getParsedData()
    })

    it('includes the module in test modules', () => {
      expect(parsed?.testModules).toHaveLength(1)
    })

    it('shows module with one synthetic failed test', () => {
      expect(parsed?.testModules[0].tests).toHaveLength(1)
      expect(parsed?.testModules[0].tests[0].state).toBe('failed')
    })

    it('uses module filename as test name', () => {
      const syntheticTest = parsed?.testModules[0].tests[0]
      expect(syntheticTest?.name).toBe('example.test.ts')
      expect(syntheticTest?.fullName).toBe('/src/example.test.ts')
    })

    it('includes import error details in synthetic test', () => {
      const syntheticTest = parsed?.testModules[0].tests[0]
      expect(syntheticTest?.errors).toHaveLength(1)
      expect(syntheticTest?.errors?.[0].message).toBe(
        'Cannot find module "./helpers"'
      )
    })

    it('includes empty unhandled errors', () => {
      expect(parsed?.unhandledErrors).toHaveLength(0)
    })

    it('preserves error message in synthetic test', () => {
      const error = parsed?.testModules[0].tests[0].errors?.[0]
      expect(error?.message).toBe('Cannot find module "./helpers"')
    })

    it('preserves error stack trace in synthetic test', () => {
      expect(parsed?.testModules[0].tests[0].errors?.[0].stack).toContain(
        'imported from'
      )
    })
  })

  describe('handles module errors from testModule.errors()', () => {
    it('creates synthetic test when module has errors', async () => {
      // Given a module with its own errors (like import errors)
      const moduleWithErrors = testModule({
        moduleId: '/src/import-error.test.ts',
        errors: () => [createUnhandledError()],
      })

      // When the test run ends
      sut.reporter.onTestModuleCollected(moduleWithErrors)
      await sut.reporter.onTestRunEnd([], [], 'failed')

      // Then a synthetic failed test should be created
      const parsed = await sut.getParsedData()
      expect(parsed?.testModules[0].tests).toHaveLength(1)
      expect(parsed?.testModules[0].tests[0].state).toBe('failed')
    })
  })

  describe('handles testModules parameter', () => {
    it('receives test modules in onTestRunEnd', async () => {
      const module1 = testModule({ moduleId: '/test1.ts' })
      const module2 = testModule({ moduleId: '/test2.ts' })

      await sut.reporter.onTestRunEnd([module1, module2])

      const parsed = await sut.getParsedData()
      expect(parsed?.testModules).toEqual([]) // Empty because we didn't collect them
    })
  })

  describe('handles unhandled errors', () => {
    it('includes unhandled errors in output when provided', async () => {
      const unhandledError = createUnhandledError({
        message: 'Connection failed',
        name: 'NetworkError',
      })

      await sut.reporter.onTestRunEnd([], [unhandledError])

      const parsed = await sut.getParsedData()
      expect(parsed?.unhandledErrors).toEqual([
        expect.objectContaining({
          message: 'Connection failed',
          name: 'NetworkError',
        }),
      ])
    })

    it('returns empty array when no errors provided', async () => {
      await sut.reporter.onTestRunEnd()

      const parsed = await sut.getParsedData()
      expect(parsed?.unhandledErrors).toEqual([])
    })
  })

  describe('when test run ends with reason', () => {
    it('captures "failed" reason in output', async () => {
      const moduleWithImportError = testModule({
        moduleId: '/src/linters/eslint/helpers.test.ts',
      })

      sut.reporter.onTestModuleCollected(moduleWithImportError)
      await sut.reporter.onTestRunEnd([], [], 'failed')

      const parsed = await sut.getParsedData()

      expect(parsed?.reason).toBe('failed')
      // When no errors are provided, module should have no tests
      expect(parsed?.testModules[0].tests).toHaveLength(0)
    })

    it('creates synthetic test when module fails with errors', async () => {
      const moduleWithImportError = testModule({
        moduleId: '/src/failing.test.ts',
        errors: () => [createUnhandledError()],
      })

      sut.reporter.onTestModuleCollected(moduleWithImportError)
      await sut.reporter.onTestRunEnd([], [], 'failed')

      const parsed = await sut.getParsedData()

      expect(parsed?.reason).toBe('failed')
      expect(parsed?.testModules[0].tests).toHaveLength(1)
      expect(parsed?.testModules[0].tests[0].state).toBe('failed')
    })

    it('captures "interrupted" reason in output', async () => {
      await sut.reporter.onTestRunEnd([], [], 'interrupted')

      const parsed = await sut.getParsedData()
      expect(parsed?.reason).toBe('interrupted')
    })

    it('captures "passed" reason in output', async () => {
      sut.reporter.onTestModuleCollected(module)
      sut.reporter.onTestCaseResult(passedTest)
      await sut.reporter.onTestRunEnd([], [], 'passed')

      const parsed = await sut.getParsedData()
      expect(parsed?.reason).toBe('passed')
    })
  })
})
|
|
369
|
+
|
|
370
|
+
function setupVitestReporter(options?: { type: 'file' | 'memory' }) {
|
|
371
|
+
const { storage, cleanup } = createTestStorage(options?.type)
|
|
372
|
+
const reporter = new VitestReporter(storage)
|
|
373
|
+
|
|
374
|
+
const collectAndGetSaved = async (
|
|
375
|
+
items: Array<TestModule | TestCase>
|
|
376
|
+
): Promise<string | null> => {
|
|
377
|
+
collectTestData(reporter, items)
|
|
378
|
+
await reporter.onTestRunEnd()
|
|
379
|
+
return storage.getTest()
|
|
380
|
+
}
|
|
381
|
+
|
|
382
|
+
const getParsedData = async (): Promise<TestResult | null> => {
|
|
383
|
+
const content = await storage.getTest()
|
|
384
|
+
return content ? JSON.parse(content) : null
|
|
385
|
+
}
|
|
386
|
+
|
|
387
|
+
const getTests = async (): Promise<Test[]> => {
|
|
388
|
+
return getTestsFromStorage(storage)
|
|
389
|
+
}
|
|
390
|
+
|
|
391
|
+
const getPassedTests = async (): Promise<(Test & { state: 'passed' })[]> => {
|
|
392
|
+
const tests = await getTests()
|
|
393
|
+
return tests.filter(isPassingTest)
|
|
394
|
+
}
|
|
395
|
+
|
|
396
|
+
const getFailedTests = async (): Promise<(Test & { state: 'failed' })[]> => {
|
|
397
|
+
const tests = await getTests()
|
|
398
|
+
return tests.filter(isFailingTest)
|
|
399
|
+
}
|
|
400
|
+
|
|
401
|
+
return {
|
|
402
|
+
reporter,
|
|
403
|
+
storage,
|
|
404
|
+
collectAndGetSaved,
|
|
405
|
+
getParsedData,
|
|
406
|
+
getTests,
|
|
407
|
+
getPassedTests,
|
|
408
|
+
getFailedTests,
|
|
409
|
+
cleanup,
|
|
410
|
+
}
|
|
411
|
+
}
|
|
412
|
+
|
|
413
|
+
function createTestStorage(type: 'file' | 'memory' = 'memory'): {
|
|
414
|
+
storage: Storage
|
|
415
|
+
cleanup: () => void
|
|
416
|
+
} {
|
|
417
|
+
if (type === 'file') {
|
|
418
|
+
const projectRoot = mkdtempSync(join(tmpdir(), 'vitest-reporter-test-'))
|
|
419
|
+
const config = new Config({ projectRoot })
|
|
420
|
+
const storage = new FileStorage(config)
|
|
421
|
+
const cleanup = () => rmSync(projectRoot, { recursive: true, force: true })
|
|
422
|
+
return { storage, cleanup }
|
|
423
|
+
}
|
|
424
|
+
|
|
425
|
+
return { storage: new MemoryStorage(), cleanup: () => {} }
|
|
426
|
+
}
|
|
427
|
+
|
|
428
|
+
function collectTestData(
|
|
429
|
+
reporter: VitestReporter,
|
|
430
|
+
items: Array<TestModule | TestCase>
|
|
431
|
+
): void {
|
|
432
|
+
for (const item of items) {
|
|
433
|
+
if ('moduleId' in item && !('module' in item)) {
|
|
434
|
+
reporter.onTestModuleCollected(item as TestModule)
|
|
435
|
+
} else {
|
|
436
|
+
reporter.onTestCaseResult(item as TestCase)
|
|
437
|
+
}
|
|
438
|
+
}
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
async function getTestsFromStorage(storage: Storage): Promise<Test[]> {
|
|
442
|
+
const content = await storage.getTest()
|
|
443
|
+
if (!content) return []
|
|
444
|
+
const parsed: TestResult = JSON.parse(content)
|
|
445
|
+
return parsed.testModules[0]?.tests ?? []
|
|
446
|
+
}
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
import { Reporter, TestModule, TestCase, TestRunEndReason } from 'vitest/node'
|
|
2
|
+
import type { SerializedError } from '@vitest/utils'
|
|
3
|
+
import { Storage, FileStorage, Config } from 'tdd-guard'
|
|
4
|
+
import { basename } from 'node:path'
|
|
5
|
+
import type {
|
|
6
|
+
CollectedModuleData,
|
|
7
|
+
FormattedError,
|
|
8
|
+
FormattedTest,
|
|
9
|
+
ModuleDataMap,
|
|
10
|
+
ModuleResult,
|
|
11
|
+
TestRunOutput,
|
|
12
|
+
} from './types'
|
|
13
|
+
|
|
14
|
+
export class VitestReporter implements Reporter {
|
|
15
|
+
private readonly storage: Storage
|
|
16
|
+
private readonly collectedData: ModuleDataMap = new Map()
|
|
17
|
+
|
|
18
|
+
constructor(storageOrRoot?: Storage | string) {
|
|
19
|
+
this.storage =
|
|
20
|
+
typeof storageOrRoot === 'string'
|
|
21
|
+
? new FileStorage(new Config({ projectRoot: storageOrRoot }))
|
|
22
|
+
: (storageOrRoot ?? new FileStorage())
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
onTestModuleCollected(testModule: TestModule): void {
|
|
26
|
+
this.collectedData.set(testModule.moduleId, {
|
|
27
|
+
module: testModule,
|
|
28
|
+
tests: [],
|
|
29
|
+
})
|
|
30
|
+
}
|
|
31
|
+
|
|
32
|
+
onTestCaseResult(testCase: TestCase): void {
|
|
33
|
+
const moduleId = testCase.module.moduleId
|
|
34
|
+
if (!moduleId) return
|
|
35
|
+
|
|
36
|
+
this.collectedData.get(moduleId)?.tests.push(testCase)
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
async onTestRunEnd(
|
|
40
|
+
_testModules?: ReadonlyArray<TestModule>,
|
|
41
|
+
unhandledErrors?: ReadonlyArray<SerializedError>,
|
|
42
|
+
reason?: TestRunEndReason
|
|
43
|
+
): Promise<void> {
|
|
44
|
+
// _testModules contains only module metadata, we use collected data from callbacks
|
|
45
|
+
const formattedModules = formatAllModuleResults(this.collectedData)
|
|
46
|
+
const output = createTestRunOutput(
|
|
47
|
+
formattedModules,
|
|
48
|
+
unhandledErrors,
|
|
49
|
+
reason
|
|
50
|
+
)
|
|
51
|
+
await this.storage.saveTest(JSON.stringify(output, null, 2))
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
function createTestRunOutput(
|
|
56
|
+
testModules: ModuleResult[],
|
|
57
|
+
unhandledErrors?: ReadonlyArray<SerializedError>,
|
|
58
|
+
reason?: TestRunEndReason
|
|
59
|
+
): TestRunOutput {
|
|
60
|
+
return {
|
|
61
|
+
testModules,
|
|
62
|
+
unhandledErrors: unhandledErrors ?? [],
|
|
63
|
+
...(reason && { reason }),
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
function formatAllModuleResults(collectedData: ModuleDataMap): ModuleResult[] {
|
|
68
|
+
return Array.from(collectedData.values()).map((data) => ({
|
|
69
|
+
moduleId: data.module.moduleId,
|
|
70
|
+
tests: moduleFailedToLoad(data)
|
|
71
|
+
? createTestForFailedModule(data)
|
|
72
|
+
: formatNormalTests(data),
|
|
73
|
+
}))
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
function moduleFailedToLoad(data: CollectedModuleData): boolean {
|
|
77
|
+
return data.module.errors().length > 0 && data.tests.length === 0
|
|
78
|
+
}
|
|
79
|
+
|
|
80
|
+
function createTestForFailedModule(data: CollectedModuleData): FormattedTest[] {
|
|
81
|
+
return [
|
|
82
|
+
{
|
|
83
|
+
name: basename(data.module.moduleId),
|
|
84
|
+
fullName: data.module.moduleId,
|
|
85
|
+
state: 'failed',
|
|
86
|
+
errors: data.module.errors().map(formatError),
|
|
87
|
+
},
|
|
88
|
+
]
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
function formatNormalTests(data: CollectedModuleData): FormattedTest[] {
|
|
92
|
+
return data.tests.map((test) => {
|
|
93
|
+
const result = test.result()
|
|
94
|
+
return {
|
|
95
|
+
name: test.name,
|
|
96
|
+
fullName: test.fullName,
|
|
97
|
+
state: result.state === 'pending' ? 'skipped' : result.state,
|
|
98
|
+
errors: result.errors?.map(formatError),
|
|
99
|
+
}
|
|
100
|
+
})
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
function formatError(error: SerializedError): FormattedError {
|
|
104
|
+
return {
|
|
105
|
+
message: error.message,
|
|
106
|
+
stack: error.stack,
|
|
107
|
+
expected: error.expected,
|
|
108
|
+
actual: error.actual,
|
|
109
|
+
}
|
|
110
|
+
}
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import type {
|
|
2
|
+
TestState,
|
|
3
|
+
TestRunEndReason,
|
|
4
|
+
TestModule,
|
|
5
|
+
TestCase,
|
|
6
|
+
} from 'vitest/node'
|
|
7
|
+
import type { SerializedError } from '@vitest/utils'
|
|
8
|
+
|
|
9
|
+
/** Run data collected via reporter callbacks, keyed by module id. */
export type ModuleDataMap = Map<string, CollectedModuleData>

/** A collected module together with the test cases reported for it. */
export type CollectedModuleData = {
  module: TestModule
  tests: TestCase[]
}

/** Serializable view of a test error. */
export type FormattedError = {
  message: string
  stack?: string
  // Assertion diff values; `unknown` because vitest may serialize any value.
  expected?: unknown
  actual?: unknown
}

/** Serializable view of a single test case result. */
export type FormattedTest = {
  name: string
  fullName: string
  state: TestState
  errors?: FormattedError[]
}

/** Formatted results for one test module. */
export type ModuleResult = {
  moduleId: string
  tests: FormattedTest[]
}

/** Top-level payload persisted at the end of a test run. */
export type TestRunOutput = {
  testModules: ModuleResult[]
  unhandledErrors: readonly SerializedError[]
  // Present only when vitest supplied an end reason to onTestRunEnd.
  reason?: TestRunEndReason
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
{
|
|
2
|
+
"extends": "../../tsconfig.json",
|
|
3
|
+
"compilerOptions": {
|
|
4
|
+
"composite": true,
|
|
5
|
+
"outDir": "./dist",
|
|
6
|
+
"rootDir": "./src",
|
|
7
|
+
"tsBuildInfoFile": "./dist/tsconfig.tsbuildinfo"
|
|
8
|
+
},
|
|
9
|
+
"include": ["src/**/*"],
|
|
10
|
+
"exclude": ["**/*.test.ts", "**/*.spec.ts"]
|
|
11
|
+
}
|
|
Binary file
|