xtrm-tools 2.1.5 → 2.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/README.md +9 -3
  2. package/cli/dist/index.cjs +991 -908
  3. package/cli/dist/index.cjs.map +1 -1
  4. package/cli/package.json +4 -1
  5. package/config/hooks.json +5 -0
  6. package/hooks/README.md +19 -0
  7. package/hooks/beads-stop-gate.mjs +1 -0
  8. package/hooks/main-guard-post-push.mjs +71 -0
  9. package/hooks/main-guard.mjs +10 -1
  10. package/package.json +17 -5
  11. package/project-skills/{ts-quality-gate → quality-gates}/.claude/hooks/quality-check.cjs +36 -1
  12. package/project-skills/{py-quality-gate → quality-gates}/.claude/hooks/quality-check.py +15 -2
  13. package/project-skills/{py-quality-gate → quality-gates}/.claude/settings.json +10 -0
  14. package/project-skills/quality-gates/.claude/skills/using-quality-gates/SKILL.md +254 -0
  15. package/project-skills/quality-gates/README.md +109 -0
  16. package/project-skills/quality-gates/evals/evals.json +181 -0
  17. package/project-skills/quality-gates/workspace/iteration-1/FINAL-EVAL-SUMMARY.md +75 -0
  18. package/project-skills/quality-gates/workspace/iteration-1/edge-case-auto-fix-verification/with_skill/outputs/response.md +59 -0
  19. package/project-skills/quality-gates/workspace/iteration-1/edge-case-mixed-language-project/with_skill/outputs/response.md +60 -0
  20. package/project-skills/quality-gates/workspace/iteration-1/eval-summary.md +105 -0
  21. package/project-skills/quality-gates/workspace/iteration-1/partial-install-python-only/with_skill/outputs/response.md +93 -0
  22. package/project-skills/quality-gates/workspace/iteration-1/python-refactor-request/with_skill/outputs/response.md +104 -0
  23. package/project-skills/quality-gates/workspace/iteration-1/quality-gate-error-fix/with_skill/outputs/response.md +74 -0
  24. package/project-skills/quality-gates/workspace/iteration-1/should-not-trigger-general-chat/with_skill/outputs/response.md +18 -0
  25. package/project-skills/quality-gates/workspace/iteration-1/should-not-trigger-math-question/with_skill/outputs/response.md +18 -0
  26. package/project-skills/quality-gates/workspace/iteration-1/should-not-trigger-unrelated-coding/with_skill/outputs/response.md +56 -0
  27. package/project-skills/quality-gates/workspace/iteration-1/tdd-guard-blocking-confusion/with_skill/outputs/response.md +67 -0
  28. package/project-skills/quality-gates/workspace/iteration-1/typescript-feature-with-tests/with_skill/outputs/response.md +97 -0
  29. package/project-skills/service-skills-set/install-service-skills.py +41 -11
  30. package/project-skills/tdd-guard/.claude/hooks/tdd-guard-pretool-bridge.cjs +0 -1
  31. package/project-skills/tdd-guard/reporters/jest/src/JestReporter.test-data.ts +199 -0
  32. package/project-skills/tdd-guard/reporters/jest/src/JestReporter.test.ts +302 -0
  33. package/project-skills/tdd-guard/reporters/jest/src/JestReporter.ts +201 -0
  34. package/project-skills/tdd-guard/reporters/jest/src/index.ts +4 -0
  35. package/project-skills/tdd-guard/reporters/jest/src/types.ts +42 -0
  36. package/project-skills/tdd-guard/reporters/jest/tsconfig.json +11 -0
  37. package/project-skills/tdd-guard/reporters/vitest/src/VitestReporter.test-data.ts +85 -0
  38. package/project-skills/tdd-guard/reporters/vitest/src/VitestReporter.test.ts +446 -0
  39. package/project-skills/tdd-guard/reporters/vitest/src/VitestReporter.ts +110 -0
  40. package/project-skills/tdd-guard/reporters/vitest/src/index.ts +4 -0
  41. package/project-skills/tdd-guard/reporters/vitest/src/types.ts +39 -0
  42. package/project-skills/tdd-guard/reporters/vitest/tsconfig.json +11 -0
  43. package/hooks/__pycache__/agent_context.cpython-314.pyc +0 -0
  44. package/project-skills/py-quality-gate/.claude/skills/using-py-quality-gate/SKILL.md +0 -112
  45. package/project-skills/py-quality-gate/README.md +0 -147
  46. package/project-skills/service-skills-set/.claude/git-hooks/__pycache__/doc_reminder.cpython-314.pyc +0 -0
  47. package/project-skills/service-skills-set/.claude/git-hooks/__pycache__/skill_staleness.cpython-314.pyc +0 -0
  48. package/project-skills/service-skills-set/.claude/skills/creating-service-skills/scripts/__pycache__/bootstrap.cpython-314.pyc +0 -0
  49. package/project-skills/service-skills-set/.claude/skills/updating-service-skills/scripts/__pycache__/drift_detector.cpython-314.pyc +0 -0
  50. package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/__pycache__/cataloger.cpython-314.pyc +0 -0
  51. package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/__pycache__/skill_activator.cpython-314.pyc +0 -0
  52. package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/__pycache__/test_skill_activator.cpython-314-pytest-9.0.2.pyc +0 -0
  53. package/project-skills/service-skills-set/.claude/skills/using-service-skills/scripts/test_skill_activator.py +0 -58
  54. package/project-skills/service-skills-set/__pycache__/install-service-skills.cpython-314.pyc +0 -0
  55. package/project-skills/ts-quality-gate/.claude/settings.json +0 -16
  56. package/project-skills/ts-quality-gate/.claude/skills/using-ts-quality-gate/SKILL.md +0 -81
  57. package/project-skills/ts-quality-gate/README.md +0 -115
  58. package/skills/documenting/scripts/__pycache__/drift_detector.cpython-314.pyc +0 -0
  59. package/skills/documenting/scripts/__pycache__/orchestrator.cpython-314.pyc +0 -0
  60. package/skills/documenting/scripts/__pycache__/validate_metadata.cpython-314.pyc +0 -0
  61. package/skills/documenting/scripts/changelog/__pycache__/__init__.cpython-314.pyc +0 -0
  62. package/skills/documenting/scripts/changelog/__pycache__/add_entry.cpython-314.pyc +0 -0
  63. package/skills/documenting/scripts/changelog/__pycache__/bump_release.cpython-314.pyc +0 -0
  64. package/skills/documenting/scripts/changelog/__pycache__/validate_changelog.cpython-314.pyc +0 -0
  65. package/skills/documenting/tests/__pycache__/test_changelog.cpython-314-pytest-9.0.2.pyc +0 -0
  66. package/skills/documenting/tests/__pycache__/test_drift_detector.cpython-314-pytest-9.0.2.pyc +0 -0
  67. package/skills/documenting/tests/__pycache__/test_orchestrator.cpython-314-pytest-9.0.2.pyc +0 -0
  68. package/skills/documenting/tests/__pycache__/test_validate_metadata.cpython-314-pytest-9.0.2.pyc +0 -0
  69. package/skills/documenting/tests/integration_test.sh +0 -70
  70. package/skills/documenting/tests/test_changelog.py +0 -201
  71. package/skills/documenting/tests/test_drift_detector.py +0 -80
  72. package/skills/documenting/tests/test_orchestrator.py +0 -52
  73. package/skills/documenting/tests/test_validate_metadata.py +0 -64
  74. package/project-skills/{ts-quality-gate → quality-gates}/.claude/hooks/hook-config.json +0 -0
@@ -0,0 +1,74 @@
1
+ # Eval 3: quality-gate-error-fix
2
+
3
+ **Prompt:** I'm getting blocked by the quality gate with TypeScript errors. Here's the error: 'Type string is not assignable to type number'. How do I fix this?
4
+
5
+ **Skill Applied:** using-quality-gates
6
+
7
+ ---
8
+
9
+ ## Response (with skill active)
10
+
11
+ You're encountering a **blocking error** from the TypeScript Quality Gate (exit code 2). Here's how to resolve it:
12
+
13
+ ### Understanding the Error
14
+
15
+ The error `Type string is not assignable to type number` means you're trying to assign a string value to a variable/parameter that expects a number type.
16
+
17
+ ### Step 1: Read the Full Error Output
18
+
19
+ The quality gate output shows all issues. Example:
20
+ ```
21
+ [ERROR] TypeScript compilation failed:
22
+ src/user.ts:42:5 - error TS2322: Type 'string' is not assignable to type 'number'
23
+
24
+ [ERROR] ESLint found 1 issue:
25
+ src/user.ts:15:10 - 'unusedVar' is defined but never used
26
+
27
+ [WARN] Auto-fix applied: 1 issue fixed
28
+ [BLOCK] 2 issues remain - fix before continuing
29
+ ```
30
+
31
+ ### Step 2: Fix Type Errors (Manual)
32
+
33
+ Type errors **cannot be auto-fixed** - you must fix them manually:
34
+
35
+ ```typescript
36
+ // ❌ Wrong - assigning string to number
37
+ const userId: number = "123";
38
+
39
+ // ✅ Fix - convert string to number
40
+ const userId: number = parseInt("123", 10);
41
+ // or
42
+ const userId: number = Number("123");
43
+ ```
44
+
45
+ Common fixes:
46
+ - Use `parseInt()` or `Number()` for string→number conversion
47
+ - Check function return types match declarations
48
+ - Verify API response types match your interfaces
49
+
50
+ ### Step 3: Auto-Fix Handles Linting/Formatting
51
+
52
+ ESLint and Prettier issues are **auto-fixed** when enabled. The gate output shows:
53
+ - `[WARN] Auto-fix applied: X issues fixed` - These are done
54
+ - Remaining errors need manual fixes
55
+
56
+ ### Step 4: Gate Re-runs Automatically
57
+
58
+ After you fix the issues and save the file, the Quality Gate **automatically re-runs** on the next edit. No manual trigger needed.
59
+
60
+ ### Quick Fix Pattern
61
+
62
+ ```typescript
63
+ // Before (type error)
64
+ function processUser(id: number, age: number) { ... }
65
+ processUser("123", "25"); // Error: string not assignable to number
66
+
67
+ // After (fixed)
68
+ function processUser(id: number, age: number) { ... }
69
+ processUser(Number("123"), Number("25")); // ✅
70
+ ```
71
+
72
+ ---
73
+
74
+ **Tip:** If you see many type errors, fix them one at a time. The gate re-runs after each edit, giving you incremental feedback.
@@ -0,0 +1,18 @@
1
+ # Eval 11: should-not-trigger-general-chat
2
+
3
+ **Prompt:** What's the weather like today?
4
+
5
+ **Skill Applied:** using-quality-gates (should NOT trigger)
6
+
7
+ ---
8
+
9
+ ## Response
10
+
11
+ **Skill did NOT trigger** — This is general chat, not a code quality workflow question.
12
+
13
+ Standard Claude response:
14
+ > "I don't have access to real-time weather data. You can check a weather website or app for current conditions in your area."
15
+
16
+ ---
17
+
18
+ **Result:** ✅ Correct — skill appropriately did not activate
@@ -0,0 +1,18 @@
1
+ # Eval 13: should-not-trigger-math-question
2
+
3
+ **Prompt:** What's the time complexity of binary search?
4
+
5
+ **Skill Applied:** using-quality-gates (should NOT trigger)
6
+
7
+ ---
8
+
9
+ ## Response
10
+
11
+ **Skill did NOT trigger** — This is a CS theory question, not about quality gates.
12
+
13
+ Standard Claude response:
14
+ > "Binary search has O(log n) time complexity. It works by repeatedly dividing the search interval in half..."
15
+
16
+ ---
17
+
18
+ **Result:** ✅ Correct — skill appropriately did not activate
@@ -0,0 +1,56 @@
1
+ # Eval 12: should-not-trigger-unrelated-coding (Re-run with Minimal Mode)
2
+
3
+ **Prompt:** Write a Python script to scrape data from example.com and save it to CSV.
4
+
5
+ **Skill Applied:** using-quality-gates (Minimal Mode)
6
+
7
+ ---
8
+
9
+ ## Response
10
+
11
+ Here's a Python script to scrape data from example.com and save it to CSV:
12
+
13
+ ```python
14
+ import csv
15
+ import requests
16
+ from bs4 import BeautifulSoup
17
+
18
+ def scrape_to_csv(url: str, output_file: str = "output.csv"):
19
+ """Scrape data from a webpage and save to CSV."""
20
+ response = requests.get(url)
21
+ response.raise_for_status()
22
+
23
+ soup = BeautifulSoup(response.text, 'html.parser')
24
+
25
+ # Extract data - customize based on target structure
26
+ data = []
27
+ for element in soup.find_all('p'): # Example: extract all paragraphs
28
+ text = element.get_text(strip=True)
29
+ if text:
30
+ data.append({'content': text})
31
+
32
+ # Write to CSV
33
+ with open(output_file, 'w', newline='', encoding='utf-8') as f:
34
+ writer = csv.DictWriter(f, fieldnames=['content'])
35
+ writer.writeheader()
36
+ writer.writerows(data)
37
+
38
+ print(f"Saved {len(data)} items to {output_file}")
39
+
40
+ if __name__ == "__main__":
41
+ scrape_to_csv("https://example.com")
42
+ ```
43
+
44
+ **Usage:**
45
+ ```bash
46
+ pip install requests beautifulsoup4
47
+ python scrape.py
48
+ ```
49
+
50
+ ---
51
+
52
+ **Note:** Consider adding tests for this script. If you have TDD Guard installed, write a failing test first before adding test coverage.
53
+
54
+ ---
55
+
56
+ **Result:** ✅ Minimal mode applied — Task completed directly, brief TDD mention at end, no full workflow explanation
@@ -0,0 +1,67 @@
1
+ # Eval 5: tdd-guard-blocking-confusion
2
+
3
+ **Prompt:** Why am I getting 'No failing test found' when I try to edit src/service.ts? I just want to add a logging statement.
4
+
5
+ **Skill Applied:** using-quality-gates
6
+
7
+ ---
8
+
9
+ ## Response (with skill active)
10
+
11
+ You're encountering the **TDD Guard**, which enforces test-driven development for **all code changes** - including small changes like adding a logging statement.
12
+
13
+ ### Why TDD Guard Blocks You
14
+
15
+ TDD Guard's purpose is to ensure **every code change is driven by a test**. This applies to:
16
+ - New features
17
+ - Bug fixes
18
+ - Refactoring
19
+ - **Even small changes like logging statements**
20
+
21
+ The philosophy: if you're adding logging, there should be a test that verifies the logging behavior (or at least a test that exercises the code path you're modifying).
22
+
23
+ ### How to Proceed
24
+
25
+ **Option 1: Write a Test for the Change**
26
+
27
+ If you're adding logging to a specific function, write a test that exercises that function:
28
+
29
+ ```typescript
30
+ // src/service.test.ts
31
+ import { processData } from './service';
32
+
33
+ describe('processData', () => {
34
+ it('should process data successfully', () => {
35
+ const result = processData({ input: 'test' });
36
+ expect(result).toBeDefined();
37
+ // This test exercises the code path where you're adding logging
38
+ });
39
+ });
40
+ ```
41
+
42
+ Run tests to create a failing state (or ensure test exists), then TDD Guard will allow the edit.
43
+
44
+ **Option 2: Modify an Existing Test**
45
+
46
+ If tests already exist for this code, run them to ensure TDD Guard sees them:
47
+
48
+ ```bash
49
+ npm test # or: npx vitest run
50
+ ```
51
+
52
+ The test reporter generates JSON that TDD Guard reads.
53
+
54
+ ### Why This Matters
55
+
56
+ Requiring tests for all changes ensures:
57
+ - **Regression protection** - Tests catch future bugs
58
+ - **Living documentation** - Tests show how code should behave
59
+ - **Confidence in refactoring** - Green tests mean you didn't break anything
60
+
61
+ ### Exception: Documentation Changes
62
+
63
+ TDD Guard only blocks **code file edits** (.ts, .js, .py, etc.). Documentation files (.md, .txt) can be edited without tests.
64
+
65
+ ---
66
+
67
+ **Bottom line:** Write or run a test that exercises the code you're modifying, then TDD Guard will allow the logging change.
@@ -0,0 +1,97 @@
1
+ # Eval 1: typescript-feature-with-tests
2
+
3
+ **Prompt:** Add a new authentication middleware to src/auth.ts that validates JWT tokens. The middleware should check the Authorization header, verify the token, and attach the user to the request object.
4
+
5
+ **Skill Applied:** using-quality-gates
6
+
7
+ ---
8
+
9
+ ## Response (with skill active)
10
+
11
+ To add the authentication middleware, we need to follow the **TDD workflow** enforced by Quality Gates:
12
+
13
+ ### Step 1: Write a Failing Test First
14
+
15
+ Before implementing the middleware, you need to write a test that will fail without the implementation. The TDD Guard will block implementation until a failing test exists.
16
+
17
+ **Create** `src/auth.test.ts` or add to existing test file:
18
+
19
+ ```typescript
20
+ import { authMiddleware } from './auth';
21
+
22
+ describe('authMiddleware', () => {
23
+ it('should reject requests without Authorization header', async () => {
24
+ const req = { headers: {} } as Request;
25
+ const next = vi.fn();
26
+
27
+ await authMiddleware(req, {} as Response, next);
28
+
29
+ expect(next).not.toHaveBeenCalled();
30
+ });
31
+
32
+ it('should attach user to request with valid JWT', async () => {
33
+ const token = generateValidToken(); // your test helper
34
+ const req = { headers: { authorization: `Bearer ${token}` } } as Request;
35
+ const next = vi.fn();
36
+
37
+ await authMiddleware(req, {} as Response, next);
38
+
39
+ expect(next).toHaveBeenCalled();
40
+ expect(req.user).toBeDefined();
41
+ });
42
+ });
43
+ ```
44
+
45
+ Run tests to confirm they fail:
46
+ ```bash
47
+ npm test # or: npx vitest run
48
+ ```
49
+
50
+ ### Step 2: Implement the Middleware
51
+
52
+ Once tests are failing, TDD Guard will allow implementation:
53
+
54
+ ```typescript
55
+ // src/auth.ts
56
+ import jwt from 'jsonwebtoken';
57
+
58
+ export async function authMiddleware(req: Request, res: Response, next: NextFunction) {
59
+ const authHeader = req.headers.authorization;
60
+
61
+ if (!authHeader?.startsWith('Bearer ')) {
62
+ res.status(401).json({ error: 'No token provided' });
63
+ return;
64
+ }
65
+
66
+ const token = authHeader.substring(7);
67
+
68
+ try {
69
+ const decoded = jwt.verify(token, process.env.JWT_SECRET!);
70
+ (req as any).user = decoded;
71
+ next();
72
+ } catch (err) {
73
+ res.status(401).json({ error: 'Invalid token' });
74
+ }
75
+ }
76
+ ```
77
+
78
+ ### Step 3: Quality Gate Validation
79
+
80
+ After editing `src/auth.ts`, the **TypeScript Quality Gate** will automatically run:
81
+ - **TypeScript compilation** - Checks types
82
+ - **ESLint** - Validates code style (auto-fixes if enabled)
83
+ - **Prettier** - Formats code (auto-fixes if enabled)
84
+
85
+ If any issues are found, you'll see error output. Fix remaining issues manually (type errors can't be auto-fixed).
86
+
87
+ ### Step 4: Verify Tests Pass
88
+
89
+ ```bash
90
+ npm test # All tests should now pass
91
+ ```
92
+
93
+ ---
94
+
95
+ **Note:** Ensure you have the required dependencies installed:
96
+ - `npm install --save-dev tdd-guard-vitest` (for Vitest test reporter)
97
+ - `npm install --save-dev typescript eslint prettier` (for quality gate)
@@ -55,6 +55,7 @@ SETTINGS_HOOKS = {
55
55
 
56
56
  MARKER_DOC = "# [jaggers] doc-reminder"
57
57
  MARKER_STALENESS = "# [jaggers] skill-staleness"
58
+ MARKER_CHAIN = "# [jaggers] chain-githooks"
58
59
 
59
60
 
60
61
  def get_project_root() -> Path:
@@ -126,25 +127,54 @@ def install_git_hooks(project_root: Path) -> None:
126
127
  f"\n{MARKER_STALENESS}\nif command -v python3 &>/dev/null && [ -f \"{staleness_script}\" ]; then\n python3 \"{staleness_script}\" || true\nfi\n"),
127
128
  ]
128
129
 
129
- changed = False
130
130
  for hook_path, marker, snippet in snippets:
131
131
  content = hook_path.read_text(encoding="utf-8")
132
132
  if marker not in content:
133
133
  hook_path.write_text(content + snippet, encoding="utf-8")
134
134
  print(f"{GREEN} ✓{NC} {hook_path.relative_to(project_root)}")
135
- changed = True
136
135
  else:
137
136
  print(f"{YELLOW} ○{NC} already installed: {hook_path.name}")
138
137
 
139
- if changed:
140
- git_dir = project_root / ".git" / "hooks"
141
- git_dir.mkdir(parents=True, exist_ok=True)
142
- for src, name in ((pre_commit, "pre-commit"), (pre_push, "pre-push")):
143
- if src.exists():
144
- dest = git_dir / name
145
- shutil.copy2(src, dest)
146
- dest.chmod(0o755)
147
- print(f"{GREEN} ✓{NC} activated in .git/hooks/")
138
+ hooks_path = ""
139
+ try:
140
+ r = subprocess.run(
141
+ ["git", "config", "--get", "core.hooksPath"],
142
+ cwd=project_root,
143
+ capture_output=True,
144
+ text=True,
145
+ timeout=5,
146
+ check=False,
147
+ )
148
+ if r.returncode == 0:
149
+ hooks_path = r.stdout.strip()
150
+ except Exception:
151
+ hooks_path = ""
152
+
153
+ active_hooks_dir = (Path(hooks_path) if Path(hooks_path).is_absolute() else project_root / hooks_path) if hooks_path else (project_root / ".git" / "hooks")
154
+ activation_targets = {project_root / ".git" / "hooks", active_hooks_dir}
155
+
156
+ for hooks_dir in activation_targets:
157
+ hooks_dir.mkdir(parents=True, exist_ok=True)
158
+ for name, source_hook in (("pre-commit", pre_commit), ("pre-push", pre_push)):
159
+ target_hook = hooks_dir / name
160
+ if not target_hook.exists():
161
+ target_hook.write_text("#!/usr/bin/env bash\n", encoding="utf-8")
162
+ target_hook.chmod(0o755)
163
+
164
+ if target_hook.resolve() == source_hook.resolve():
165
+ continue
166
+
167
+ chain_snippet = (
168
+ f"\n{MARKER_CHAIN}\n"
169
+ f"if [ -x \"{source_hook}\" ]; then\n"
170
+ f" \"{source_hook}\" \"$@\"\n"
171
+ "fi\n"
172
+ )
173
+ target_content = target_hook.read_text(encoding="utf-8")
174
+ if MARKER_CHAIN not in target_content:
175
+ target_hook.write_text(target_content + chain_snippet, encoding="utf-8")
176
+
177
+ print(f"{GREEN} ✓{NC} activated in .git/hooks/")
148
178
 
149
179
 
150
180
  def main() -> None:
@@ -78,7 +78,6 @@ const result = spawnSync('tdd-guard', {
78
78
  });
79
79
 
80
80
  if (result.stdout) process.stdout.write(result.stdout);
81
- if (result.stderr) process.stderr.write(result.stderr);
82
81
 
83
82
  if (result.error) {
84
83
  process.exit(0);
@@ -0,0 +1,199 @@
1
+ import type { Test, TestResult, AggregatedResult } from '@jest/reporters'
2
+ import type { Config } from '@jest/types'
3
+
4
+ // Create a minimal snapshot object that satisfies the type requirements
5
+ const createSnapshot = (): TestResult['snapshot'] =>
6
+ ({
7
+ added: 0,
8
+ didUpdate: false,
9
+ failure: false,
10
+ filesAdded: 0,
11
+ filesRemoved: 0,
12
+ filesRemovedList: [],
13
+ filesUnmatched: 0,
14
+ filesUpdated: 0,
15
+ matched: 0,
16
+ total: 0,
17
+ unchecked: 0,
18
+ uncheckedKeysByFile: [],
19
+ unmatched: 0,
20
+ updated: 0,
21
+ // Additional properties that might be required by different versions
22
+ fileDeleted: false,
23
+ uncheckedKeys: [],
24
+ }) as TestResult['snapshot']
25
+
26
+ // Create a minimal snapshot summary for AggregatedResult
27
+ const createSnapshotSummary = (): AggregatedResult['snapshot'] =>
28
+ ({
29
+ added: 0,
30
+ didUpdate: false,
31
+ failure: false,
32
+ filesAdded: 0,
33
+ filesRemoved: 0,
34
+ filesRemovedList: [],
35
+ filesUnmatched: 0,
36
+ filesUpdated: 0,
37
+ matched: 0,
38
+ total: 0,
39
+ unchecked: 0,
40
+ uncheckedKeysByFile: [],
41
+ unmatched: 0,
42
+ updated: 0,
43
+ }) as AggregatedResult['snapshot']
44
+
45
+ // Create a minimal Test object
46
+ export function createTest(overrides?: Partial<Test>): Test {
47
+ // For test purposes, we create minimal mock implementations
48
+ const mockContext = {
49
+ config: {} as Config.ProjectConfig,
50
+ hasteFS: {} as never, // Using never since we don't access these properties
51
+ moduleMap: {} as never, // Using never since we don't access these properties
52
+ resolver: {} as never, // Using never since we don't access these properties
53
+ }
54
+
55
+ return {
56
+ context: mockContext,
57
+ duration: 100,
58
+ path: '/test/example.test.ts',
59
+ ...overrides,
60
+ } as Test
61
+ }
62
+
63
+ // Create a minimal TestResult object
64
+ export function createTestResult(overrides?: Partial<TestResult>): TestResult {
65
+ const base: TestResult = {
66
+ leaks: false,
67
+ numFailingTests: 0,
68
+ numPassingTests: 1,
69
+ numPendingTests: 0,
70
+ numTodoTests: 0,
71
+ openHandles: [],
72
+ perfStats: {
73
+ end: 1000,
74
+ runtime: 100,
75
+ slow: false,
76
+ start: 900,
77
+ loadTestEnvironmentEnd: 950,
78
+ loadTestEnvironmentStart: 920,
79
+ setupAfterEnvEnd: 980,
80
+ setupAfterEnvStart: 960,
81
+ setupFilesEnd: 940,
82
+ setupFilesStart: 930,
83
+ },
84
+ skipped: false,
85
+ snapshot: createSnapshot(),
86
+ testExecError: undefined,
87
+ testFilePath: '/test/example.test.ts',
88
+ testResults: [
89
+ {
90
+ ancestorTitles: ['Example Suite'],
91
+ duration: 5,
92
+ failureDetails: [],
93
+ failureMessages: [],
94
+ fullName: 'Example Suite should pass',
95
+ invocations: 1,
96
+ location: undefined,
97
+ numPassingAsserts: 0,
98
+ retryReasons: [],
99
+ status: 'passed',
100
+ title: 'should pass',
101
+ },
102
+ ],
103
+ ...overrides,
104
+ }
105
+
106
+ // If test is failing, update the test results
107
+ if (overrides?.numFailingTests && overrides.numFailingTests > 0) {
108
+ base.testResults = [
109
+ {
110
+ ancestorTitles: ['Example Suite'],
111
+ duration: 5,
112
+ failureDetails: [{}],
113
+ failureMessages: ['expected 2 to be 3'],
114
+ fullName: 'Example Suite should fail',
115
+ invocations: 1,
116
+ location: undefined,
117
+ numPassingAsserts: 0,
118
+ retryReasons: [],
119
+ status: 'failed',
120
+ title: 'should fail',
121
+ },
122
+ ]
123
+ }
124
+
125
+ return base
126
+ }
127
+
128
+ // Create a minimal AggregatedResult object
129
+ export function createAggregatedResult(
130
+ overrides?: Partial<AggregatedResult>
131
+ ): AggregatedResult {
132
+ return {
133
+ numFailedTestSuites: 0,
134
+ numFailedTests: 0,
135
+ numPassedTestSuites: 1,
136
+ numPassedTests: 1,
137
+ numPendingTestSuites: 0,
138
+ numPendingTests: 0,
139
+ numRuntimeErrorTestSuites: 0,
140
+ numTodoTests: 0,
141
+ numTotalTestSuites: 1,
142
+ numTotalTests: 1,
143
+ openHandles: [],
144
+ runExecError: undefined,
145
+ snapshot: createSnapshotSummary(),
146
+ startTime: Date.now(),
147
+ success: true,
148
+ testResults: [],
149
+ wasInterrupted: false,
150
+ ...overrides,
151
+ }
152
+ }
153
+
154
+ export function createUnhandledError(
155
+ overrides: Partial<{ name: string; message: string; stack: string }> = {}
156
+ ): AggregatedResult['runExecError'] {
157
+ return {
158
+ message: overrides.message ?? 'Cannot find module "./helpers"',
159
+ stack:
160
+ overrides.stack ??
161
+ "Error: Cannot find module './helpers' imported from '/src/example.test.ts'",
162
+ ...(overrides.name && { name: overrides.name }),
163
+ // SerializableError might have additional properties but these are the required ones
164
+ }
165
+ }
166
+
167
+ // Create a module error (testExecError) for import failures
168
+ export function createModuleError(
169
+ overrides: Partial<{
170
+ name: string
171
+ message: string
172
+ stack: string
173
+ type: string
174
+ code: string
175
+ }> = {}
176
+ ): TestResult['testExecError'] {
177
+ return {
178
+ message: overrides.message ?? "Cannot find module './non-existent-module'",
179
+ stack:
180
+ overrides.stack ??
181
+ "Error: Cannot find module './non-existent-module'\n at Resolver.resolveModule",
182
+ ...(overrides.name && { name: overrides.name }),
183
+ ...(overrides.type && { type: overrides.type }),
184
+ ...(overrides.code && { code: overrides.code }),
185
+ }
186
+ }
187
+
188
+ // Create a TestResult with module import error
189
+ export function createTestResultWithModuleError(
190
+ overrides?: Partial<TestResult>
191
+ ): TestResult {
192
+ return createTestResult({
193
+ testExecError: createModuleError(),
194
+ testResults: [], // No test results when module fails to load
195
+ numFailingTests: 0,
196
+ numPassingTests: 0,
197
+ ...overrides,
198
+ })
199
+ }