codeguard-testgen 1.0.14 → 1.0.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. package/README.md +157 -1034
  2. package/dist/ai.d.ts +8 -0
  3. package/dist/ai.d.ts.map +1 -0
  4. package/dist/ai.js +332 -0
  5. package/dist/ai.js.map +1 -0
  6. package/dist/ast.d.ts +8 -0
  7. package/dist/ast.d.ts.map +1 -0
  8. package/dist/ast.js +988 -0
  9. package/dist/ast.js.map +1 -0
  10. package/dist/config.d.ts +4 -0
  11. package/dist/config.d.ts.map +1 -1
  12. package/dist/config.js +4 -0
  13. package/dist/config.js.map +1 -1
  14. package/dist/git.d.ts +18 -0
  15. package/dist/git.d.ts.map +1 -0
  16. package/dist/git.js +208 -0
  17. package/dist/git.js.map +1 -0
  18. package/dist/globals.d.ts +24 -0
  19. package/dist/globals.d.ts.map +1 -0
  20. package/dist/globals.js +40 -0
  21. package/dist/globals.js.map +1 -0
  22. package/dist/index.d.ts +9 -54
  23. package/dist/index.d.ts.map +1 -1
  24. package/dist/index.js +85 -5434
  25. package/dist/index.js.map +1 -1
  26. package/dist/pathResolver.d.ts +12 -0
  27. package/dist/pathResolver.d.ts.map +1 -0
  28. package/dist/pathResolver.js +44 -0
  29. package/dist/pathResolver.js.map +1 -0
  30. package/dist/reviewer.d.ts +13 -0
  31. package/dist/reviewer.d.ts.map +1 -0
  32. package/dist/reviewer.js +402 -0
  33. package/dist/reviewer.js.map +1 -0
  34. package/dist/testGenerator.d.ts +24 -0
  35. package/dist/testGenerator.d.ts.map +1 -0
  36. package/dist/testGenerator.js +1107 -0
  37. package/dist/testGenerator.js.map +1 -0
  38. package/dist/toolDefinitions.d.ts +6 -0
  39. package/dist/toolDefinitions.d.ts.map +1 -0
  40. package/dist/toolDefinitions.js +370 -0
  41. package/dist/toolDefinitions.js.map +1 -0
  42. package/dist/toolHandlers.d.ts +76 -0
  43. package/dist/toolHandlers.d.ts.map +1 -0
  44. package/dist/toolHandlers.js +1430 -0
  45. package/dist/toolHandlers.js.map +1 -0
  46. package/dist/types.d.ts +74 -0
  47. package/dist/types.d.ts.map +1 -0
  48. package/dist/types.js +3 -0
  49. package/dist/types.js.map +1 -0
  50. package/package.json +1 -2
@@ -0,0 +1,1107 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.listFilesRecursive = listFilesRecursive;
4
+ exports.getTestFilePath = getTestFilePath;
5
+ exports.generateTests = generateTests;
6
+ exports.promptUser = promptUser;
7
+ exports.generateTestsForFolder = generateTestsForFolder;
8
+ exports.generateTestForSingleFunction = generateTestForSingleFunction;
9
+ exports.smartValidateTestSuite = smartValidateTestSuite;
10
+ exports.fixFailingTests = fixFailingTests;
11
+ exports.generateTestsForFunctions = generateTestsForFunctions;
12
+ exports.generateTestsForFunction = generateTestsForFunction;
13
+ const fs = require("fs/promises");
14
+ const fsSync = require("fs");
15
+ const path = require("path");
16
+ const readline = require("readline");
17
+ const globals_1 = require("./globals");
18
+ const ast_1 = require("./ast");
19
+ const ai_1 = require("./ai");
20
+ const toolHandlers_1 = require("./toolHandlers");
21
+ const toolDefinitions_1 = require("./toolDefinitions");
22
+ // ---------------------------------------------------------------------------
23
+ // File system utilities (only used internally by test generator)
24
+ // ---------------------------------------------------------------------------
25
// Recursively collect source files under `dir`, honouring the configured
// exclude list and skipping hidden entries, test folders and test files.
// Returns the accumulator array (the same array instance passed in).
async function listFilesRecursive(dir, fileList = []) {
    const entries = await fs.readdir(dir);
    for (const entry of entries) {
        const entryPath = path.join(dir, entry);
        const stats = await fs.stat(entryPath);
        if (stats.isDirectory()) {
            // Skip configured excludes, dot-directories and test directories.
            const skip = globals_1.g.CONFIG.excludeDirs.includes(entry)
                || entry.startsWith('.')
                || ['tests', '__tests__', 'test'].includes(entry);
            if (!skip) {
                await listFilesRecursive(entryPath, fileList);
            }
            continue;
        }
        // Files: keep only configured extensions, excluding test/spec files
        // (*.test.* / *.spec.* with ts/tsx/js/jsx) and hidden files.
        const ext = path.extname(entry);
        const isTestFile = /\.(test|spec)\.(ts|tsx|js|jsx)$/.test(entry);
        if (globals_1.g.CONFIG.extensions.includes(ext) && !isTestFile && !entry.startsWith('.')) {
            fileList.push(entryPath);
        }
    }
    return fileList;
}
57
+ // Generate test file path preserving directory structure
58
// Map a source file path to its corresponding test file path, mirroring the
// source directory layout under CONFIG.testDir. Paths that contain the
// configured source root (default 'src') are re-rooted relative to it.
function getTestFilePath(sourceFile) {
    const testFileName = path.basename(sourceFile).replace(/\.(ts|js)x?$/, '.test.ts');
    const normalizedSource = sourceFile.replace(/\\/g, '/');
    let relativeDir = path.dirname(normalizedSource);
    if (relativeDir.startsWith('./')) {
        relativeDir = relativeDir.slice(2);
    }
    const sourceRoot = globals_1.g.CONFIG.sourceRoot || 'src';
    const rootMarker = `${sourceRoot}/`;
    const markerIndex = normalizedSource.indexOf(rootMarker);
    if (markerIndex !== -1) {
        // Re-root at the first occurrence of "<sourceRoot>/".
        relativeDir = path.dirname(normalizedSource.slice(markerIndex + rootMarker.length));
    }
    // A bare '.' directory means the file sits at the root: place its test
    // directly inside the test directory.
    const segments = relativeDir && relativeDir !== '.'
        ? [globals_1.g.CONFIG.testDir, relativeDir, testFileName]
        : [globals_1.g.CONFIG.testDir, testFileName];
    return path.join(...segments);
}
81
+ // ---------------------------------------------------------------------------
82
+ // Main conversation loop
83
+ // ---------------------------------------------------------------------------
84
// Generate tests for every exported function in `sourceFile`.
// Analyzes the file's AST, collects the names of exported functions and
// delegates to generateTestsForFunctions.
// Throws if AST analysis fails or the file exports no named functions.
// Fixes: removed leftover debug log ('HELLO123') and the redundant immediate
// retry of the same deterministic analyzeFileAST call with identical input.
async function generateTests(sourceFile) {
    const result = (0, ast_1.analyzeFileAST)(sourceFile);
    if (!result.success || !result.analysis || !result.analysis.functions) {
        throw new Error(`File analysis failed. Unable to extract functions from file. Error: ${result.error || 'unknown'}`);
    }
    const exportedFunctions = result.analysis.functions.filter((f) => f.exported);
    const functionNames = exportedFunctions.map((f) => f.name).filter((name) => name);
    if (functionNames.length === 0) {
        throw new Error('No exported functions found in file. Cannot generate tests.');
    }
    return await generateTestsForFunctions(sourceFile, functionNames);
}
100
+ // ---------------------------------------------------------------------------
101
+ // Interactive CLI helpers
102
+ // ---------------------------------------------------------------------------
103
// Prompt the user on stdout with `question` and resolve with the single-line
// answer typed on stdin. A fresh readline interface is created per call and
// closed as soon as the answer arrives.
async function promptUser(question) {
    return new Promise((resolve) => {
        const rl = readline.createInterface({
            input: process.stdin,
            output: process.stdout
        });
        rl.question(question, (answer) => {
            rl.close();
            resolve(answer);
        });
    });
}
115
+ // Get all directories recursively
116
// Recursively gather every directory path under `dir`, skipping any whose
// basename appears in CONFIG.excludeDirs. Returns the accumulator array.
// NOTE: unlike listFilesRecursive, this intentionally does not skip
// dot-directories — only the configured exclude list applies.
async function listDirectories(dir, dirList = []) {
    const entries = await fs.readdir(dir);
    for (const entry of entries) {
        const entryPath = path.join(dir, entry);
        const stats = await fs.stat(entryPath);
        if (!stats.isDirectory()) {
            continue;
        }
        if (globals_1.g.CONFIG.excludeDirs.includes(entry)) {
            continue;
        }
        dirList.push(entryPath);
        await listDirectories(entryPath, dirList);
    }
    return dirList;
}
128
+ // Folder-wise test generation
129
// Interactive folder-wise test generation: lists candidate directories,
// prompts the user to pick one, then generates tests for every source file
// found inside it (asking before regenerating an existing test file).
// Fix: parseInt called without an explicit radix → Number.parseInt(x, 10).
async function generateTestsForFolder() {
    console.log('\n📂 Folder-wise Test Generation\n');
    const directories = await listDirectories('.');
    if (directories.length === 0) {
        console.log('No directories found!');
        return;
    }
    console.log('Select a folder to generate tests for all files:\n');
    directories.forEach((dir, index) => {
        console.log(`${index + 1}. ${dir}`);
    });
    const choice = await promptUser('\nEnter folder number: ');
    // Explicit radix; a non-numeric or out-of-range answer indexes to
    // undefined and is rejected below.
    const selectedDir = directories[Number.parseInt(choice, 10) - 1];
    if (!selectedDir) {
        console.log('Invalid selection!');
        return;
    }
    const files = await listFilesRecursive(selectedDir);
    if (files.length === 0) {
        console.log(`No source files found in ${selectedDir}!`);
        return;
    }
    console.log(`\n📝 Found ${files.length} files to process in ${selectedDir}\n`);
    for (let i = 0; i < files.length; i++) {
        const file = files[i];
        const testFilePath = getTestFilePath(file);
        console.log(`\n[${i + 1}/${files.length}] Processing: ${file}`);
        if (fsSync.existsSync(testFilePath)) {
            const answer = await promptUser(` Test file already exists: ${testFilePath}\n Regenerate? (y/n): `);
            if (answer.toLowerCase() !== 'y') {
                console.log(' Skipped.');
                continue;
            }
        }
        try {
            await generateTests(file);
            console.log(` ✅ Completed: ${testFilePath}`);
        }
        catch (error) {
            console.error(` ❌ Failed: ${error.message}`);
        }
    }
    console.log(`\n✨ Folder processing complete! Processed ${files.length} files.`);
}
173
+ // ---------------------------------------------------------------------------
174
+ // Function-wise test generation (core loop)
175
+ // ---------------------------------------------------------------------------
176
+ /**
177
+ * Generate tests for a single function
178
+ * @returns true if tests passed, false if legitimate failure reported
179
+ */
180
+ async function generateTestForSingleFunction(sourceFile, functionName, testFilePath, testFileExists) {
181
+ globals_1.g.EXPECTED_TEST_FILE_PATH = testFilePath;
182
+ const functionAST = (0, ast_1.getFunctionAST)(sourceFile, functionName);
183
+ const testEnv = globals_1.g.CONFIG.testEnv;
184
+ let messages;
185
+ if (testEnv == "vitest") {
186
+ messages = [
187
+ {
188
+ role: "user",
189
+ content: `You are a senior developer generating production-ready Vitest unit tests for TypeScript.
190
+
191
+ ## TARGET
192
+ Function: ${functionName} | Source: ${sourceFile}
193
+ Test File: ${testFilePath} (Exists: ${testFileExists})
194
+
195
+ ## WORKFLOW
196
+
197
+ ### 1. ANALYSIS (Execute in order)
198
+ 1. get_function_ast(${sourceFile}, "${functionName}") → implementation
199
+ 2. get_imports_ast(${sourceFile}) → trace ALL dependencies
200
+ 3. get_file_preamble(${testFilePath}) → existing mocks/imports (if exists)
201
+ 4. For each dependency: find_file() → get_function_ast() → understand behavior
202
+ 5. calculate_relative_path(from: ${testFilePath}, to: import_path) → all imports
203
+
204
+ **Dependency Tracing (CRITICAL):**
205
+ - Map EVERY function call to its import source (verify with get_imports_ast)
206
+ - Export ALL used functions from each vi.mock block
207
+ - Set return values in beforeEach for ALL mocked functions
208
+ - Auto-detect required named exports from AST to prevent "X is not defined"
209
+ - For each dependency: get_function_ast() → note its **returnType** → use that type for mock return values
210
+ - If returnType is a custom type/interface: get_type_definitions() → build mock matching the exact shape
211
+
212
+ ### 2. FILE STRUCTURE (STRICT ORDER)
213
+
214
+
215
+ // 1. MOCKS (before ANY imports)
216
+ vi.mock('module-path');
217
+ vi.mock('database/index', () => ({ db: { query: vi.fn() } }));
218
+ vi.mock('env', () => ({ _ENV: { KEY: 'test' } }));
219
+
220
+ // 2. IMPORTS
221
+ import { describe, it, expect, beforeEach, vi } from 'vitest';
222
+ import type { TypeOnly } from './types';
223
+ import { ${functionName} } from 'calculated-path';
224
+ import { dependency } from 'dependency-path';
225
+
226
+ // 3. TYPED MOCKS
227
+ const mockDep = vi.mocked(dependency);
228
+
229
+ // 4. TESTS
230
+ describe('${functionName}', () => {
231
+ beforeEach(() => {
232
+ // clearMocks: true in config handles cleanup
233
+ mockDep.mockResolvedValue(defaultValue);
234
+ });
235
+
236
+ it('should_behavior_when_condition', async () => {
237
+ // ARRANGE
238
+ const input = { id: 1 };
239
+ mockDep.mockResolvedValueOnce(specificValue);
240
+
241
+ // ACT
242
+ const result = await ${functionName}(input);
243
+
244
+ // ASSERT
245
+ expect(result).toEqual(expected);
246
+ expect(mockDep).toHaveBeenCalledWith(input);
247
+ expect(mockDep).toHaveBeenCalledTimes(1);
248
+ });
249
+ });
250
+
251
+
252
+ ### 3. COVERAGE (Minimum 5 tests)
253
+
254
+ 1. **Happy Path** (1-2): Valid inputs → expected outputs
255
+ 2. **Edge Cases** (2-3): Empty/null/undefined/0/boundaries/special chars
256
+ 3. **Error Handling** (1-2): Invalid inputs, dependency failures
257
+ - Sync: expect(() => fn()).toThrow(ErrorClass)
258
+ - Async: await expect(fn()).rejects.toThrow(ErrorClass)
259
+ 4. **Branch Coverage**: Each conditional path tested
260
+ 5. **Async Behavior**: Promise resolution/rejection (if applicable)
261
+
262
+ ### 4. MOCK STRATEGY
263
+
264
+ **ALWAYS Mock:**
265
+ - External modules (fs, http, database)
266
+ - Modules with side effects (logging, analytics)
267
+ - database/index, env, WinstonLogger
268
+
269
+ **NOT Mocking:**
270
+ - Pure utility functions from same codebase (test integration)
271
+ - Type imports: import type { X } (NEVER mock)
272
+
273
+ **Mock Pattern:**
274
+ - Module-level: vi.mock('path') → const mockFn = vi.mocked(importedFn)
275
+ - NEVER: vi.spyOn(exports, 'fn') or global wrappers
276
+ - Hoist mock functions for use in both factory and tests
277
+
278
+ ### 4.1 MOCK RETURN TYPE SAFETY (CRITICAL)
279
+
280
+ When setting up mock return values, you MUST match the dependency's declared return type:
281
+
282
+ **Step**: For EVERY mocked function, check its return type via get_function_ast before setting mockResolvedValue/mockReturnValue.
283
+
284
+ **Type → Mock Value Rules:**
285
+ | Return Type | Correct Mock | WRONG Mock |
286
+ |-------------|-------------|------------|
287
+ | \`boolean\` | \`true\` or \`false\` | \`{ success: true }\` |
288
+ | \`Promise<boolean>\` | \`mockResolvedValue(true)\` | \`mockResolvedValue({ ok: true })\` |
289
+ | \`string\` | \`'some-string'\` | \`{ value: 'str' }\` |
290
+ | \`number\` | \`42\` | \`{ count: 42 }\` |
291
+ | \`void\` / \`undefined\` | \`mockResolvedValue(undefined)\` | \`mockResolvedValue({ done: true })\` |
292
+ | \`Promise<SomeType>\` | \`mockResolvedValue(objectMatchingSomeType)\` | Primitive or wrong shape |
293
+ | \`SomeType\` (interface/type) | Use \`get_type_definitions\` to build a matching object | Guessed object shape |
294
+
295
+ **Process for each mock:**
296
+ 1. Identify the dependency function being mocked
297
+ 2. Use \`get_function_ast\` on the dependency to get its \`returnType\`
298
+ 3. If return type is a custom type/interface, use \`get_type_definitions\` to get the full shape
299
+ 4. Set mock value that EXACTLY matches the return type
300
+ 5. For \`Promise<T>\`, use \`mockResolvedValue(valueOfTypeT)\`; for plain \`T\`, use \`mockReturnValue(valueOfTypeT)\`
301
+
302
+ **NEVER guess mock return values. ALWAYS verify against the dependency's declared return type.**
303
+
304
+ ### 5. ASSERTIONS
305
+
306
+ **Priority Order:**
307
+ 1. **Exact**: toEqual(), toBe() for primitives
308
+ 2. **Partial**: toMatchObject() for subset matching
309
+ 3. **Structural**: expect.objectContaining(), expect.any(Type)
310
+ 4. **Specific Checks**: toBeDefined(), toBeNull(), toHaveLength(n)
311
+ 5. **Mock Verification**: toHaveBeenCalledWith(), toHaveBeenCalledTimes()
312
+
313
+ **NEVER:**
314
+ - toBeTruthy() for object existence (use toBeDefined())
315
+ - Snapshots for dates, random values, or as primary assertions
316
+
317
+ ### 6. CRITICAL RULES
318
+
319
+ **MUST:**
320
+ - ✅ ALL vi.mock() before imports
321
+ - ✅ Use calculate_relative_path for ALL imports
322
+ - ✅ Test file exists: ${testFileExists} - if the test file exists, always check the mocks and imports already present in the test file, using the get_file_preamble tool. Make sure you do not duplicate mocks, and that mocks and imports are added at the correct position.
323
+ - ✅ When editing existing file: UPDATE existing vi.mock, NEVER duplicate
324
+ - ✅ Test names: should_behavior_when_condition
325
+ - ✅ AAA pattern with comments
326
+ - ✅ Each test = one behavior
327
+ - ✅ Import types separately: import type { Config }
328
+ - ✅ Use vi.mocked<typeof module>() for full type inference
329
+ - ✅ Mock internal non-exported functions
330
+ - ✅ Use vi.useFakeTimers() for time-dependent tests
331
+ - ✅ Test cases expectations must match source code
332
+ - ✅ When making new test cases for a function make sure that other test function are not affected by the new test cases.
333
+ - ✅ You are allowed to read vitest or jest config file to understand the configuration and the rules, and generate tests accordingly.
334
+ - ✅ When adding new test cases for a function, make sure you add the imports and mocks at the correct position. Proper order as mentioned above should always be maintained.
335
+ - ✅ Generated test cases must be meaningful for production usage and easily understandable by developers.
336
+
337
+
338
+ **NEVER:**
339
+ - ❌ Mock after imports
340
+ - ❌ Shared state between tests
341
+ - ❌ Multiple behaviors in one test
342
+ - ❌ Generic test names ("test1", "works")
343
+ - ❌ Manual cleanup (vi.clearAllMocks in tests - config handles it)
344
+ - ❌ Environment dependencies without mocks
345
+ - ❌ Use require() (ES imports only)
346
+ - ❌ Reference function from wrong module (verify with get_imports_ast)
347
+ - ❌ Change existing mocks in ways that break other tests
348
+ - ❌ Modify source file or any other file in the codebase.
349
+
350
+
351
+ ### 6.5. PLAN BEFORE WRITING (MANDATORY)
352
+ Before writing any test code, insert a planning comment at the top of the describe block:
353
+
354
+ \`\`\`typescript
355
+ describe('${functionName}', () => {
356
+ // This function: [one-sentence summary of what it does].
357
+ // It calls: [list every direct dependency/function call].
358
+ // I will only mock: [list exactly which deps to mock and why].
359
+ // Edge cases to cover: [list specific edge cases from the code].
360
+ ...
361
+ });
362
+ \`\`\`
363
+
364
+ This forces you to reason about the function before writing tests.
365
+ After all tests pass, use search_replace_block to DELETE the planning comment from the final test file. [MANDATORY]
366
+
367
+
368
+ ### 7. EXECUTION
369
+
370
+ **Step 1: Add imports/mocks (if test file already exists)**
371
+ Use insert_at_position to add any NEW imports or mocks that don't already exist:
372
+ - insert_at_position({ position: 'before_imports', content: "vi.mock('new-module');" })
373
+ - insert_at_position({ position: 'after_imports', content: "import { dep } from './dep';" })
374
+
375
+ **Step 2: Write the describe block**
376
+ upsert_function_tests({
377
+ test_file_path: "${testFilePath}",
378
+ function_name: "${functionName}",
379
+ new_test_content: "describe('${functionName}', () => { ... })"
380
+ });
381
+
382
+ ⚠️ CRITICAL: new_test_content must contain ONLY the describe block for this function.
383
+ Do NOT include imports, vi.mock() calls, or other functions' describe blocks in new_test_content.
384
+ If you need to add imports or mocks, use insert_at_position BEFORE calling upsert_function_tests.
385
+
386
+ **Step 3: Run tests**
387
+ run_tests({
388
+ test_file_path: "${testFilePath}",
389
+ function_names: ["${functionName}"]
390
+ });
391
+
392
+
393
+ ### 8. FAILURE DEBUGGING
394
+
395
+ **Import Errors:**
396
+ - Recalculate paths with calculate_relative_path
397
+ - Check barrel exports (index.ts redirects)
398
+ - Verify source file exports function
399
+
400
+ **Mock Errors:**
401
+ - Add missing vi.mock() at top
402
+ - Ensure all used functions exported from mock
403
+ - Verify mock setup in beforeEach
404
+
405
+ **Type Errors:**
406
+ - Import types with import type
407
+ - Check mock return types match signatures
408
+ - Use proper generic constraints
409
+ - Mock return type mismatch: Re-check dependency's returnType via get_function_ast, fix mockResolvedValue/mockReturnValue to match the declared return type exactly
410
+
411
+ **Assertion Failures:**
412
+ - Log mock calls: console.log(mockFn.mock.calls)
413
+ - Check execution path (add temp logs in source)
414
+ - Verify input data types
415
+ - Check async/await usage
416
+ - Validate prerequisite mocks return expected values
417
+
418
+ **If tests fail, categorize:**
419
+
420
+ **[MUST] FIXABLE** → Fix these:
421
+ | Error | Fix Method |
422
+ |-------|-----------|
423
+ | Wrong imports | find_file(fileName) to get the file path + calculate_relative_path + search_replace_block |
424
+ | Missing mocks | insert_at_position |
425
+ | Syntax errors | search_replace_block (3-5 lines context) |
426
+ | Mock pollution | Fix beforeEach pattern |
427
+ | "Test suite failed to run" | get_file_preamble + fix imports/mocks |
428
+ | "Cannot find module" | calculate_relative_path |
429
+
430
+ **Process:**
431
+ 1. Read FULL error message
432
+ 2. Identify error type (import/mock/assertion/type)
433
+ 3. Make focused fix using available tools
434
+ 4. Iterate until ALL PASS
435
+
436
+ ### 9. QUALITY CHECKLIST
437
+ - [ ] Independent tests (no execution order dependency)
438
+ - [ ] Fast (<100ms per test, no real I/O)
439
+ - [ ] Readable (AAA, descriptive names)
440
+ - [ ] Focused (one behavior per test)
441
+ - [ ] Deterministic (same input = same output)
442
+ - [ ] Type-safe (no any, proper generics)
443
+ - [ ] Complete coverage (all paths tested)
444
+ - [ ] No duplicate declarations
445
+ - [ ] Existing tests still pass
446
+ - [ ] Planning comment is removed from the top of the describe block, after all tests pass.
447
+
448
+ ## START
449
+ gather context → **write tests immediately** → verify → run → fix → complete.
450
+
451
+ **[CRITICAL]** NEVER change existing mocks such that other tests fail. Use test-specific overrides with mockReturnValueOnce.
452
+ You must be efficient and fast in your approach, do not overthink the problem.
453
+
454
+ **PRE COMPUTED FUNCTION AST **
455
+ ${JSON.stringify(functionAST)}
456
+ `
457
+ },
458
+ ];
459
+ }
460
+ else {
461
+ messages = [
462
+ {
463
+ role: "user",
464
+ content: `You are an expert software test engineer. Generate comprehensive Jest unit tests for: ${functionName} in ${sourceFile}.
465
+ [Critical] Be prompt and efficient in your response. Make sure the test case file is typed and complete. Be as fast as possible in your response.
466
+ [Critical] You cannot remove or modify the existing mocks and imports in the test file since other test may be using it. You can only add new mocks and imports for the new test cases.
467
+
468
+ ## CONTEXT
469
+ Test file: ${testFilePath} | Exists: ${testFileExists}
470
+
471
+ ⚠️ CRITICAL: You MUST use this EXACT test file path: ${testFilePath}
472
+
473
+ ---
474
+
475
+ ## EXECUTION PLAN
476
+
477
+ **Phase 1: Deep Analysis**
478
+ \`\`\`
479
+ 1. For each dependency:
480
+ - Same file: get_function_ast(${sourceFile},{functionName}) -> implementation + dependencies
481
+ - Other file [Can take reference from the imports of the ${sourceFile} file for the file name that has the required function]: find_file(filename) to get file path -> get_function_ast({file_path},{functionName}) + check for external calls
482
+ 2. get_imports_ast → all dependencies
483
+ 3. calculate_relative_path(from: ${testFilePath}, to: import_path) → all imports, accepts multiple comma separated 'to' paths. Use exact path returned by this tool for all imports.
484
+ 4. get_file_preamble → imports and mocks already declared in the file
485
+ 5. search_codebase → look for relevant context in codebase.
486
+ \`\`\`
487
+
488
+ **Phase 1.1: Execution Path Tracing (CRITICAL FOR SUCCESS)**
489
+ *Before writing tests, map the logic requirements for external calls.*
490
+ 1. Identify every external call (e.g., \`analyticsHelper.postEvent\`).
491
+ 2. Trace backwards: What \`if\`, \`switch\`, or \`try/catch\` block guards this call?
492
+ 3. Identify the dependency that controls that guard.
493
+ 4. Plan the Mock Return: Determine exactly what value the dependency must return to enter that block.
494
+ 5. For each dependency: get_function_ast() → note its **returnType** → use that type for mock return values.
495
+ 6. If returnType is a custom type/interface: get_type_definitions() → build mock matching the exact shape.
496
+
497
+ **Phase 2: Test Generation**
498
+
499
+ Mock Pattern (CRITICAL - Top of file):
500
+ \`\`\`typescript
501
+ // ===== MOCKS (BEFORE IMPORTS) =====
502
+ jest.mock('config', () => ({
503
+ get: (key: string) => ({
504
+ AUTH: { JWT_KEY: 'test', COOKIE_DATA_ONE_YEAR: 31536000000 },
505
+ USER_DEL_SECRET: 'secret'
506
+ })
507
+ }), { virtual: true });
508
+
509
+ // virtual:true ONLY for config, db, models, routes, services, axios, newrelic, GOOGLE_CLOUD_STORAGE, winston, logger, etc.
510
+
511
+ jest.mock('../helpers/dependency'); // NO virtual:true for regular modules
512
+
513
+ // ===== IMPORTS =====
514
+ import { functionName } from '../controller';
515
+ import { dependencyMethod } from '../helpers/dependency';
516
+
517
+ // ===== TYPED MOCKS =====
518
+ const mockDependencyMethod = dependencyMethod as jest.MockedFunction<typeof dependencyMethod>;
519
+
520
+ \`\`\`
521
+
522
+ Requirements (5+ tests minimum):
523
+ - ✅ Happy path
524
+ - 🔸 Edge cases (null, undefined, empty)
525
+ - ❌ Error conditions
526
+ - ⏱️ Async behavior
527
+ - 🔍 API null/undefined handling
528
+
529
+ /**
530
+ * Phase 3: Anti-Pollution Pattern (MUST FOLLOW EXACTLY THIS PATTERN, NO VARIATIONS)
531
+ */
532
+
533
+ \`\`\`typescript
534
+ // ===== GLOBAL CLEANUP (Near top, outside describe blocks) =====
535
+ afterEach(() => {
536
+ jest.restoreAllMocks(); // Automatically restores ALL spies
537
+ });
538
+
539
+ // ===== TESTS =====
540
+ describe('functionName', () => {
541
+ beforeEach(() => {
542
+ jest.resetAllMocks(); // Resets ALL mocks (call history + implementations)
543
+
544
+ // Set fresh defaults for THIS describe block only
545
+ mockDep1.mockResolvedValue({ status: 'success' });
546
+ mockDep2.mockReturnValue(true);
547
+ });
548
+
549
+ test('happy path', async () => {
550
+ mockDep1.mockResolvedValueOnce({ id: 123 }); // Override for this test only
551
+
552
+ const result = await functionName();
553
+
554
+ expect(result).toEqual({ id: 123 });
555
+ expect(mockDep1).toHaveBeenCalledWith(expect.objectContaining({ param: 'value' }));
556
+ });
557
+
558
+ test('error case', async () => {
559
+ mockDep1.mockRejectedValueOnce(new Error('fail'));
560
+ await expect(functionName()).rejects.toThrow('fail');
561
+ });
562
+ });
563
+
564
+ // ===== INTERNAL SPIES (When testing same-file function calls) =====
565
+ describe('functionWithInternalCalls', () => {
566
+ let internalFnSpy: jest.SpyInstance;
567
+
568
+ beforeEach(() => {
569
+ jest.resetAllMocks();
570
+
571
+ // ✅ EXCEPTION: require() needed here for spying on same module
572
+ const controller = require('../controller');
573
+ internalFnSpy = jest.spyOn(controller, 'internalFunction').mockResolvedValue(undefined);
574
+ });
575
+
576
+ // No manual restore needed - global afterEach handles it
577
+
578
+ test('calls internal function', async () => {
579
+ await functionWithInternalCalls();
580
+ expect(internalFnSpy).toHaveBeenCalled();
581
+ });
582
+ });
583
+ \`\`\`
584
+
585
+ ### CRITICAL RULES (Prevent Mock Pollution):
586
+ **DO ✅**
587
+ 1. \`jest.resetAllMocks()\` as FIRST line in every \`beforeEach()\` (not clearAllMocks)
588
+ 2. Global \`afterEach(() => jest.restoreAllMocks())\` near top of test file
589
+ 3. Set mock defaults in each \`describe\` block's \`beforeEach()\` independently
590
+ 4. Override with \`mockResolvedValueOnce/mockReturnValueOnce\` in individual tests
591
+ 5. Type all mocks: \`const mockFn = fn as jest.MockedFunction<typeof fn>\`
592
+ 6. All \`jest.mock()\` at top before imports (use calculate_relative_path for paths)
593
+ 7. Check for existing mocks with \`get_file_preamble\` tool before adding duplicates
594
+
595
+ **DON'T ❌**
596
+ 1. Use \`jest.clearAllMocks()\` (only clears history, not implementations) → Use \`resetAllMocks()\`
597
+ 2. Manually \`.mockReset()\` individual mocks → \`resetAllMocks()\` handles all
598
+ 3. Share mock state between \`describe\` blocks → Each block sets its own defaults
599
+ 4. Use \`require()\` except when creating spies on same module being tested
600
+ 5. Use \`virtual:true\` for regular files → Only for: config, db, models, services (modules not in filesystem)
601
+ 6. Forget global \`afterEach(() => jest.restoreAllMocks())\` → Causes spy pollution
602
+
603
+ ### Phase 3.5: Plan Before Writing (MANDATORY)
604
+ Before writing any test code, insert a planning comment at the top of the describe block:
605
+
606
+ \`\`\`typescript
607
+ describe('${functionName}', () => {
608
+ // This function: [one-sentence summary of what it does].
609
+ // It calls: [list every direct dependency/function call].
610
+ // I will only mock: [list exactly which deps to mock and why].
611
+ // Edge cases to cover: [list specific edge cases from the code].
612
+ ...
613
+ });
614
+ \`\`\`
615
+
616
+ This forces you to reason about the function before writing tests.
617
+ After all tests pass, use search_replace_block to DELETE the planning comment from the final test file.
618
+
619
+ ### Phase 4: Write Tests
620
+
621
+ **Step 1: Add imports/mocks (if test file already exists)**
622
+ Use insert_at_position to add any NEW imports or mocks that don't already exist:
623
+ - insert_at_position({ position: 'before_imports', content: "jest.mock('new-module');" })
624
+ - insert_at_position({ position: 'after_imports', content: "import { dep } from './dep';" })
625
+
626
+ **Step 2: Write the describe block**
627
+ upsert_function_tests({
628
+ test_file_path: "${testFilePath}",
629
+ function_name: "${functionName}",
630
+ new_test_content: "describe('${functionName}', () => { ... })"
631
+ });
632
+
633
+ ⚠️ CRITICAL: new_test_content must contain ONLY the describe block for this function.
634
+ Do NOT include imports, jest.mock() calls, or other functions' describe blocks in new_test_content.
635
+ If you need to add imports or mocks, use insert_at_position BEFORE calling upsert_function_tests.
636
+
637
+ **Step 3: Run tests**
638
+ run_tests({ test_file_path: "${testFilePath}", function_names: ["${functionName}"] });
639
+
640
+
641
+ ### MOCK RETURN TYPE SAFETY (CRITICAL)
642
+
643
+ When setting up mock return values, you MUST match the dependency's declared return type:
644
+
645
+ **Step**: For EVERY mocked function, check its return type via get_function_ast before setting mockResolvedValue/mockReturnValue.
646
+
647
+ **Type → Mock Value Rules:**
648
+ | Return Type | Correct Mock | WRONG Mock |
649
+ |-------------|-------------|------------|
650
+ | \`boolean\` | \`true\` or \`false\` | \`{ success: true }\` |
651
+ | \`Promise<boolean>\` | \`mockResolvedValue(true)\` | \`mockResolvedValue({ ok: true })\` |
652
+ | \`string\` | \`'some-string'\` | \`{ value: 'str' }\` |
653
+ | \`number\` | \`42\` | \`{ count: 42 }\` |
654
+ | \`void\` / \`undefined\` | \`mockResolvedValue(undefined)\` | \`mockResolvedValue({ done: true })\` |
655
+ | \`Promise<SomeType>\` | \`mockResolvedValue(objectMatchingSomeType)\` | Primitive or wrong shape |
656
+ | \`SomeType\` (interface/type) | Use \`get_type_definitions\` to build a matching object | Guessed object shape |
657
+
658
+ **Process for each mock:**
659
+ 1. Identify the dependency function being mocked
660
+ 2. Use \`get_function_ast\` on the dependency to get its \`returnType\`
661
+ 3. If return type is a custom type/interface, use \`get_type_definitions\` to get the full shape
662
+ 4. Set mock value that EXACTLY matches the return type
663
+ 5. For \`Promise<T>\`, use \`mockResolvedValue(valueOfTypeT)\`; for plain \`T\`, use \`mockReturnValue(valueOfTypeT)\`
664
+
665
+ **NEVER guess mock return values. ALWAYS verify against the dependency's declared return type.**
666
+
667
+ **Phase 4: Write Tests**
668
+ ⚠️ CRITICAL REQUIREMENT: Use EXACTLY this test file path: "${testFilePath}"
669
+ DO NOT modify the path. DO NOT create ${functionName}.test.ts or any other variation.
670
+
671
+ If the test file already exists, first add any NEW imports/mocks using insert_at_position:
672
+ - insert_at_position({ position: 'before_imports', content: "vi.mock('new-module');" })
673
+ - insert_at_position({ position: 'after_imports', content: "import { dep } from './dep';" })
674
+
675
+ Then write ONLY the describe block:
676
+ → upsert_function_tests({
677
+ test_file_path: "${testFilePath}", // ⚠️ USE THIS EXACT PATH - DO NOT CHANGE!
678
+ function_name: "${functionName}",
679
+ new_test_content: "describe('${functionName}', () => {...})" // ⚠️ ONLY the describe block!
680
+ })
681
+
682
+ ⚠️ new_test_content must contain ONLY the describe('${functionName}', ...) block.
683
+ Do NOT include imports, vi.mock() calls, or other functions' tests in new_test_content.
684
+ This will automatically replace existing tests for the function or append if not found.
685
+ All functions from the same source file MUST share the same test file.
686
+
687
+
688
+
689
+ ## PHASE 5: SELF-REVIEW (Before Running Tests)
690
+
691
+ **Review Checklist:**
692
+ 1. ✅ All jest.mock() calls at top of file (before imports)?
693
+ 2. ✅ Used calculate_relative_path for all mock paths?
694
+ 3. ✅ Functions imported directly (not loaded via require)?
695
+ 4. ✅ Mocks typed with \`as jest.MockedFunction<typeof fn>\`?
696
+ 5. ✅ beforeEach() has jest.clearAllMocks() as FIRST line?
697
+ 6. ✅ Each describe block sets its own default mock values in beforeEach()?
698
+ 7. ✅ Used mockResolvedValueOnce/mockReturnValueOnce for test overrides?
699
+ 8. ✅ At least 5 test cases (happy/error/edge/async/null)?
700
+ 9. ✅ All async functions use async/await in tests?
701
+ 10. ✅ Spies on internal functions restored in afterEach()?
702
+ 11. ✅ No re-requiring modules or checking if mocks exist?
703
+ 12. ✅ No TypeScript type errors in test code?
704
+ 13. ✅ Generated test cases are meaningful for production usage and easily understandable by developers.
705
+ 14. ✅ Phase 3.5: Plan Before Writing (MANDATORY) is followed.
706
+ 15. ✅ Planning comment is present at the top of the describe block.
707
+
708
+
709
+ **Phase 6: Tests - Run & Fix - Loop**
710
+
711
+ 1️⃣ Run: \`run_tests({ test_file_path: "${testFilePath}", function_names: ["${functionName}"] })\`
712
+
713
+ 2️⃣ If fails, categorize:
714
+
715
+ **[MUST] FIXABLE** → Fix these:
716
+ | Error | Fix Method |
717
+ |-------|-----------|
718
+ | Wrong imports | find_file(fileName) to get the file path + calculate_relative_path + search_replace_block |
719
+ | Missing mocks | insert_at_position |
720
+ | Syntax errors | search_replace_block (3-5 lines context) |
721
+ | Mock pollution | Fix beforeEach pattern |
722
+ | "Test suite failed to run" | get_file_preamble + fix imports/mocks |
723
+ | "Cannot find module" | calculate_relative_path |
724
+ | Mock return type mismatch | Re-check dependency's returnType via get_function_ast, fix mockResolvedValue/mockReturnValue to match the declared return type exactly |
725
+
726
+
727
+ 3️⃣ Repeat until: ✅ All test cases pass
728
+
729
+ ---
730
+
731
+ ## CRITICAL REMINDERS
732
+
733
+ - Use calculate_relative_path for ALL jest.mock() paths
734
+ - virtual:true ONLY for: db, config, models, routes, index, services, axios, newrelic, GOOGLE_CLOUD_STORAGE
735
+ - search_replace_block preferred (handles whitespace)
736
+ - Ensure test independence (no pollution)
737
+ - Fix test bugs, report source bugs
738
+ - [CRITICAL] Each test suite should be completely self-contained and not depend on or affect any other test suite's state.
739
+ - Test file exists: ${testFileExists} - if the test file exist, always check the mock and imports already present in the test file, using get_file_preamble tool. Make sure you do not duplicate mocks and mocks and imports are added at correct position.
740
+ - Mocking of winston logger or any other external dependeny is critical and mandatory.
741
+ - Use search_codebase tool to look for relevant context in codebase quickly.
742
+
743
+ **START:** Analyze deeply and write test cases only when you are sure about the function and the dependencies. Make sure the written test cases run and pass on first attempt.
744
+
745
+ **PRE COMPUTED FUNCTION AST **
746
+ ${JSON.stringify(functionAST)}
747
+ `,
748
+ },
749
+ ];
750
+ }
751
+ let iterations = 0;
752
+ const maxIterations = 100;
753
+ let testFileWritten = false;
754
+ let allToolResults = [];
755
+ let legitimateFailureReported = false;
756
+ let lastTestError = '';
757
+ let sameErrorCount = 0;
758
+ while (iterations < maxIterations) {
759
+ iterations++;
760
+ if (iterations === 1) {
761
+ (0, globals_1.updateSpinner)('🤖 AI is analyzing selected functions...');
762
+ }
763
+ else if (iterations % 5 === 0) {
764
+ if (globals_1.g.globalSpinner) {
765
+ globals_1.g.globalSpinner.text = `🤖 AI is still working (step ${iterations})...`;
766
+ }
767
+ }
768
+ const response = await (0, ai_1.callAI)(messages, toolDefinitions_1.TOOLS_FOR_TEST_GENERATION);
769
+ if (response.content) {
770
+ const content = response.content;
771
+ const excusePatterns = [
772
+ /unable to proceed/i,
773
+ /cannot directly/i,
774
+ /constrained by/i,
775
+ /simulated environment/i,
776
+ /limited to providing/i,
777
+ /beyond my capabilities/i,
778
+ /can't execute/i
779
+ ];
780
+ const isMakingExcuses = excusePatterns.some(pattern => typeof content === 'string' && pattern.test(content));
781
+ if (isMakingExcuses) {
782
+ console.log('\n⚠️ AI is making excuses! Forcing it to use tools...');
783
+ messages.push({
784
+ role: 'user',
785
+ content: 'STOP making excuses! You CAN use the tools. Use upsert_function_tests tool NOW to write the test cases for the function.'
786
+ });
787
+ continue;
788
+ }
789
+ messages.push({ role: 'assistant', content });
790
+ }
791
+ if (!response.toolCalls || response.toolCalls.length === 0) {
792
+ const lastTestRun = allToolResults[allToolResults.length - 1];
793
+ const testsActuallyPassed = lastTestRun?.name === 'run_tests' && lastTestRun?.result?.passed;
794
+ if (legitimateFailureReported) {
795
+ console.log('\n✅ Test generation complete (with legitimate failures reported)');
796
+ break;
797
+ }
798
+ if (testFileWritten && testsActuallyPassed) {
799
+ console.log('\n✅ Test generation complete!');
800
+ break;
801
+ }
802
+ console.log('\n⚠️ No tool calls. Prompting AI to continue...');
803
+ if (!testFileWritten) {
804
+ messages.push({
805
+ role: 'user',
806
+ content: `🚨 STOP TALKING! Use upsert_function_tests tool NOW for: ${functionName}
807
+
808
+ Example:
809
+ upsert_function_tests({
810
+ test_file_path: "${testFilePath}",
811
+ function_name: "${functionName}",
812
+ new_test_content: "describe('${functionName}', () => { test('should...', () => { ... }) })"
813
+ })
814
+
815
+ ⚠️ new_test_content must ONLY contain the describe block - NO imports or mocks!
816
+ Use insert_at_position separately for imports/mocks if needed.
817
+ This works for both NEW and EXISTING test files!`
818
+ });
819
+ }
820
+ else {
821
+ messages.push({
822
+ role: 'user',
823
+ content: `STOP talking and USE TOOLS NOW!
824
+
825
+ ✅ PRIMARY: Use search_replace_block (RECOMMENDED):
826
+ 1. Include 3-5 lines of context around the code to fix
827
+ 2. Replace with corrected version
828
+ 3. Handles whitespace/indentation automatically!
829
+ 4. Then run_tests to verify
830
+
831
+ 📌 ALTERNATIVE: Use insert_at_position for adding imports/mocks
832
+ - insert_at_position({ position: 'after_imports', content: "vi.mock('../module');" })
833
+
834
+ ⚠️ SECONDARY: Use upsert_function_tests for function-level rewrites
835
+
836
+ Start NOW with search_replace_block or insert_at_position!`
837
+ });
838
+ }
839
+ continue;
840
+ }
841
+ const toolResults = [];
842
+ for (const toolCall of response.toolCalls) {
843
+ // console.log('toolCall', toolCall);
844
+ const result = await (0, toolHandlers_1.executeTool)(toolCall.name, toolCall.input);
845
+ // console.log('result', result);
846
+ const toolResult = {
847
+ id: toolCall.id,
848
+ name: toolCall.name,
849
+ result
850
+ };
851
+ toolResults.push(toolResult);
852
+ allToolResults.push(toolResult);
853
+ if (toolCall.name === 'report_legitimate_failure' && result.success) {
854
+ legitimateFailureReported = true;
855
+ console.log('\n✅ Legitimate failure acknowledged. Stopping test fixes.');
856
+ console.log(` Recommendation: ${result.recommendation}`);
857
+ }
858
+ if (toolCall.name === 'run_tests' && !result.success) {
859
+ const errorOutput = result.output || result.error || '';
860
+ const currentError = errorOutput.substring(0, 300);
861
+ if (currentError === lastTestError) {
862
+ sameErrorCount++;
863
+ if (sameErrorCount >= 3) {
864
+ messages.push({
865
+ role: 'user',
866
+ content: `The same test error has occurred ${sameErrorCount} times in a row!
867
+ Make focused attempt to fix the tests using the tools available.`
868
+ });
869
+ }
870
+ }
871
+ else {
872
+ lastTestError = currentError;
873
+ sameErrorCount = 1;
874
+ }
875
+ }
876
+ if (toolCall.name === 'upsert_function_tests') {
877
+ if (result.success) {
878
+ testFileWritten = true;
879
+ messages.push({
880
+ role: 'user',
881
+ content: `Test files are written successfully. Please use run_tests tool to verify the tests. If the tests fail, please make focused attempts to fix the tests using the tools available.`
882
+ });
883
+ }
884
+ }
885
+ }
886
+ // Add tool results to conversation based on provider
887
+ (0, ai_1.addToolResultsToMessages)(globals_1.g.CONFIG.aiProvider, messages, response.toolCalls, toolResults);
888
+ if (legitimateFailureReported) {
889
+ console.log('\n✅ Stopping iteration: Legitimate failure reported.');
890
+ break;
891
+ }
892
+ const testRun = toolResults.find(tr => tr.name === 'run_tests');
893
+ if (testRun?.result.passed) {
894
+ console.log('\n🎉 All tests passed!');
895
+ break;
896
+ }
897
+ }
898
+ if (iterations >= maxIterations) {
899
+ console.log('\n⚠️ Reached maximum iterations. Tests may not be complete.');
900
+ }
901
+ if (legitimateFailureReported) {
902
+ console.log('\n📋 Test file updated with legitimate failures documented.');
903
+ console.log(' These failures indicate bugs in the source code that need to be fixed.');
904
+ }
905
+ globals_1.g.EXPECTED_TEST_FILE_PATH = null;
906
+ const testRuns = allToolResults.filter(tr => tr.name === 'run_tests');
907
+ const lastTestRun = testRuns.length > 0 ? testRuns[testRuns.length - 1] : null;
908
+ return !legitimateFailureReported && (lastTestRun?.result?.passed || false);
909
+ }
910
/**
 * Runs the full test suite for a source file's test file and, when failures
 * occur, hands the failing tests off to the AI-driven fixer.
 *
 * @param {string} sourceFile - Path of the source file under test.
 * @param {string} testFilePath - Path of the test file to execute.
 * @param {string[]} functionNames - Functions covered so far (for logging and the fixer).
 * @returns {Promise<void>} Resolves once validation (and any fix attempt) finishes.
 */
async function smartValidateTestSuite(sourceFile, testFilePath, functionNames) {
    console.log(`\n${'='.repeat(80)}`);
    console.log(`🔍 VALIDATION: Running full test suite (${functionNames.length} function(s))`);
    console.log(`${'='.repeat(80)}\n`);
    // Dispatch to the configured runner. Strict comparison (was loose `==`):
    // testEnv is a plain config string, so `===` is the correct check.
    const fullSuiteResult = globals_1.g.CONFIG.testEnv === 'vitest'
        ? (0, toolHandlers_1.runTestsVitest)(testFilePath)
        : (0, toolHandlers_1.runTestsJest)(testFilePath);
    if (fullSuiteResult.passed) {
        console.log(`\n✅ Full test suite passed! All ${functionNames.length} function(s) working together correctly.`);
        return;
    }
    console.log(`\n⚠️ Full test suite has failures. Attempting to fix failing tests...\n`);
    const failingTests = (0, toolHandlers_1.parseFailingTestNames)(fullSuiteResult.output);
    console.log(`\n📊 Debug: Found ${failingTests.length} failing test(s) from output`);
    if (failingTests.length === 0) {
        // Failures could not be attributed to specific tests — give the fixer
        // the raw suite output and let it work from that instead.
        console.log('⚠️ Could not parse specific failing test names from output.');
        console.log(' Attempting general fix based on full error output...\n');
        await fixFailingTests(sourceFile, testFilePath, functionNames, [], fullSuiteResult.output);
        return;
    }
    console.log(`Found ${failingTests.length} failing test(s): ${failingTests.join(', ')}\n`);
    await fixFailingTests(sourceFile, testFilePath, functionNames, failingTests, fullSuiteResult.output);
}
940
/**
 * Drives an AI tool-calling loop that tries to repair failing tests in a
 * test file. Each iteration: send the conversation to the model, execute any
 * tool calls it requests, feed the results back. Stops when run_tests passes,
 * when the model stops calling tools and a final run still fails, or when the
 * iteration cap is reached.
 *
 * @param {string} sourceFile - Source file the tests cover.
 * @param {string} testFilePath - Test file being repaired.
 * @param {string[]} functionNames - Functions covered by the suite.
 * @param {string[]} failingTests - Parsed failing test names (may be empty for a "general fix").
 * @param {string} fullSuiteOutput - Raw output of the failed suite run.
 * @returns {Promise<void>}
 */
async function fixFailingTests(sourceFile, testFilePath, functionNames, failingTests, fullSuiteOutput) {
    const messages = [
        {
            role: 'user',
            content: `You are fixing FAILING TESTS in the ${globals_1.g.CONFIG.testEnv === 'vitest' ? 'Vitest' : 'Jest'} test suite.

Source file: ${sourceFile}
Test file: ${testFilePath}
Functions tested: ${functionNames.join(', ')}

FAILING TESTS:
${failingTests.map(t => `- ${t}`).join('\n')}

Full suite output:
${fullSuiteOutput}

YOUR TASK - Fix all failing tests:

COMMON ISSUES TO FIX:
- Mock state bleeding between describe blocks
- Missing ${globals_1.g.CONFIG.testEnv === 'vitest' ? 'vitest' : 'jest'} imports (describe, it, expect, beforeEach, ${globals_1.g.CONFIG.testEnv === 'vitest' ? 'vi' : 'jest'})
- Incorrect mock typing (use MockedFunction from ${globals_1.g.CONFIG.testEnv})
- beforeEach not setting up mocks properly
- Missing or incorrect imports
- Mock implementation issues
- Incorrect test assertions
- Test logic errors

NOTE: ${globals_1.g.CONFIG.testEnv === 'vitest' ? 'vitest.config.ts' : 'jest.config.ts'} should have clearMocks/restoreMocks enabled.

TOOLS TO USE:
1. get_file_preamble - See current setup
2. search_replace_block - Fix specific sections (preferred)
3. insert_at_position - Add missing imports/mocks
4. run_tests - Verify fixes

START by calling get_file_preamble to see the current test structure.`
        }
    ];
    let iterations = 0;
    const maxIterations = 500;
    while (iterations < maxIterations) {
        iterations++;
        console.log(`\n🔧 Test fix attempt ${iterations}/${maxIterations}...`);
        const response = await (0, ai_1.callAI)(messages, toolDefinitions_1.TOOLS_FOR_TEST_GENERATION);
        if (response.content) {
            messages.push({ role: 'assistant', content: response.content });
        }
        if (!response.toolCalls || response.toolCalls.length === 0) {
            // The model produced no tool calls: verify once more whether the
            // suite already passes; otherwise abandon this fix loop.
            // Strict comparison (was loose `==`), matching the `===` checks
            // used in the prompt interpolations above.
            const finalTest = globals_1.g.CONFIG.testEnv === 'vitest'
                ? (0, toolHandlers_1.runTestsVitest)(testFilePath)
                : (0, toolHandlers_1.runTestsJest)(testFilePath);
            if (finalTest.passed) {
                console.log('\n✅ Tests fixed! Full test suite now passes.');
                return;
            }
            console.log('\n⚠️ AI stopped but tests still failing.');
            break;
        }
        const toolResults = [];
        for (const toolCall of response.toolCalls) {
            const result = await (0, toolHandlers_1.executeTool)(toolCall.name, toolCall.input);
            toolResults.push({ id: toolCall.id, name: toolCall.name, result });
            // Short-circuit as soon as a run_tests call reports success.
            if (toolCall.name === 'run_tests' && result.passed) {
                console.log('\n✅ Tests fixed! Full test suite now passes.');
                return;
            }
        }
        // Append tool results in the provider-specific message format.
        (0, ai_1.addToolResultsToMessages)(globals_1.g.CONFIG.aiProvider, messages, response.toolCalls, toolResults);
    }
    console.log('\n⚠️ Could not automatically fix all failing tests. Manual review may be needed.');
}
1019
/**
 * Generates tests for several functions of one source file, one function at a
 * time, writing them all into a single shared test file. Runs periodic and
 * final full-suite validation checkpoints when configured.
 *
 * @param {string} sourceFile - Source file whose functions are being tested.
 * @param {string[]} functionNames - Functions to generate tests for, in order.
 * @returns {Promise<string>} Path of the shared test file that was written.
 */
async function generateTestsForFunctions(sourceFile, functionNames) {
    const testFilePath = getTestFilePath(sourceFile);
    let testFileExists = fsSync.existsSync(testFilePath);
    const validationInterval = globals_1.g.CONFIG.validationInterval;
    for (let i = 0; i < functionNames.length; i++) {
        const functionName = functionNames[i];
        (0, globals_1.stopSpinner)();
        console.log(`\n${'='.repeat(80)}`);
        console.log(`Processing function ${i + 1}/${functionNames.length}: ${functionName}`);
        console.log(`${'='.repeat(80)}\n`);
        const passed = await generateTestForSingleFunction(sourceFile, functionName, testFilePath, testFileExists);
        // After the first pass the test file has been created/updated.
        testFileExists = true;
        (0, globals_1.stopSpinner)();
        if (passed) {
            console.log(`\n✅ Function '${functionName}' tests completed successfully!`);
        }
        else {
            console.log(`\n⚠️ Function '${functionName}' completed with issues. Continuing to next function...`);
        }
        const isFinalFunction = i === functionNames.length - 1;
        // `!= null` intentionally matches both null and undefined
        // (replaces the verbose `!== undefined && !== null` pair).
        if (validationInterval != null) {
            const isPeriodicCheckpoint = validationInterval > 0 && (i + 1) % validationInterval === 0;
            if (isPeriodicCheckpoint || isFinalFunction) {
                console.log(`\n${'─'.repeat(80)}`);
                console.log(`📊 CHECKPOINT ${i + 1}/${functionNames.length}: Running full suite validation...`);
                console.log(`${'─'.repeat(80)}`);
                await smartValidateTestSuite(sourceFile, testFilePath, functionNames.slice(0, i + 1));
            }
        }
        // Pause between functions (presumably AI pacing — TODO confirm).
        // Fix: previously this also slept 5s after the LAST function, wasting
        // wall-clock time for no benefit; skip it on the final iteration.
        if (!isFinalFunction) {
            await new Promise(resolve => setTimeout(resolve, 5000));
        }
    }
    console.log(`\n${'='.repeat(80)}`);
    console.log(`✅ All ${functionNames.length} function(s) processed!`);
    console.log(`${'='.repeat(80)}\n`);
    return testFilePath;
}
1058
/**
 * Interactive entry point: scans the repository, prompts the user to pick a
 * source file and a subset of its exported functions, then generates tests
 * for the selection via generateTestsForFunctions.
 *
 * @returns {Promise<void>} Resolves when generation completes or the user
 *   makes an invalid selection (which aborts early).
 */
async function generateTestsForFunction() {
    console.log('\n🎯 Function-wise Test Generation\n');
    (0, globals_1.updateSpinner)('📂 Scanning repository...');
    const files = await listFilesRecursive('.');
    if (files.length === 0) {
        // Optional chaining (fix): don't crash if the spinner was never created.
        globals_1.g.globalSpinner?.fail('No source files found!');
        globals_1.g.globalSpinner = null;
        return;
    }
    (0, globals_1.updateSpinner)(`Found ${files.length} source file(s)`);
    // Brief pause so the spinner message is visible before it is stopped.
    await new Promise(resolve => setTimeout(resolve, 200));
    (0, globals_1.stopSpinner)();
    console.log('\nSelect a file:\n');
    files.forEach((file, index) => {
        console.log(`${index + 1}. ${file}`);
    });
    const fileChoice = await promptUser('\nEnter file number: ');
    // Radix 10 (fix: parseInt was called without a radix). Non-numeric input
    // yields NaN -> undefined lookup, caught by the guard below.
    const selectedFile = files[Number.parseInt(fileChoice, 10) - 1];
    if (!selectedFile) {
        console.log('Invalid selection!');
        return;
    }
    const analysis = (0, ast_1.analyzeFileAST)(selectedFile);
    if (!analysis.success) {
        console.error(`Failed to analyze file: ${analysis.error}`);
        return;
    }
    // Only exported functions are offered for test generation.
    const functions = analysis.analysis.functions.filter((f) => f.exported);
    if (functions.length === 0) {
        console.log('No exported functions found in the file!');
        return;
    }
    functions.forEach((func, index) => {
        console.log(`${index + 1}. ${func.name} (${func.type}, ${func.async ? 'async' : 'sync'})`);
    });
    const functionsChoice = await promptUser('\nEnter function numbers (comma-separated, e.g., 1,3,4): ');
    // Convert 1-based user input to 0-based indices; out-of-range and NaN
    // entries are filtered out below.
    const selectedIndices = functionsChoice.split(',').map(s => Number.parseInt(s.trim(), 10) - 1);
    const selectedFunctions = selectedIndices
        .filter(i => i >= 0 && i < functions.length)
        .map(i => functions[i].name);
    if (selectedFunctions.length === 0) {
        console.log('No valid functions selected!');
        return;
    }
    console.log(`\n✅ Selected functions: ${selectedFunctions.join(', ')}\n`);
    await generateTestsForFunctions(selectedFile, selectedFunctions);
    (0, globals_1.stopSpinner)();
    console.log('\n✨ Done!');
}
1107
+ //# sourceMappingURL=testGenerator.js.map