@google/gemini-cli-core 0.9.0-nightly.20251002.aa8b2abe → 0.9.0-nightly.20251003.2ab61dd1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +2 -1
  2. package/dist/src/agents/codebase-investigator.js +16 -9
  3. package/dist/src/agents/codebase-investigator.js.map +1 -1
  4. package/dist/src/agents/executor.d.ts +3 -8
  5. package/dist/src/agents/executor.js +195 -94
  6. package/dist/src/agents/executor.js.map +1 -1
  7. package/dist/src/agents/executor.test.js +331 -287
  8. package/dist/src/agents/executor.test.js.map +1 -1
  9. package/dist/src/agents/types.d.ts +16 -4
  10. package/dist/src/config/config.d.ts +4 -0
  11. package/dist/src/config/config.js +8 -0
  12. package/dist/src/config/config.js.map +1 -1
  13. package/dist/src/core/client.js +2 -2
  14. package/dist/src/core/client.js.map +1 -1
  15. package/dist/src/core/client.test.js +1 -1
  16. package/dist/src/core/client.test.js.map +1 -1
  17. package/dist/src/core/prompts.d.ts +2 -1
  18. package/dist/src/core/prompts.js +81 -8
  19. package/dist/src/core/prompts.js.map +1 -1
  20. package/dist/src/core/prompts.test.js +73 -24
  21. package/dist/src/core/prompts.test.js.map +1 -1
  22. package/dist/src/generated/git-commit.d.ts +2 -2
  23. package/dist/src/generated/git-commit.js +2 -2
  24. package/dist/src/telemetry/constants.d.ts +1 -0
  25. package/dist/src/telemetry/constants.js +1 -0
  26. package/dist/src/telemetry/constants.js.map +1 -1
  27. package/dist/src/telemetry/loggers.js +2 -2
  28. package/dist/src/telemetry/loggers.js.map +1 -1
  29. package/dist/src/telemetry/loggers.test.js +2 -2
  30. package/dist/src/telemetry/loggers.test.js.map +1 -1
  31. package/dist/src/tools/mcp-client.js +3 -0
  32. package/dist/src/tools/mcp-client.js.map +1 -1
  33. package/dist/src/tools/mcp-client.test.js +60 -0
  34. package/dist/src/tools/mcp-client.test.js.map +1 -1
  35. package/dist/src/utils/memoryDiscovery.d.ts +1 -0
  36. package/dist/src/utils/memoryDiscovery.js +2 -1
  37. package/dist/src/utils/memoryDiscovery.js.map +1 -1
  38. package/dist/src/utils/memoryDiscovery.test.js +99 -21
  39. package/dist/src/utils/memoryDiscovery.test.js.map +1 -1
  40. package/dist/tsconfig.tsbuildinfo +1 -1
  41. package/package.json +1 -1
@@ -3,7 +3,7 @@
3
3
  * Copyright 2025 Google LLC
4
4
  * SPDX-License-Identifier: Apache-2.0
5
5
  */
6
- import { describe, it, expect, vi, beforeEach, afterEach, } from 'vitest';
6
+ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
7
7
  import { AgentExecutor } from './executor.js';
8
8
  import { AgentTerminateMode } from './types.js';
9
9
  import { makeFakeConfig } from '../test-utils/config.js';
@@ -11,6 +11,7 @@ import { ToolRegistry } from '../tools/tool-registry.js';
11
11
  import { LSTool } from '../tools/ls.js';
12
12
  import { ReadFileTool } from '../tools/read-file.js';
13
13
  import { GeminiChat, StreamEventType, } from '../core/geminiChat.js';
14
+ import { Type, } from '@google/genai';
14
15
  import { MockTool } from '../test-utils/mock-tool.js';
15
16
  import { getDirectoryContextString } from '../utils/environmentContext.js';
16
17
  const { mockSendMessageStream, mockExecuteToolCall } = vi.hoisted(() => ({
@@ -30,13 +31,22 @@ vi.mock('../core/nonInteractiveToolExecutor.js', () => ({
30
31
  executeToolCall: mockExecuteToolCall,
31
32
  }));
32
33
  vi.mock('../utils/environmentContext.js');
33
- const MockedGeminiChat = GeminiChat;
34
- // A mock tool that is NOT on the NON_INTERACTIVE_TOOL_ALLOWLIST
35
- const MOCK_TOOL_NOT_ALLOWED = new MockTool({ name: 'write_file' });
34
+ const MockedGeminiChat = vi.mocked(GeminiChat);
35
+ const mockedGetDirectoryContextString = vi.mocked(getDirectoryContextString);
36
+ // Constants for testing
37
+ const TASK_COMPLETE_TOOL_NAME = 'complete_task';
38
+ const MOCK_TOOL_NOT_ALLOWED = new MockTool({ name: 'write_file_interactive' });
39
+ /**
40
+ * Helper to create a mock API response chunk.
41
+ * Uses conditional spread to handle readonly functionCalls property safely.
42
+ */
36
43
  const createMockResponseChunk = (parts, functionCalls) => ({
37
44
  candidates: [{ index: 0, content: { role: 'model', parts } }],
38
- functionCalls,
45
+ ...(functionCalls && functionCalls.length > 0 ? { functionCalls } : {}),
39
46
  });
47
+ /**
48
+ * Helper to mock a single turn of model response in the stream.
49
+ */
40
50
  const mockModelResponse = (functionCalls, thought, text) => {
41
51
  const parts = [];
42
52
  if (thought) {
@@ -47,9 +57,7 @@ const mockModelResponse = (functionCalls, thought, text) => {
47
57
  }
48
58
  if (text)
49
59
  parts.push({ text });
50
- const responseChunk = createMockResponseChunk(parts,
51
- // Ensure functionCalls is undefined if the array is empty, matching API behavior
52
- functionCalls.length > 0 ? functionCalls : undefined);
60
+ const responseChunk = createMockResponseChunk(parts, functionCalls);
53
61
  mockSendMessageStream.mockImplementationOnce(async () => (async function* () {
54
62
  yield {
55
63
  type: StreamEventType.CHUNK,
@@ -57,33 +65,60 @@ const mockModelResponse = (functionCalls, thought, text) => {
57
65
  };
58
66
  })());
59
67
  };
68
+ /**
69
+ * Helper to extract the message parameters sent to sendMessageStream.
70
+ * Provides type safety for inspecting mock calls.
71
+ */
72
+ const getMockMessageParams = (callIndex) => {
73
+ const call = mockSendMessageStream.mock.calls[callIndex];
74
+ expect(call).toBeDefined();
75
+ // Arg 1 of sendMessageStream is the message parameters
76
+ return call[1];
77
+ };
60
78
  let mockConfig;
61
79
  let parentToolRegistry;
62
- const createTestDefinition = (tools = [LSTool.Name], runConfigOverrides = {}, outputConfigOverrides = {}, promptConfigOverrides = {}) => ({
63
- name: 'TestAgent',
64
- description: 'An agent for testing.',
65
- inputConfig: {
66
- inputs: { goal: { type: 'string', required: true, description: 'goal' } },
67
- },
68
- modelConfig: { model: 'gemini-test-model', temp: 0, top_p: 1 },
69
- runConfig: { max_time_minutes: 5, max_turns: 5, ...runConfigOverrides },
70
- promptConfig: {
71
- systemPrompt: 'Achieve the goal: ${goal}.',
72
- ...promptConfigOverrides,
73
- },
74
- toolConfig: { tools },
75
- outputConfig: { description: 'The final result.', ...outputConfigOverrides },
76
- });
80
+ const createTestDefinition = (tools = [LSTool.Name], runConfigOverrides = {}, outputConfigMode = 'default') => {
81
+ let outputConfig;
82
+ if (outputConfigMode === 'default') {
83
+ outputConfig = {
84
+ outputName: 'finalResult',
85
+ description: 'The final result.',
86
+ schema: { type: Type.STRING },
87
+ };
88
+ }
89
+ else if (outputConfigMode !== 'none') {
90
+ outputConfig = {
91
+ outputName: 'finalResult',
92
+ description: 'The final result.',
93
+ schema: { type: Type.STRING },
94
+ ...outputConfigMode,
95
+ };
96
+ }
97
+ return {
98
+ name: 'TestAgent',
99
+ description: 'An agent for testing.',
100
+ inputConfig: {
101
+ inputs: { goal: { type: 'string', required: true, description: 'goal' } },
102
+ },
103
+ modelConfig: { model: 'gemini-test-model', temp: 0, top_p: 1 },
104
+ runConfig: { max_time_minutes: 5, max_turns: 5, ...runConfigOverrides },
105
+ promptConfig: { systemPrompt: 'Achieve the goal: ${goal}.' },
106
+ toolConfig: { tools },
107
+ outputConfig,
108
+ };
109
+ };
77
110
  describe('AgentExecutor', () => {
78
111
  let activities;
79
112
  let onActivity;
80
113
  let abortController;
81
114
  let signal;
82
115
  beforeEach(async () => {
83
- mockSendMessageStream.mockClear();
84
- mockExecuteToolCall.mockClear();
85
- vi.clearAllMocks();
86
- // Use fake timers for timeout and concurrency testing
116
+ vi.resetAllMocks();
117
+ mockSendMessageStream.mockReset();
118
+ mockExecuteToolCall.mockReset();
119
+ MockedGeminiChat.mockImplementation(() => ({
120
+ sendMessageStream: mockSendMessageStream,
121
+ }));
87
122
  vi.useFakeTimers();
88
123
  mockConfig = makeFakeConfig();
89
124
  parentToolRegistry = new ToolRegistry(mockConfig);
@@ -91,7 +126,7 @@ describe('AgentExecutor', () => {
91
126
  parentToolRegistry.registerTool(new ReadFileTool(mockConfig));
92
127
  parentToolRegistry.registerTool(MOCK_TOOL_NOT_ALLOWED);
93
128
  vi.spyOn(mockConfig, 'getToolRegistry').mockResolvedValue(parentToolRegistry);
94
- vi.mocked(getDirectoryContextString).mockResolvedValue('Mocked Environment Context');
129
+ mockedGetDirectoryContextString.mockResolvedValue('Mocked Environment Context');
95
130
  activities = [];
96
131
  onActivity = (activity) => activities.push(activity);
97
132
  abortController = new AbortController();
@@ -108,13 +143,12 @@ describe('AgentExecutor', () => {
108
143
  });
109
144
  it('SECURITY: should throw if a tool is not on the non-interactive allowlist', async () => {
110
145
  const definition = createTestDefinition([MOCK_TOOL_NOT_ALLOWED.name]);
111
- await expect(AgentExecutor.create(definition, mockConfig, onActivity)).rejects.toThrow(`Tool "${MOCK_TOOL_NOT_ALLOWED.name}" is not on the allow-list for non-interactive execution`);
146
+ await expect(AgentExecutor.create(definition, mockConfig, onActivity)).rejects.toThrow(/not on the allow-list for non-interactive execution/);
112
147
  });
113
148
  it('should create an isolated ToolRegistry for the agent', async () => {
114
149
  const definition = createTestDefinition([LSTool.Name, ReadFileTool.Name]);
115
150
  const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
116
- // @ts-expect-error - accessing private property for test validation
117
- const agentRegistry = executor.toolRegistry;
151
+ const agentRegistry = executor['toolRegistry'];
118
152
  expect(agentRegistry).not.toBe(parentToolRegistry);
119
153
  expect(agentRegistry.getAllToolNames()).toEqual(expect.arrayContaining([LSTool.Name, ReadFileTool.Name]));
120
154
  expect(agentRegistry.getAllToolNames()).toHaveLength(2);
@@ -122,7 +156,7 @@ describe('AgentExecutor', () => {
122
156
  });
123
157
  });
124
158
  describe('run (Execution Loop and Logic)', () => {
125
- it('should execute a successful work and extraction phase (Happy Path) and emit activities', async () => {
159
+ it('should execute successfully when model calls complete_task with output (Happy Path with Output)', async () => {
126
160
  const definition = createTestDefinition();
127
161
  const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
128
162
  const inputs = { goal: 'Find files' };
@@ -142,84 +176,211 @@ describe('AgentExecutor', () => {
142
176
  ],
143
177
  error: undefined,
144
178
  });
145
- // Turn 2: Model stops
146
- mockModelResponse([], 'T2: Done');
147
- // Extraction Phase
148
- mockModelResponse([], undefined, 'Result: file1.txt.');
179
+ // Turn 2: Model calls complete_task with required output
180
+ mockModelResponse([
181
+ {
182
+ name: TASK_COMPLETE_TOOL_NAME,
183
+ args: { finalResult: 'Found file1.txt' },
184
+ id: 'call2',
185
+ },
186
+ ], 'T2: Done');
149
187
  const output = await executor.run(inputs, signal);
150
- expect(mockSendMessageStream).toHaveBeenCalledTimes(3);
151
- expect(mockExecuteToolCall).toHaveBeenCalledTimes(1);
152
- // Verify System Prompt Templating
188
+ expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
153
189
  const chatConstructorArgs = MockedGeminiChat.mock.calls[0];
154
190
  const chatConfig = chatConstructorArgs[1];
155
- expect(chatConfig?.systemInstruction).toContain('Achieve the goal: Find files.');
156
- // Verify environment context is appended
157
- expect(chatConfig?.systemInstruction).toContain('# Environment Context\nMocked Environment Context');
158
- // Verify standard rules are appended
159
- expect(chatConfig?.systemInstruction).toContain('You are running in a non-interactive mode.');
160
- // Verify absolute path rule is appended
161
- expect(chatConfig?.systemInstruction).toContain('Always use absolute paths for file operations.');
162
- // Verify Extraction Phase Call (Specific arguments)
163
- expect(mockSendMessageStream).toHaveBeenCalledWith('gemini-test-model', expect.objectContaining({
164
- // Extraction message should be based on outputConfig.description
165
- message: expect.arrayContaining([
166
- {
167
- text: expect.stringContaining('Based on your work so far, provide: The final result.'),
168
- },
169
- ]),
170
- config: expect.objectContaining({ tools: undefined }), // No tools in extraction
171
- }), expect.stringContaining('#extraction'));
172
- expect(output.result).toBe('Result: file1.txt.');
191
+ expect(chatConfig?.systemInstruction).toContain(`MUST call the \`${TASK_COMPLETE_TOOL_NAME}\` tool`);
192
+ const turn1Params = getMockMessageParams(0);
193
+ const firstToolGroup = turn1Params.config?.tools?.[0];
194
+ expect(firstToolGroup).toBeDefined();
195
+ if (!firstToolGroup || !('functionDeclarations' in firstToolGroup)) {
196
+ throw new Error('Test expectation failed: Config does not contain functionDeclarations.');
197
+ }
198
+ const sentTools = firstToolGroup.functionDeclarations;
199
+ expect(sentTools).toBeDefined();
200
+ expect(sentTools).toEqual(expect.arrayContaining([
201
+ expect.objectContaining({ name: LSTool.Name }),
202
+ expect.objectContaining({ name: TASK_COMPLETE_TOOL_NAME }),
203
+ ]));
204
+ const completeToolDef = sentTools.find((t) => t.name === TASK_COMPLETE_TOOL_NAME);
205
+ expect(completeToolDef?.parameters?.required).toContain('finalResult');
206
+ expect(output.result).toBe('Found file1.txt');
173
207
  expect(output.terminate_reason).toBe(AgentTerminateMode.GOAL);
174
- // Verify Activity Stream (Observability)
175
208
  expect(activities).toEqual(expect.arrayContaining([
176
- // Thought subjects are extracted by the executor (parseThought)
177
209
  expect.objectContaining({
178
210
  type: 'THOUGHT_CHUNK',
179
211
  data: { text: 'T1: Listing' },
180
212
  }),
181
- expect.objectContaining({
182
- type: 'TOOL_CALL_START',
183
- data: { name: LSTool.Name, args: { path: '.' } },
184
- }),
185
213
  expect.objectContaining({
186
214
  type: 'TOOL_CALL_END',
187
215
  data: { name: LSTool.Name, output: 'file1.txt' },
188
216
  }),
189
217
  expect.objectContaining({
190
- type: 'THOUGHT_CHUNK',
191
- data: { text: 'T2: Done' },
218
+ type: 'TOOL_CALL_START',
219
+ data: {
220
+ name: TASK_COMPLETE_TOOL_NAME,
221
+ args: { finalResult: 'Found file1.txt' },
222
+ },
223
+ }),
224
+ expect.objectContaining({
225
+ type: 'TOOL_CALL_END',
226
+ data: {
227
+ name: TASK_COMPLETE_TOOL_NAME,
228
+ output: expect.stringContaining('Output submitted'),
229
+ },
192
230
  }),
193
231
  ]));
194
232
  });
195
- it('should execute parallel tool calls concurrently', async () => {
196
- const definition = createTestDefinition([LSTool.Name, ReadFileTool.Name]);
233
+ it('should execute successfully when model calls complete_task without output (Happy Path No Output)', async () => {
234
+ const definition = createTestDefinition([LSTool.Name], {}, 'none');
235
+ const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
236
+ mockModelResponse([
237
+ { name: LSTool.Name, args: { path: '.' }, id: 'call1' },
238
+ ]);
239
+ mockExecuteToolCall.mockResolvedValueOnce({
240
+ callId: 'call1',
241
+ resultDisplay: 'ok',
242
+ responseParts: [
243
+ {
244
+ functionResponse: { name: LSTool.Name, response: {}, id: 'call1' },
245
+ },
246
+ ],
247
+ });
248
+ mockModelResponse([{ name: TASK_COMPLETE_TOOL_NAME, args: {}, id: 'call2' }], 'Task finished.');
249
+ const output = await executor.run({ goal: 'Do work' }, signal);
250
+ const turn1Params = getMockMessageParams(0);
251
+ const firstToolGroup = turn1Params.config?.tools?.[0];
252
+ expect(firstToolGroup).toBeDefined();
253
+ if (!firstToolGroup || !('functionDeclarations' in firstToolGroup)) {
254
+ throw new Error('Test expectation failed: Config does not contain functionDeclarations.');
255
+ }
256
+ const sentTools = firstToolGroup.functionDeclarations;
257
+ expect(sentTools).toBeDefined();
258
+ const completeToolDef = sentTools.find((t) => t.name === TASK_COMPLETE_TOOL_NAME);
259
+ expect(completeToolDef?.parameters?.required).toEqual([]);
260
+ expect(completeToolDef?.description).toContain('signal that you have completed');
261
+ expect(output.result).toBe('Task completed successfully.');
262
+ expect(output.terminate_reason).toBe(AgentTerminateMode.GOAL);
263
+ });
264
+ it('should error immediately if the model stops tools without calling complete_task (Protocol Violation)', async () => {
265
+ const definition = createTestDefinition();
266
+ const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
267
+ mockModelResponse([
268
+ { name: LSTool.Name, args: { path: '.' }, id: 'call1' },
269
+ ]);
270
+ mockExecuteToolCall.mockResolvedValueOnce({
271
+ callId: 'call1',
272
+ resultDisplay: 'ok',
273
+ responseParts: [
274
+ {
275
+ functionResponse: { name: LSTool.Name, response: {}, id: 'call1' },
276
+ },
277
+ ],
278
+ });
279
+ mockModelResponse([], 'I think I am done.');
280
+ const output = await executor.run({ goal: 'Strict test' }, signal);
281
+ expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
282
+ const expectedError = `Agent stopped calling tools but did not call '${TASK_COMPLETE_TOOL_NAME}' to finalize the session.`;
283
+ expect(output.terminate_reason).toBe(AgentTerminateMode.ERROR);
284
+ expect(output.result).toBe(expectedError);
285
+ expect(activities).toContainEqual(expect.objectContaining({
286
+ type: 'ERROR',
287
+ data: expect.objectContaining({
288
+ context: 'protocol_violation',
289
+ error: expectedError,
290
+ }),
291
+ }));
292
+ });
293
+ it('should report an error if complete_task is called with missing required arguments', async () => {
294
+ const definition = createTestDefinition();
295
+ const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
296
+ // Turn 1: Missing arg
297
+ mockModelResponse([
298
+ {
299
+ name: TASK_COMPLETE_TOOL_NAME,
300
+ args: { wrongArg: 'oops' },
301
+ id: 'call1',
302
+ },
303
+ ]);
304
+ // Turn 2: Corrected
305
+ mockModelResponse([
306
+ {
307
+ name: TASK_COMPLETE_TOOL_NAME,
308
+ args: { finalResult: 'Corrected result' },
309
+ id: 'call2',
310
+ },
311
+ ]);
312
+ const output = await executor.run({ goal: 'Error test' }, signal);
313
+ expect(mockSendMessageStream).toHaveBeenCalledTimes(2);
314
+ const expectedError = "Missing required argument 'finalResult' for completion.";
315
+ expect(activities).toContainEqual(expect.objectContaining({
316
+ type: 'ERROR',
317
+ data: {
318
+ context: 'tool_call',
319
+ name: TASK_COMPLETE_TOOL_NAME,
320
+ error: expectedError,
321
+ },
322
+ }));
323
+ const turn2Params = getMockMessageParams(1);
324
+ const turn2Parts = turn2Params.message;
325
+ expect(turn2Parts).toBeDefined();
326
+ expect(turn2Parts).toHaveLength(1);
327
+ expect(turn2Parts[0]).toEqual(expect.objectContaining({
328
+ functionResponse: expect.objectContaining({
329
+ name: TASK_COMPLETE_TOOL_NAME,
330
+ response: { error: expectedError },
331
+ id: 'call1',
332
+ }),
333
+ }));
334
+ expect(output.result).toBe('Corrected result');
335
+ expect(output.terminate_reason).toBe(AgentTerminateMode.GOAL);
336
+ });
337
+ it('should handle multiple calls to complete_task in the same turn (accept first, block rest)', async () => {
338
+ const definition = createTestDefinition([], {}, 'none');
339
+ const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
340
+ // Turn 1: Duplicate calls
341
+ mockModelResponse([
342
+ { name: TASK_COMPLETE_TOOL_NAME, args: {}, id: 'call1' },
343
+ { name: TASK_COMPLETE_TOOL_NAME, args: {}, id: 'call2' },
344
+ ]);
345
+ const output = await executor.run({ goal: 'Dup test' }, signal);
346
+ expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
347
+ expect(output.terminate_reason).toBe(AgentTerminateMode.GOAL);
348
+ const completions = activities.filter((a) => a.type === 'TOOL_CALL_END' &&
349
+ a.data['name'] === TASK_COMPLETE_TOOL_NAME);
350
+ const errors = activities.filter((a) => a.type === 'ERROR' && a.data['name'] === TASK_COMPLETE_TOOL_NAME);
351
+ expect(completions).toHaveLength(1);
352
+ expect(errors).toHaveLength(1);
353
+ expect(errors[0].data['error']).toContain('Task already marked complete in this turn');
354
+ });
355
+ it('should execute parallel tool calls and then complete', async () => {
356
+ const definition = createTestDefinition([LSTool.Name]);
197
357
  const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
198
358
  const call1 = {
199
359
  name: LSTool.Name,
200
- args: { path: '/dir1' },
201
- id: 'call1',
360
+ args: { path: '/a' },
361
+ id: 'c1',
202
362
  };
203
- // Using LSTool twice for simplicity in mocking standardized responses.
204
363
  const call2 = {
205
364
  name: LSTool.Name,
206
- args: { path: '/dir2' },
207
- id: 'call2',
365
+ args: { path: '/b' },
366
+ id: 'c2',
208
367
  };
209
- // Turn 1: Model calls two tools simultaneously
210
- mockModelResponse([call1, call2], 'T1: Listing both');
211
- // Use concurrency tracking to ensure parallelism
212
- let activeCalls = 0;
213
- let maxActiveCalls = 0;
368
+ // Turn 1: Parallel calls
369
+ mockModelResponse([call1, call2]);
370
+ // Concurrency mock
371
+ let callsStarted = 0;
372
+ let resolveCalls;
373
+ const bothStarted = new Promise((r) => {
374
+ resolveCalls = r;
375
+ });
214
376
  mockExecuteToolCall.mockImplementation(async (_ctx, reqInfo) => {
215
- activeCalls++;
216
- maxActiveCalls = Math.max(maxActiveCalls, activeCalls);
217
- // Simulate latency. We must advance the fake timers for this to resolve.
218
- await new Promise((resolve) => setTimeout(resolve, 100));
219
- activeCalls--;
377
+ callsStarted++;
378
+ if (callsStarted === 2)
379
+ resolveCalls();
380
+ await vi.advanceTimersByTimeAsync(100);
220
381
  return {
221
382
  callId: reqInfo.callId,
222
- resultDisplay: `Result for ${reqInfo.name}`,
383
+ resultDisplay: 'ok',
223
384
  responseParts: [
224
385
  {
225
386
  functionResponse: {
@@ -229,262 +390,145 @@ describe('AgentExecutor', () => {
229
390
  },
230
391
  },
231
392
  ],
232
- error: undefined,
233
393
  };
234
394
  });
235
- // Turn 2: Model stops
236
- mockModelResponse([]);
237
- // Extraction
238
- mockModelResponse([], undefined, 'Done.');
239
- const runPromise = executor.run({ goal: 'Parallel test' }, signal);
240
- // Advance timers while the parallel calls (Promise.all + setTimeout) are running
395
+ // Turn 2: Completion
396
+ mockModelResponse([
397
+ {
398
+ name: TASK_COMPLETE_TOOL_NAME,
399
+ args: { finalResult: 'done' },
400
+ id: 'c3',
401
+ },
402
+ ]);
403
+ const runPromise = executor.run({ goal: 'Parallel' }, signal);
404
+ await vi.advanceTimersByTimeAsync(1);
405
+ await bothStarted;
241
406
  await vi.advanceTimersByTimeAsync(150);
242
- await runPromise;
407
+ await vi.advanceTimersByTimeAsync(1);
408
+ const output = await runPromise;
243
409
  expect(mockExecuteToolCall).toHaveBeenCalledTimes(2);
244
- expect(maxActiveCalls).toBe(2);
245
- // Verify the input to the next model call (Turn 2) contains both responses
246
- // sendMessageStream calls: [0] Turn 1, [1] Turn 2, [2] Extraction
247
- const turn2Input = mockSendMessageStream.mock.calls[1][1];
248
- const turn2Parts = turn2Input.message;
249
- // Promise.all preserves the order of the input array.
250
- expect(turn2Parts.length).toBe(2);
251
- expect(turn2Parts[0]).toEqual(expect.objectContaining({
252
- functionResponse: expect.objectContaining({ id: 'call1' }),
253
- }));
254
- expect(turn2Parts[1]).toEqual(expect.objectContaining({
255
- functionResponse: expect.objectContaining({ id: 'call2' }),
256
- }));
257
- });
258
- it('should use the templated query from promptConfig.query when provided', async () => {
259
- const customQuery = 'Please achieve the goal: ${goal}';
260
- const definition = createTestDefinition([], // No tools needed for this test
261
- {}, {}, { query: customQuery, systemPrompt: 'You are a helpful agent.' });
262
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
263
- const inputs = { goal: 'test custom query' };
264
- // Model stops immediately
265
- mockModelResponse([]);
266
- // Extraction
267
- mockModelResponse([], undefined, 'Done.');
268
- await executor.run(inputs, signal);
269
- // Verify the first call to sendMessageStream (the work phase)
270
- const workPhaseCallArgs = mockSendMessageStream.mock.calls[0][1];
271
- const workPhaseMessageParts = workPhaseCallArgs.message;
272
- expect(workPhaseMessageParts).toEqual([
273
- { text: 'Please achieve the goal: test custom query' },
274
- ]);
275
- });
276
- it('should default to "Get Started!" when promptConfig.query is not provided', async () => {
277
- const definition = createTestDefinition([], // No tools needed for this test
278
- {}, {}, { query: undefined, systemPrompt: 'You are a helpful agent.' });
279
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
280
- const inputs = { goal: 'test default query' };
281
- // Model stops immediately
282
- mockModelResponse([]);
283
- // Extraction
284
- mockModelResponse([], undefined, 'Done.');
285
- await executor.run(inputs, signal);
286
- // Verify the first call to sendMessageStream (the work phase)
287
- const workPhaseCallArgs = mockSendMessageStream.mock.calls[0][1];
288
- const workPhaseMessageParts = workPhaseCallArgs.message;
289
- expect(workPhaseMessageParts).toEqual([{ text: 'Get Started!' }]);
410
+ expect(output.terminate_reason).toBe(AgentTerminateMode.GOAL);
411
+ // Safe access to message parts
412
+ const turn2Params = getMockMessageParams(1);
413
+ const parts = turn2Params.message;
414
+ expect(parts).toBeDefined();
415
+ expect(parts).toHaveLength(2);
416
+ expect(parts).toEqual(expect.arrayContaining([
417
+ expect.objectContaining({
418
+ functionResponse: expect.objectContaining({ id: 'c1' }),
419
+ }),
420
+ expect.objectContaining({
421
+ functionResponse: expect.objectContaining({ id: 'c2' }),
422
+ }),
423
+ ]));
290
424
  });
291
- it('should handle tool execution failure gracefully and report error', async () => {
425
+ it('SECURITY: should block unauthorized tools and provide explicit failure to model', async () => {
292
426
  const definition = createTestDefinition([LSTool.Name]);
293
427
  const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
294
- // Turn 1: Model calls ls, but it fails
428
+ // Turn 1: Model tries to use a tool not in its config
429
+ const badCallId = 'bad_call_1';
295
430
  mockModelResponse([
296
- { name: LSTool.Name, args: { path: '/invalid' }, id: 'call1' },
297
- ]);
298
- const errorMessage = 'Internal failure.';
299
- mockExecuteToolCall.mockResolvedValueOnce({
300
- callId: 'call1',
301
- resultDisplay: `Error: ${errorMessage}`,
302
- responseParts: undefined, // Failed tools might return undefined parts
303
- error: { message: errorMessage },
304
- });
305
- // Turn 2: Model stops
306
- mockModelResponse([]);
307
- mockModelResponse([], undefined, 'Failed.');
308
- await executor.run({ goal: 'Failure test' }, signal);
309
- // Verify that the error was reported in the activity stream
310
- expect(activities).toContainEqual(expect.objectContaining({
311
- type: 'ERROR',
312
- data: {
313
- error: errorMessage,
314
- context: 'tool_call',
315
- name: LSTool.Name,
316
- },
317
- }));
318
- // Verify the input to the next model call (Turn 2) contains the fallback error message
319
- const turn2Input = mockSendMessageStream.mock.calls[1][1];
320
- const turn2Parts = turn2Input.message;
321
- expect(turn2Parts).toEqual([
322
431
  {
323
- text: 'All tool calls failed. Please analyze the errors and try an alternative approach.',
432
+ name: ReadFileTool.Name,
433
+ args: { path: 'secret.txt' },
434
+ id: badCallId,
324
435
  },
325
436
  ]);
326
- });
327
- it('SECURITY: should block calls to tools not registered for the agent at runtime', async () => {
328
- // Agent definition only includes LSTool
329
- const definition = createTestDefinition([LSTool.Name]);
330
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
331
- // Turn 1: Model hallucinates a call to ReadFileTool
332
- // (ReadFileTool exists in the parent registry but not the agent's isolated registry)
437
+ // Turn 2: Model gives up and completes
333
438
  mockModelResponse([
334
439
  {
335
- name: ReadFileTool.Name,
336
- args: { path: 'config.txt' },
337
- id: 'call_blocked',
440
+ name: TASK_COMPLETE_TOOL_NAME,
441
+ args: { finalResult: 'Could not read file.' },
442
+ id: 'c2',
338
443
  },
339
444
  ]);
340
- // Turn 2: Model stops
341
- mockModelResponse([]);
342
- // Extraction
343
- mockModelResponse([], undefined, 'Done.');
344
445
  const consoleWarnSpy = vi
345
446
  .spyOn(console, 'warn')
346
447
  .mockImplementation(() => { });
347
- await executor.run({ goal: 'Security test' }, signal);
348
- // Verify executeToolCall was NEVER called because the tool was unauthorized
448
+ await executor.run({ goal: 'Sec test' }, signal);
449
+ // Verify external executor was not called (Security held)
349
450
  expect(mockExecuteToolCall).not.toHaveBeenCalled();
350
- expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining(`attempted to call unauthorized tool '${ReadFileTool.Name}'`));
451
+ // 2. Verify console warning
452
+ expect(consoleWarnSpy).toHaveBeenCalledWith(expect.stringContaining(`[AgentExecutor] Blocked call:`));
351
453
  consoleWarnSpy.mockRestore();
352
- // Verify the input to the next model call (Turn 2) indicates failure (as the only call was blocked)
353
- const turn2Input = mockSendMessageStream.mock.calls[1][1];
354
- const turn2Parts = turn2Input.message;
355
- expect(turn2Parts[0].text).toContain('All tool calls failed');
356
- });
357
- it('should use OutputConfig completion_criteria in the extraction message', async () => {
358
- const definition = createTestDefinition([LSTool.Name], {}, {
359
- description: 'A summary.',
360
- completion_criteria: ['Must include file names', 'Must be concise'],
361
- });
362
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
363
- // Turn 1: Model stops immediately
364
- mockModelResponse([]);
365
- // Extraction Phase
366
- mockModelResponse([], undefined, 'Result: Done.');
367
- await executor.run({ goal: 'Extraction test' }, signal);
368
- // Verify the extraction call (the second call)
369
- const extractionCallArgs = mockSendMessageStream.mock.calls[1][1];
370
- const extractionMessageParts = extractionCallArgs.message;
371
- const extractionText = extractionMessageParts[0].text;
372
- expect(extractionText).toContain('Based on your work so far, provide: A summary.');
373
- expect(extractionText).toContain('Be sure you have addressed:');
374
- expect(extractionText).toContain('- Must include file names');
375
- expect(extractionText).toContain('- Must be concise');
376
- });
377
- it('should apply templating to initialMessages', async () => {
378
- const definition = createTestDefinition([], // No tools needed
379
- {}, {}, {
380
- // Override systemPrompt to be undefined and provide initialMessages
381
- systemPrompt: undefined,
382
- initialMessages: [
383
- {
384
- role: 'user',
385
- parts: [{ text: 'The user wants to ${goal}.' }],
386
- },
387
- {
388
- role: 'model',
389
- parts: [{ text: 'Okay, I will start working on ${goal}.' }],
454
+ // Verify specific error was sent back to model
455
+ const turn2Params = getMockMessageParams(1);
456
+ const parts = turn2Params.message;
457
+ expect(parts).toBeDefined();
458
+ expect(parts[0]).toEqual(expect.objectContaining({
459
+ functionResponse: expect.objectContaining({
460
+ id: badCallId,
461
+ name: ReadFileTool.Name,
462
+ response: {
463
+ error: expect.stringContaining('Unauthorized tool call'),
390
464
  },
391
- ],
392
- });
393
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
394
- const inputs = { goal: 'find the file' };
395
- // Model stops immediately
396
- mockModelResponse([]);
397
- // Extraction
398
- mockModelResponse([], undefined, 'Done.');
399
- await executor.run(inputs, signal);
400
- // Verify that the initialMessages were templated correctly
401
- const chatConstructorArgs = MockedGeminiChat.mock.calls[0];
402
- const startHistory = chatConstructorArgs[2]; // 3rd argument is startHistory
403
- expect(startHistory).toEqual([
404
- {
405
- role: 'user',
406
- parts: [{ text: 'The user wants to find the file.' }],
407
- },
408
- {
409
- role: 'model',
410
- parts: [{ text: 'Okay, I will start working on find the file.' }],
411
- },
412
- ]);
465
+ }),
466
+ }));
467
+ // Verify Activity Stream reported the error
468
+ expect(activities).toContainEqual(expect.objectContaining({
469
+ type: 'ERROR',
470
+ data: expect.objectContaining({
471
+ context: 'tool_call_unauthorized',
472
+ name: ReadFileTool.Name,
473
+ }),
474
+ }));
413
475
  });
414
476
  });
415
477
  describe('run (Termination Conditions)', () => {
416
- const mockKeepAliveResponse = () => {
417
- mockModelResponse([{ name: LSTool.Name, args: { path: '.' }, id: 'loop' }], 'Looping');
418
- mockExecuteToolCall.mockResolvedValue({
419
- callId: 'loop',
478
+ const mockWorkResponse = (id) => {
479
+ mockModelResponse([{ name: LSTool.Name, args: { path: '.' }, id }]);
480
+ mockExecuteToolCall.mockResolvedValueOnce({
481
+ callId: id,
420
482
  resultDisplay: 'ok',
421
483
  responseParts: [
422
- { functionResponse: { name: LSTool.Name, response: {}, id: 'loop' } },
484
+ { functionResponse: { name: LSTool.Name, response: {}, id } },
423
485
  ],
424
- error: undefined,
425
486
  });
426
487
  };
427
488
  it('should terminate when max_turns is reached', async () => {
428
- const MAX_TURNS = 2;
489
+ const MAX = 2;
429
490
  const definition = createTestDefinition([LSTool.Name], {
430
- max_turns: MAX_TURNS,
491
+ max_turns: MAX,
431
492
  });
432
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
433
- // Turn 1
434
- mockKeepAliveResponse();
435
- // Turn 2
436
- mockKeepAliveResponse();
437
- const output = await executor.run({ goal: 'Termination test' }, signal);
493
+ const executor = await AgentExecutor.create(definition, mockConfig);
494
+ mockWorkResponse('t1');
495
+ mockWorkResponse('t2');
496
+ const output = await executor.run({ goal: 'Turns test' }, signal);
438
497
  expect(output.terminate_reason).toBe(AgentTerminateMode.MAX_TURNS);
439
- expect(mockSendMessageStream).toHaveBeenCalledTimes(MAX_TURNS);
440
- // Extraction phase should be skipped when termination is forced
441
- expect(mockSendMessageStream).not.toHaveBeenCalledWith(expect.any(String), expect.any(Object), expect.stringContaining('#extraction'));
498
+ expect(mockSendMessageStream).toHaveBeenCalledTimes(MAX);
442
499
  });
443
500
  it('should terminate if timeout is reached', async () => {
444
501
  const definition = createTestDefinition([LSTool.Name], {
445
- max_time_minutes: 5,
446
- max_turns: 100,
502
+ max_time_minutes: 1,
447
503
  });
448
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
449
- // Turn 1 setup
450
- mockModelResponse([{ name: LSTool.Name, args: { path: '.' }, id: 'loop' }], 'Looping');
451
- // Mock a tool call that takes a long time, causing the overall timeout
452
- mockExecuteToolCall.mockImplementation(async () => {
453
- // Advance time past the 5-minute limit during the tool call execution
454
- await vi.advanceTimersByTimeAsync(5 * 60 * 1000 + 1);
504
+ const executor = await AgentExecutor.create(definition, mockConfig);
505
+ mockModelResponse([{ name: LSTool.Name, args: { path: '.' }, id: 't1' }]);
506
+ // Long running tool
507
+ mockExecuteToolCall.mockImplementationOnce(async () => {
508
+ await vi.advanceTimersByTimeAsync(61 * 1000);
455
509
  return {
456
- callId: 'loop',
510
+ callId: 't1',
457
511
  resultDisplay: 'ok',
458
- responseParts: [
459
- {
460
- functionResponse: { name: LSTool.Name, response: {}, id: 'loop' },
461
- },
462
- ],
463
- error: undefined,
512
+ responseParts: [],
464
513
  };
465
514
  });
466
- const output = await executor.run({ goal: 'Termination test' }, signal);
515
+ const output = await executor.run({ goal: 'Timeout test' }, signal);
467
516
  expect(output.terminate_reason).toBe(AgentTerminateMode.TIMEOUT);
468
- // Should only have called the model once before the timeout check stopped it
469
517
  expect(mockSendMessageStream).toHaveBeenCalledTimes(1);
470
518
  });
471
- it('should terminate when AbortSignal is triggered mid-stream', async () => {
519
+ it('should terminate when AbortSignal is triggered', async () => {
472
520
  const definition = createTestDefinition();
473
- const executor = await AgentExecutor.create(definition, mockConfig, onActivity);
474
- // Mock the model response stream
475
- mockSendMessageStream.mockImplementation(async () => (async function* () {
476
- // Yield the first chunk
521
+ const executor = await AgentExecutor.create(definition, mockConfig);
522
+ mockSendMessageStream.mockImplementationOnce(async () => (async function* () {
477
523
  yield {
478
524
  type: StreamEventType.CHUNK,
479
525
  value: createMockResponseChunk([
480
- { text: '**Thinking** Step 1', thought: true },
526
+ { text: 'Thinking...', thought: true },
481
527
  ]),
482
528
  };
483
- // Simulate abort happening mid-stream
484
529
  abortController.abort();
485
- // The loop in callModel should break immediately due to signal check.
486
530
  })());
487
- const output = await executor.run({ goal: 'Termination test' }, signal);
531
+ const output = await executor.run({ goal: 'Abort test' }, signal);
488
532
  expect(output.terminate_reason).toBe(AgentTerminateMode.ABORTED);
489
533
  });
490
534
  });