@lobehub/lobehub 2.0.0-next.48 → 2.0.0-next.49

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. package/CHANGELOG.md +33 -0
  2. package/README.md +1 -1
  3. package/README.zh-CN.md +1 -1
  4. package/changelog/v1.json +12 -0
  5. package/locales/ar/chat.json +1 -0
  6. package/locales/ar/topic.json +1 -0
  7. package/locales/bg-BG/chat.json +1 -0
  8. package/locales/bg-BG/topic.json +1 -0
  9. package/locales/de-DE/chat.json +1 -0
  10. package/locales/de-DE/topic.json +1 -0
  11. package/locales/en-US/chat.json +1 -0
  12. package/locales/en-US/topic.json +1 -0
  13. package/locales/es-ES/chat.json +1 -0
  14. package/locales/es-ES/topic.json +1 -0
  15. package/locales/fa-IR/chat.json +1 -0
  16. package/locales/fa-IR/topic.json +1 -0
  17. package/locales/fr-FR/chat.json +1 -0
  18. package/locales/fr-FR/topic.json +1 -0
  19. package/locales/it-IT/chat.json +1 -0
  20. package/locales/it-IT/topic.json +1 -0
  21. package/locales/ja-JP/chat.json +1 -0
  22. package/locales/ja-JP/topic.json +1 -0
  23. package/locales/ko-KR/chat.json +1 -0
  24. package/locales/ko-KR/topic.json +1 -0
  25. package/locales/nl-NL/chat.json +1 -0
  26. package/locales/nl-NL/topic.json +1 -0
  27. package/locales/pl-PL/chat.json +1 -0
  28. package/locales/pl-PL/topic.json +1 -0
  29. package/locales/pt-BR/chat.json +1 -0
  30. package/locales/pt-BR/topic.json +1 -0
  31. package/locales/ru-RU/chat.json +1 -0
  32. package/locales/ru-RU/topic.json +1 -0
  33. package/locales/tr-TR/chat.json +1 -0
  34. package/locales/tr-TR/topic.json +1 -0
  35. package/locales/vi-VN/chat.json +1 -0
  36. package/locales/vi-VN/topic.json +1 -0
  37. package/locales/zh-CN/chat.json +1 -0
  38. package/locales/zh-CN/discover.json +1 -1
  39. package/locales/zh-CN/topic.json +1 -0
  40. package/locales/zh-TW/chat.json +1 -0
  41. package/locales/zh-TW/topic.json +1 -0
  42. package/package.json +9 -3
  43. package/packages/agent-runtime/src/core/InterventionChecker.ts +5 -16
  44. package/packages/agent-runtime/src/core/__tests__/InterventionChecker.test.ts +27 -80
  45. package/packages/agent-runtime/src/core/__tests__/runtime.test.ts +32 -13
  46. package/packages/agent-runtime/src/core/runtime.ts +7 -3
  47. package/packages/agent-runtime/src/types/event.ts +2 -1
  48. package/packages/agent-runtime/src/types/generalAgent.ts +1 -0
  49. package/packages/agent-runtime/src/types/instruction.ts +3 -2
  50. package/packages/agent-runtime/src/types/state.ts +3 -1
  51. package/packages/conversation-flow/src/transformation/FlatListBuilder.ts +4 -1
  52. package/packages/database/src/models/message.ts +3 -0
  53. package/packages/obervability-otel/src/node.ts +15 -1
  54. package/packages/types/src/message/common/base.ts +2 -2
  55. package/packages/types/src/message/common/tools.ts +16 -10
  56. package/packages/types/src/message/ui/chat.ts +7 -1
  57. package/packages/types/src/tool/intervention.ts +2 -3
  58. package/packages/types/src/user/settings/tool.ts +15 -28
  59. package/renovate.json +28 -11
  60. package/src/app/[variants]/(main)/chat/components/topic/features/Topic/TopicListContent/TopicItem/TopicContent.tsx +1 -1
  61. package/src/app/[variants]/(main)/chat/session/features/SessionListContent/List/Item/Actions.tsx +1 -1
  62. package/src/features/Conversation/Messages/Group/GroupChildren.tsx +20 -15
  63. package/src/features/Conversation/Messages/Group/GroupContext.tsx +15 -0
  64. package/src/features/Conversation/Messages/Group/Tool/Inspector/BuiltinPluginTitle.tsx +2 -4
  65. package/src/features/Conversation/Messages/Group/Tool/Inspector/ToolTitle.tsx +3 -5
  66. package/src/features/Conversation/Messages/Group/Tool/Inspector/index.tsx +19 -7
  67. package/src/features/Conversation/Messages/Group/Tool/Render/Arguments/index.tsx +14 -12
  68. package/src/features/Conversation/Messages/Group/Tool/Render/Intervention/ApprovalActions.tsx +143 -0
  69. package/src/features/Conversation/Messages/Group/Tool/Render/Intervention/KeyValueEditor.tsx +213 -0
  70. package/src/features/Conversation/Messages/Group/Tool/Render/Intervention/ModeSelector.tsx +134 -0
  71. package/src/features/Conversation/Messages/Group/Tool/Render/Intervention/index.tsx +99 -0
  72. package/src/features/Conversation/Messages/Group/Tool/Render/RejectedResponse.tsx +45 -0
  73. package/src/features/Conversation/Messages/Group/Tool/Render/index.tsx +23 -1
  74. package/src/features/Conversation/Messages/Group/Tool/index.tsx +42 -18
  75. package/src/features/Conversation/Messages/Group/Tools.tsx +3 -1
  76. package/src/locales/default/chat.ts +22 -0
  77. package/src/locales/default/common.ts +1 -0
  78. package/src/locales/default/topic.ts +1 -0
  79. package/src/server/routers/lambda/message.ts +4 -1
  80. package/src/server/services/message/index.ts +13 -0
  81. package/src/services/message/index.ts +17 -2
  82. package/src/store/chat/agents/GeneralChatAgent.ts +141 -24
  83. package/src/store/chat/agents/__tests__/GeneralChatAgent.test.ts +605 -0
  84. package/src/store/chat/agents/createAgentExecutors.ts +144 -26
  85. package/src/store/chat/agents/createToolEngine.ts +22 -0
  86. package/src/store/chat/slices/aiChat/actions/conversationControl.ts +106 -0
  87. package/src/store/chat/slices/aiChat/actions/streamingExecutor.ts +54 -26
  88. package/src/store/chat/slices/message/reducer.ts +2 -1
  89. package/src/store/chat/slices/plugin/actions/optimisticUpdate.ts +26 -1
  90. package/src/store/user/slices/settings/action.ts +15 -0
package/src/store/chat/agents/__tests__/GeneralChatAgent.test.ts
@@ -0,0 +1,605 @@
+import { AgentRuntimeContext, AgentState } from '@lobechat/agent-runtime';
+import { ChatToolPayload } from '@lobechat/types';
+import { describe, expect, it } from 'vitest';
+
+import { GeneralChatAgent } from '../GeneralChatAgent';
+
+describe('GeneralChatAgent', () => {
+  const mockModelRuntimeConfig = {
+    model: 'gpt-4o-mini',
+    provider: 'openai',
+  };
+
+  const createMockState = (overrides?: Partial<AgentState>): AgentState => ({
+    sessionId: 'test-session',
+    status: 'running',
+    messages: [],
+    toolManifestMap: {},
+    stepCount: 0,
+    usage: {
+      llm: { apiCalls: 0, processingTimeMs: 0, tokens: { input: 0, output: 0, total: 0 } },
+      tools: { totalCalls: 0, totalTimeMs: 0, byTool: [] },
+      humanInteraction: {
+        approvalRequests: 0,
+        promptRequests: 0,
+        selectRequests: 0,
+        totalWaitingTimeMs: 0,
+      },
+    },
+    cost: {
+      calculatedAt: new Date().toISOString(),
+      currency: 'USD',
+      llm: { byModel: [], currency: 'USD', total: 0 },
+      tools: { byTool: [], currency: 'USD', total: 0 },
+      total: 0,
+    },
+    createdAt: new Date().toISOString(),
+    lastModified: new Date().toISOString(),
+    ...overrides,
+  });
+
+  const createMockContext = (
+    phase: AgentRuntimeContext['phase'],
+    payload?: any,
+  ): AgentRuntimeContext => ({
+    phase,
+    payload,
+    session: {
+      sessionId: 'test-session',
+      messageCount: 0,
+      status: 'running',
+      stepCount: 0,
+    },
+  });
+
+  describe('init and user_input phase', () => {
+    it('should return call_llm instruction for init phase', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState({
+        messages: [{ role: 'user', content: 'Hello' }] as any,
+      });
+      const context = createMockContext('init', { model: 'gpt-4o-mini', provider: 'openai' });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'call_llm',
+        payload: {
+          messages: state.messages,
+          model: 'gpt-4o-mini',
+          provider: 'openai',
+        },
+      });
+    });
+
+    it('should return call_llm instruction for user_input phase', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState({
+        messages: [{ role: 'user', content: 'What is the weather?' }] as any,
+      });
+      const context = createMockContext('user_input', {
+        message: { role: 'user', content: 'What is the weather?' },
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'call_llm',
+        payload: {
+          messages: state.messages,
+          message: { role: 'user', content: 'What is the weather?' },
+        },
+      });
+    });
+  });
+
+  describe('llm_result phase', () => {
+    it('should return finish when no tool calls', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState();
+      const context = createMockContext('llm_result', {
+        hasToolsCalling: false,
+        toolsCalling: [],
+        parentMessageId: 'msg-1',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'finish',
+        reason: 'completed',
+        reasonDetail: 'LLM response completed without tool calls',
+      });
+    });
+
+    it('should return call_tool for single tool that does not need intervention', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const toolCall: ChatToolPayload = {
+        id: 'call-1',
+        identifier: 'test-plugin',
+        apiName: 'test-api',
+        arguments: '{}',
+        type: 'default',
+      };
+
+      const state = createMockState({
+        toolManifestMap: {
+          'test-plugin': {
+            identifier: 'test-plugin',
+            // No humanIntervention config = no intervention needed
+          },
+        },
+      });
+
+      const context = createMockContext('llm_result', {
+        hasToolsCalling: true,
+        toolsCalling: [toolCall],
+        parentMessageId: 'msg-1',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual([
+        {
+          type: 'call_tool',
+          payload: {
+            parentMessageId: 'msg-1',
+            toolCalling: toolCall,
+          },
+        },
+      ]);
+    });
+
+    it('should return call_tools_batch for multiple tools that do not need intervention', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const toolCalls: ChatToolPayload[] = [
+        {
+          id: 'call-1',
+          identifier: 'plugin-1',
+          apiName: 'api-1',
+          arguments: '{}',
+          type: 'default',
+        },
+        {
+          id: 'call-2',
+          identifier: 'plugin-2',
+          apiName: 'api-2',
+          arguments: '{}',
+          type: 'default',
+        },
+      ];
+
+      const state = createMockState({
+        toolManifestMap: {
+          'plugin-1': { identifier: 'plugin-1' },
+          'plugin-2': { identifier: 'plugin-2' },
+        },
+      });
+
+      const context = createMockContext('llm_result', {
+        hasToolsCalling: true,
+        toolsCalling: toolCalls,
+        parentMessageId: 'msg-1',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual([
+        {
+          type: 'call_tools_batch',
+          payload: {
+            parentMessageId: 'msg-1',
+            toolsCalling: toolCalls,
+          },
+        },
+      ]);
+    });
+
+    it('should return request_human_approve for tools requiring intervention', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const toolCall: ChatToolPayload = {
+        id: 'call-1',
+        identifier: 'dangerous-plugin',
+        apiName: 'delete-api',
+        arguments: '{}',
+        type: 'default',
+      };
+
+      const state = createMockState({
+        toolManifestMap: {
+          'dangerous-plugin': {
+            identifier: 'dangerous-plugin',
+            humanIntervention: 'require', // Always require approval
+          },
+        },
+      });
+
+      const context = createMockContext('llm_result', {
+        hasToolsCalling: true,
+        toolsCalling: [toolCall],
+        parentMessageId: 'msg-1',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual([
+        {
+          type: 'request_human_approve',
+          pendingToolsCalling: [toolCall],
+          reason: 'human_intervention_required',
+        },
+      ]);
+    });
+
+    it('should return both call_tools_batch and request_human_approve for mixed tools', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const safeTool: ChatToolPayload = {
+        id: 'call-1',
+        identifier: 'safe-plugin',
+        apiName: 'read-api',
+        arguments: '{}',
+        type: 'default',
+      };
+
+      const dangerousTool: ChatToolPayload = {
+        id: 'call-2',
+        identifier: 'dangerous-plugin',
+        apiName: 'delete-api',
+        arguments: '{}',
+        type: 'default',
+      };
+
+      const state = createMockState({
+        toolManifestMap: {
+          'safe-plugin': {
+            identifier: 'safe-plugin',
+            // No intervention
+          },
+          'dangerous-plugin': {
+            identifier: 'dangerous-plugin',
+            humanIntervention: 'require',
+          },
+        },
+      });
+
+      const context = createMockContext('llm_result', {
+        hasToolsCalling: true,
+        toolsCalling: [safeTool, dangerousTool],
+        parentMessageId: 'msg-1',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual([
+        {
+          type: 'call_tool',
+          payload: {
+            parentMessageId: 'msg-1',
+            toolCalling: safeTool,
+          },
+        },
+        {
+          type: 'request_human_approve',
+          pendingToolsCalling: [dangerousTool],
+          reason: 'human_intervention_required',
+        },
+      ]);
+    });
+  });
+
+  describe('tool_result phase', () => {
+    it('should return call_llm when no pending tools', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState({
+        messages: [
+          { role: 'user', content: 'Hello' },
+          { role: 'assistant', content: '', tools: [] },
+          { role: 'tool', content: 'Result', tool_call_id: 'call-1' },
+        ] as any,
+      });
+
+      const context = createMockContext('tool_result', {
+        parentMessageId: 'tool-msg-1',
+        result: { data: 'result' },
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'call_llm',
+        payload: {
+          messages: state.messages,
+          model: 'gpt-4o-mini',
+          parentMessageId: 'tool-msg-1',
+          provider: 'openai',
+          tools: undefined,
+        },
+      });
+    });
+
+    it('should return request_human_approve when there are pending tools', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const pendingPlugin: ChatToolPayload = {
+        id: 'call-2',
+        identifier: 'plugin-2',
+        apiName: 'api-2',
+        arguments: '{}',
+        type: 'default',
+      };
+
+      const state = createMockState({
+        messages: [
+          { role: 'user', content: 'Hello' },
+          { role: 'assistant', content: '', tools: [] },
+          { role: 'tool', content: 'Result', tool_call_id: 'call-1' },
+          {
+            role: 'tool',
+            content: '',
+            tool_call_id: 'call-2',
+            plugin: pendingPlugin,
+            pluginIntervention: { status: 'pending' },
+          },
+        ] as any,
+      });
+
+      const context = createMockContext('tool_result', {
+        parentMessageId: 'tool-msg-1',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'request_human_approve',
+        pendingToolsCalling: [pendingPlugin],
+        reason: 'Some tools still pending approval',
+        skipCreateToolMessage: true,
+      });
+    });
+  });
+
+  describe('tools_batch_result phase', () => {
+    it('should return call_llm when no pending tools', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState({
+        messages: [
+          { role: 'user', content: 'Hello' },
+          { role: 'assistant', content: '', tools: [] },
+          { role: 'tool', content: 'Result 1', tool_call_id: 'call-1' },
+          { role: 'tool', content: 'Result 2', tool_call_id: 'call-2' },
+        ] as any,
+      });
+
+      const context = createMockContext('tools_batch_result', {
+        parentMessageId: 'tool-msg-2',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'call_llm',
+        payload: {
+          messages: state.messages,
+          model: 'gpt-4o-mini',
+          parentMessageId: 'tool-msg-2',
+          provider: 'openai',
+          tools: undefined,
+        },
+      });
+    });
+
+    it('should return request_human_approve when there are pending tools', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const pendingPlugin: ChatToolPayload = {
+        id: 'call-3',
+        identifier: 'plugin-3',
+        apiName: 'api-3',
+        arguments: '{}',
+        type: 'default',
+      };
+
+      const state = createMockState({
+        messages: [
+          { role: 'user', content: 'Hello' },
+          { role: 'assistant', content: '', tools: [] },
+          { role: 'tool', content: 'Result 1', tool_call_id: 'call-1' },
+          { role: 'tool', content: 'Result 2', tool_call_id: 'call-2' },
+          {
+            role: 'tool',
+            content: '',
+            tool_call_id: 'call-3',
+            plugin: pendingPlugin,
+            pluginIntervention: { status: 'pending' },
+          },
+        ] as any,
+      });
+
+      const context = createMockContext('tools_batch_result', {
+        parentMessageId: 'tool-msg-2',
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'request_human_approve',
+        pendingToolsCalling: [pendingPlugin],
+        reason: 'Some tools still pending approval',
+        skipCreateToolMessage: true,
+      });
+    });
+  });
+
+  describe('error phase', () => {
+    it('should return finish instruction with error details', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState();
+      const errorMessage = 'Network timeout';
+      const context = createMockContext('error', {
+        error: new Error(errorMessage),
+      });
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'finish',
+        reason: 'error_recovery',
+        reasonDetail: errorMessage,
+      });
+    });
+
+    it('should handle error without message', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState();
+      const context = createMockContext('error', { error: {} });
+
+      const result = await agent.runner(context, state);

+      expect(result).toEqual({
+        type: 'finish',
+        reason: 'error_recovery',
+        reasonDetail: 'Unknown error occurred',
+      });
+    });
+  });
+
+  describe('unknown phase', () => {
+    it('should return finish instruction for unknown phase', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const state = createMockState();
+      const context = createMockContext('unknown_phase' as any);
+
+      const result = await agent.runner(context, state);
+
+      expect(result).toEqual({
+        type: 'finish',
+        reason: 'agent_decision',
+        reasonDetail: 'Unknown phase: unknown_phase',
+      });
+    });
+  });
+
+  describe('intervention checking', () => {
+    it('should check intervention at API level when configured', async () => {
+      const agent = new GeneralChatAgent({
+        agentConfig: { maxSteps: 100 },
+        sessionId: 'test-session',
+        modelRuntimeConfig: mockModelRuntimeConfig,
+      });
+
+      const toolCall: ChatToolPayload = {
+        id: 'call-1',
+        identifier: 'plugin',
+        apiName: 'dangerous-api',
+        arguments: '{}',
+        type: 'default',
+      };
+
+      const state = createMockState({
+        toolManifestMap: {
+          plugin: {
+            identifier: 'plugin',
+            // Tool-level config
+            humanIntervention: 'never',
+            api: [
+              {
+                name: 'safe-api',
+                // Safe API
+              },
+              {
+                name: 'dangerous-api',
+                // API-level config overrides tool-level
+                humanIntervention: 'require',
+              },
+            ],
+          },
+        },
+      });
+
+      const context = createMockContext('llm_result', {
+        hasToolsCalling: true,
+        toolsCalling: [toolCall],
+        parentMessageId: 'msg-1',
+      });
+
+      const result = await agent.runner(context, state);
+
+      // Should require approval because API-level config overrides
+      expect(result).toEqual([
+        {
+          type: 'request_human_approve',
+          pendingToolsCalling: [toolCall],
+          reason: 'human_intervention_required',
+        },
+      ]);
+    });
+  });
+});