@lobehub/chat 1.141.7 → 1.141.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128):
  1. package/CHANGELOG.md +50 -0
  2. package/apps/desktop/package.json +1 -0
  3. package/apps/desktop/src/main/controllers/LocalFileCtr.ts +279 -52
  4. package/apps/desktop/src/main/controllers/__tests__/LocalFileCtr.test.ts +392 -0
  5. package/changelog/v1.json +18 -0
  6. package/docs/usage/features/{group-chat.mdx → agent-team.mdx} +14 -14
  7. package/docs/usage/features/agent-team.zh-CN.mdx +52 -0
  8. package/locales/ar/chat.json +17 -17
  9. package/locales/ar/setting.json +15 -19
  10. package/locales/ar/welcome.json +1 -1
  11. package/locales/bg-BG/chat.json +17 -17
  12. package/locales/bg-BG/setting.json +15 -19
  13. package/locales/de-DE/chat.json +17 -17
  14. package/locales/de-DE/setting.json +15 -19
  15. package/locales/de-DE/welcome.json +1 -1
  16. package/locales/en-US/chat.json +17 -17
  17. package/locales/en-US/setting.json +15 -19
  18. package/locales/en-US/welcome.json +1 -1
  19. package/locales/es-ES/chat.json +17 -17
  20. package/locales/es-ES/setting.json +15 -19
  21. package/locales/es-ES/welcome.json +1 -1
  22. package/locales/fa-IR/chat.json +17 -17
  23. package/locales/fa-IR/setting.json +15 -19
  24. package/locales/fa-IR/welcome.json +1 -1
  25. package/locales/fr-FR/chat.json +16 -16
  26. package/locales/fr-FR/setting.json +15 -19
  27. package/locales/fr-FR/welcome.json +1 -1
  28. package/locales/it-IT/chat.json +17 -17
  29. package/locales/it-IT/setting.json +15 -19
  30. package/locales/it-IT/welcome.json +1 -1
  31. package/locales/ja-JP/chat.json +17 -17
  32. package/locales/ja-JP/setting.json +15 -19
  33. package/locales/ja-JP/welcome.json +1 -1
  34. package/locales/ko-KR/chat.json +17 -17
  35. package/locales/ko-KR/setting.json +15 -19
  36. package/locales/ko-KR/welcome.json +1 -1
  37. package/locales/nl-NL/chat.json +17 -17
  38. package/locales/nl-NL/setting.json +15 -19
  39. package/locales/nl-NL/welcome.json +1 -1
  40. package/locales/pl-PL/chat.json +17 -17
  41. package/locales/pl-PL/setting.json +15 -19
  42. package/locales/pt-BR/chat.json +17 -17
  43. package/locales/pt-BR/setting.json +15 -19
  44. package/locales/pt-BR/welcome.json +1 -1
  45. package/locales/ru-RU/chat.json +17 -17
  46. package/locales/ru-RU/setting.json +15 -19
  47. package/locales/ru-RU/welcome.json +1 -1
  48. package/locales/tr-TR/chat.json +17 -17
  49. package/locales/tr-TR/setting.json +15 -19
  50. package/locales/vi-VN/chat.json +15 -15
  51. package/locales/vi-VN/setting.json +15 -19
  52. package/locales/zh-CN/chat.json +17 -17
  53. package/locales/zh-CN/setting.json +15 -19
  54. package/locales/zh-CN/welcome.json +1 -1
  55. package/locales/zh-TW/chat.json +17 -17
  56. package/locales/zh-TW/setting.json +15 -19
  57. package/locales/zh-TW/welcome.json +1 -1
  58. package/package.json +1 -1
  59. package/packages/agent-runtime/src/core/InterventionChecker.ts +173 -0
  60. package/packages/agent-runtime/src/core/UsageCounter.ts +248 -0
  61. package/packages/agent-runtime/src/core/__tests__/InterventionChecker.test.ts +334 -0
  62. package/packages/agent-runtime/src/core/__tests__/UsageCounter.test.ts +873 -0
  63. package/packages/agent-runtime/src/core/__tests__/runtime.test.ts +32 -26
  64. package/packages/agent-runtime/src/core/index.ts +2 -0
  65. package/packages/agent-runtime/src/core/runtime.ts +31 -18
  66. package/packages/agent-runtime/src/types/instruction.ts +1 -1
  67. package/packages/agent-runtime/src/types/state.ts +3 -3
  68. package/packages/agent-runtime/src/types/usage.ts +34 -25
  69. package/packages/const/src/settings/systemAgent.ts +0 -1
  70. package/packages/context-engine/src/index.ts +1 -0
  71. package/packages/context-engine/src/tools/ToolNameResolver.ts +2 -2
  72. package/packages/context-engine/src/tools/ToolsEngine.ts +37 -8
  73. package/packages/context-engine/src/tools/__tests__/ToolsEngine.test.ts +149 -5
  74. package/packages/context-engine/src/tools/__tests__/utils.test.ts +2 -2
  75. package/packages/context-engine/src/tools/index.ts +1 -0
  76. package/packages/context-engine/src/tools/types.ts +18 -3
  77. package/packages/context-engine/src/tools/utils.ts +4 -4
  78. package/packages/types/src/tool/builtin.ts +54 -1
  79. package/packages/types/src/tool/index.ts +1 -0
  80. package/packages/types/src/tool/intervention.ts +114 -0
  81. package/packages/types/src/user/settings/systemAgent.ts +0 -1
  82. package/packages/types/src/user/settings/tool.ts +37 -0
  83. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/OrchestratorThinking.tsx +2 -3
  84. package/src/app/[variants]/(main)/chat/(workspace)/@conversation/features/ChatList/ChatItem/index.tsx +2 -2
  85. package/src/app/[variants]/(main)/chat/(workspace)/@topic/features/GroupConfig/GroupMember.tsx +34 -2
  86. package/src/app/[variants]/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +1 -1
  87. package/src/app/[variants]/(main)/chat/(workspace)/features/{GroupChatSettings → AgentTeamSettings}/index.tsx +4 -5
  88. package/src/app/[variants]/(main)/chat/(workspace)/features/SettingButton.tsx +2 -2
  89. package/src/app/[variants]/(main)/chat/@session/_layout/Desktop/SessionHeader.tsx +2 -0
  90. package/src/app/[variants]/(main)/chat/@session/features/SessionListContent/CollapseGroup/Actions.tsx +18 -1
  91. package/src/components/ChatGroupWizard/ChatGroupWizard.tsx +33 -5
  92. package/src/components/MemberSelectionModal/MemberSelectionModal.tsx +170 -26
  93. package/src/features/Conversation/Messages/Assistant/Actions/index.tsx +7 -2
  94. package/src/features/Conversation/Messages/Assistant/Tool/Render/index.tsx +4 -2
  95. package/src/features/Conversation/Messages/User/Actions.tsx +8 -2
  96. package/src/features/GroupChatSettings/{ChatGroupSettings.tsx → AgentTeamChatSettings.tsx} +6 -5
  97. package/src/features/GroupChatSettings/{GroupMembers.tsx → AgentTeamMembersSettings.tsx} +64 -19
  98. package/src/features/GroupChatSettings/{ChatGroupMeta.tsx → AgentTeamMetaSettings.tsx} +2 -2
  99. package/src/features/GroupChatSettings/AgentTeamSettings.tsx +54 -0
  100. package/src/features/GroupChatSettings/index.ts +4 -5
  101. package/src/locales/default/chat.ts +17 -17
  102. package/src/locales/default/setting.ts +15 -19
  103. package/src/locales/default/welcome.ts +1 -1
  104. package/src/store/chat/slices/aiChat/actions/generateAIGroupChat.ts +2 -1
  105. package/src/store/chat/slices/builtinTool/actions/{dalle.test.ts → __tests__/dalle.test.ts} +2 -5
  106. package/src/store/chat/slices/builtinTool/actions/__tests__/{localFile.test.ts → localSystem.test.ts} +4 -4
  107. package/src/store/chat/slices/builtinTool/actions/index.ts +2 -2
  108. package/src/store/chat/slices/builtinTool/actions/{localFile.ts → localSystem.ts} +183 -69
  109. package/src/store/chatGroup/action.ts +36 -1
  110. package/src/store/electron/selectors/__tests__/desktopState.test.ts +3 -3
  111. package/src/store/electron/selectors/desktopState.ts +11 -2
  112. package/src/store/user/slices/settings/selectors/__snapshots__/settings.test.ts.snap +0 -4
  113. package/src/store/user/slices/settings/selectors/systemAgent.ts +0 -2
  114. package/src/tools/local-system/Placeholder/ListFiles.tsx +10 -8
  115. package/src/tools/local-system/Placeholder/SearchFiles.tsx +12 -10
  116. package/src/tools/local-system/Placeholder/index.tsx +1 -1
  117. package/src/tools/local-system/Render/ReadLocalFile/ReadFileSkeleton.tsx +8 -18
  118. package/src/tools/local-system/Render/ReadLocalFile/ReadFileView.tsx +21 -6
  119. package/src/tools/local-system/Render/SearchFiles/Result.tsx +5 -4
  120. package/src/tools/local-system/Render/SearchFiles/SearchQuery/SearchView.tsx +4 -15
  121. package/src/tools/local-system/Render/SearchFiles/index.tsx +3 -2
  122. package/src/tools/local-system/type.ts +39 -0
  123. package/docs/usage/features/group-chat.zh-CN.mdx +0 -52
  124. package/src/features/GroupChatSettings/GroupSettings.tsx +0 -30
  125. package/src/features/GroupChatSettings/GroupSettingsContent.tsx +0 -24
  126. package/src/tools/local-system/Placeholder/ReadLocalFile.tsx +0 -9
  127. package/src/tools/local-system/Render/ReadLocalFile/style.ts +0 -37
  128. /package/src/store/chat/slices/builtinTool/actions/{search.test.ts → __tests__/search.test.ts} +0 -0
@@ -0,0 +1,873 @@
1
+ import { ModelUsage } from '@lobechat/types';
2
+ import { describe, expect, it } from 'vitest';
3
+
4
+ import { UsageCounter } from '../UsageCounter';
5
+ import { AgentRuntime } from '../runtime';
6
+
7
+ describe('UsageCounter', () => {
8
+ describe('UsageCounter.accumulateLLM', () => {
9
+ it('should accumulate LLM usage tokens', () => {
10
+ const state = AgentRuntime.createInitialState();
11
+
12
+ const modelUsage: ModelUsage = {
13
+ totalInputTokens: 100,
14
+ totalOutputTokens: 50,
15
+ totalTokens: 150,
16
+ };
17
+
18
+ const { usage } = UsageCounter.accumulateLLM({
19
+ cost: state.cost,
20
+ model: 'gpt-4',
21
+ modelUsage,
22
+ provider: 'openai',
23
+ usage: state.usage,
24
+ });
25
+
26
+ expect(usage.llm.tokens.input).toBe(100);
27
+ expect(usage.llm.tokens.output).toBe(50);
28
+ expect(usage.llm.tokens.total).toBe(150);
29
+ expect(usage.llm.apiCalls).toBe(1);
30
+ });
31
+
32
+ it('should not mutate original usage', () => {
33
+ const state = AgentRuntime.createInitialState();
34
+
35
+ const modelUsage: ModelUsage = {
36
+ totalInputTokens: 100,
37
+ totalOutputTokens: 50,
38
+ totalTokens: 150,
39
+ };
40
+
41
+ const { usage } = UsageCounter.accumulateLLM({
42
+ cost: state.cost,
43
+ model: 'gpt-4',
44
+ modelUsage: modelUsage,
45
+ provider: 'openai',
46
+ usage: state.usage,
47
+ });
48
+
49
+ expect(state.usage.llm.tokens.input).toBe(0);
50
+ expect(usage).not.toBe(state.usage);
51
+ });
52
+
53
+ it('should create new byModel entry when not exists', () => {
54
+ const state = AgentRuntime.createInitialState();
55
+
56
+ const modelUsage: ModelUsage = {
57
+ cost: 0.05,
58
+ totalInputTokens: 100,
59
+ totalOutputTokens: 50,
60
+ totalTokens: 150,
61
+ };
62
+
63
+ const { cost } = UsageCounter.accumulateLLM({
64
+ cost: state.cost,
65
+ model: 'gpt-4',
66
+ modelUsage: modelUsage,
67
+ provider: 'openai',
68
+ usage: state.usage,
69
+ });
70
+
71
+ expect(cost?.llm.byModel).toHaveLength(1);
72
+ expect(cost?.llm.byModel[0]).toEqual({
73
+ id: 'openai/gpt-4',
74
+ model: 'gpt-4',
75
+ provider: 'openai',
76
+ totalCost: 0.05,
77
+ usage: {
78
+ cost: 0.05,
79
+ totalInputTokens: 100,
80
+ totalOutputTokens: 50,
81
+ totalTokens: 150,
82
+ },
83
+ });
84
+ });
85
+
86
+ it('should accumulate to existing byModel entry', () => {
87
+ const state = AgentRuntime.createInitialState();
88
+
89
+ const usage1: ModelUsage = {
90
+ cost: 0.05,
91
+ totalInputTokens: 100,
92
+ totalOutputTokens: 50,
93
+ totalTokens: 150,
94
+ };
95
+
96
+ const usage2: ModelUsage = {
97
+ cost: 0.03,
98
+ totalInputTokens: 50,
99
+ totalOutputTokens: 25,
100
+ totalTokens: 75,
101
+ };
102
+
103
+ const result1 = UsageCounter.accumulateLLM({
104
+ cost: state.cost,
105
+ model: 'gpt-4',
106
+ modelUsage: usage1,
107
+ provider: 'openai',
108
+ usage: state.usage,
109
+ });
110
+ const result2 = UsageCounter.accumulateLLM({
111
+ cost: result1.cost,
112
+ model: 'gpt-4',
113
+ modelUsage: usage2,
114
+ provider: 'openai',
115
+ usage: result1.usage,
116
+ });
117
+
118
+ expect(result2.cost?.llm.byModel).toHaveLength(1);
119
+ expect(result2.cost?.llm.byModel[0]).toEqual({
120
+ id: 'openai/gpt-4',
121
+ model: 'gpt-4',
122
+ provider: 'openai',
123
+ totalCost: 0.08,
124
+ usage: {
125
+ cost: 0.08,
126
+ totalInputTokens: 150,
127
+ totalOutputTokens: 75,
128
+ totalTokens: 225,
129
+ },
130
+ });
131
+ });
132
+
133
+ it('should accumulate multiple models separately', () => {
134
+ const state = AgentRuntime.createInitialState();
135
+
136
+ const usage1: ModelUsage = {
137
+ cost: 0.05,
138
+ totalInputTokens: 100,
139
+ totalOutputTokens: 50,
140
+ totalTokens: 150,
141
+ };
142
+
143
+ const usage2: ModelUsage = {
144
+ cost: 0.02,
145
+ totalInputTokens: 50,
146
+ totalOutputTokens: 25,
147
+ totalTokens: 75,
148
+ };
149
+
150
+ const result1 = UsageCounter.accumulateLLM({
151
+ cost: state.cost,
152
+ model: 'gpt-4',
153
+ modelUsage: usage1,
154
+ provider: 'openai',
155
+ usage: state.usage,
156
+ });
157
+ const result2 = UsageCounter.accumulateLLM({
158
+ cost: result1.cost,
159
+ model: 'claude-3-5-sonnet-20241022',
160
+ modelUsage: usage2,
161
+ provider: 'anthropic',
162
+ usage: result1.usage,
163
+ });
164
+
165
+ expect(result2.cost?.llm.byModel).toHaveLength(2);
166
+ expect(result2.cost?.llm.byModel[0].id).toBe('openai/gpt-4');
167
+ expect(result2.cost?.llm.byModel[1].id).toBe('anthropic/claude-3-5-sonnet-20241022');
168
+ });
169
+
170
+ it('should accumulate cache-related tokens', () => {
171
+ const state = AgentRuntime.createInitialState();
172
+
173
+ const modelUsage: ModelUsage = {
174
+ cost: 0.05,
175
+ inputCacheMissTokens: 60,
176
+ inputCachedTokens: 40,
177
+ inputWriteCacheTokens: 20,
178
+ totalInputTokens: 100,
179
+ totalOutputTokens: 50,
180
+ totalTokens: 150,
181
+ };
182
+
183
+ const { cost } = UsageCounter.accumulateLLM({
184
+ cost: state.cost,
185
+ model: 'claude-3-5-sonnet-20241022',
186
+ modelUsage: modelUsage,
187
+ provider: 'anthropic',
188
+ usage: state.usage,
189
+ });
190
+
191
+ expect(cost?.llm.byModel[0].usage).toEqual({
192
+ cost: 0.05,
193
+ inputCacheMissTokens: 60,
194
+ inputCachedTokens: 40,
195
+ inputWriteCacheTokens: 20,
196
+ totalInputTokens: 100,
197
+ totalOutputTokens: 50,
198
+ totalTokens: 150,
199
+ });
200
+ });
201
+
202
+ it('should accumulate total costs correctly', () => {
203
+ const state = AgentRuntime.createInitialState();
204
+
205
+ const usage1: ModelUsage = {
206
+ cost: 0.05,
207
+ totalInputTokens: 100,
208
+ totalOutputTokens: 50,
209
+ totalTokens: 150,
210
+ };
211
+
212
+ const usage2: ModelUsage = {
213
+ cost: 0.03,
214
+ totalInputTokens: 50,
215
+ totalOutputTokens: 25,
216
+ totalTokens: 75,
217
+ };
218
+
219
+ const result1 = UsageCounter.accumulateLLM({
220
+ cost: state.cost,
221
+ model: 'gpt-4',
222
+ modelUsage: usage1,
223
+ provider: 'openai',
224
+ usage: state.usage,
225
+ });
226
+ const result2 = UsageCounter.accumulateLLM({
227
+ cost: result1.cost,
228
+ model: 'claude-3-5-sonnet-20241022',
229
+ modelUsage: usage2,
230
+ provider: 'anthropic',
231
+ usage: result1.usage,
232
+ });
233
+
234
+ expect(result2.cost?.llm.total).toBe(0.08);
235
+ expect(result2.cost?.total).toBe(0.08);
236
+ expect(result2.cost?.calculatedAt).toBeDefined();
237
+ });
238
+
239
+ it('should not accumulate cost when usage.cost is undefined', () => {
240
+ const state = AgentRuntime.createInitialState();
241
+
242
+ const modelUsage: ModelUsage = {
243
+ totalInputTokens: 100,
244
+ totalOutputTokens: 50,
245
+ totalTokens: 150,
246
+ };
247
+
248
+ const { cost } = UsageCounter.accumulateLLM({
249
+ cost: state.cost,
250
+ model: 'gpt-4',
251
+ modelUsage: modelUsage,
252
+ provider: 'openai',
253
+ usage: state.usage,
254
+ });
255
+
256
+ expect(cost?.llm.byModel).toHaveLength(0);
257
+ expect(cost?.llm.total).toBe(0);
258
+ expect(cost?.total).toBe(0);
259
+ });
260
+
261
+ it('should increment apiCalls for each accumulation', () => {
262
+ const state = AgentRuntime.createInitialState();
263
+
264
+ const modelUsage: ModelUsage = {
265
+ totalInputTokens: 100,
266
+ totalOutputTokens: 50,
267
+ totalTokens: 150,
268
+ };
269
+
270
+ const result1 = UsageCounter.accumulateLLM({
271
+ cost: state.cost,
272
+ model: 'gpt-4',
273
+ modelUsage: modelUsage,
274
+ provider: 'openai',
275
+ usage: state.usage,
276
+ });
277
+ const result2 = UsageCounter.accumulateLLM({
278
+ cost: result1.cost,
279
+ model: 'gpt-4',
280
+ modelUsage: modelUsage,
281
+ provider: 'openai',
282
+ usage: result1.usage,
283
+ });
284
+ const result3 = UsageCounter.accumulateLLM({
285
+ cost: result2.cost,
286
+ model: 'claude-3-5-sonnet-20241022',
287
+ modelUsage: modelUsage,
288
+ provider: 'anthropic',
289
+ usage: result2.usage,
290
+ });
291
+
292
+ expect(result3.usage.llm.apiCalls).toBe(3);
293
+ });
294
+
295
+ it('should auto-create usage and cost when not provided', () => {
296
+ const modelUsage: ModelUsage = {
297
+ cost: 0.05,
298
+ totalInputTokens: 100,
299
+ totalOutputTokens: 50,
300
+ totalTokens: 150,
301
+ };
302
+
303
+ const { usage, cost } = UsageCounter.accumulateLLM({
304
+ model: 'gpt-4',
305
+ modelUsage,
306
+ provider: 'openai',
307
+ });
308
+
309
+ expect(usage).toBeDefined();
310
+ expect(usage.llm.tokens.input).toBe(100);
311
+ expect(usage.llm.tokens.output).toBe(50);
312
+ expect(usage.llm.tokens.total).toBe(150);
313
+ expect(usage.llm.apiCalls).toBe(1);
314
+
315
+ expect(cost).toBeDefined();
316
+ expect(cost?.total).toBe(0.05);
317
+ expect(cost?.llm.total).toBe(0.05);
318
+ });
319
+ });
320
+
321
+ describe('UsageCounter.accumulateTool', () => {
322
+ it('should accumulate tool usage', () => {
323
+ const state = AgentRuntime.createInitialState();
324
+
325
+ const { usage } = UsageCounter.accumulateTool({
326
+ cost: state.cost,
327
+ executionTime: 1000,
328
+ success: true,
329
+ toolName: 'search',
330
+ usage: state.usage,
331
+ });
332
+
333
+ expect(usage.tools.byTool).toHaveLength(1);
334
+ expect(usage.tools.byTool[0]).toEqual({
335
+ calls: 1,
336
+ errors: 0,
337
+ name: 'search',
338
+ totalTimeMs: 1000,
339
+ });
340
+ expect(usage.tools.totalCalls).toBe(1);
341
+ expect(usage.tools.totalTimeMs).toBe(1000);
342
+ });
343
+
344
+ it('should not mutate original usage', () => {
345
+ const state = AgentRuntime.createInitialState();
346
+
347
+ const { usage } = UsageCounter.accumulateTool({
348
+ cost: state.cost,
349
+ executionTime: 1000,
350
+ success: true,
351
+ toolName: 'search',
352
+ usage: state.usage,
353
+ });
354
+
355
+ expect(state.usage.tools.totalCalls).toBe(0);
356
+ expect(usage).not.toBe(state.usage);
357
+ });
358
+
359
+ it('should accumulate errors when success is false', () => {
360
+ const state = AgentRuntime.createInitialState();
361
+
362
+ const { usage } = UsageCounter.accumulateTool({
363
+ cost: state.cost,
364
+ executionTime: 1000,
365
+ success: false,
366
+ toolName: 'search',
367
+ usage: state.usage,
368
+ });
369
+
370
+ expect(usage.tools.byTool[0]).toEqual({
371
+ calls: 1,
372
+ errors: 1,
373
+ name: 'search',
374
+ totalTimeMs: 1000,
375
+ });
376
+ });
377
+
378
+ it('should accumulate multiple tool calls', () => {
379
+ const state = AgentRuntime.createInitialState();
380
+
381
+ const result1 = UsageCounter.accumulateTool({
382
+ cost: state.cost,
383
+ executionTime: 1000,
384
+ success: true,
385
+ toolName: 'search',
386
+ usage: state.usage,
387
+ });
388
+ const result2 = UsageCounter.accumulateTool({
389
+ cost: result1.cost,
390
+ executionTime: 500,
391
+ success: true,
392
+ toolName: 'search',
393
+ usage: result1.usage,
394
+ });
395
+ const result3 = UsageCounter.accumulateTool({
396
+ cost: result2.cost,
397
+ executionTime: 200,
398
+ success: false,
399
+ toolName: 'calculator',
400
+ usage: result2.usage,
401
+ });
402
+
403
+ expect(result3.usage.tools.byTool).toHaveLength(2);
404
+ expect(result3.usage.tools.byTool.find((t) => t.name === 'search')).toEqual({
405
+ calls: 2,
406
+ errors: 0,
407
+ name: 'search',
408
+ totalTimeMs: 1500,
409
+ });
410
+ expect(result3.usage.tools.byTool.find((t) => t.name === 'calculator')).toEqual({
411
+ calls: 1,
412
+ errors: 1,
413
+ name: 'calculator',
414
+ totalTimeMs: 200,
415
+ });
416
+ expect(result3.usage.tools.totalCalls).toBe(3);
417
+ expect(result3.usage.tools.totalTimeMs).toBe(1700);
418
+ });
419
+
420
+ it('should accumulate tool cost when provided', () => {
421
+ const state = AgentRuntime.createInitialState();
422
+
423
+ const { cost } = UsageCounter.accumulateTool({
424
+ cost: state.cost,
425
+ executionTime: 1000,
426
+ success: true,
427
+ toolCost: 0.01,
428
+ toolName: 'premium-search',
429
+ usage: state.usage,
430
+ });
431
+
432
+ expect(cost?.tools.byTool).toHaveLength(1);
433
+ expect(cost?.tools.byTool[0]).toEqual({
434
+ calls: 1,
435
+ currency: 'USD',
436
+ name: 'premium-search',
437
+ totalCost: 0.01,
438
+ });
439
+ expect(cost?.tools.total).toBe(0.01);
440
+ expect(cost?.total).toBe(0.01);
441
+ });
442
+
443
+ it('should accumulate tool cost across multiple calls', () => {
444
+ const state = AgentRuntime.createInitialState();
445
+
446
+ const result1 = UsageCounter.accumulateTool({
447
+ cost: state.cost,
448
+ executionTime: 1000,
449
+ success: true,
450
+ toolCost: 0.01,
451
+ toolName: 'premium-search',
452
+ usage: state.usage,
453
+ });
454
+ const result2 = UsageCounter.accumulateTool({
455
+ cost: result1.cost,
456
+ executionTime: 500,
457
+ success: true,
458
+ toolCost: 0.005,
459
+ toolName: 'premium-search',
460
+ usage: result1.usage,
461
+ });
462
+
463
+ expect(result2.cost?.tools.byTool).toHaveLength(1);
464
+ expect(result2.cost?.tools.byTool[0]).toEqual({
465
+ calls: 2,
466
+ currency: 'USD',
467
+ name: 'premium-search',
468
+ totalCost: 0.015,
469
+ });
470
+ expect(result2.cost?.tools.total).toBe(0.015);
471
+ expect(result2.cost?.total).toBe(0.015);
472
+ });
473
+
474
+ it('should not accumulate cost when cost is undefined', () => {
475
+ const state = AgentRuntime.createInitialState();
476
+
477
+ const { cost } = UsageCounter.accumulateTool({
478
+ cost: state.cost,
479
+ executionTime: 1000,
480
+ success: true,
481
+ toolName: 'free-tool',
482
+ usage: state.usage,
483
+ });
484
+
485
+ expect(cost?.tools.byTool).toHaveLength(0);
486
+ expect(cost?.tools.total).toBe(0);
487
+ });
488
+ });
489
+
490
+ describe('mixed accumulation', () => {
491
+ it('should accumulate both LLM and tool costs correctly', () => {
492
+ const state = AgentRuntime.createInitialState();
493
+
494
+ const llmUsage: ModelUsage = {
495
+ cost: 0.05,
496
+ totalInputTokens: 100,
497
+ totalOutputTokens: 50,
498
+ totalTokens: 150,
499
+ };
500
+
501
+ const result1 = UsageCounter.accumulateLLM({
502
+ cost: state.cost,
503
+ model: 'gpt-4',
504
+ modelUsage: llmUsage,
505
+ provider: 'openai',
506
+ usage: state.usage,
507
+ });
508
+ const result2 = UsageCounter.accumulateTool({
509
+ cost: result1.cost,
510
+ executionTime: 1000,
511
+ success: true,
512
+ toolCost: 0.01,
513
+ toolName: 'premium-search',
514
+ usage: result1.usage,
515
+ });
516
+
517
+ expect(result2.cost?.llm.total).toBe(0.05);
518
+ expect(result2.cost?.tools.total).toBe(0.01);
519
+ expect(result2.cost?.total).toBeCloseTo(0.06);
520
+ });
521
+ });
522
+
523
+ describe('mergeModelUsage (private method tests via accumulateLLM)', () => {
524
+ it('should merge basic token counts', () => {
525
+ const state = AgentRuntime.createInitialState();
526
+
527
+ const usage1: ModelUsage = {
528
+ cost: 0.05,
529
+ totalInputTokens: 100,
530
+ totalOutputTokens: 50,
531
+ totalTokens: 150,
532
+ };
533
+
534
+ const usage2: ModelUsage = {
535
+ cost: 0.03,
536
+ totalInputTokens: 200,
537
+ totalOutputTokens: 100,
538
+ totalTokens: 300,
539
+ };
540
+
541
+ const result1 = UsageCounter.accumulateLLM({
542
+ cost: state.cost,
543
+ model: 'gpt-4',
544
+ modelUsage: usage1,
545
+ provider: 'openai',
546
+ usage: state.usage,
547
+ });
548
+ const result2 = UsageCounter.accumulateLLM({
549
+ cost: result1.cost,
550
+ model: 'gpt-4',
551
+ modelUsage: usage2,
552
+ provider: 'openai',
553
+ usage: result1.usage,
554
+ });
555
+
556
+ expect(result2.cost?.llm.byModel[0].usage).toEqual({
557
+ cost: 0.08,
558
+ totalInputTokens: 300,
559
+ totalOutputTokens: 150,
560
+ totalTokens: 450,
561
+ });
562
+ });
563
+
564
+ it('should merge cache-related tokens', () => {
565
+ const state = AgentRuntime.createInitialState();
566
+
567
+ const usage1: ModelUsage = {
568
+ cost: 0.05,
569
+ inputCacheMissTokens: 30,
570
+ inputCachedTokens: 50,
571
+ inputWriteCacheTokens: 20,
572
+ totalInputTokens: 100,
573
+ totalOutputTokens: 50,
574
+ totalTokens: 150,
575
+ };
576
+
577
+ const usage2: ModelUsage = {
578
+ cost: 0.03,
579
+ inputCacheMissTokens: 40,
580
+ inputCachedTokens: 80,
581
+ inputWriteCacheTokens: 30,
582
+ totalInputTokens: 150,
583
+ totalOutputTokens: 75,
584
+ totalTokens: 225,
585
+ };
586
+
587
+ const result1 = UsageCounter.accumulateLLM({
588
+ cost: state.cost,
589
+ model: 'claude-3-5-sonnet-20241022',
590
+ modelUsage: usage1,
591
+ provider: 'anthropic',
592
+ usage: state.usage,
593
+ });
594
+ const result2 = UsageCounter.accumulateLLM({
595
+ cost: result1.cost,
596
+ model: 'claude-3-5-sonnet-20241022',
597
+ modelUsage: usage2,
598
+ provider: 'anthropic',
599
+ usage: result1.usage,
600
+ });
601
+
602
+ expect(result2.cost?.llm.byModel[0].usage).toEqual({
603
+ cost: 0.08,
604
+ inputCacheMissTokens: 70,
605
+ inputCachedTokens: 130,
606
+ inputWriteCacheTokens: 50,
607
+ totalInputTokens: 250,
608
+ totalOutputTokens: 125,
609
+ totalTokens: 375,
610
+ });
611
+ });
612
+
613
+ it('should merge reasoning tokens', () => {
614
+ const state = AgentRuntime.createInitialState();
615
+
616
+ const usage1: ModelUsage = {
617
+ cost: 0.05,
618
+ outputReasoningTokens: 100,
619
+ outputTextTokens: 200,
620
+ totalInputTokens: 100,
621
+ totalOutputTokens: 300,
622
+ totalTokens: 400,
623
+ };
624
+
625
+ const usage2: ModelUsage = {
626
+ cost: 0.03,
627
+ outputReasoningTokens: 50,
628
+ outputTextTokens: 100,
629
+ totalInputTokens: 50,
630
+ totalOutputTokens: 150,
631
+ totalTokens: 200,
632
+ };
633
+
634
+ const result1 = UsageCounter.accumulateLLM({
635
+ cost: state.cost,
636
+ model: 'o1',
637
+ modelUsage: usage1,
638
+ provider: 'openai',
639
+ usage: state.usage,
640
+ });
641
+ const result2 = UsageCounter.accumulateLLM({
642
+ cost: result1.cost,
643
+ model: 'o1',
644
+ modelUsage: usage2,
645
+ provider: 'openai',
646
+ usage: result1.usage,
647
+ });
648
+
649
+ expect(result2.cost?.llm.byModel[0].usage).toEqual({
650
+ cost: 0.08,
651
+ outputReasoningTokens: 150,
652
+ outputTextTokens: 300,
653
+ totalInputTokens: 150,
654
+ totalOutputTokens: 450,
655
+ totalTokens: 600,
656
+ });
657
+ });
658
+
659
+ it('should merge audio and image tokens', () => {
660
+ const state = AgentRuntime.createInitialState();
661
+
662
+ const usage1: ModelUsage = {
663
+ cost: 0.05,
664
+ inputAudioTokens: 10,
665
+ inputImageTokens: 20,
666
+ outputAudioTokens: 5,
667
+ outputImageTokens: 15,
668
+ totalInputTokens: 30,
669
+ totalOutputTokens: 20,
670
+ totalTokens: 50,
671
+ };
672
+
673
+ const usage2: ModelUsage = {
674
+ cost: 0.03,
675
+ inputAudioTokens: 15,
676
+ inputImageTokens: 25,
677
+ outputAudioTokens: 8,
678
+ outputImageTokens: 12,
679
+ totalInputTokens: 40,
680
+ totalOutputTokens: 20,
681
+ totalTokens: 60,
682
+ };
683
+
684
+ const result1 = UsageCounter.accumulateLLM({
685
+ cost: state.cost,
686
+ model: 'gpt-4o-audio-preview',
687
+ modelUsage: usage1,
688
+ provider: 'openai',
689
+ usage: state.usage,
690
+ });
691
+ const result2 = UsageCounter.accumulateLLM({
692
+ cost: result1.cost,
693
+ model: 'gpt-4o-audio-preview',
694
+ modelUsage: usage2,
695
+ provider: 'openai',
696
+ usage: result1.usage,
697
+ });
698
+
699
+ expect(result2.cost?.llm.byModel[0].usage).toEqual({
700
+ cost: 0.08,
701
+ inputAudioTokens: 25,
702
+ inputImageTokens: 45,
703
+ outputAudioTokens: 13,
704
+ outputImageTokens: 27,
705
+ totalInputTokens: 70,
706
+ totalOutputTokens: 40,
707
+ totalTokens: 110,
708
+ });
709
+ });
710
+
711
+ it('should merge prediction tokens', () => {
712
+ const state = AgentRuntime.createInitialState();
713
+
714
+ const usage1: ModelUsage = {
715
+ acceptedPredictionTokens: 50,
716
+ cost: 0.05,
717
+ rejectedPredictionTokens: 10,
718
+ totalInputTokens: 100,
719
+ totalOutputTokens: 60,
720
+ totalTokens: 160,
721
+ };
722
+
723
+ const usage2: ModelUsage = {
724
+ acceptedPredictionTokens: 30,
725
+ cost: 0.03,
726
+ rejectedPredictionTokens: 5,
727
+ totalInputTokens: 50,
728
+ totalOutputTokens: 35,
729
+ totalTokens: 85,
730
+ };
731
+
732
+ const result1 = UsageCounter.accumulateLLM({
733
+ cost: state.cost,
734
+ model: 'gpt-4o',
735
+ modelUsage: usage1,
736
+ provider: 'openai',
737
+ usage: state.usage,
738
+ });
739
+ const result2 = UsageCounter.accumulateLLM({
740
+ cost: result1.cost,
741
+ model: 'gpt-4o',
742
+ modelUsage: usage2,
743
+ provider: 'openai',
744
+ usage: result1.usage,
745
+ });
746
+
747
+ expect(result2.cost?.llm.byModel[0].usage).toEqual({
748
+ acceptedPredictionTokens: 80,
749
+ cost: 0.08,
750
+ rejectedPredictionTokens: 15,
751
+ totalInputTokens: 150,
752
+ totalOutputTokens: 95,
753
+ totalTokens: 245,
754
+ });
755
+ });
756
+
757
+ it('should handle missing fields gracefully', () => {
758
+ const state = AgentRuntime.createInitialState();
759
+
760
+ const usage1: ModelUsage = {
761
+ cost: 0.05,
762
+ totalInputTokens: 100,
763
+ // totalOutputTokens is missing
764
+ };
765
+
766
+ const usage2: ModelUsage = {
767
+ cost: 0.03,
768
+ totalOutputTokens: 50,
769
+ // totalInputTokens is missing
770
+ };
771
+
772
+ const result1 = UsageCounter.accumulateLLM({
773
+ cost: state.cost,
774
+ model: 'gpt-4',
775
+ modelUsage: usage1,
776
+ provider: 'openai',
777
+ usage: state.usage,
778
+ });
779
+ const result2 = UsageCounter.accumulateLLM({
780
+ cost: result1.cost,
781
+ model: 'gpt-4',
782
+ modelUsage: usage2,
783
+ provider: 'openai',
784
+ usage: result1.usage,
785
+ });
786
+
787
+ expect(result2.cost?.llm.byModel[0].usage).toEqual({
788
+ cost: 0.08,
789
+ totalInputTokens: 100,
790
+ totalOutputTokens: 50,
791
+ });
792
+ });
793
+
794
+ it('should merge all fields in a comprehensive scenario', () => {
795
+ const state = AgentRuntime.createInitialState();
796
+
797
+ const usage1: ModelUsage = {
798
+ acceptedPredictionTokens: 10,
799
+ cost: 0.05,
800
+ inputAudioTokens: 5,
801
+ inputCacheMissTokens: 40,
802
+ inputCachedTokens: 60,
803
+ inputCitationTokens: 10,
804
+ inputImageTokens: 20,
805
+ inputTextTokens: 100,
806
+ inputWriteCacheTokens: 30,
807
+ outputAudioTokens: 3,
808
+ outputImageTokens: 8,
809
+ outputReasoningTokens: 20,
810
+ outputTextTokens: 50,
811
+ rejectedPredictionTokens: 5,
812
+ totalInputTokens: 200,
813
+ totalOutputTokens: 80,
814
+ totalTokens: 280,
815
+ };
816
+
817
+ const usage2: ModelUsage = {
818
+ acceptedPredictionTokens: 5,
819
+ cost: 0.03,
820
+ inputAudioTokens: 3,
821
+ inputCacheMissTokens: 20,
822
+ inputCachedTokens: 30,
823
+ inputCitationTokens: 5,
824
+ inputImageTokens: 10,
825
+ inputTextTokens: 50,
826
+ inputWriteCacheTokens: 15,
827
+ outputAudioTokens: 2,
828
+ outputImageTokens: 4,
829
+ outputReasoningTokens: 10,
830
+ outputTextTokens: 25,
831
+ rejectedPredictionTokens: 2,
832
+ totalInputTokens: 100,
833
+ totalOutputTokens: 40,
834
+ totalTokens: 140,
835
+ };
836
+
837
+ const result1 = UsageCounter.accumulateLLM({
838
+ cost: state.cost,
839
+ model: 'claude-3-5-sonnet-20241022',
840
+ modelUsage: usage1,
841
+ provider: 'anthropic',
842
+ usage: state.usage,
843
+ });
844
+ const result2 = UsageCounter.accumulateLLM({
845
+ cost: result1.cost,
846
+ model: 'claude-3-5-sonnet-20241022',
847
+ modelUsage: usage2,
848
+ provider: 'anthropic',
849
+ usage: result1.usage,
850
+ });
851
+
852
+ expect(result2.cost?.llm.byModel[0].usage).toEqual({
853
+ acceptedPredictionTokens: 15,
854
+ cost: 0.08,
855
+ inputAudioTokens: 8,
856
+ inputCacheMissTokens: 60,
857
+ inputCachedTokens: 90,
858
+ inputCitationTokens: 15,
859
+ inputImageTokens: 30,
860
+ inputTextTokens: 150,
861
+ inputWriteCacheTokens: 45,
862
+ outputAudioTokens: 5,
863
+ outputImageTokens: 12,
864
+ outputReasoningTokens: 30,
865
+ outputTextTokens: 75,
866
+ rejectedPredictionTokens: 7,
867
+ totalInputTokens: 300,
868
+ totalOutputTokens: 120,
869
+ totalTokens: 420,
870
+ });
871
+ });
872
+ });
873
+ });