@agentica/core 0.44.0-dev.20260313 → 0.44.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (150)
  1. package/LICENSE +21 -21
  2. package/README.md +218 -218
  3. package/lib/context/internal/__IChatInitialApplication.d.ts +1 -2
  4. package/lib/errors/AgenticaJsonParseError.js +6 -6
  5. package/lib/index.mjs +47 -1
  6. package/lib/index.mjs.map +1 -1
  7. package/lib/orchestrate/call.js +16 -16
  8. package/lib/orchestrate/initialize.js +43 -1
  9. package/lib/orchestrate/initialize.js.map +1 -1
  10. package/lib/structures/IAgenticaController.d.ts +143 -143
  11. package/lib/utils/ChatGptCompletionMessageUtil.js +6 -6
  12. package/package.json +6 -6
  13. package/prompts/cancel.md +5 -5
  14. package/prompts/common.md +3 -3
  15. package/prompts/describe.md +7 -7
  16. package/prompts/execute.md +122 -122
  17. package/prompts/initialize.md +3 -3
  18. package/prompts/json_parse_error.md +35 -35
  19. package/prompts/select.md +7 -7
  20. package/prompts/validate.md +123 -123
  21. package/prompts/validate_repeated.md +31 -31
  22. package/src/Agentica.ts +367 -367
  23. package/src/MicroAgentica.ts +357 -357
  24. package/src/constants/AgenticaConstant.ts +4 -4
  25. package/src/constants/AgenticaDefaultPrompt.ts +44 -44
  26. package/src/constants/index.ts +2 -2
  27. package/src/context/AgenticaContext.ts +136 -136
  28. package/src/context/AgenticaContextRequestResult.ts +14 -14
  29. package/src/context/AgenticaOperation.ts +73 -73
  30. package/src/context/AgenticaOperationCollection.ts +49 -49
  31. package/src/context/AgenticaOperationSelection.ts +9 -9
  32. package/src/context/AgenticaTokenUsage.ts +186 -186
  33. package/src/context/MicroAgenticaContext.ts +99 -99
  34. package/src/context/index.ts +5 -5
  35. package/src/context/internal/AgenticaOperationComposer.ts +177 -177
  36. package/src/context/internal/AgenticaTokenUsageAggregator.ts +66 -66
  37. package/src/context/internal/__IChatCancelFunctionsApplication.ts +23 -23
  38. package/src/context/internal/__IChatFunctionReference.ts +21 -21
  39. package/src/context/internal/__IChatInitialApplication.ts +13 -15
  40. package/src/context/internal/__IChatSelectFunctionsApplication.ts +24 -24
  41. package/src/context/internal/isAgenticaContext.ts +11 -11
  42. package/src/errors/AgenticaJsonParseError.ts +52 -52
  43. package/src/errors/AgenticaValidationError.ts +49 -49
  44. package/src/errors/index.ts +2 -2
  45. package/src/events/AgenticaAssistantMessageEvent.ts +12 -12
  46. package/src/events/AgenticaCallEvent.ts +27 -27
  47. package/src/events/AgenticaCancelEvent.ts +9 -9
  48. package/src/events/AgenticaDescribeEvent.ts +14 -14
  49. package/src/events/AgenticaEvent.ts +59 -59
  50. package/src/events/AgenticaEvent.type.ts +19 -19
  51. package/src/events/AgenticaEventBase.ts +18 -18
  52. package/src/events/AgenticaEventSource.ts +6 -6
  53. package/src/events/AgenticaExecuteEvent.ts +45 -45
  54. package/src/events/AgenticaInitializeEvent.ts +7 -7
  55. package/src/events/AgenticaJsonParseErrorEvent.ts +16 -16
  56. package/src/events/AgenticaRequestEvent.ts +27 -27
  57. package/src/events/AgenticaResponseEvent.ts +32 -32
  58. package/src/events/AgenticaSelectEvent.ts +11 -11
  59. package/src/events/AgenticaUserMessageEvent.ts +12 -12
  60. package/src/events/AgenticaValidateEvent.ts +32 -32
  61. package/src/events/MicroAgenticaEvent.ts +45 -45
  62. package/src/events/index.ts +15 -15
  63. package/src/factory/events.ts +357 -357
  64. package/src/factory/histories.ts +348 -348
  65. package/src/factory/index.ts +3 -3
  66. package/src/factory/operations.ts +16 -16
  67. package/src/functional/assertHttpController.ts +106 -106
  68. package/src/functional/assertHttpLlmApplication.ts +52 -52
  69. package/src/functional/assertMcpController.ts +47 -47
  70. package/src/functional/createMcpLlmApplication.ts +72 -72
  71. package/src/functional/index.ts +7 -7
  72. package/src/functional/validateHttpController.ts +113 -113
  73. package/src/functional/validateHttpLlmApplication.ts +65 -65
  74. package/src/functional/validateMcpController.ts +53 -53
  75. package/src/histories/AgenticaAssistantMessageHistory.ts +10 -10
  76. package/src/histories/AgenticaCancelHistory.ts +8 -8
  77. package/src/histories/AgenticaDescribeHistory.ts +18 -18
  78. package/src/histories/AgenticaExecuteHistory.ts +64 -64
  79. package/src/histories/AgenticaHistory.ts +28 -28
  80. package/src/histories/AgenticaHistoryBase.ts +35 -35
  81. package/src/histories/AgenticaSelectHistory.ts +8 -8
  82. package/src/histories/AgenticaSystemMessageHistory.ts +10 -10
  83. package/src/histories/AgenticaUserMessageHistory.ts +11 -11
  84. package/src/histories/MicroAgenticaHistory.ts +19 -19
  85. package/src/histories/contents/AgenticaUserMessageAudioContent.ts +21 -21
  86. package/src/histories/contents/AgenticaUserMessageContent.ts +19 -19
  87. package/src/histories/contents/AgenticaUserMessageContentBase.ts +6 -6
  88. package/src/histories/contents/AgenticaUserMessageFileContent.ts +25 -25
  89. package/src/histories/contents/AgenticaUserMessageImageContent.ts +33 -33
  90. package/src/histories/contents/AgenticaUserMessageTextContent.ts +15 -15
  91. package/src/histories/contents/index.ts +5 -5
  92. package/src/histories/index.ts +10 -10
  93. package/src/index.ts +15 -15
  94. package/src/json/IAgenticaEventJson.ts +265 -265
  95. package/src/json/IAgenticaEventJson.type.ts +19 -19
  96. package/src/json/IAgenticaHistoryJson.ts +165 -165
  97. package/src/json/IAgenticaHistoryJson.type.ts +19 -19
  98. package/src/json/IAgenticaOperationJson.ts +36 -36
  99. package/src/json/IAgenticaOperationSelectionJson.ts +26 -26
  100. package/src/json/IAgenticaTokenUsageJson.ts +107 -107
  101. package/src/json/IMicroAgenticaEventJson.ts +22 -22
  102. package/src/json/IMicroAgenticaHistoryJson.ts +25 -25
  103. package/src/json/index.ts +7 -7
  104. package/src/orchestrate/call.ts +542 -542
  105. package/src/orchestrate/cancel.ts +265 -265
  106. package/src/orchestrate/describe.ts +66 -66
  107. package/src/orchestrate/execute.ts +61 -61
  108. package/src/orchestrate/index.ts +6 -6
  109. package/src/orchestrate/initialize.ts +102 -102
  110. package/src/orchestrate/internal/cancelFunctionFromContext.ts +33 -33
  111. package/src/orchestrate/internal/selectFunctionFromContext.ts +34 -34
  112. package/src/orchestrate/select.ts +320 -320
  113. package/src/structures/IAgenticaConfig.ts +83 -83
  114. package/src/structures/IAgenticaConfigBase.ts +87 -87
  115. package/src/structures/IAgenticaController.ts +143 -143
  116. package/src/structures/IAgenticaExecutor.ts +167 -167
  117. package/src/structures/IAgenticaProps.ts +78 -78
  118. package/src/structures/IAgenticaSystemPrompt.ts +236 -236
  119. package/src/structures/IAgenticaVendor.ts +54 -54
  120. package/src/structures/IMcpTool.ts +60 -60
  121. package/src/structures/IMicroAgenticaConfig.ts +56 -56
  122. package/src/structures/IMicroAgenticaExecutor.ts +67 -67
  123. package/src/structures/IMicroAgenticaProps.ts +77 -77
  124. package/src/structures/IMicroAgenticaSystemPrompt.ts +169 -169
  125. package/src/structures/index.ts +10 -10
  126. package/src/transformers/transformHistory.ts +172 -172
  127. package/src/utils/AssistantMessageEmptyError.ts +20 -20
  128. package/src/utils/AsyncQueue.spec.ts +355 -355
  129. package/src/utils/AsyncQueue.ts +95 -95
  130. package/src/utils/ByteArrayUtil.ts +5 -5
  131. package/src/utils/ChatGptCompletionMessageUtil.spec.ts +314 -314
  132. package/src/utils/ChatGptCompletionMessageUtil.ts +210 -210
  133. package/src/utils/ChatGptCompletionStreamingUtil.spec.ts +909 -909
  134. package/src/utils/ChatGptCompletionStreamingUtil.ts +91 -91
  135. package/src/utils/ChatGptTokenUsageAggregator.spec.ts +226 -226
  136. package/src/utils/ChatGptTokenUsageAggregator.ts +57 -57
  137. package/src/utils/MPSC.spec.ts +276 -276
  138. package/src/utils/MPSC.ts +42 -42
  139. package/src/utils/Singleton.spec.ts +138 -138
  140. package/src/utils/Singleton.ts +42 -42
  141. package/src/utils/StreamUtil.spec.ts +512 -512
  142. package/src/utils/StreamUtil.ts +87 -87
  143. package/src/utils/__map_take.spec.ts +140 -140
  144. package/src/utils/__map_take.ts +13 -13
  145. package/src/utils/__retry.spec.ts +198 -198
  146. package/src/utils/__retry.ts +18 -18
  147. package/src/utils/assertExecuteFailure.ts +16 -16
  148. package/src/utils/index.ts +4 -4
  149. package/src/utils/request.ts +140 -140
  150. package/src/utils/types.ts +50 -50
@@ -1,91 +1,91 @@
1
- import type { ChatCompletion, ChatCompletionChunk } from "openai/resources";
2
-
3
- import { ChatGptCompletionMessageUtil, MPSC, streamDefaultReaderToAsyncGenerator, StreamUtil, toAsyncGenerator } from ".";
4
-
5
- async function reduceStreamingWithDispatch(stream: ReadableStream<ChatCompletionChunk>, eventProcessor: (props: {
6
- stream: AsyncGenerator<string, undefined, undefined>;
7
- done: () => boolean;
8
- get: () => string;
9
- join: () => Promise<string>;
10
- }) => void, abortSignal?: AbortSignal) {
11
- const streamContext = new Map<number, { content: string; mpsc: MPSC<string> }>();
12
-
13
- const nullableCompletion = await StreamUtil.reduce<ChatCompletionChunk, Promise<ChatCompletion>>(stream, async (accPromise, chunk) => {
14
- const acc = await accPromise;
15
- const registerContext = (
16
- choices: ChatCompletionChunk.Choice[],
17
- ) => {
18
- for (const choice of choices) {
19
- // Handle content first, even if finish_reason is present
20
- if (choice.delta.content != null && choice.delta.content !== "") {
21
- // Process content logic (moved up from below)
22
- if (streamContext.has(choice.index)) {
23
- const context = streamContext.get(choice.index)!;
24
- context.content += choice.delta.content;
25
- context.mpsc.produce(choice.delta.content);
26
- }
27
- else {
28
- const mpsc = new MPSC<string>();
29
-
30
- streamContext.set(choice.index, {
31
- content: choice.delta.content,
32
- mpsc,
33
- });
34
- mpsc.produce(choice.delta.content);
35
-
36
- eventProcessor({
37
- stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
38
- done: () => mpsc.done(),
39
- get: () => streamContext.get(choice.index)?.content ?? "",
40
- join: async () => {
41
- await mpsc.waitClosed();
42
- return streamContext.get(choice.index)!.content;
43
- },
44
- });
45
- }
46
- }
47
-
48
- // Handle finish_reason after content processing
49
- if (choice.finish_reason != null) {
50
- const context = streamContext.get(choice.index);
51
- if (context != null) {
52
- context.mpsc.close();
53
- }
54
- }
55
- }
56
- };
57
- if (acc.object === "chat.completion.chunk") {
58
- registerContext([acc, chunk].flatMap(v => v.choices ?? []));
59
- return ChatGptCompletionMessageUtil.merge([acc, chunk]);
60
- }
61
- registerContext(chunk.choices ?? []);
62
- return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
63
- }, { abortSignal });
64
-
65
- if (nullableCompletion == null) {
66
- throw new Error(
67
- "StreamUtil.reduce did not produce a ChatCompletion. Possible causes: the input stream was empty, invalid, or closed prematurely. "
68
- + "To debug: check that the stream is properly initialized and contains valid ChatCompletionChunk data. "
69
- + "You may also enable verbose logging upstream to inspect the stream contents. "
70
- + `Stream locked: ${stream.locked}.`,
71
- );
72
- }
73
-
74
- if ((nullableCompletion.object as string) === "chat.completion.chunk") {
75
- const completion = ChatGptCompletionMessageUtil.merge([nullableCompletion as unknown as ChatCompletionChunk]);
76
- completion.choices.forEach((choice) => {
77
- if (choice.message.content != null && choice.message.content !== "") {
78
- eventProcessor({
79
- stream: toAsyncGenerator(choice.message.content),
80
- done: () => true,
81
- get: () => choice.message.content!,
82
- join: async () => choice.message.content!,
83
- });
84
- }
85
- });
86
- return completion;
87
- }
88
- return nullableCompletion;
89
- }
90
-
91
- export { reduceStreamingWithDispatch };
1
+ import type { ChatCompletion, ChatCompletionChunk } from "openai/resources";
2
+
3
+ import { ChatGptCompletionMessageUtil, MPSC, streamDefaultReaderToAsyncGenerator, StreamUtil, toAsyncGenerator } from ".";
4
+
5
+ async function reduceStreamingWithDispatch(stream: ReadableStream<ChatCompletionChunk>, eventProcessor: (props: {
6
+ stream: AsyncGenerator<string, undefined, undefined>;
7
+ done: () => boolean;
8
+ get: () => string;
9
+ join: () => Promise<string>;
10
+ }) => void, abortSignal?: AbortSignal) {
11
+ const streamContext = new Map<number, { content: string; mpsc: MPSC<string> }>();
12
+
13
+ const nullableCompletion = await StreamUtil.reduce<ChatCompletionChunk, Promise<ChatCompletion>>(stream, async (accPromise, chunk) => {
14
+ const acc = await accPromise;
15
+ const registerContext = (
16
+ choices: ChatCompletionChunk.Choice[],
17
+ ) => {
18
+ for (const choice of choices) {
19
+ // Handle content first, even if finish_reason is present
20
+ if (choice.delta.content != null && choice.delta.content !== "") {
21
+ // Process content logic (moved up from below)
22
+ if (streamContext.has(choice.index)) {
23
+ const context = streamContext.get(choice.index)!;
24
+ context.content += choice.delta.content;
25
+ context.mpsc.produce(choice.delta.content);
26
+ }
27
+ else {
28
+ const mpsc = new MPSC<string>();
29
+
30
+ streamContext.set(choice.index, {
31
+ content: choice.delta.content,
32
+ mpsc,
33
+ });
34
+ mpsc.produce(choice.delta.content);
35
+
36
+ eventProcessor({
37
+ stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
38
+ done: () => mpsc.done(),
39
+ get: () => streamContext.get(choice.index)?.content ?? "",
40
+ join: async () => {
41
+ await mpsc.waitClosed();
42
+ return streamContext.get(choice.index)!.content;
43
+ },
44
+ });
45
+ }
46
+ }
47
+
48
+ // Handle finish_reason after content processing
49
+ if (choice.finish_reason != null) {
50
+ const context = streamContext.get(choice.index);
51
+ if (context != null) {
52
+ context.mpsc.close();
53
+ }
54
+ }
55
+ }
56
+ };
57
+ if (acc.object === "chat.completion.chunk") {
58
+ registerContext([acc, chunk].flatMap(v => v.choices ?? []));
59
+ return ChatGptCompletionMessageUtil.merge([acc, chunk]);
60
+ }
61
+ registerContext(chunk.choices ?? []);
62
+ return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
63
+ }, { abortSignal });
64
+
65
+ if (nullableCompletion == null) {
66
+ throw new Error(
67
+ "StreamUtil.reduce did not produce a ChatCompletion. Possible causes: the input stream was empty, invalid, or closed prematurely. "
68
+ + "To debug: check that the stream is properly initialized and contains valid ChatCompletionChunk data. "
69
+ + "You may also enable verbose logging upstream to inspect the stream contents. "
70
+ + `Stream locked: ${stream.locked}.`,
71
+ );
72
+ }
73
+
74
+ if ((nullableCompletion.object as string) === "chat.completion.chunk") {
75
+ const completion = ChatGptCompletionMessageUtil.merge([nullableCompletion as unknown as ChatCompletionChunk]);
76
+ completion.choices.forEach((choice) => {
77
+ if (choice.message.content != null && choice.message.content !== "") {
78
+ eventProcessor({
79
+ stream: toAsyncGenerator(choice.message.content),
80
+ done: () => true,
81
+ get: () => choice.message.content!,
82
+ join: async () => choice.message.content!,
83
+ });
84
+ }
85
+ });
86
+ return completion;
87
+ }
88
+ return nullableCompletion;
89
+ }
90
+
91
+ export { reduceStreamingWithDispatch };
@@ -1,226 +1,226 @@
1
- import type { CompletionUsage } from "openai/resources";
2
-
3
- import { ChatGptTokenUsageAggregator } from "./ChatGptTokenUsageAggregator";
4
-
5
- describe("chatGptTokenUsageAggregator", () => {
6
- describe("sum", () => {
7
- it("should sum basic token usage", () => {
8
- const usage1: CompletionUsage = {
9
- prompt_tokens: 10,
10
- completion_tokens: 5,
11
- total_tokens: 15,
12
- };
13
-
14
- const usage2: CompletionUsage = {
15
- prompt_tokens: 20,
16
- completion_tokens: 10,
17
- total_tokens: 30,
18
- };
19
-
20
- const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
21
-
22
- expect(result).toEqual({
23
- prompt_tokens: 30,
24
- completion_tokens: 15,
25
- total_tokens: 45,
26
- completion_tokens_details: {
27
- accepted_prediction_tokens: 0,
28
- reasoning_tokens: 0,
29
- rejected_prediction_tokens: 0,
30
- },
31
- prompt_tokens_details: {
32
- audio_tokens: 0,
33
- cached_tokens: 0,
34
- },
35
- });
36
- });
37
-
38
- it("should handle undefined values", () => {
39
- const usage1: CompletionUsage = {
40
- prompt_tokens: 10,
41
- completion_tokens: 5,
42
- total_tokens: 15,
43
- };
44
-
45
- const usage2: CompletionUsage = {
46
- // @ts-expect-error - intended to be undefined
47
- prompt_tokens: undefined,
48
- // @ts-expect-error - intended to be undefined
49
- completion_tokens: undefined,
50
- // @ts-expect-error - intended to be undefined
51
- total_tokens: undefined,
52
- };
53
-
54
- const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
55
-
56
- expect(result).toEqual({
57
- prompt_tokens: 10,
58
- completion_tokens: 5,
59
- total_tokens: 15,
60
- completion_tokens_details: {
61
- accepted_prediction_tokens: 0,
62
- reasoning_tokens: 0,
63
- rejected_prediction_tokens: 0,
64
- },
65
- prompt_tokens_details: {
66
- audio_tokens: 0,
67
- cached_tokens: 0,
68
- },
69
- });
70
- });
71
-
72
- it("should sum completion token details", () => {
73
- const usage1: CompletionUsage = {
74
- prompt_tokens: 10,
75
- completion_tokens: 5,
76
- total_tokens: 15,
77
- completion_tokens_details: {
78
- accepted_prediction_tokens: 3,
79
- reasoning_tokens: 1,
80
- rejected_prediction_tokens: 1,
81
- },
82
- };
83
-
84
- const usage2: CompletionUsage = {
85
- prompt_tokens: 20,
86
- completion_tokens: 10,
87
- total_tokens: 30,
88
- completion_tokens_details: {
89
- accepted_prediction_tokens: 7,
90
- reasoning_tokens: 2,
91
- rejected_prediction_tokens: 1,
92
- },
93
- };
94
-
95
- const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
96
-
97
- expect(result.completion_tokens_details).toEqual({
98
- accepted_prediction_tokens: 10,
99
- reasoning_tokens: 3,
100
- rejected_prediction_tokens: 2,
101
- });
102
- });
103
-
104
- it("should handle undefined completion token details", () => {
105
- const usage1: CompletionUsage = {
106
- prompt_tokens: 10,
107
- completion_tokens: 5,
108
- total_tokens: 15,
109
- completion_tokens_details: {
110
- accepted_prediction_tokens: 3,
111
- reasoning_tokens: 1,
112
- rejected_prediction_tokens: 1,
113
- },
114
- };
115
-
116
- const usage2: CompletionUsage = {
117
- prompt_tokens: 20,
118
- completion_tokens: 10,
119
- total_tokens: 30,
120
- };
121
-
122
- const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
123
-
124
- expect(result.completion_tokens_details).toEqual({
125
- accepted_prediction_tokens: 3,
126
- reasoning_tokens: 1,
127
- rejected_prediction_tokens: 1,
128
- });
129
- });
130
-
131
- it("should sum prompt token details", () => {
132
- const usage1: CompletionUsage = {
133
- prompt_tokens: 10,
134
- completion_tokens: 5,
135
- total_tokens: 15,
136
- prompt_tokens_details: {
137
- audio_tokens: 3,
138
- cached_tokens: 2,
139
- },
140
- };
141
-
142
- const usage2: CompletionUsage = {
143
- prompt_tokens: 20,
144
- completion_tokens: 10,
145
- total_tokens: 30,
146
- prompt_tokens_details: {
147
- audio_tokens: 7,
148
- cached_tokens: 3,
149
- },
150
- };
151
-
152
- const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
153
-
154
- expect(result.prompt_tokens_details).toEqual({
155
- audio_tokens: 10,
156
- cached_tokens: 5,
157
- });
158
- });
159
-
160
- it("should handle undefined prompt token details", () => {
161
- const usage1: CompletionUsage = {
162
- prompt_tokens: 10,
163
- completion_tokens: 5,
164
- total_tokens: 15,
165
- prompt_tokens_details: {
166
- audio_tokens: 3,
167
- cached_tokens: 2,
168
- },
169
- };
170
-
171
- const usage2: CompletionUsage = {
172
- prompt_tokens: 20,
173
- completion_tokens: 10,
174
- total_tokens: 30,
175
- };
176
-
177
- const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
178
-
179
- expect(result.prompt_tokens_details).toEqual({
180
- audio_tokens: 3,
181
- cached_tokens: 2,
182
- });
183
- });
184
-
185
- it("should handle all undefined values", () => {
186
- const usage1: CompletionUsage = {
187
- // @ts-expect-error - intended to be undefined
188
- prompt_tokens: undefined,
189
- // @ts-expect-error - intended to be undefined
190
- completion_tokens: undefined,
191
- // @ts-expect-error - intended to be undefined
192
- total_tokens: undefined,
193
- completion_tokens_details: undefined,
194
- prompt_tokens_details: undefined,
195
- };
196
-
197
- const usage2: CompletionUsage = {
198
- // @ts-expect-error - intended to be undefined
199
- prompt_tokens: undefined,
200
- // @ts-expect-error - intended to be undefined
201
- completion_tokens: undefined,
202
- // @ts-expect-error - intended to be undefined
203
- total_tokens: undefined,
204
- completion_tokens_details: undefined,
205
- prompt_tokens_details: undefined,
206
- };
207
-
208
- const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
209
-
210
- expect(result).toEqual({
211
- prompt_tokens: 0,
212
- completion_tokens: 0,
213
- total_tokens: 0,
214
- completion_tokens_details: {
215
- accepted_prediction_tokens: 0,
216
- reasoning_tokens: 0,
217
- rejected_prediction_tokens: 0,
218
- },
219
- prompt_tokens_details: {
220
- audio_tokens: 0,
221
- cached_tokens: 0,
222
- },
223
- });
224
- });
225
- });
226
- });
1
+ import type { CompletionUsage } from "openai/resources";
2
+
3
+ import { ChatGptTokenUsageAggregator } from "./ChatGptTokenUsageAggregator";
4
+
5
+ describe("chatGptTokenUsageAggregator", () => {
6
+ describe("sum", () => {
7
+ it("should sum basic token usage", () => {
8
+ const usage1: CompletionUsage = {
9
+ prompt_tokens: 10,
10
+ completion_tokens: 5,
11
+ total_tokens: 15,
12
+ };
13
+
14
+ const usage2: CompletionUsage = {
15
+ prompt_tokens: 20,
16
+ completion_tokens: 10,
17
+ total_tokens: 30,
18
+ };
19
+
20
+ const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
21
+
22
+ expect(result).toEqual({
23
+ prompt_tokens: 30,
24
+ completion_tokens: 15,
25
+ total_tokens: 45,
26
+ completion_tokens_details: {
27
+ accepted_prediction_tokens: 0,
28
+ reasoning_tokens: 0,
29
+ rejected_prediction_tokens: 0,
30
+ },
31
+ prompt_tokens_details: {
32
+ audio_tokens: 0,
33
+ cached_tokens: 0,
34
+ },
35
+ });
36
+ });
37
+
38
+ it("should handle undefined values", () => {
39
+ const usage1: CompletionUsage = {
40
+ prompt_tokens: 10,
41
+ completion_tokens: 5,
42
+ total_tokens: 15,
43
+ };
44
+
45
+ const usage2: CompletionUsage = {
46
+ // @ts-expect-error - intended to be undefined
47
+ prompt_tokens: undefined,
48
+ // @ts-expect-error - intended to be undefined
49
+ completion_tokens: undefined,
50
+ // @ts-expect-error - intended to be undefined
51
+ total_tokens: undefined,
52
+ };
53
+
54
+ const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
55
+
56
+ expect(result).toEqual({
57
+ prompt_tokens: 10,
58
+ completion_tokens: 5,
59
+ total_tokens: 15,
60
+ completion_tokens_details: {
61
+ accepted_prediction_tokens: 0,
62
+ reasoning_tokens: 0,
63
+ rejected_prediction_tokens: 0,
64
+ },
65
+ prompt_tokens_details: {
66
+ audio_tokens: 0,
67
+ cached_tokens: 0,
68
+ },
69
+ });
70
+ });
71
+
72
+ it("should sum completion token details", () => {
73
+ const usage1: CompletionUsage = {
74
+ prompt_tokens: 10,
75
+ completion_tokens: 5,
76
+ total_tokens: 15,
77
+ completion_tokens_details: {
78
+ accepted_prediction_tokens: 3,
79
+ reasoning_tokens: 1,
80
+ rejected_prediction_tokens: 1,
81
+ },
82
+ };
83
+
84
+ const usage2: CompletionUsage = {
85
+ prompt_tokens: 20,
86
+ completion_tokens: 10,
87
+ total_tokens: 30,
88
+ completion_tokens_details: {
89
+ accepted_prediction_tokens: 7,
90
+ reasoning_tokens: 2,
91
+ rejected_prediction_tokens: 1,
92
+ },
93
+ };
94
+
95
+ const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
96
+
97
+ expect(result.completion_tokens_details).toEqual({
98
+ accepted_prediction_tokens: 10,
99
+ reasoning_tokens: 3,
100
+ rejected_prediction_tokens: 2,
101
+ });
102
+ });
103
+
104
+ it("should handle undefined completion token details", () => {
105
+ const usage1: CompletionUsage = {
106
+ prompt_tokens: 10,
107
+ completion_tokens: 5,
108
+ total_tokens: 15,
109
+ completion_tokens_details: {
110
+ accepted_prediction_tokens: 3,
111
+ reasoning_tokens: 1,
112
+ rejected_prediction_tokens: 1,
113
+ },
114
+ };
115
+
116
+ const usage2: CompletionUsage = {
117
+ prompt_tokens: 20,
118
+ completion_tokens: 10,
119
+ total_tokens: 30,
120
+ };
121
+
122
+ const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
123
+
124
+ expect(result.completion_tokens_details).toEqual({
125
+ accepted_prediction_tokens: 3,
126
+ reasoning_tokens: 1,
127
+ rejected_prediction_tokens: 1,
128
+ });
129
+ });
130
+
131
+ it("should sum prompt token details", () => {
132
+ const usage1: CompletionUsage = {
133
+ prompt_tokens: 10,
134
+ completion_tokens: 5,
135
+ total_tokens: 15,
136
+ prompt_tokens_details: {
137
+ audio_tokens: 3,
138
+ cached_tokens: 2,
139
+ },
140
+ };
141
+
142
+ const usage2: CompletionUsage = {
143
+ prompt_tokens: 20,
144
+ completion_tokens: 10,
145
+ total_tokens: 30,
146
+ prompt_tokens_details: {
147
+ audio_tokens: 7,
148
+ cached_tokens: 3,
149
+ },
150
+ };
151
+
152
+ const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
153
+
154
+ expect(result.prompt_tokens_details).toEqual({
155
+ audio_tokens: 10,
156
+ cached_tokens: 5,
157
+ });
158
+ });
159
+
160
+ it("should handle undefined prompt token details", () => {
161
+ const usage1: CompletionUsage = {
162
+ prompt_tokens: 10,
163
+ completion_tokens: 5,
164
+ total_tokens: 15,
165
+ prompt_tokens_details: {
166
+ audio_tokens: 3,
167
+ cached_tokens: 2,
168
+ },
169
+ };
170
+
171
+ const usage2: CompletionUsage = {
172
+ prompt_tokens: 20,
173
+ completion_tokens: 10,
174
+ total_tokens: 30,
175
+ };
176
+
177
+ const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
178
+
179
+ expect(result.prompt_tokens_details).toEqual({
180
+ audio_tokens: 3,
181
+ cached_tokens: 2,
182
+ });
183
+ });
184
+
185
+ it("should handle all undefined values", () => {
186
+ const usage1: CompletionUsage = {
187
+ // @ts-expect-error - intended to be undefined
188
+ prompt_tokens: undefined,
189
+ // @ts-expect-error - intended to be undefined
190
+ completion_tokens: undefined,
191
+ // @ts-expect-error - intended to be undefined
192
+ total_tokens: undefined,
193
+ completion_tokens_details: undefined,
194
+ prompt_tokens_details: undefined,
195
+ };
196
+
197
+ const usage2: CompletionUsage = {
198
+ // @ts-expect-error - intended to be undefined
199
+ prompt_tokens: undefined,
200
+ // @ts-expect-error - intended to be undefined
201
+ completion_tokens: undefined,
202
+ // @ts-expect-error - intended to be undefined
203
+ total_tokens: undefined,
204
+ completion_tokens_details: undefined,
205
+ prompt_tokens_details: undefined,
206
+ };
207
+
208
+ const result = ChatGptTokenUsageAggregator.sum(usage1, usage2);
209
+
210
+ expect(result).toEqual({
211
+ prompt_tokens: 0,
212
+ completion_tokens: 0,
213
+ total_tokens: 0,
214
+ completion_tokens_details: {
215
+ accepted_prediction_tokens: 0,
216
+ reasoning_tokens: 0,
217
+ rejected_prediction_tokens: 0,
218
+ },
219
+ prompt_tokens_details: {
220
+ audio_tokens: 0,
221
+ cached_tokens: 0,
222
+ },
223
+ });
224
+ });
225
+ });
226
+ });