@agentica/core 0.32.2 → 0.32.3-dev.2

Files changed (43)
  1. package/lib/index.mjs +320 -342
  2. package/lib/index.mjs.map +1 -1
  3. package/lib/orchestrate/call.js +87 -107
  4. package/lib/orchestrate/call.js.map +1 -1
  5. package/lib/orchestrate/describe.js +5 -50
  6. package/lib/orchestrate/describe.js.map +1 -1
  7. package/lib/orchestrate/initialize.js +5 -50
  8. package/lib/orchestrate/initialize.js.map +1 -1
  9. package/lib/orchestrate/select.js +107 -126
  10. package/lib/orchestrate/select.js.map +1 -1
  11. package/lib/utils/AssistantMessageEmptyError.d.ts +7 -0
  12. package/lib/utils/AssistantMessageEmptyError.js +17 -0
  13. package/lib/utils/AssistantMessageEmptyError.js.map +1 -0
  14. package/lib/utils/ChatGptCompletionStreamingUtil.d.ts +8 -0
  15. package/lib/utils/ChatGptCompletionStreamingUtil.js +86 -0
  16. package/lib/utils/ChatGptCompletionStreamingUtil.js.map +1 -0
  17. package/lib/utils/ChatGptCompletionStreamingUtil.spec.d.ts +1 -0
  18. package/lib/utils/ChatGptCompletionStreamingUtil.spec.js +855 -0
  19. package/lib/utils/ChatGptCompletionStreamingUtil.spec.js.map +1 -0
  20. package/lib/utils/MPSC.js +8 -6
  21. package/lib/utils/MPSC.js.map +1 -1
  22. package/lib/utils/StreamUtil.d.ts +1 -1
  23. package/lib/utils/StreamUtil.js +2 -2
  24. package/lib/utils/StreamUtil.js.map +1 -1
  25. package/lib/utils/__retry.d.ts +1 -0
  26. package/lib/utils/__retry.js +30 -0
  27. package/lib/utils/__retry.js.map +1 -0
  28. package/lib/utils/__retry.spec.d.ts +1 -0
  29. package/lib/utils/__retry.spec.js +172 -0
  30. package/lib/utils/__retry.spec.js.map +1 -0
  31. package/package.json +1 -1
  32. package/src/orchestrate/call.ts +88 -114
  33. package/src/orchestrate/describe.ts +7 -65
  34. package/src/orchestrate/initialize.ts +4 -64
  35. package/src/orchestrate/select.ts +111 -138
  36. package/src/utils/AssistantMessageEmptyError.ts +13 -0
  37. package/src/utils/ChatGptCompletionMessageUtil.ts +1 -1
  38. package/src/utils/ChatGptCompletionStreamingUtil.spec.ts +908 -0
  39. package/src/utils/ChatGptCompletionStreamingUtil.ts +90 -0
  40. package/src/utils/MPSC.ts +8 -6
  41. package/src/utils/StreamUtil.ts +2 -2
  42. package/src/utils/__retry.spec.ts +198 -0
  43. package/src/utils/__retry.ts +18 -0
package/src/orchestrate/describe.ts

@@ -10,9 +10,7 @@ import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
 import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
 import { createDescribeEvent } from "../factory/events";
 import { decodeHistory } from "../factory/histories";
-import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
-import { MPSC } from "../utils/MPSC";
-import { streamDefaultReaderToAsyncGenerator, StreamUtil } from "../utils/StreamUtil";
+import { reduceStreamingWithDispatch } from "../utils/ChatGptCompletionStreamingUtil";
 
 export async function describe<Model extends ILlmSchema.Model>(
   ctx: AgenticaContext<Model> | MicroAgenticaContext<Model>,
@@ -41,68 +39,12 @@ export async function describe<Model extends ILlmSchema.Model>(
     ],
   });
 
-  const describeContext: ({
-    content: string;
-    mpsc: MPSC<string>;
-  })[] = [];
-
-  await StreamUtil.reduce<
-    OpenAI.ChatCompletionChunk,
-    Promise<OpenAI.ChatCompletion>
-  >(completionStream, async (accPromise, chunk) => {
-    const acc = await accPromise;
-    const registerContext = (
-      choices: OpenAI.ChatCompletionChunk.Choice[],
-    ) => {
-      for (const choice of choices) {
-        /**
-         * @TODO fix it
-         * Sometimes, the complete message arrives along with a finish reason.
-         */
-        if (choice.finish_reason != null) {
-          describeContext[choice.index]!.mpsc.close();
-          continue;
-        }
-
-        if (choice.delta.content == null) {
-          continue;
-        }
-
-        if (describeContext[choice.index] != null) {
-          describeContext[choice.index]!.content += choice.delta.content;
-          describeContext[choice.index]!.mpsc.produce(choice.delta.content);
-          continue;
-        }
-
-        const mpsc = new MPSC<string>();
-
-        describeContext[choice.index] = {
-          content: choice.delta.content,
-          mpsc,
-        };
-        mpsc.produce(choice.delta.content);
-
-        const event: AgenticaDescribeEvent<Model> = createDescribeEvent({
-          executes: histories,
-          stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
-          done: () => mpsc.done(),
-          get: () => describeContext[choice.index]?.content ?? "",
-          join: async () => {
-            await mpsc.waitClosed();
-            return describeContext[choice.index]!.content;
-          },
-        });
-        ctx.dispatch(event);
-      }
-    };
-
-    if (acc.object === "chat.completion.chunk") {
-      registerContext([acc, chunk].flatMap(v => v.choices));
-      return ChatGptCompletionMessageUtil.merge([acc, chunk]);
-    }
-
-    registerContext(chunk.choices);
-    return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
+  await reduceStreamingWithDispatch(completionStream, (props) => {
+    const event: AgenticaDescribeEvent<Model> = createDescribeEvent({
+      executes: histories,
+      ...props,
+    });
+    ctx.dispatch(event);
   });
 }
 
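Across describe.ts, initialize.ts, and select.ts this release collapses the hand-rolled StreamUtil.reduce / MPSC bookkeeping into the new reduceStreamingWithDispatch helper. The declaration below is only a sketch inferred from the three call sites; the real implementation ships in package/src/utils/ChatGptCompletionStreamingUtil.ts (with a spec alongside), and the StreamingDispatchProps name, the stream container type, and the nullable return are assumptions:

import type OpenAI from "openai";

// Hypothetical props type: one dispatch per completion choice, carrying the
// same stream/done/get/join accessors the deleted code assembled by hand.
interface StreamingDispatchProps {
  stream: AsyncGenerator<string, undefined, undefined>; // token deltas as they arrive
  done: () => boolean; // whether this choice has received its finish_reason
  get: () => string; // content accumulated so far
  join: () => Promise<string>; // full content once the choice closes
}

// Accumulates ChatCompletionChunk objects into a ChatCompletion while
// dispatching per-choice streaming props; the nullable result mirrors the
// `completion === null` guard kept in initialize.ts.
declare function reduceStreamingWithDispatch(
  stream: ReadableStream<OpenAI.ChatCompletionChunk>,
  dispatch: (props: StreamingDispatchProps) => void,
): Promise<OpenAI.ChatCompletion | null>;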
package/src/orchestrate/initialize.ts

@@ -11,9 +11,7 @@ import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
 import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
 import { createAssistantMessageEvent } from "../factory/events";
 import { decodeHistory, decodeUserMessageContent } from "../factory/histories";
-import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
-import { MPSC } from "../utils/MPSC";
-import { streamDefaultReaderToAsyncGenerator, StreamUtil } from "../utils/StreamUtil";
+import { reduceStreamingWithDispatch } from "../utils/ChatGptCompletionStreamingUtil";
 
 const FUNCTION: ILlmFunction<"chatgpt"> = typia.llm.application<
   __IChatInitialApplication,
@@ -65,67 +63,9 @@ export async function initialize<Model extends ILlmSchema.Model>(ctx: AgenticaCo
   // parallel_tool_calls: false,
 });
 
-  const textContext: ({
-    content: string;
-    mpsc: MPSC<string>;
-  })[] = [];
-
-  const completion = await StreamUtil.reduce<
-    OpenAI.ChatCompletionChunk,
-    Promise<OpenAI.ChatCompletion>
-  >(completionStream, async (accPromise, chunk) => {
-    const acc = await accPromise;
-    const registerContext = (
-      choices: OpenAI.ChatCompletionChunk.Choice[],
-    ) => {
-      for (const choice of choices) {
-        /**
-         * @TODO fix it
-         * Sometimes, the complete message arrives along with a finish reason.
-         */
-        if (choice.finish_reason != null) {
-          textContext[choice.index]?.mpsc.close();
-          continue;
-        }
-
-        if (choice.delta.content == null || choice.delta.content.length === 0) {
-          continue;
-        }
-
-        if (textContext[choice.index] != null) {
-          textContext[choice.index]!.content += choice.delta.content;
-          textContext[choice.index]!.mpsc.produce(choice.delta.content);
-          continue;
-        }
-
-        const mpsc = new MPSC<string>();
-
-        textContext[choice.index] = {
-          content: choice.delta.content,
-          mpsc,
-        };
-        mpsc.produce(choice.delta.content);
-
-        const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent({
-          stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
-          done: () => mpsc.done(),
-          get: () => textContext[choice.index]!.content,
-          join: async () => {
-            await mpsc.waitClosed();
-            return textContext[choice.index]!.content;
-          },
-        });
-        ctx.dispatch(event);
-      }
-    };
-
-    if (acc.object === "chat.completion.chunk") {
-      registerContext([acc, chunk].flatMap(v => v.choices));
-      return ChatGptCompletionMessageUtil.merge([acc, chunk]);
-    }
-
-    registerContext(chunk.choices);
-    return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
+  const completion = await reduceStreamingWithDispatch(completionStream, (props) => {
+    const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent(props);
+    ctx.dispatch(event);
   });
 
   if (completion === null) {
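Consumer-facing behavior is intended to stay the same: initialize.ts still dispatches an assistant-message event whose streaming accessors work as before. A hedged usage sketch; the on("assistantMessage") listener and the event's stream/join members are assumed from @agentica/core's public event API, not shown in this diff:

import { Agentica } from "@agentica/core";

declare const agent: Agentica<"chatgpt">;

agent.on("assistantMessage", async (event) => {
  // token by token, backed by the helper's internal MPSC channel
  for await (const delta of event.stream) {
    process.stdout.write(delta);
  }
  // resolves with the complete text once the choice's finish_reason arrives
  console.log(await event.join());
});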
package/src/orchestrate/select.ts

@@ -17,9 +17,10 @@ import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
 import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
 import { createAssistantMessageEvent } from "../factory/events";
 import { decodeHistory, decodeUserMessageContent } from "../factory/histories";
-import { MPSC } from "../utils";
-import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
-import { streamDefaultReaderToAsyncGenerator, StreamUtil, toAsyncGenerator } from "../utils/StreamUtil";
+import { __get_retry } from "../utils/__retry";
+import { AssistantMessageEmptyError, AssistantMessageEmptyWithReasoningError } from "../utils/AssistantMessageEmptyError";
+import { reduceStreamingWithDispatch } from "../utils/ChatGptCompletionStreamingUtil";
+import { toAsyncGenerator } from "../utils/StreamUtil";
 
 import { selectFunctionFromContext } from "./internal/selectFunctionFromContext";
 
@@ -93,154 +94,126 @@ async function step<Model extends ILlmSchema.Model>(
   retry: number,
   failures?: IFailure[],
 ): Promise<void> {
+  const _retryFn = __get_retry(ctx.config?.retry ?? AgenticaConstant.RETRY);
+  const retryFn = async (fn: (prevError?: unknown) => Promise<OpenAI.ChatCompletion>) => {
+    return _retryFn(fn).catch((e) => {
+      if (e instanceof AssistantMessageEmptyError) {
+        return Symbol("emptyAssistantMessage");
+      }
+      throw e;
+    });
+  };
   // ----
   // EXECUTE CHATGPT API
   // ----
-  const completionStream = await ctx.request("select", {
-    messages: [
-      // COMMON SYSTEM PROMPT
-      {
-        role: "system",
-        content: AgenticaDefaultPrompt.write(ctx.config),
-      } satisfies OpenAI.ChatCompletionSystemMessageParam,
-      // CANDIDATE FUNCTIONS
-      {
-        role: "assistant",
-        tool_calls: [
-          {
-            type: "function",
-            id: "getApiFunctions",
-            function: {
-              name: "getApiFunctions",
-              arguments: JSON.stringify({}),
+  const completion = await retryFn(async (prevError) => {
+    const stream = await ctx.request("select", {
+      messages: [
+        // COMMON SYSTEM PROMPT
+        {
+          role: "system",
+          content: AgenticaDefaultPrompt.write(ctx.config),
+        } satisfies OpenAI.ChatCompletionSystemMessageParam,
+        // CANDIDATE FUNCTIONS
+        {
+          role: "assistant",
+          tool_calls: [
+            {
+              type: "function",
+              id: "getApiFunctions",
+              function: {
+                name: "getApiFunctions",
+                arguments: JSON.stringify({}),
+              },
             },
-          },
-        ],
-      },
-      {
-        role: "tool",
-        tool_call_id: "getApiFunctions",
-        content: JSON.stringify(
-          operations.map(op => ({
-            name: op.name,
-            description: op.function.description,
-            ...(op.protocol === "http"
-              ? {
-                  method: op.function.method,
-                  path: op.function.path,
-                  tags: op.function.tags,
-                }
-              : {}),
-          })),
-        ),
-      },
-      // PREVIOUS HISTORIES
-      ...ctx.histories.map(decodeHistory).flat(),
-      // USER INPUT
-      {
-        role: "user",
-        content: ctx.prompt.contents.map(decodeUserMessageContent),
-      },
-      // SYSTEM PROMPT
-      {
-        role: "system",
-        content:
-          ctx.config?.systemPrompt?.select?.(ctx.histories)
-          ?? AgenticaSystemPrompt.SELECT,
-      },
-      // TYPE CORRECTIONS
-      ...emendMessages(failures ?? []),
-    ],
-    // STACK FUNCTIONS
-    tools: [{
-      type: "function",
-      function: {
-        name: CONTAINER.functions[0]!.name,
-        description: CONTAINER.functions[0]!.description,
-        /**
-         * @TODO fix it
-         * The property and value have a type mismatch, but it works.
-         */
-        parameters: CONTAINER.functions[0]!.parameters as unknown as Record<string, unknown>,
-      },
-    } satisfies OpenAI.ChatCompletionTool],
-    tool_choice: retry === 0
-      ? "auto"
-      : "required",
-    // parallel_tool_calls: false,
-  });
-
-  const selectContext: ({
-    content: string;
-    mpsc: MPSC<string>;
-  })[] = [];
-  const nullableCompletion = await StreamUtil.reduce<OpenAI.ChatCompletionChunk, Promise<OpenAI.ChatCompletion>>(completionStream, async (accPromise, chunk) => {
-    const acc = await accPromise;
-
-    const registerContext = (
-      choices: OpenAI.ChatCompletionChunk.Choice[],
-    ) => {
-      for (const choice of choices) {
-        /**
-         * @TODO fix it
-         * Sometimes, the complete message arrives along with a finish reason.
-         */
-        if (choice.finish_reason != null) {
-          selectContext[choice.index]?.mpsc.close();
-          continue;
-        }
-
-        if (choice.delta.content == null || choice.delta.content === "") {
-          continue;
-        }
-
-        if (selectContext[choice.index] != null) {
-          selectContext[choice.index]!.content += choice.delta.content;
-          selectContext[choice.index]!.mpsc.produce(choice.delta.content);
-          continue;
-        }
-
-        const mpsc = new MPSC<string>();
-
-        selectContext[choice.index] = {
-          content: choice.delta.content,
-          mpsc,
-        };
-        mpsc.produce(choice.delta.content);
+          ],
+        },
+        {
+          role: "tool",
+          tool_call_id: "getApiFunctions",
+          content: JSON.stringify(
+            operations.map(op => ({
+              name: op.name,
+              description: op.function.description,
+              ...(op.protocol === "http"
+                ? {
+                    method: op.function.method,
+                    path: op.function.path,
+                    tags: op.function.tags,
+                  }
+                : {}),
+            })),
+          ),
+        },
+        // PREVIOUS HISTORIES
+        ...ctx.histories.map(decodeHistory).flat(),
+        // USER INPUT
+        {
+          role: "user",
+          content: ctx.prompt.contents.map(decodeUserMessageContent),
+        },
+        // PREVIOUS ERROR
+        ...(prevError instanceof AssistantMessageEmptyWithReasoningError ? [
+          {
+            role: "assistant",
+            content: prevError.reasoning,
+          } satisfies OpenAI.ChatCompletionMessageParam,
+        ] : []),
+        // SYSTEM PROMPT
+        {
+          role: "system",
+          content:
+            ctx.config?.systemPrompt?.select?.(ctx.histories)
+            ?? AgenticaSystemPrompt.SELECT,
+        },
+        // TYPE CORRECTIONS
+        ...emendMessages(failures ?? []),
+      ],
+      // STACK FUNCTIONS
+      tools: [{
+        type: "function",
+        function: {
+          name: CONTAINER.functions[0]!.name,
+          description: CONTAINER.functions[0]!.description,
+          /**
+           * @TODO fix it
+           * The property and value have a type mismatch, but it works.
+           */
+          parameters: CONTAINER.functions[0]!.parameters as unknown as Record<string, unknown>,
+        },
+      } satisfies OpenAI.ChatCompletionTool],
+      tool_choice: retry === 0
+        ? "auto"
+        : "required",
+      // parallel_tool_calls: false,
+    });
 
-        const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent({
-          stream: streamDefaultReaderToAsyncGenerator(mpsc.consumer.getReader()),
-          done: () => mpsc.done(),
-          get: () => selectContext[choice.index]?.content ?? "",
-          join: async () => {
-            await mpsc.waitClosed();
-            return selectContext[choice.index]!.content;
-          },
-        });
-        ctx.dispatch(event);
+    const completion = await reduceStreamingWithDispatch(stream, (props) => {
+      const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent(props);
+      ctx.dispatch(event);
+    });
+    const allAssistantMessagesEmpty = completion.choices.every(v => v.message.tool_calls == null && v.message.content === "");
+    if (allAssistantMessagesEmpty) {
+      const firstChoice = completion.choices.at(0);
+      if ((firstChoice?.message as { reasoning?: string })?.reasoning != null) {
+        throw new AssistantMessageEmptyWithReasoningError((firstChoice?.message as { reasoning?: string })?.reasoning ?? "");
       }
-    };
-    if (acc.object === "chat.completion.chunk") {
-      registerContext([acc, chunk].flatMap(v => v.choices));
-      return ChatGptCompletionMessageUtil.merge([acc, chunk]);
+      throw new AssistantMessageEmptyError();
     }
-    registerContext(chunk.choices);
-    return ChatGptCompletionMessageUtil.accumulate(acc, chunk);
+    return completion;
   });
-  const completion = nullableCompletion!;
-  const emptyAssistantMessages = completion.choices.filter(v => v.message.tool_calls == null && v.message.content === "");
-  if(emptyAssistantMessages.length > 0) {
-    emptyAssistantMessages.forEach(v => {
+
+  if (typeof completion === "symbol") {
     const event: AgenticaAssistantMessageEvent = createAssistantMessageEvent({
-      stream: toAsyncGenerator(v.message.content ?? ""),
+      stream: toAsyncGenerator(""),
      done: () => true,
-      get: () => v.message.content ?? "",
+      get: () => "",
      join: async () => {
-        return v.message.content ?? "";
+        return "";
      },
-    });
-    ctx.dispatch(event);
    });
+    ctx.dispatch(event);
+    return;
  }
  // ----
  // VALIDATION
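__get_retry is also new in this version (package/src/utils/__retry.ts, plus its spec). The call site above implies a curried retry loop that hands the previous attempt's error to the next attempt, which is what lets select.ts replay an AssistantMessageEmptyWithReasoningError's reasoning as the `// PREVIOUS ERROR` message block. A minimal sketch consistent with that call site, not the published implementation:

// Returns a runner that retries `fn` up to `count` times, forwarding the
// error of the previous attempt so the caller can adapt the next one.
export function __get_retry(count: number) {
  return async <T>(fn: (prevError?: unknown) => Promise<T>): Promise<T> => {
    let prevError: unknown;
    for (let attempt = 0; attempt < count; attempt += 1) {
      try {
        return await fn(prevError);
      }
      catch (error) {
        prevError = error;
      }
    }
    throw prevError;
  };
}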
package/src/utils/AssistantMessageEmptyError.ts

@@ -0,0 +1,13 @@
+export class AssistantMessageEmptyError extends Error {
+  constructor() {
+    super();
+  }
+}
+
+export class AssistantMessageEmptyWithReasoningError extends AssistantMessageEmptyError {
+  public readonly reasoning: string;
+  constructor(reasoning: string) {
+    super();
+    this.reasoning = reasoning;
+  }
+}
package/src/utils/ChatGptCompletionMessageUtil.ts

@@ -57,7 +57,7 @@ function accumulate(origin: ChatCompletion, chunk: ChatCompletionChunk): ChatCom
       ...({
         // for open router
         reasoning: (choice.delta as { reasoning?: string }).reasoning ?? null,
-      })
+      }),
     } satisfies ChatCompletionMessage,
   };
 });