@agentica/core 0.9.0 → 0.10.0-dev.20250302

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. package/LICENSE +21 -21
  2. package/README.md +419 -419
  3. package/lib/Agentica.d.ts +4 -4
  4. package/lib/Agentica.js +7 -7
  5. package/lib/Agentica.js.map +1 -1
  6. package/lib/chatgpt/ChatGptCallFunctionAgent.js +14 -6
  7. package/lib/chatgpt/ChatGptCallFunctionAgent.js.map +1 -1
  8. package/lib/index.d.ts +1 -1
  9. package/lib/index.js +1 -1
  10. package/lib/index.js.map +1 -1
  11. package/lib/index.mjs +10 -5
  12. package/lib/index.mjs.map +1 -1
  13. package/lib/structures/IAgenticaContext.d.ts +1 -1
  14. package/lib/structures/IAgenticaController.d.ts +4 -5
  15. package/lib/structures/IAgenticaEvent.d.ts +3 -3
  16. package/lib/structures/IAgenticaOperation.d.ts +2 -3
  17. package/lib/structures/IAgenticaOperationSelection.d.ts +2 -3
  18. package/lib/structures/IAgenticaPrompt.d.ts +2 -3
  19. package/lib/structures/IAgenticaProps.d.ts +4 -4
  20. package/lib/structures/IAgenticaTokenUsage.d.ts +3 -3
  21. package/lib/structures/{IAgenticaProvider.d.ts → IAgenticaVendor.d.ts} +6 -6
  22. package/lib/structures/{IAgenticaProvider.js → IAgenticaVendor.js} +1 -1
  23. package/lib/structures/IAgenticaVendor.js.map +1 -0
  24. package/package.json +1 -1
  25. package/prompts/cancel.md +4 -4
  26. package/prompts/common.md +2 -2
  27. package/prompts/describe.md +6 -6
  28. package/prompts/execute.md +6 -6
  29. package/prompts/initialize.md +2 -2
  30. package/prompts/select.md +6 -6
  31. package/src/Agentica.ts +323 -323
  32. package/src/chatgpt/ChatGptAgent.ts +75 -75
  33. package/src/chatgpt/ChatGptCallFunctionAgent.ts +464 -448
  34. package/src/chatgpt/ChatGptCancelFunctionAgent.ts +287 -287
  35. package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +52 -52
  36. package/src/chatgpt/ChatGptHistoryDecoder.ts +88 -88
  37. package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +88 -88
  38. package/src/chatgpt/ChatGptSelectFunctionAgent.ts +319 -319
  39. package/src/functional/createHttpLlmApplication.ts +63 -63
  40. package/src/index.ts +19 -19
  41. package/src/internal/AgenticaConstant.ts +4 -4
  42. package/src/internal/AgenticaDefaultPrompt.ts +43 -43
  43. package/src/internal/AgenticaOperationComposer.ts +87 -87
  44. package/src/internal/AgenticaPromptFactory.ts +32 -32
  45. package/src/internal/AgenticaPromptTransformer.ts +86 -86
  46. package/src/internal/AgenticaTokenUsageAggregator.ts +115 -115
  47. package/src/internal/MathUtil.ts +3 -3
  48. package/src/internal/Singleton.ts +22 -22
  49. package/src/internal/__map_take.ts +15 -15
  50. package/src/structures/IAgenticaConfig.ts +123 -123
  51. package/src/structures/IAgenticaContext.ts +129 -129
  52. package/src/structures/IAgenticaController.ts +133 -132
  53. package/src/structures/IAgenticaEvent.ts +229 -229
  54. package/src/structures/IAgenticaExecutor.ts +156 -156
  55. package/src/structures/IAgenticaOperation.ts +63 -64
  56. package/src/structures/IAgenticaOperationCollection.ts +52 -52
  57. package/src/structures/IAgenticaOperationSelection.ts +68 -69
  58. package/src/structures/IAgenticaPrompt.ts +182 -178
  59. package/src/structures/IAgenticaProps.ts +70 -70
  60. package/src/structures/IAgenticaSystemPrompt.ts +124 -124
  61. package/src/structures/IAgenticaTokenUsage.ts +107 -107
  62. package/src/structures/{IAgenticaProvider.ts → IAgenticaVendor.ts} +39 -39
  63. package/src/structures/internal/__IChatCancelFunctionsApplication.ts +23 -23
  64. package/src/structures/internal/__IChatFunctionReference.ts +21 -21
  65. package/src/structures/internal/__IChatInitialApplication.ts +15 -15
  66. package/src/structures/internal/__IChatSelectFunctionsApplication.ts +24 -24
  67. package/src/typings/AgenticaSource.ts +6 -6
  68. package/lib/structures/IAgenticaProvider.js.map +0 -1
package/src/chatgpt/ChatGptCallFunctionAgent.ts
@@ -1,448 +1,464 @@
- import {
-   ChatGptTypeChecker,
-   HttpLlm,
-   IChatGptSchema,
-   IHttpMigrateRoute,
-   IHttpResponse,
-   ILlmSchema,
- } from "@samchon/openapi";
- import OpenAI from "openai";
- import { IValidation } from "typia";
-
- import { AgenticaConstant } from "../internal/AgenticaConstant";
- import { AgenticaDefaultPrompt } from "../internal/AgenticaDefaultPrompt";
- import { AgenticaPromptFactory } from "../internal/AgenticaPromptFactory";
- import { AgenticaSystemPrompt } from "../internal/AgenticaSystemPrompt";
- import { IAgenticaContext } from "../structures/IAgenticaContext";
- import { IAgenticaEvent } from "../structures/IAgenticaEvent";
- import { IAgenticaOperation } from "../structures/IAgenticaOperation";
- import { IAgenticaPrompt } from "../structures/IAgenticaPrompt";
- import { ChatGptCancelFunctionAgent } from "./ChatGptCancelFunctionAgent";
- import { ChatGptHistoryDecoder } from "./ChatGptHistoryDecoder";
-
- export namespace ChatGptCallFunctionAgent {
-   export const execute = async <Model extends ILlmSchema.Model>(
-     ctx: IAgenticaContext<Model>,
-   ): Promise<IAgenticaPrompt<Model>[]> => {
-     //----
-     // EXECUTE CHATGPT API
-     //----
-     const completion: OpenAI.ChatCompletion = await ctx.request("call", {
-       messages: [
-         // COMMON SYSTEM PROMPT
-         {
-           role: "system",
-           content: AgenticaDefaultPrompt.write(ctx.config),
-         } satisfies OpenAI.ChatCompletionSystemMessageParam,
-         // PREVIOUS HISTORIES
-         ...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
-         // USER INPUT
-         {
-           role: "user",
-           content: ctx.prompt.text,
-         },
-         // SYSTEM PROMPT
-         {
-           role: "system",
-           content:
-             ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
-             AgenticaSystemPrompt.EXECUTE,
-         },
-       ],
-       // STACKED FUNCTIONS
-       tools: ctx.stack.map(
-         (op) =>
-           ({
-             type: "function",
-             function: {
-               name: op.name,
-               description: op.function.description,
-               parameters: (op.function.separated
-                 ? (op.function.separated.llm ??
-                   ({
-                     type: "object",
-                     properties: {},
-                     required: [],
-                     additionalProperties: false,
-                     $defs: {},
-                   } satisfies IChatGptSchema.IParameters))
-                 : op.function.parameters) as Record<string, any>,
-             },
-           }) as OpenAI.ChatCompletionTool,
-       ),
-       tool_choice: "auto",
-       parallel_tool_calls: false,
-     });
-
-     //----
-     // PROCESS COMPLETION
-     //----
-     const closures: Array<
-       () => Promise<
-         Array<
-           | IAgenticaPrompt.IExecute<Model>
-           | IAgenticaPrompt.ICancel<Model>
-           | IAgenticaPrompt.IText
-         >
-       >
-     > = [];
-     for (const choice of completion.choices) {
-       for (const tc of choice.message.tool_calls ?? []) {
-         if (tc.type === "function") {
-           const operation: IAgenticaOperation<Model> | undefined =
-             ctx.operations.flat.get(tc.function.name);
-           if (operation === undefined) continue;
-           closures.push(
-             async (): Promise<
-               [IAgenticaPrompt.IExecute<Model>, IAgenticaPrompt.ICancel<Model>]
-             > => {
-               const call: IAgenticaEvent.ICall<Model> = {
-                 type: "call",
-                 id: tc.id,
-                 operation,
-                 arguments: JSON.parse(tc.function.arguments),
-               };
-               if (call.operation.protocol === "http")
-                 fillHttpArguments({
-                   operation: call.operation,
-                   arguments: call.arguments,
-                 });
-               await ctx.dispatch(call);
-
-               const execute: IAgenticaPrompt.IExecute<Model> = await propagate(
-                 ctx,
-                 call,
-                 0,
-               );
-               await ctx.dispatch({
-                 type: "execute",
-                 id: call.id,
-                 operation: call.operation,
-                 arguments: execute.arguments,
-                 value: execute.value,
-               });
-
-               await ChatGptCancelFunctionAgent.cancelFunction(ctx, {
-                 name: call.operation.name,
-                 reason: "completed",
-               });
-               await ctx.dispatch({
-                 type: "cancel",
-                 operation: call.operation,
-                 reason: "complete",
-               });
-               return [
-                 execute,
-                 {
-                   type: "cancel",
-                   id: call.id,
-                   operations: [
-                     AgenticaPromptFactory.selection({
-                       ...call.operation,
-                       reason: "complete",
-                     }),
-                   ],
-                 } satisfies IAgenticaPrompt.ICancel<Model>,
-               ] as const;
-             },
-           );
-         }
-       }
-       if (
-         choice.message.role === "assistant" &&
-         !!choice.message.content?.length
-       )
-         closures.push(async () => {
-           const value: IAgenticaPrompt.IText = {
-             type: "text",
-             role: "assistant",
-             text: choice.message.content!,
-           };
-           await ctx.dispatch(value);
-           return [value];
-         });
-     }
-     return (await Promise.all(closures.map((fn) => fn()))).flat();
-   };
-
-   const propagate = async <Model extends ILlmSchema.Model>(
-     ctx: IAgenticaContext<Model>,
-     call: IAgenticaEvent.ICall<Model>,
-     retry: number,
-   ): Promise<IAgenticaPrompt.IExecute<Model>> => {
-     if (call.operation.protocol === "http") {
-       //----
-       // HTTP PROTOCOL
-       //----
-       try {
-         // CALL HTTP API
-         const response: IHttpResponse = call.operation.controller.execute
-           ? await call.operation.controller.execute({
-               connection: call.operation.controller.connection,
-               application: call.operation.controller.application,
-               function: call.operation.function,
-               arguments: call.arguments,
-             })
-           : await HttpLlm.propagate({
-               connection: call.operation.controller.connection,
-               application: call.operation.controller.application,
-               function: call.operation.function,
-               input: call.arguments,
-             });
-         // CHECK STATUS
-         const success: boolean =
-           ((response.status === 400 ||
-             response.status === 404 ||
-             response.status === 422) &&
-             retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY) &&
-             typeof response.body) === false;
-         // DISPATCH EVENT
-         return (
-           (success === false
-             ? await correct(ctx, call, retry, response.body)
-             : null) ??
-           (await AgenticaPromptFactory.execute({
-             type: "execute",
-             protocol: "http",
-             controller: call.operation.controller,
-             function: call.operation.function,
-             id: call.id,
-             name: call.operation.name,
-             arguments: call.arguments,
-             value: response,
-           }))
-         );
-       } catch (error) {
-         // DISPATCH ERROR
-         return AgenticaPromptFactory.execute({
-           type: "execute",
-           protocol: "http",
-           controller: call.operation.controller,
-           function: call.operation.function,
-           id: call.id,
-           name: call.operation.name,
-           arguments: call.arguments,
-           value: {
-             status: 500,
-             headers: {},
-             body:
-               error instanceof Error
-                 ? {
-                     ...error,
-                     name: error.name,
-                     message: error.message,
-                   }
-                 : error,
-           },
-         });
-       }
-     } else {
-       //----
-       // CLASS FUNCTION
-       //----
-       // VALIDATE FIRST
-       const check: IValidation<unknown> = call.operation.function.validate(
-         call.arguments,
-       );
-       if (check.success === false)
-         return (
-           (retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
-             ? await correct(ctx, call, retry, check.errors)
-             : null) ??
-           AgenticaPromptFactory.execute({
-             type: "execute",
-             protocol: "class",
-             controller: call.operation.controller,
-             function: call.operation.function,
-             id: call.id,
-             name: call.operation.name,
-             arguments: call.arguments,
-             value: {
-               name: "TypeGuardError",
-               message: "Invalid arguments.",
-               errors: check.errors,
-             },
-           })
-         );
-       // EXECUTE FUNCTION
-       try {
-         const value: any =
-           typeof call.operation.controller.execute === "function"
-             ? await call.operation.controller.execute({
-                 application: call.operation.controller.application,
-                 function: call.operation.function,
-                 arguments: call.arguments,
-               })
-             : await (call.operation.controller.execute as any)[
-                 call.operation.function.name
-               ](call.arguments);
-         return AgenticaPromptFactory.execute({
-           type: "execute",
-           protocol: "class",
-           controller: call.operation.controller,
-           function: call.operation.function,
-           id: call.id,
-           name: call.operation.name,
-           arguments: call.arguments,
-           value,
-         });
-       } catch (error) {
-         return AgenticaPromptFactory.execute({
-           type: "execute",
-           protocol: "class",
-           controller: call.operation.controller,
-           function: call.operation.function,
-           id: call.id,
-           name: call.operation.name,
-           arguments: call.arguments,
-           value:
-             error instanceof Error
-               ? {
-                   ...error,
-                   name: error.name,
-                   message: error.message,
-                 }
-               : error,
-         });
-       }
-     }
-   };
-
-   const correct = async <Model extends ILlmSchema.Model>(
-     ctx: IAgenticaContext<Model>,
-     call: IAgenticaEvent.ICall<Model>,
-     retry: number,
-     error: unknown,
-   ): Promise<IAgenticaPrompt.IExecute<Model> | null> => {
-     //----
-     // EXECUTE CHATGPT API
-     //----
-     const completion: OpenAI.ChatCompletion = await ctx.request("call", {
-       messages: [
-         // COMMON SYSTEM PROMPT
-         {
-           role: "system",
-           content: AgenticaDefaultPrompt.write(ctx.config),
-         } satisfies OpenAI.ChatCompletionSystemMessageParam,
-         // PREVIOUS HISTORIES
-         ...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
-         // USER INPUT
-         {
-           role: "user",
-           content: ctx.prompt.text,
-         },
-         // TYPE CORRECTION
-         {
-           role: "system",
-           content:
-             ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
-             AgenticaSystemPrompt.EXECUTE,
-         },
-         {
-           role: "assistant",
-           tool_calls: [
-             {
-               type: "function",
-               id: call.id,
-               function: {
-                 name: call.operation.name,
-                 arguments: JSON.stringify(call.arguments),
-               },
-             } satisfies OpenAI.ChatCompletionMessageToolCall,
-           ],
-         } satisfies OpenAI.ChatCompletionAssistantMessageParam,
-         {
-           role: "tool",
-           content: typeof error === "string" ? error : JSON.stringify(error),
-           tool_call_id: call.id,
-         } satisfies OpenAI.ChatCompletionToolMessageParam,
-         {
-           role: "system",
-           content: [
-             "You A.I. assistant has composed wrong arguments.",
-             "",
-             "Correct it at the next function calling.",
-           ].join("\n"),
-         },
-       ],
-       // STACK FUNCTIONS
-       tools: [
-         {
-           type: "function",
-           function: {
-             name: call.operation.name,
-             description: call.operation.function.description,
-             parameters: (call.operation.function.separated
-               ? (call.operation.function.separated?.llm ??
-                 ({
-                   $defs: {},
-                   type: "object",
-                   properties: {},
-                   additionalProperties: false,
-                   required: [],
-                 } satisfies IChatGptSchema.IParameters))
-               : call.operation.function.parameters) as any,
-           },
-         },
-       ],
-       tool_choice: "auto",
-       parallel_tool_calls: false,
-     });
-
-     //----
-     // PROCESS COMPLETION
-     //----
-     const toolCall: OpenAI.ChatCompletionMessageToolCall | undefined = (
-       completion.choices[0]?.message.tool_calls ?? []
-     ).find(
-       (tc) =>
-         tc.type === "function" && tc.function.name === call.operation.name,
-     );
-     if (toolCall === undefined) return null;
-     return propagate(
-       ctx,
-       {
-         id: toolCall.id,
-         type: "call",
-         operation: call.operation,
-         arguments: JSON.parse(toolCall.function.arguments),
-       },
-       retry,
-     );
-   };
-
-   const fillHttpArguments = <Model extends ILlmSchema.Model>(props: {
-     operation: IAgenticaOperation<Model>;
-     arguments: object;
-   }): void => {
-     if (props.operation.protocol !== "http") return;
-     const route: IHttpMigrateRoute = props.operation.function.route();
-     if (
-       route.body &&
-       route.operation().requestBody?.required === true &&
-       (props.arguments as any).body === undefined &&
-       isObject(
-         (props.operation.function.parameters as IChatGptSchema.IParameters)
-           .$defs,
-         (props.operation.function.parameters as IChatGptSchema.IParameters)
-           .properties.body!,
-       )
-     )
-       (props.arguments as any).body = {};
-     if (route.query && (props.arguments as any).query === undefined)
-       (props.arguments as any).query = {};
-   };
-
-   const isObject = (
-     $defs: Record<string, IChatGptSchema>,
-     schema: IChatGptSchema,
-   ): boolean => {
-     return (
-       ChatGptTypeChecker.isObject(schema) ||
-       (ChatGptTypeChecker.isReference(schema) &&
-         isObject($defs, $defs[schema.$ref.split("/").at(-1)!]!)) ||
-       (ChatGptTypeChecker.isAnyOf(schema) &&
-         schema.anyOf.every((schema) => isObject($defs, schema)))
-     );
-   };
- }
+ import {
+   ChatGptTypeChecker,
+   HttpLlm,
+   IChatGptSchema,
+   IHttpMigrateRoute,
+   IHttpResponse,
+   ILlmSchema,
+ } from "@samchon/openapi";
+ import OpenAI from "openai";
+ import { IValidation } from "typia";
+
+ import { AgenticaConstant } from "../internal/AgenticaConstant";
+ import { AgenticaDefaultPrompt } from "../internal/AgenticaDefaultPrompt";
+ import { AgenticaPromptFactory } from "../internal/AgenticaPromptFactory";
+ import { AgenticaSystemPrompt } from "../internal/AgenticaSystemPrompt";
+ import { IAgenticaContext } from "../structures/IAgenticaContext";
+ import { IAgenticaEvent } from "../structures/IAgenticaEvent";
+ import { IAgenticaOperation } from "../structures/IAgenticaOperation";
+ import { IAgenticaPrompt } from "../structures/IAgenticaPrompt";
+ import { ChatGptCancelFunctionAgent } from "./ChatGptCancelFunctionAgent";
+ import { ChatGptHistoryDecoder } from "./ChatGptHistoryDecoder";
+
+ export namespace ChatGptCallFunctionAgent {
+   export const execute = async <Model extends ILlmSchema.Model>(
+     ctx: IAgenticaContext<Model>,
+   ): Promise<IAgenticaPrompt<Model>[]> => {
+     //----
+     // EXECUTE CHATGPT API
+     //----
+     const completion: OpenAI.ChatCompletion = await ctx.request("call", {
+       messages: [
+         // COMMON SYSTEM PROMPT
+         {
+           role: "system",
+           content: AgenticaDefaultPrompt.write(ctx.config),
+         } satisfies OpenAI.ChatCompletionSystemMessageParam,
+         // PREVIOUS HISTORIES
+         ...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
+         // USER INPUT
+         {
+           role: "user",
+           content: ctx.prompt.text,
+         },
+         // SYSTEM PROMPT
+         {
+           role: "system",
+           content:
+             ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
+             AgenticaSystemPrompt.EXECUTE,
+         },
+       ],
+       // STACKED FUNCTIONS
+       tools: ctx.stack.map(
+         (op) =>
+           ({
+             type: "function",
+             function: {
+               name: op.name,
+               description: op.function.description,
+               parameters: (op.function.separated
+                 ? (op.function.separated.llm ??
+                   ({
+                     type: "object",
+                     properties: {},
+                     required: [],
+                     additionalProperties: false,
+                     $defs: {},
+                   } satisfies IChatGptSchema.IParameters))
+                 : op.function.parameters) as Record<string, any>,
+             },
+           }) as OpenAI.ChatCompletionTool,
+       ),
+       tool_choice: "auto",
+       parallel_tool_calls: false,
+     });
+
+     //----
+     // PROCESS COMPLETION
+     //----
+     const closures: Array<
+       () => Promise<
+         Array<
+           | IAgenticaPrompt.IExecute<Model>
+           | IAgenticaPrompt.ICancel<Model>
+           | IAgenticaPrompt.IText
+         >
+       >
+     > = [];
+     for (const choice of completion.choices) {
+       for (const tc of choice.message.tool_calls ?? []) {
+         if (tc.type === "function") {
+           const operation: IAgenticaOperation<Model> | undefined =
+             ctx.operations.flat.get(tc.function.name);
+           if (operation === undefined) continue;
+           closures.push(
+             async (): Promise<
+               [IAgenticaPrompt.IExecute<Model>, IAgenticaPrompt.ICancel<Model>]
+             > => {
+               const call: IAgenticaEvent.ICall<Model> = {
+                 type: "call",
+                 id: tc.id,
+                 operation,
+                 arguments: JSON.parse(tc.function.arguments),
+               };
+               if (call.operation.protocol === "http")
+                 fillHttpArguments({
+                   operation: call.operation,
+                   arguments: call.arguments,
+                 });
+               await ctx.dispatch(call);
+
+               const execute: IAgenticaPrompt.IExecute<Model> = await propagate(
+                 ctx,
+                 call,
+                 0,
+               );
+               await ctx.dispatch({
+                 type: "execute",
+                 id: call.id,
+                 operation: call.operation,
+                 arguments: execute.arguments,
+                 value: execute.value,
+               });
+
+               await ChatGptCancelFunctionAgent.cancelFunction(ctx, {
+                 name: call.operation.name,
+                 reason: "completed",
+               });
+               await ctx.dispatch({
+                 type: "cancel",
+                 operation: call.operation,
+                 reason: "complete",
+               });
+               return [
+                 execute,
+                 {
+                   type: "cancel",
+                   id: call.id,
+                   operations: [
+                     AgenticaPromptFactory.selection({
+                       ...call.operation,
+                       reason: "complete",
+                     }),
+                   ],
+                 } satisfies IAgenticaPrompt.ICancel<Model>,
+               ] as const;
+             },
+           );
+         }
+       }
+       if (
+         choice.message.role === "assistant" &&
+         !!choice.message.content?.length
+       )
+         closures.push(async () => {
+           const value: IAgenticaPrompt.IText = {
+             type: "text",
+             role: "assistant",
+             text: choice.message.content!,
+           };
+           await ctx.dispatch(value);
+           return [value];
+         });
+     }
+     return (await Promise.all(closures.map((fn) => fn()))).flat();
+   };
+
+   const propagate = async <Model extends ILlmSchema.Model>(
+     ctx: IAgenticaContext<Model>,
+     call: IAgenticaEvent.ICall<Model>,
+     retry: number,
+   ): Promise<IAgenticaPrompt.IExecute<Model>> => {
+     if (call.operation.protocol === "http") {
+       //----
+       // HTTP PROTOCOL
+       //----
+       // NESTED VALIDATOR
+       const check: IValidation<unknown> = call.operation.function.validate(
+         call.arguments,
+       );
+       if (
+         check.success === false &&
+         retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
+       ) {
+         const trial: IAgenticaPrompt.IExecute<Model> | null = await correct(
+           ctx,
+           call,
+           retry,
+           check.errors,
+         );
+         if (trial !== null) return trial;
+       }
+       try {
+         // CALL HTTP API
+         const response: IHttpResponse = call.operation.controller.execute
+           ? await call.operation.controller.execute({
+               connection: call.operation.controller.connection,
+               application: call.operation.controller.application,
+               function: call.operation.function,
+               arguments: call.arguments,
+             })
+           : await HttpLlm.propagate({
+               connection: call.operation.controller.connection,
+               application: call.operation.controller.application,
+               function: call.operation.function,
+               input: call.arguments,
+             });
+         // CHECK STATUS
+         const success: boolean =
+           ((response.status === 400 ||
+             response.status === 404 ||
+             response.status === 422) &&
+             retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY) &&
+             typeof response.body) === false;
+         // DISPATCH EVENT
+         return (
+           (success === false
+             ? await correct(ctx, call, retry, response.body)
+             : null) ??
+           (await AgenticaPromptFactory.execute({
+             type: "execute",
+             protocol: "http",
+             controller: call.operation.controller,
+             function: call.operation.function,
+             id: call.id,
+             name: call.operation.name,
+             arguments: call.arguments,
+             value: response,
+           }))
+         );
+       } catch (error) {
+         // DISPATCH ERROR
+         return AgenticaPromptFactory.execute({
+           type: "execute",
+           protocol: "http",
+           controller: call.operation.controller,
+           function: call.operation.function,
+           id: call.id,
+           name: call.operation.name,
+           arguments: call.arguments,
+           value: {
+             status: 500,
+             headers: {},
+             body:
+               error instanceof Error
+                 ? {
+                     ...error,
+                     name: error.name,
+                     message: error.message,
+                   }
+                 : error,
+           },
+         });
+       }
+     } else {
+       //----
+       // CLASS FUNCTION
+       //----
+       // VALIDATE FIRST
+       const check: IValidation<unknown> = call.operation.function.validate(
+         call.arguments,
+       );
+       if (check.success === false)
+         return (
+           (retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
+             ? await correct(ctx, call, retry, check.errors)
+             : null) ??
+           AgenticaPromptFactory.execute({
+             type: "execute",
+             protocol: "class",
+             controller: call.operation.controller,
+             function: call.operation.function,
+             id: call.id,
+             name: call.operation.name,
+             arguments: call.arguments,
+             value: {
+               name: "TypeGuardError",
+               message: "Invalid arguments.",
+               errors: check.errors,
+             },
+           })
+         );
+       // EXECUTE FUNCTION
+       try {
+         const value: any =
+           typeof call.operation.controller.execute === "function"
+             ? await call.operation.controller.execute({
+                 application: call.operation.controller.application,
+                 function: call.operation.function,
+                 arguments: call.arguments,
+               })
+             : await (call.operation.controller.execute as any)[
+                 call.operation.function.name
+               ](call.arguments);
+         return AgenticaPromptFactory.execute({
+           type: "execute",
+           protocol: "class",
+           controller: call.operation.controller,
+           function: call.operation.function,
+           id: call.id,
+           name: call.operation.name,
+           arguments: call.arguments,
+           value,
+         });
+       } catch (error) {
+         return AgenticaPromptFactory.execute({
+           type: "execute",
+           protocol: "class",
+           controller: call.operation.controller,
+           function: call.operation.function,
+           id: call.id,
+           name: call.operation.name,
+           arguments: call.arguments,
+           value:
+             error instanceof Error
+               ? {
+                   ...error,
+                   name: error.name,
+                   message: error.message,
+                 }
+               : error,
+         });
+       }
+     }
+   };
+
+   const correct = async <Model extends ILlmSchema.Model>(
+     ctx: IAgenticaContext<Model>,
+     call: IAgenticaEvent.ICall<Model>,
+     retry: number,
+     error: unknown,
+   ): Promise<IAgenticaPrompt.IExecute<Model> | null> => {
+     //----
+     // EXECUTE CHATGPT API
+     //----
+     const completion: OpenAI.ChatCompletion = await ctx.request("call", {
+       messages: [
+         // COMMON SYSTEM PROMPT
+         {
+           role: "system",
+           content: AgenticaDefaultPrompt.write(ctx.config),
+         } satisfies OpenAI.ChatCompletionSystemMessageParam,
+         // PREVIOUS HISTORIES
+         ...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
+         // USER INPUT
+         {
+           role: "user",
+           content: ctx.prompt.text,
+         },
+         // TYPE CORRECTION
+         {
+           role: "system",
+           content:
+             ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
+             AgenticaSystemPrompt.EXECUTE,
+         },
+         {
+           role: "assistant",
+           tool_calls: [
+             {
+               type: "function",
+               id: call.id,
+               function: {
+                 name: call.operation.name,
+                 arguments: JSON.stringify(call.arguments),
+               },
+             } satisfies OpenAI.ChatCompletionMessageToolCall,
+           ],
+         } satisfies OpenAI.ChatCompletionAssistantMessageParam,
+         {
+           role: "tool",
+           content: typeof error === "string" ? error : JSON.stringify(error),
+           tool_call_id: call.id,
+         } satisfies OpenAI.ChatCompletionToolMessageParam,
+         {
+           role: "system",
+           content: [
+             "You A.I. assistant has composed wrong arguments.",
+             "",
+             "Correct it at the next function calling.",
+           ].join("\n"),
+         },
+       ],
+       // STACK FUNCTIONS
+       tools: [
+         {
+           type: "function",
+           function: {
+             name: call.operation.name,
+             description: call.operation.function.description,
+             parameters: (call.operation.function.separated
+               ? (call.operation.function.separated?.llm ??
+                 ({
+                   $defs: {},
+                   type: "object",
+                   properties: {},
+                   additionalProperties: false,
+                   required: [],
+                 } satisfies IChatGptSchema.IParameters))
+               : call.operation.function.parameters) as any,
+           },
+         },
+       ],
+       tool_choice: "auto",
+       parallel_tool_calls: false,
+     });
+
+     //----
+     // PROCESS COMPLETION
+     //----
+     const toolCall: OpenAI.ChatCompletionMessageToolCall | undefined = (
+       completion.choices[0]?.message.tool_calls ?? []
+     ).find(
+       (tc) =>
+         tc.type === "function" && tc.function.name === call.operation.name,
+     );
+     if (toolCall === undefined) return null;
+     return propagate(
+       ctx,
+       {
+         id: toolCall.id,
+         type: "call",
+         operation: call.operation,
+         arguments: JSON.parse(toolCall.function.arguments),
+       },
+       retry,
+     );
+   };
+
+   const fillHttpArguments = <Model extends ILlmSchema.Model>(props: {
+     operation: IAgenticaOperation<Model>;
+     arguments: object;
+   }): void => {
+     if (props.operation.protocol !== "http") return;
+     const route: IHttpMigrateRoute = props.operation.function.route();
+     if (
+       route.body &&
+       route.operation().requestBody?.required === true &&
+       (props.arguments as any).body === undefined &&
+       isObject(
+         (props.operation.function.parameters as IChatGptSchema.IParameters)
+           .$defs,
+         (props.operation.function.parameters as IChatGptSchema.IParameters)
+           .properties.body!,
+       )
+     )
+       (props.arguments as any).body = {};
+     if (route.query && (props.arguments as any).query === undefined)
+       (props.arguments as any).query = {};
+   };
+
+   const isObject = (
+     $defs: Record<string, IChatGptSchema>,
+     schema: IChatGptSchema,
+   ): boolean => {
+     return (
+       ChatGptTypeChecker.isObject(schema) ||
+       (ChatGptTypeChecker.isReference(schema) &&
+         isObject($defs, $defs[schema.$ref.split("/").at(-1)!]!)) ||
+       (ChatGptTypeChecker.isAnyOf(schema) &&
+         schema.anyOf.every((schema) => isObject($defs, schema)))
+     );
+   };
+ }