@agentica/core 0.12.2-dev.20250314 → 0.12.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +461 -461
- package/lib/context/AgenticaTokenUsage.d.ts +6 -6
- package/package.json +1 -1
- package/prompts/cancel.md +4 -4
- package/prompts/common.md +2 -2
- package/prompts/describe.md +6 -6
- package/prompts/execute.md +6 -6
- package/prompts/initialize.md +2 -2
- package/prompts/select.md +6 -6
- package/src/Agentica.ts +359 -359
- package/src/chatgpt/ChatGptAgent.ts +76 -76
- package/src/chatgpt/ChatGptCallFunctionAgent.ts +466 -466
- package/src/chatgpt/ChatGptCancelFunctionAgent.ts +280 -280
- package/src/chatgpt/ChatGptCompletionMessageUtil.ts +166 -166
- package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +122 -122
- package/src/chatgpt/ChatGptHistoryDecoder.ts +88 -88
- package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +96 -96
- package/src/chatgpt/ChatGptSelectFunctionAgent.ts +311 -311
- package/src/chatgpt/ChatGptUsageAggregator.ts +62 -62
- package/src/context/AgenticaCancelPrompt.ts +32 -32
- package/src/context/AgenticaClassOperation.ts +23 -23
- package/src/context/AgenticaContext.ts +130 -130
- package/src/context/AgenticaHttpOperation.ts +27 -27
- package/src/context/AgenticaOperation.ts +66 -66
- package/src/context/AgenticaOperationBase.ts +57 -57
- package/src/context/AgenticaOperationCollection.ts +52 -52
- package/src/context/AgenticaOperationSelection.ts +27 -27
- package/src/context/AgenticaTokenUsage.ts +170 -170
- package/src/context/internal/AgenticaTokenUsageAggregator.ts +66 -66
- package/src/context/internal/__IChatCancelFunctionsApplication.ts +23 -23
- package/src/context/internal/__IChatFunctionReference.ts +21 -21
- package/src/context/internal/__IChatInitialApplication.ts +15 -15
- package/src/context/internal/__IChatSelectFunctionsApplication.ts +24 -24
- package/src/events/AgenticaCallEvent.ts +36 -36
- package/src/events/AgenticaCancelEvent.ts +28 -28
- package/src/events/AgenticaDescribeEvent.ts +66 -66
- package/src/events/AgenticaEvent.ts +36 -36
- package/src/events/AgenticaEventBase.ts +7 -7
- package/src/events/AgenticaEventSource.ts +6 -6
- package/src/events/AgenticaExecuteEvent.ts +50 -50
- package/src/events/AgenticaInitializeEvent.ts +14 -14
- package/src/events/AgenticaRequestEvent.ts +45 -45
- package/src/events/AgenticaResponseEvent.ts +48 -48
- package/src/events/AgenticaSelectEvent.ts +37 -37
- package/src/events/AgenticaTextEvent.ts +62 -62
- package/src/functional/assertHttpLlmApplication.ts +55 -55
- package/src/functional/validateHttpLlmApplication.ts +66 -66
- package/src/index.ts +44 -44
- package/src/internal/AgenticaConstant.ts +4 -4
- package/src/internal/AgenticaDefaultPrompt.ts +43 -43
- package/src/internal/AgenticaOperationComposer.ts +96 -96
- package/src/internal/ByteArrayUtil.ts +5 -5
- package/src/internal/MPSCUtil.ts +111 -111
- package/src/internal/MathUtil.ts +3 -3
- package/src/internal/Singleton.ts +22 -22
- package/src/internal/StreamUtil.ts +64 -64
- package/src/internal/__map_take.ts +15 -15
- package/src/json/IAgenticaEventJson.ts +178 -178
- package/src/json/IAgenticaOperationJson.ts +36 -36
- package/src/json/IAgenticaOperationSelectionJson.ts +19 -19
- package/src/json/IAgenticaPromptJson.ts +130 -130
- package/src/json/IAgenticaTokenUsageJson.ts +107 -107
- package/src/prompts/AgenticaCancelPrompt.ts +32 -32
- package/src/prompts/AgenticaDescribePrompt.ts +41 -41
- package/src/prompts/AgenticaExecutePrompt.ts +52 -52
- package/src/prompts/AgenticaPrompt.ts +14 -14
- package/src/prompts/AgenticaPromptBase.ts +27 -27
- package/src/prompts/AgenticaSelectPrompt.ts +32 -32
- package/src/prompts/AgenticaTextPrompt.ts +31 -31
- package/src/structures/IAgenticaConfig.ts +123 -123
- package/src/structures/IAgenticaController.ts +133 -133
- package/src/structures/IAgenticaExecutor.ts +157 -157
- package/src/structures/IAgenticaProps.ts +69 -69
- package/src/structures/IAgenticaSystemPrompt.ts +125 -125
- package/src/structures/IAgenticaVendor.ts +39 -39
- package/src/transformers/AgenticaEventTransformer.ts +165 -165
- package/src/transformers/AgenticaPromptTransformer.ts +134 -134
|
@@ -1,466 +1,466 @@
|
|
|
1
|
-
import {
|
|
2
|
-
ChatGptTypeChecker,
|
|
3
|
-
HttpLlm,
|
|
4
|
-
IChatGptSchema,
|
|
5
|
-
IHttpMigrateRoute,
|
|
6
|
-
IHttpResponse,
|
|
7
|
-
ILlmSchema,
|
|
8
|
-
} from "@samchon/openapi";
|
|
9
|
-
import OpenAI from "openai";
|
|
10
|
-
import { IValidation } from "typia";
|
|
11
|
-
|
|
12
|
-
import { AgenticaCancelPrompt } from "../context/AgenticaCancelPrompt";
|
|
13
|
-
import { AgenticaContext } from "../context/AgenticaContext";
|
|
14
|
-
import { AgenticaOperation } from "../context/AgenticaOperation";
|
|
15
|
-
import { AgenticaOperationSelection } from "../context/AgenticaOperationSelection";
|
|
16
|
-
import { AgenticaCallEvent } from "../events/AgenticaCallEvent";
|
|
17
|
-
import { AgenticaCancelEvent } from "../events/AgenticaCancelEvent";
|
|
18
|
-
import { AgenticaExecuteEvent } from "../events/AgenticaExecuteEvent";
|
|
19
|
-
import { AgenticaTextEvent } from "../events/AgenticaTextEvent";
|
|
20
|
-
import { AgenticaConstant } from "../internal/AgenticaConstant";
|
|
21
|
-
import { AgenticaDefaultPrompt } from "../internal/AgenticaDefaultPrompt";
|
|
22
|
-
import { AgenticaSystemPrompt } from "../internal/AgenticaSystemPrompt";
|
|
23
|
-
import { StreamUtil } from "../internal/StreamUtil";
|
|
24
|
-
import { AgenticaExecutePrompt } from "../prompts/AgenticaExecutePrompt";
|
|
25
|
-
import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
|
|
26
|
-
import { AgenticaTextPrompt } from "../prompts/AgenticaTextPrompt";
|
|
27
|
-
import { ChatGptCancelFunctionAgent } from "./ChatGptCancelFunctionAgent";
|
|
28
|
-
import { ChatGptCompletionMessageUtil } from "./ChatGptCompletionMessageUtil";
|
|
29
|
-
import { ChatGptHistoryDecoder } from "./ChatGptHistoryDecoder";
|
|
30
|
-
|
|
31
|
-
export namespace ChatGptCallFunctionAgent {
|
|
32
|
-
export const execute = async <Model extends ILlmSchema.Model>(
|
|
33
|
-
ctx: AgenticaContext<Model>,
|
|
34
|
-
): Promise<AgenticaPrompt<Model>[]> => {
|
|
35
|
-
//----
|
|
36
|
-
// EXECUTE CHATGPT API
|
|
37
|
-
//----
|
|
38
|
-
const completionStream = await ctx.request("call", {
|
|
39
|
-
messages: [
|
|
40
|
-
// COMMON SYSTEM PROMPT
|
|
41
|
-
{
|
|
42
|
-
role: "system",
|
|
43
|
-
content: AgenticaDefaultPrompt.write(ctx.config),
|
|
44
|
-
} satisfies OpenAI.ChatCompletionSystemMessageParam,
|
|
45
|
-
// PREVIOUS HISTORIES
|
|
46
|
-
...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
|
|
47
|
-
// USER INPUT
|
|
48
|
-
{
|
|
49
|
-
role: "user",
|
|
50
|
-
content: ctx.prompt.text,
|
|
51
|
-
},
|
|
52
|
-
// SYSTEM PROMPT
|
|
53
|
-
{
|
|
54
|
-
role: "system",
|
|
55
|
-
content:
|
|
56
|
-
ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
|
|
57
|
-
AgenticaSystemPrompt.EXECUTE,
|
|
58
|
-
},
|
|
59
|
-
],
|
|
60
|
-
// STACKED FUNCTIONS
|
|
61
|
-
tools: ctx.stack.map(
|
|
62
|
-
(s) =>
|
|
63
|
-
({
|
|
64
|
-
type: "function",
|
|
65
|
-
function: {
|
|
66
|
-
name: s.operation.name,
|
|
67
|
-
description: s.operation.function.description,
|
|
68
|
-
parameters: (s.operation.function.separated
|
|
69
|
-
? (s.operation.function.separated.llm ??
|
|
70
|
-
({
|
|
71
|
-
type: "object",
|
|
72
|
-
properties: {},
|
|
73
|
-
required: [],
|
|
74
|
-
additionalProperties: false,
|
|
75
|
-
$defs: {},
|
|
76
|
-
} satisfies IChatGptSchema.IParameters))
|
|
77
|
-
: s.operation.function.parameters) as Record<string, any>,
|
|
78
|
-
},
|
|
79
|
-
}) as OpenAI.ChatCompletionTool,
|
|
80
|
-
),
|
|
81
|
-
tool_choice: "auto",
|
|
82
|
-
parallel_tool_calls: false,
|
|
83
|
-
});
|
|
84
|
-
|
|
85
|
-
//----
|
|
86
|
-
// PROCESS COMPLETION
|
|
87
|
-
//----
|
|
88
|
-
const closures: Array<
|
|
89
|
-
() => Promise<
|
|
90
|
-
Array<
|
|
91
|
-
| AgenticaExecutePrompt<Model>
|
|
92
|
-
| AgenticaCancelPrompt<Model>
|
|
93
|
-
| AgenticaTextPrompt
|
|
94
|
-
>
|
|
95
|
-
>
|
|
96
|
-
> = [];
|
|
97
|
-
|
|
98
|
-
const chunks = await StreamUtil.readAll(completionStream);
|
|
99
|
-
const completion = ChatGptCompletionMessageUtil.merge(chunks);
|
|
100
|
-
|
|
101
|
-
for (const choice of completion.choices) {
|
|
102
|
-
for (const tc of choice.message.tool_calls ?? []) {
|
|
103
|
-
if (tc.type === "function") {
|
|
104
|
-
const operation: AgenticaOperation<Model> | undefined =
|
|
105
|
-
ctx.operations.flat.get(tc.function.name);
|
|
106
|
-
if (operation === undefined) continue;
|
|
107
|
-
closures.push(
|
|
108
|
-
async (): Promise<
|
|
109
|
-
[AgenticaExecutePrompt<Model>, AgenticaCancelPrompt<Model>]
|
|
110
|
-
> => {
|
|
111
|
-
const call: AgenticaCallEvent<Model> = new AgenticaCallEvent({
|
|
112
|
-
id: tc.id,
|
|
113
|
-
operation,
|
|
114
|
-
arguments: JSON.parse(tc.function.arguments),
|
|
115
|
-
});
|
|
116
|
-
if (call.operation.protocol === "http")
|
|
117
|
-
fillHttpArguments({
|
|
118
|
-
operation: call.operation,
|
|
119
|
-
arguments: call.arguments,
|
|
120
|
-
});
|
|
121
|
-
await ctx.dispatch(call);
|
|
122
|
-
|
|
123
|
-
const execute: AgenticaExecutePrompt<Model> = await propagate(
|
|
124
|
-
ctx,
|
|
125
|
-
call,
|
|
126
|
-
0,
|
|
127
|
-
);
|
|
128
|
-
await ctx.dispatch(
|
|
129
|
-
new AgenticaExecuteEvent({
|
|
130
|
-
id: call.id,
|
|
131
|
-
operation: call.operation,
|
|
132
|
-
arguments: execute.arguments,
|
|
133
|
-
value: execute.value,
|
|
134
|
-
}),
|
|
135
|
-
);
|
|
136
|
-
|
|
137
|
-
await ChatGptCancelFunctionAgent.cancelFunction(ctx, {
|
|
138
|
-
name: call.operation.name,
|
|
139
|
-
reason: "completed",
|
|
140
|
-
});
|
|
141
|
-
await ctx.dispatch(
|
|
142
|
-
new AgenticaCancelEvent({
|
|
143
|
-
selection: new AgenticaOperationSelection({
|
|
144
|
-
operation: call.operation,
|
|
145
|
-
reason: "complete",
|
|
146
|
-
}),
|
|
147
|
-
}),
|
|
148
|
-
);
|
|
149
|
-
return [
|
|
150
|
-
execute,
|
|
151
|
-
new AgenticaCancelPrompt({
|
|
152
|
-
id: call.id,
|
|
153
|
-
selections: [
|
|
154
|
-
new AgenticaOperationSelection({
|
|
155
|
-
operation: call.operation,
|
|
156
|
-
reason: "complete",
|
|
157
|
-
}),
|
|
158
|
-
],
|
|
159
|
-
}),
|
|
160
|
-
] as const;
|
|
161
|
-
},
|
|
162
|
-
);
|
|
163
|
-
}
|
|
164
|
-
}
|
|
165
|
-
if (
|
|
166
|
-
choice.message.role === "assistant" &&
|
|
167
|
-
!!choice.message.content?.length
|
|
168
|
-
)
|
|
169
|
-
closures.push(async () => {
|
|
170
|
-
const value: AgenticaTextPrompt = new AgenticaTextPrompt({
|
|
171
|
-
role: "assistant",
|
|
172
|
-
text: choice.message.content!,
|
|
173
|
-
});
|
|
174
|
-
await ctx.dispatch(
|
|
175
|
-
new AgenticaTextEvent({
|
|
176
|
-
role: "assistant",
|
|
177
|
-
get: () => value.text,
|
|
178
|
-
done: () => true,
|
|
179
|
-
stream: StreamUtil.to(value.text),
|
|
180
|
-
join: () => Promise.resolve(value.text),
|
|
181
|
-
}),
|
|
182
|
-
);
|
|
183
|
-
return [value];
|
|
184
|
-
});
|
|
185
|
-
}
|
|
186
|
-
return (await Promise.all(closures.map((fn) => fn()))).flat();
|
|
187
|
-
};
|
|
188
|
-
|
|
189
|
-
const propagate = async <Model extends ILlmSchema.Model>(
|
|
190
|
-
ctx: AgenticaContext<Model>,
|
|
191
|
-
call: AgenticaCallEvent<Model>,
|
|
192
|
-
retry: number,
|
|
193
|
-
): Promise<AgenticaExecutePrompt<Model>> => {
|
|
194
|
-
if (call.operation.protocol === "http") {
|
|
195
|
-
//----
|
|
196
|
-
// HTTP PROTOCOL
|
|
197
|
-
//----
|
|
198
|
-
// NESTED VALIDATOR
|
|
199
|
-
const check: IValidation<unknown> = call.operation.function.validate(
|
|
200
|
-
call.arguments,
|
|
201
|
-
);
|
|
202
|
-
if (
|
|
203
|
-
check.success === false &&
|
|
204
|
-
retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
|
|
205
|
-
) {
|
|
206
|
-
const trial: AgenticaExecutePrompt<Model> | null = await correct(
|
|
207
|
-
ctx,
|
|
208
|
-
call,
|
|
209
|
-
retry,
|
|
210
|
-
check.errors,
|
|
211
|
-
);
|
|
212
|
-
if (trial !== null) return trial;
|
|
213
|
-
}
|
|
214
|
-
try {
|
|
215
|
-
// CALL HTTP API
|
|
216
|
-
const response: IHttpResponse = call.operation.controller.execute
|
|
217
|
-
? await call.operation.controller.execute({
|
|
218
|
-
connection: call.operation.controller.connection,
|
|
219
|
-
application: call.operation.controller.application,
|
|
220
|
-
function: call.operation.function,
|
|
221
|
-
arguments: call.arguments,
|
|
222
|
-
})
|
|
223
|
-
: await HttpLlm.propagate({
|
|
224
|
-
connection: call.operation.controller.connection,
|
|
225
|
-
application: call.operation.controller.application,
|
|
226
|
-
function: call.operation.function,
|
|
227
|
-
input: call.arguments,
|
|
228
|
-
});
|
|
229
|
-
// CHECK STATUS
|
|
230
|
-
const success: boolean =
|
|
231
|
-
((response.status === 400 ||
|
|
232
|
-
response.status === 404 ||
|
|
233
|
-
response.status === 422) &&
|
|
234
|
-
retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY) &&
|
|
235
|
-
typeof response.body) === false;
|
|
236
|
-
// DISPATCH EVENT
|
|
237
|
-
return (
|
|
238
|
-
(success === false
|
|
239
|
-
? await correct(ctx, call, retry, response.body)
|
|
240
|
-
: null) ??
|
|
241
|
-
new AgenticaExecutePrompt({
|
|
242
|
-
operation: call.operation,
|
|
243
|
-
id: call.id,
|
|
244
|
-
arguments: call.arguments,
|
|
245
|
-
value: response,
|
|
246
|
-
})
|
|
247
|
-
);
|
|
248
|
-
} catch (error) {
|
|
249
|
-
// DISPATCH ERROR
|
|
250
|
-
return new AgenticaExecutePrompt({
|
|
251
|
-
operation: call.operation,
|
|
252
|
-
id: call.id,
|
|
253
|
-
arguments: call.arguments,
|
|
254
|
-
value: {
|
|
255
|
-
status: 500,
|
|
256
|
-
headers: {},
|
|
257
|
-
body:
|
|
258
|
-
error instanceof Error
|
|
259
|
-
? {
|
|
260
|
-
...error,
|
|
261
|
-
name: error.name,
|
|
262
|
-
message: error.message,
|
|
263
|
-
}
|
|
264
|
-
: error,
|
|
265
|
-
},
|
|
266
|
-
});
|
|
267
|
-
}
|
|
268
|
-
} else {
|
|
269
|
-
//----
|
|
270
|
-
// CLASS FUNCTION
|
|
271
|
-
//----
|
|
272
|
-
// VALIDATE FIRST
|
|
273
|
-
const check: IValidation<unknown> = call.operation.function.validate(
|
|
274
|
-
call.arguments,
|
|
275
|
-
);
|
|
276
|
-
if (check.success === false)
|
|
277
|
-
return (
|
|
278
|
-
(retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
|
|
279
|
-
? await correct(ctx, call, retry, check.errors)
|
|
280
|
-
: null) ??
|
|
281
|
-
new AgenticaExecutePrompt({
|
|
282
|
-
id: call.id,
|
|
283
|
-
operation: call.operation,
|
|
284
|
-
arguments: call.arguments,
|
|
285
|
-
value: {
|
|
286
|
-
name: "TypeGuardError",
|
|
287
|
-
message: "Invalid arguments.",
|
|
288
|
-
errors: check.errors,
|
|
289
|
-
},
|
|
290
|
-
})
|
|
291
|
-
);
|
|
292
|
-
// EXECUTE FUNCTION
|
|
293
|
-
try {
|
|
294
|
-
const value: any =
|
|
295
|
-
typeof call.operation.controller.execute === "function"
|
|
296
|
-
? await call.operation.controller.execute({
|
|
297
|
-
application: call.operation.controller.application,
|
|
298
|
-
function: call.operation.function,
|
|
299
|
-
arguments: call.arguments,
|
|
300
|
-
})
|
|
301
|
-
: await (call.operation.controller.execute as any)[
|
|
302
|
-
call.operation.function.name
|
|
303
|
-
](call.arguments);
|
|
304
|
-
return new AgenticaExecutePrompt({
|
|
305
|
-
id: call.id,
|
|
306
|
-
operation: call.operation,
|
|
307
|
-
arguments: call.arguments,
|
|
308
|
-
value,
|
|
309
|
-
});
|
|
310
|
-
} catch (error) {
|
|
311
|
-
return new AgenticaExecutePrompt({
|
|
312
|
-
id: call.id,
|
|
313
|
-
operation: call.operation,
|
|
314
|
-
arguments: call.arguments,
|
|
315
|
-
value:
|
|
316
|
-
error instanceof Error
|
|
317
|
-
? {
|
|
318
|
-
...error,
|
|
319
|
-
name: error.name,
|
|
320
|
-
message: error.message,
|
|
321
|
-
}
|
|
322
|
-
: error,
|
|
323
|
-
});
|
|
324
|
-
}
|
|
325
|
-
}
|
|
326
|
-
};
|
|
327
|
-
|
|
328
|
-
const correct = async <Model extends ILlmSchema.Model>(
|
|
329
|
-
ctx: AgenticaContext<Model>,
|
|
330
|
-
call: AgenticaCallEvent<Model>,
|
|
331
|
-
retry: number,
|
|
332
|
-
error: unknown,
|
|
333
|
-
): Promise<AgenticaExecutePrompt<Model> | null> => {
|
|
334
|
-
//----
|
|
335
|
-
// EXECUTE CHATGPT API
|
|
336
|
-
//----
|
|
337
|
-
const completionStream = await ctx.request("call", {
|
|
338
|
-
messages: [
|
|
339
|
-
// COMMON SYSTEM PROMPT
|
|
340
|
-
{
|
|
341
|
-
role: "system",
|
|
342
|
-
content: AgenticaDefaultPrompt.write(ctx.config),
|
|
343
|
-
} satisfies OpenAI.ChatCompletionSystemMessageParam,
|
|
344
|
-
// PREVIOUS HISTORIES
|
|
345
|
-
...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
|
|
346
|
-
// USER INPUT
|
|
347
|
-
{
|
|
348
|
-
role: "user",
|
|
349
|
-
content: ctx.prompt.text,
|
|
350
|
-
},
|
|
351
|
-
// TYPE CORRECTION
|
|
352
|
-
{
|
|
353
|
-
role: "system",
|
|
354
|
-
content:
|
|
355
|
-
ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
|
|
356
|
-
AgenticaSystemPrompt.EXECUTE,
|
|
357
|
-
},
|
|
358
|
-
{
|
|
359
|
-
role: "assistant",
|
|
360
|
-
tool_calls: [
|
|
361
|
-
{
|
|
362
|
-
type: "function",
|
|
363
|
-
id: call.id,
|
|
364
|
-
function: {
|
|
365
|
-
name: call.operation.name,
|
|
366
|
-
arguments: JSON.stringify(call.arguments),
|
|
367
|
-
},
|
|
368
|
-
} satisfies OpenAI.ChatCompletionMessageToolCall,
|
|
369
|
-
],
|
|
370
|
-
} satisfies OpenAI.ChatCompletionAssistantMessageParam,
|
|
371
|
-
{
|
|
372
|
-
role: "tool",
|
|
373
|
-
content: typeof error === "string" ? error : JSON.stringify(error),
|
|
374
|
-
tool_call_id: call.id,
|
|
375
|
-
} satisfies OpenAI.ChatCompletionToolMessageParam,
|
|
376
|
-
{
|
|
377
|
-
role: "system",
|
|
378
|
-
content: [
|
|
379
|
-
"You A.I. assistant has composed wrong arguments.",
|
|
380
|
-
"",
|
|
381
|
-
"Correct it at the next function calling.",
|
|
382
|
-
].join("\n"),
|
|
383
|
-
},
|
|
384
|
-
],
|
|
385
|
-
// STACK FUNCTIONS
|
|
386
|
-
tools: [
|
|
387
|
-
{
|
|
388
|
-
type: "function",
|
|
389
|
-
function: {
|
|
390
|
-
name: call.operation.name,
|
|
391
|
-
description: call.operation.function.description,
|
|
392
|
-
parameters: (call.operation.function.separated
|
|
393
|
-
? (call.operation.function.separated?.llm ??
|
|
394
|
-
({
|
|
395
|
-
$defs: {},
|
|
396
|
-
type: "object",
|
|
397
|
-
properties: {},
|
|
398
|
-
additionalProperties: false,
|
|
399
|
-
required: [],
|
|
400
|
-
} satisfies IChatGptSchema.IParameters))
|
|
401
|
-
: call.operation.function.parameters) as any,
|
|
402
|
-
},
|
|
403
|
-
},
|
|
404
|
-
],
|
|
405
|
-
tool_choice: "auto",
|
|
406
|
-
parallel_tool_calls: false,
|
|
407
|
-
});
|
|
408
|
-
|
|
409
|
-
const chunks = await StreamUtil.readAll(completionStream);
|
|
410
|
-
const completion = ChatGptCompletionMessageUtil.merge(chunks);
|
|
411
|
-
//----
|
|
412
|
-
// PROCESS COMPLETION
|
|
413
|
-
//----
|
|
414
|
-
const toolCall: OpenAI.ChatCompletionMessageToolCall | undefined = (
|
|
415
|
-
completion.choices[0]?.message.tool_calls ?? []
|
|
416
|
-
).find(
|
|
417
|
-
(tc) =>
|
|
418
|
-
tc.type === "function" && tc.function.name === call.operation.name,
|
|
419
|
-
);
|
|
420
|
-
if (toolCall === undefined) return null;
|
|
421
|
-
return propagate(
|
|
422
|
-
ctx,
|
|
423
|
-
new AgenticaCallEvent({
|
|
424
|
-
id: toolCall.id,
|
|
425
|
-
operation: call.operation,
|
|
426
|
-
arguments: JSON.parse(toolCall.function.arguments),
|
|
427
|
-
}),
|
|
428
|
-
retry,
|
|
429
|
-
);
|
|
430
|
-
};
|
|
431
|
-
|
|
432
|
-
const fillHttpArguments = <Model extends ILlmSchema.Model>(props: {
|
|
433
|
-
operation: AgenticaOperation<Model>;
|
|
434
|
-
arguments: object;
|
|
435
|
-
}): void => {
|
|
436
|
-
if (props.operation.protocol !== "http") return;
|
|
437
|
-
const route: IHttpMigrateRoute = props.operation.function.route();
|
|
438
|
-
if (
|
|
439
|
-
route.body &&
|
|
440
|
-
route.operation().requestBody?.required === true &&
|
|
441
|
-
(props.arguments as any).body === undefined &&
|
|
442
|
-
isObject(
|
|
443
|
-
(props.operation.function.parameters as IChatGptSchema.IParameters)
|
|
444
|
-
.$defs,
|
|
445
|
-
(props.operation.function.parameters as IChatGptSchema.IParameters)
|
|
446
|
-
.properties.body!,
|
|
447
|
-
)
|
|
448
|
-
)
|
|
449
|
-
(props.arguments as any).body = {};
|
|
450
|
-
if (route.query && (props.arguments as any).query === undefined)
|
|
451
|
-
(props.arguments as any).query = {};
|
|
452
|
-
};
|
|
453
|
-
|
|
454
|
-
const isObject = (
|
|
455
|
-
$defs: Record<string, IChatGptSchema>,
|
|
456
|
-
schema: IChatGptSchema,
|
|
457
|
-
): boolean => {
|
|
458
|
-
return (
|
|
459
|
-
ChatGptTypeChecker.isObject(schema) ||
|
|
460
|
-
(ChatGptTypeChecker.isReference(schema) &&
|
|
461
|
-
isObject($defs, $defs[schema.$ref.split("/").at(-1)!]!)) ||
|
|
462
|
-
(ChatGptTypeChecker.isAnyOf(schema) &&
|
|
463
|
-
schema.anyOf.every((schema) => isObject($defs, schema)))
|
|
464
|
-
);
|
|
465
|
-
};
|
|
466
|
-
}
|
|
1
|
+
import {
|
|
2
|
+
ChatGptTypeChecker,
|
|
3
|
+
HttpLlm,
|
|
4
|
+
IChatGptSchema,
|
|
5
|
+
IHttpMigrateRoute,
|
|
6
|
+
IHttpResponse,
|
|
7
|
+
ILlmSchema,
|
|
8
|
+
} from "@samchon/openapi";
|
|
9
|
+
import OpenAI from "openai";
|
|
10
|
+
import { IValidation } from "typia";
|
|
11
|
+
|
|
12
|
+
import { AgenticaCancelPrompt } from "../context/AgenticaCancelPrompt";
|
|
13
|
+
import { AgenticaContext } from "../context/AgenticaContext";
|
|
14
|
+
import { AgenticaOperation } from "../context/AgenticaOperation";
|
|
15
|
+
import { AgenticaOperationSelection } from "../context/AgenticaOperationSelection";
|
|
16
|
+
import { AgenticaCallEvent } from "../events/AgenticaCallEvent";
|
|
17
|
+
import { AgenticaCancelEvent } from "../events/AgenticaCancelEvent";
|
|
18
|
+
import { AgenticaExecuteEvent } from "../events/AgenticaExecuteEvent";
|
|
19
|
+
import { AgenticaTextEvent } from "../events/AgenticaTextEvent";
|
|
20
|
+
import { AgenticaConstant } from "../internal/AgenticaConstant";
|
|
21
|
+
import { AgenticaDefaultPrompt } from "../internal/AgenticaDefaultPrompt";
|
|
22
|
+
import { AgenticaSystemPrompt } from "../internal/AgenticaSystemPrompt";
|
|
23
|
+
import { StreamUtil } from "../internal/StreamUtil";
|
|
24
|
+
import { AgenticaExecutePrompt } from "../prompts/AgenticaExecutePrompt";
|
|
25
|
+
import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
|
|
26
|
+
import { AgenticaTextPrompt } from "../prompts/AgenticaTextPrompt";
|
|
27
|
+
import { ChatGptCancelFunctionAgent } from "./ChatGptCancelFunctionAgent";
|
|
28
|
+
import { ChatGptCompletionMessageUtil } from "./ChatGptCompletionMessageUtil";
|
|
29
|
+
import { ChatGptHistoryDecoder } from "./ChatGptHistoryDecoder";
|
|
30
|
+
|
|
31
|
+
export namespace ChatGptCallFunctionAgent {
|
|
32
|
+
export const execute = async <Model extends ILlmSchema.Model>(
|
|
33
|
+
ctx: AgenticaContext<Model>,
|
|
34
|
+
): Promise<AgenticaPrompt<Model>[]> => {
|
|
35
|
+
//----
|
|
36
|
+
// EXECUTE CHATGPT API
|
|
37
|
+
//----
|
|
38
|
+
const completionStream = await ctx.request("call", {
|
|
39
|
+
messages: [
|
|
40
|
+
// COMMON SYSTEM PROMPT
|
|
41
|
+
{
|
|
42
|
+
role: "system",
|
|
43
|
+
content: AgenticaDefaultPrompt.write(ctx.config),
|
|
44
|
+
} satisfies OpenAI.ChatCompletionSystemMessageParam,
|
|
45
|
+
// PREVIOUS HISTORIES
|
|
46
|
+
...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
|
|
47
|
+
// USER INPUT
|
|
48
|
+
{
|
|
49
|
+
role: "user",
|
|
50
|
+
content: ctx.prompt.text,
|
|
51
|
+
},
|
|
52
|
+
// SYSTEM PROMPT
|
|
53
|
+
{
|
|
54
|
+
role: "system",
|
|
55
|
+
content:
|
|
56
|
+
ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
|
|
57
|
+
AgenticaSystemPrompt.EXECUTE,
|
|
58
|
+
},
|
|
59
|
+
],
|
|
60
|
+
// STACKED FUNCTIONS
|
|
61
|
+
tools: ctx.stack.map(
|
|
62
|
+
(s) =>
|
|
63
|
+
({
|
|
64
|
+
type: "function",
|
|
65
|
+
function: {
|
|
66
|
+
name: s.operation.name,
|
|
67
|
+
description: s.operation.function.description,
|
|
68
|
+
parameters: (s.operation.function.separated
|
|
69
|
+
? (s.operation.function.separated.llm ??
|
|
70
|
+
({
|
|
71
|
+
type: "object",
|
|
72
|
+
properties: {},
|
|
73
|
+
required: [],
|
|
74
|
+
additionalProperties: false,
|
|
75
|
+
$defs: {},
|
|
76
|
+
} satisfies IChatGptSchema.IParameters))
|
|
77
|
+
: s.operation.function.parameters) as Record<string, any>,
|
|
78
|
+
},
|
|
79
|
+
}) as OpenAI.ChatCompletionTool,
|
|
80
|
+
),
|
|
81
|
+
tool_choice: "auto",
|
|
82
|
+
parallel_tool_calls: false,
|
|
83
|
+
});
|
|
84
|
+
|
|
85
|
+
//----
|
|
86
|
+
// PROCESS COMPLETION
|
|
87
|
+
//----
|
|
88
|
+
const closures: Array<
|
|
89
|
+
() => Promise<
|
|
90
|
+
Array<
|
|
91
|
+
| AgenticaExecutePrompt<Model>
|
|
92
|
+
| AgenticaCancelPrompt<Model>
|
|
93
|
+
| AgenticaTextPrompt
|
|
94
|
+
>
|
|
95
|
+
>
|
|
96
|
+
> = [];
|
|
97
|
+
|
|
98
|
+
const chunks = await StreamUtil.readAll(completionStream);
|
|
99
|
+
const completion = ChatGptCompletionMessageUtil.merge(chunks);
|
|
100
|
+
|
|
101
|
+
for (const choice of completion.choices) {
|
|
102
|
+
for (const tc of choice.message.tool_calls ?? []) {
|
|
103
|
+
if (tc.type === "function") {
|
|
104
|
+
const operation: AgenticaOperation<Model> | undefined =
|
|
105
|
+
ctx.operations.flat.get(tc.function.name);
|
|
106
|
+
if (operation === undefined) continue;
|
|
107
|
+
closures.push(
|
|
108
|
+
async (): Promise<
|
|
109
|
+
[AgenticaExecutePrompt<Model>, AgenticaCancelPrompt<Model>]
|
|
110
|
+
> => {
|
|
111
|
+
const call: AgenticaCallEvent<Model> = new AgenticaCallEvent({
|
|
112
|
+
id: tc.id,
|
|
113
|
+
operation,
|
|
114
|
+
arguments: JSON.parse(tc.function.arguments),
|
|
115
|
+
});
|
|
116
|
+
if (call.operation.protocol === "http")
|
|
117
|
+
fillHttpArguments({
|
|
118
|
+
operation: call.operation,
|
|
119
|
+
arguments: call.arguments,
|
|
120
|
+
});
|
|
121
|
+
await ctx.dispatch(call);
|
|
122
|
+
|
|
123
|
+
const execute: AgenticaExecutePrompt<Model> = await propagate(
|
|
124
|
+
ctx,
|
|
125
|
+
call,
|
|
126
|
+
0,
|
|
127
|
+
);
|
|
128
|
+
await ctx.dispatch(
|
|
129
|
+
new AgenticaExecuteEvent({
|
|
130
|
+
id: call.id,
|
|
131
|
+
operation: call.operation,
|
|
132
|
+
arguments: execute.arguments,
|
|
133
|
+
value: execute.value,
|
|
134
|
+
}),
|
|
135
|
+
);
|
|
136
|
+
|
|
137
|
+
await ChatGptCancelFunctionAgent.cancelFunction(ctx, {
|
|
138
|
+
name: call.operation.name,
|
|
139
|
+
reason: "completed",
|
|
140
|
+
});
|
|
141
|
+
await ctx.dispatch(
|
|
142
|
+
new AgenticaCancelEvent({
|
|
143
|
+
selection: new AgenticaOperationSelection({
|
|
144
|
+
operation: call.operation,
|
|
145
|
+
reason: "complete",
|
|
146
|
+
}),
|
|
147
|
+
}),
|
|
148
|
+
);
|
|
149
|
+
return [
|
|
150
|
+
execute,
|
|
151
|
+
new AgenticaCancelPrompt({
|
|
152
|
+
id: call.id,
|
|
153
|
+
selections: [
|
|
154
|
+
new AgenticaOperationSelection({
|
|
155
|
+
operation: call.operation,
|
|
156
|
+
reason: "complete",
|
|
157
|
+
}),
|
|
158
|
+
],
|
|
159
|
+
}),
|
|
160
|
+
] as const;
|
|
161
|
+
},
|
|
162
|
+
);
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
if (
|
|
166
|
+
choice.message.role === "assistant" &&
|
|
167
|
+
!!choice.message.content?.length
|
|
168
|
+
)
|
|
169
|
+
closures.push(async () => {
|
|
170
|
+
const value: AgenticaTextPrompt = new AgenticaTextPrompt({
|
|
171
|
+
role: "assistant",
|
|
172
|
+
text: choice.message.content!,
|
|
173
|
+
});
|
|
174
|
+
await ctx.dispatch(
|
|
175
|
+
new AgenticaTextEvent({
|
|
176
|
+
role: "assistant",
|
|
177
|
+
get: () => value.text,
|
|
178
|
+
done: () => true,
|
|
179
|
+
stream: StreamUtil.to(value.text),
|
|
180
|
+
join: () => Promise.resolve(value.text),
|
|
181
|
+
}),
|
|
182
|
+
);
|
|
183
|
+
return [value];
|
|
184
|
+
});
|
|
185
|
+
}
|
|
186
|
+
return (await Promise.all(closures.map((fn) => fn()))).flat();
|
|
187
|
+
};
|
|
188
|
+
|
|
189
|
+
const propagate = async <Model extends ILlmSchema.Model>(
|
|
190
|
+
ctx: AgenticaContext<Model>,
|
|
191
|
+
call: AgenticaCallEvent<Model>,
|
|
192
|
+
retry: number,
|
|
193
|
+
): Promise<AgenticaExecutePrompt<Model>> => {
|
|
194
|
+
if (call.operation.protocol === "http") {
|
|
195
|
+
//----
|
|
196
|
+
// HTTP PROTOCOL
|
|
197
|
+
//----
|
|
198
|
+
// NESTED VALIDATOR
|
|
199
|
+
const check: IValidation<unknown> = call.operation.function.validate(
|
|
200
|
+
call.arguments,
|
|
201
|
+
);
|
|
202
|
+
if (
|
|
203
|
+
check.success === false &&
|
|
204
|
+
retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
|
|
205
|
+
) {
|
|
206
|
+
const trial: AgenticaExecutePrompt<Model> | null = await correct(
|
|
207
|
+
ctx,
|
|
208
|
+
call,
|
|
209
|
+
retry,
|
|
210
|
+
check.errors,
|
|
211
|
+
);
|
|
212
|
+
if (trial !== null) return trial;
|
|
213
|
+
}
|
|
214
|
+
try {
|
|
215
|
+
// CALL HTTP API
|
|
216
|
+
const response: IHttpResponse = call.operation.controller.execute
|
|
217
|
+
? await call.operation.controller.execute({
|
|
218
|
+
connection: call.operation.controller.connection,
|
|
219
|
+
application: call.operation.controller.application,
|
|
220
|
+
function: call.operation.function,
|
|
221
|
+
arguments: call.arguments,
|
|
222
|
+
})
|
|
223
|
+
: await HttpLlm.propagate({
|
|
224
|
+
connection: call.operation.controller.connection,
|
|
225
|
+
application: call.operation.controller.application,
|
|
226
|
+
function: call.operation.function,
|
|
227
|
+
input: call.arguments,
|
|
228
|
+
});
|
|
229
|
+
// CHECK STATUS
|
|
230
|
+
const success: boolean =
|
|
231
|
+
((response.status === 400 ||
|
|
232
|
+
response.status === 404 ||
|
|
233
|
+
response.status === 422) &&
|
|
234
|
+
retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY) &&
|
|
235
|
+
typeof response.body) === false;
|
|
236
|
+
// DISPATCH EVENT
|
|
237
|
+
return (
|
|
238
|
+
(success === false
|
|
239
|
+
? await correct(ctx, call, retry, response.body)
|
|
240
|
+
: null) ??
|
|
241
|
+
new AgenticaExecutePrompt({
|
|
242
|
+
operation: call.operation,
|
|
243
|
+
id: call.id,
|
|
244
|
+
arguments: call.arguments,
|
|
245
|
+
value: response,
|
|
246
|
+
})
|
|
247
|
+
);
|
|
248
|
+
} catch (error) {
|
|
249
|
+
// DISPATCH ERROR
|
|
250
|
+
return new AgenticaExecutePrompt({
|
|
251
|
+
operation: call.operation,
|
|
252
|
+
id: call.id,
|
|
253
|
+
arguments: call.arguments,
|
|
254
|
+
value: {
|
|
255
|
+
status: 500,
|
|
256
|
+
headers: {},
|
|
257
|
+
body:
|
|
258
|
+
error instanceof Error
|
|
259
|
+
? {
|
|
260
|
+
...error,
|
|
261
|
+
name: error.name,
|
|
262
|
+
message: error.message,
|
|
263
|
+
}
|
|
264
|
+
: error,
|
|
265
|
+
},
|
|
266
|
+
});
|
|
267
|
+
}
|
|
268
|
+
} else {
|
|
269
|
+
//----
|
|
270
|
+
// CLASS FUNCTION
|
|
271
|
+
//----
|
|
272
|
+
// VALIDATE FIRST
|
|
273
|
+
const check: IValidation<unknown> = call.operation.function.validate(
|
|
274
|
+
call.arguments,
|
|
275
|
+
);
|
|
276
|
+
if (check.success === false)
|
|
277
|
+
return (
|
|
278
|
+
(retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
|
|
279
|
+
? await correct(ctx, call, retry, check.errors)
|
|
280
|
+
: null) ??
|
|
281
|
+
new AgenticaExecutePrompt({
|
|
282
|
+
id: call.id,
|
|
283
|
+
operation: call.operation,
|
|
284
|
+
arguments: call.arguments,
|
|
285
|
+
value: {
|
|
286
|
+
name: "TypeGuardError",
|
|
287
|
+
message: "Invalid arguments.",
|
|
288
|
+
errors: check.errors,
|
|
289
|
+
},
|
|
290
|
+
})
|
|
291
|
+
);
|
|
292
|
+
// EXECUTE FUNCTION
|
|
293
|
+
try {
|
|
294
|
+
const value: any =
|
|
295
|
+
typeof call.operation.controller.execute === "function"
|
|
296
|
+
? await call.operation.controller.execute({
|
|
297
|
+
application: call.operation.controller.application,
|
|
298
|
+
function: call.operation.function,
|
|
299
|
+
arguments: call.arguments,
|
|
300
|
+
})
|
|
301
|
+
: await (call.operation.controller.execute as any)[
|
|
302
|
+
call.operation.function.name
|
|
303
|
+
](call.arguments);
|
|
304
|
+
return new AgenticaExecutePrompt({
|
|
305
|
+
id: call.id,
|
|
306
|
+
operation: call.operation,
|
|
307
|
+
arguments: call.arguments,
|
|
308
|
+
value,
|
|
309
|
+
});
|
|
310
|
+
} catch (error) {
|
|
311
|
+
return new AgenticaExecutePrompt({
|
|
312
|
+
id: call.id,
|
|
313
|
+
operation: call.operation,
|
|
314
|
+
arguments: call.arguments,
|
|
315
|
+
value:
|
|
316
|
+
error instanceof Error
|
|
317
|
+
? {
|
|
318
|
+
...error,
|
|
319
|
+
name: error.name,
|
|
320
|
+
message: error.message,
|
|
321
|
+
}
|
|
322
|
+
: error,
|
|
323
|
+
});
|
|
324
|
+
}
|
|
325
|
+
}
|
|
326
|
+
};
|
|
327
|
+
|
|
328
|
+
/**
 * Ask the LLM to re-compose the arguments of a failed function call.
 *
 * Re-issues a "call" completion request whose message list replays the
 * conversation, appends the failing tool call as a synthetic assistant
 * message, feeds the validation/HTTP error back as the tool result, and
 * finally instructs the model (via a system message) to correct its
 * arguments. Only the single failing operation is exposed as a tool.
 *
 * @param ctx   Agent context (request dispatcher, histories, config).
 * @param call  The original call event whose arguments were rejected.
 * @param retry Retry counter already incremented by the caller; forwarded
 *              unchanged into the recursive {@link propagate}.
 * @param error The rejection payload (validation errors or HTTP body)
 *              serialized into the tool message for the model to read.
 * @returns A new execute prompt from re-propagating the corrected call,
 *          or `null` when the model did not call the target function again.
 */
const correct = async <Model extends ILlmSchema.Model>(
  ctx: AgenticaContext<Model>,
  call: AgenticaCallEvent<Model>,
  retry: number,
  error: unknown,
): Promise<AgenticaExecutePrompt<Model> | null> => {
  //----
  // EXECUTE CHATGPT API
  //----
  const completionStream = await ctx.request("call", {
    messages: [
      // COMMON SYSTEM PROMPT
      {
        role: "system",
        content: AgenticaDefaultPrompt.write(ctx.config),
      } satisfies OpenAI.ChatCompletionSystemMessageParam,
      // PREVIOUS HISTORIES
      ...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
      // USER INPUT
      {
        role: "user",
        content: ctx.prompt.text,
      },
      // TYPE CORRECTION
      {
        role: "system",
        content:
          ctx.config?.systemPrompt?.execute?.(ctx.histories) ??
          AgenticaSystemPrompt.EXECUTE,
      },
      // Replay the failing tool call so the model sees exactly what it sent.
      {
        role: "assistant",
        tool_calls: [
          {
            type: "function",
            id: call.id,
            function: {
              name: call.operation.name,
              arguments: JSON.stringify(call.arguments),
            },
          } satisfies OpenAI.ChatCompletionMessageToolCall,
        ],
      } satisfies OpenAI.ChatCompletionAssistantMessageParam,
      // Feed the error back as the tool's result, keyed to the same call id.
      {
        role: "tool",
        content: typeof error === "string" ? error : JSON.stringify(error),
        tool_call_id: call.id,
      } satisfies OpenAI.ChatCompletionToolMessageParam,
      // Explicit instruction to retry with corrected arguments.
      {
        role: "system",
        content: [
          "You A.I. assistant has composed wrong arguments.",
          "",
          "Correct it at the next function calling.",
        ].join("\n"),
      },
    ],
    // STACK FUNCTIONS
    // Expose only the failing operation; for separated (human/LLM split)
    // functions fall back to an empty object schema when no LLM side exists.
    tools: [
      {
        type: "function",
        function: {
          name: call.operation.name,
          description: call.operation.function.description,
          parameters: (call.operation.function.separated
            ? (call.operation.function.separated?.llm ??
              ({
                $defs: {},
                type: "object",
                properties: {},
                additionalProperties: false,
                required: [],
              } satisfies IChatGptSchema.IParameters))
            : call.operation.function.parameters) as any,
        },
      },
    ],
    tool_choice: "auto",
    parallel_tool_calls: false,
  });

  // Drain the stream and merge the chunks into a single completion.
  const chunks = await StreamUtil.readAll(completionStream);
  const completion = ChatGptCompletionMessageUtil.merge(chunks);
  //----
  // PROCESS COMPLETION
  //----
  // Pick the tool call targeting the same operation; anything else means
  // the model declined to retry.
  const toolCall: OpenAI.ChatCompletionMessageToolCall | undefined = (
    completion.choices[0]?.message.tool_calls ?? []
  ).find(
    (tc) =>
      tc.type === "function" && tc.function.name === call.operation.name,
  );
  if (toolCall === undefined) return null;
  // Re-enter the propagate/correct loop with the model's new arguments.
  // NOTE(review): JSON.parse may throw on malformed arguments — presumably
  // handled by propagate's caller; confirm.
  return propagate(
    ctx,
    new AgenticaCallEvent({
      id: toolCall.id,
      operation: call.operation,
      arguments: JSON.parse(toolCall.function.arguments),
    }),
    retry,
  );
};
|
|
431
|
+
|
|
432
|
+
const fillHttpArguments = <Model extends ILlmSchema.Model>(props: {
|
|
433
|
+
operation: AgenticaOperation<Model>;
|
|
434
|
+
arguments: object;
|
|
435
|
+
}): void => {
|
|
436
|
+
if (props.operation.protocol !== "http") return;
|
|
437
|
+
const route: IHttpMigrateRoute = props.operation.function.route();
|
|
438
|
+
if (
|
|
439
|
+
route.body &&
|
|
440
|
+
route.operation().requestBody?.required === true &&
|
|
441
|
+
(props.arguments as any).body === undefined &&
|
|
442
|
+
isObject(
|
|
443
|
+
(props.operation.function.parameters as IChatGptSchema.IParameters)
|
|
444
|
+
.$defs,
|
|
445
|
+
(props.operation.function.parameters as IChatGptSchema.IParameters)
|
|
446
|
+
.properties.body!,
|
|
447
|
+
)
|
|
448
|
+
)
|
|
449
|
+
(props.arguments as any).body = {};
|
|
450
|
+
if (route.query && (props.arguments as any).query === undefined)
|
|
451
|
+
(props.arguments as any).query = {};
|
|
452
|
+
};
|
|
453
|
+
|
|
454
|
+
const isObject = (
|
|
455
|
+
$defs: Record<string, IChatGptSchema>,
|
|
456
|
+
schema: IChatGptSchema,
|
|
457
|
+
): boolean => {
|
|
458
|
+
return (
|
|
459
|
+
ChatGptTypeChecker.isObject(schema) ||
|
|
460
|
+
(ChatGptTypeChecker.isReference(schema) &&
|
|
461
|
+
isObject($defs, $defs[schema.$ref.split("/").at(-1)!]!)) ||
|
|
462
|
+
(ChatGptTypeChecker.isAnyOf(schema) &&
|
|
463
|
+
schema.anyOf.every((schema) => isObject($defs, schema)))
|
|
464
|
+
);
|
|
465
|
+
};
|
|
466
|
+
}
|