@agentica/core 0.12.1 → 0.12.2-dev.20250314
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in the public registries.
- package/LICENSE +21 -21
- package/README.md +461 -461
- package/package.json +1 -1
- package/prompts/cancel.md +4 -4
- package/prompts/common.md +2 -2
- package/prompts/describe.md +6 -6
- package/prompts/execute.md +6 -6
- package/prompts/initialize.md +2 -2
- package/prompts/select.md +6 -6
- package/src/Agentica.ts +359 -359
- package/src/chatgpt/ChatGptAgent.ts +76 -76
- package/src/chatgpt/ChatGptCallFunctionAgent.ts +466 -466
- package/src/chatgpt/ChatGptCancelFunctionAgent.ts +280 -280
- package/src/chatgpt/ChatGptCompletionMessageUtil.ts +166 -166
- package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +122 -122
- package/src/chatgpt/ChatGptHistoryDecoder.ts +88 -88
- package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +96 -96
- package/src/chatgpt/ChatGptSelectFunctionAgent.ts +311 -311
- package/src/chatgpt/ChatGptUsageAggregator.ts +62 -62
- package/src/context/AgenticaCancelPrompt.ts +32 -32
- package/src/context/AgenticaClassOperation.ts +23 -23
- package/src/context/AgenticaContext.ts +130 -130
- package/src/context/AgenticaHttpOperation.ts +27 -27
- package/src/context/AgenticaOperation.ts +66 -66
- package/src/context/AgenticaOperationBase.ts +57 -57
- package/src/context/AgenticaOperationCollection.ts +52 -52
- package/src/context/AgenticaOperationSelection.ts +27 -27
- package/src/context/AgenticaTokenUsage.ts +170 -170
- package/src/context/internal/AgenticaTokenUsageAggregator.ts +66 -66
- package/src/context/internal/__IChatCancelFunctionsApplication.ts +23 -23
- package/src/context/internal/__IChatFunctionReference.ts +21 -21
- package/src/context/internal/__IChatInitialApplication.ts +15 -15
- package/src/context/internal/__IChatSelectFunctionsApplication.ts +24 -24
- package/src/events/AgenticaCallEvent.ts +36 -36
- package/src/events/AgenticaCancelEvent.ts +28 -28
- package/src/events/AgenticaDescribeEvent.ts +66 -66
- package/src/events/AgenticaEvent.ts +36 -36
- package/src/events/AgenticaEventBase.ts +7 -7
- package/src/events/AgenticaEventSource.ts +6 -6
- package/src/events/AgenticaExecuteEvent.ts +50 -50
- package/src/events/AgenticaInitializeEvent.ts +14 -14
- package/src/events/AgenticaRequestEvent.ts +45 -45
- package/src/events/AgenticaResponseEvent.ts +48 -48
- package/src/events/AgenticaSelectEvent.ts +37 -37
- package/src/events/AgenticaTextEvent.ts +62 -62
- package/src/functional/assertHttpLlmApplication.ts +55 -55
- package/src/functional/validateHttpLlmApplication.ts +66 -66
- package/src/index.ts +44 -44
- package/src/internal/AgenticaConstant.ts +4 -4
- package/src/internal/AgenticaDefaultPrompt.ts +43 -43
- package/src/internal/AgenticaOperationComposer.ts +96 -96
- package/src/internal/ByteArrayUtil.ts +5 -5
- package/src/internal/MPSCUtil.ts +111 -111
- package/src/internal/MathUtil.ts +3 -3
- package/src/internal/Singleton.ts +22 -22
- package/src/internal/StreamUtil.ts +64 -64
- package/src/internal/__map_take.ts +15 -15
- package/src/json/IAgenticaEventJson.ts +178 -178
- package/src/json/IAgenticaOperationJson.ts +36 -36
- package/src/json/IAgenticaOperationSelectionJson.ts +19 -19
- package/src/json/IAgenticaPromptJson.ts +130 -130
- package/src/json/IAgenticaTokenUsageJson.ts +107 -107
- package/src/prompts/AgenticaCancelPrompt.ts +32 -32
- package/src/prompts/AgenticaDescribePrompt.ts +41 -41
- package/src/prompts/AgenticaExecutePrompt.ts +52 -52
- package/src/prompts/AgenticaPrompt.ts +14 -14
- package/src/prompts/AgenticaPromptBase.ts +27 -27
- package/src/prompts/AgenticaSelectPrompt.ts +32 -32
- package/src/prompts/AgenticaTextPrompt.ts +31 -31
- package/src/structures/IAgenticaConfig.ts +123 -123
- package/src/structures/IAgenticaController.ts +133 -133
- package/src/structures/IAgenticaExecutor.ts +157 -157
- package/src/structures/IAgenticaProps.ts +69 -69
- package/src/structures/IAgenticaSystemPrompt.ts +125 -125
- package/src/structures/IAgenticaVendor.ts +39 -39
- package/src/transformers/AgenticaEventTransformer.ts +165 -165
- package/src/transformers/AgenticaPromptTransformer.ts +134 -134
package/src/chatgpt/ChatGptHistoryDecoder.ts
@@ -1,88 +1,88 @@
-import { ILlmSchema } from "@samchon/openapi";
-import OpenAI from "openai";
-
-import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
-
-export namespace ChatGptHistoryDecoder {
-  export const decode = <Model extends ILlmSchema.Model>(
-    history: AgenticaPrompt<Model>,
-  ): OpenAI.ChatCompletionMessageParam[] => {
-    // NO NEED TO DECODE DESCRIBE
-    if (history.type === "describe") return [];
-    else if (history.type === "text")
-      return [
-        {
-          role: history.role,
-          content: history.text,
-        },
-      ];
-    else if (history.type === "select" || history.type === "cancel")
-      return [
-        {
-          role: "assistant",
-          tool_calls: [
-            {
-              type: "function",
-              id: history.id,
-              function: {
-                name: `${history.type}Functions`,
-                arguments: JSON.stringify({
-                  functions: history.selections.map((s) => ({
-                    name: s.operation.function.name,
-                    reason: s.reason,
-                  })),
-                }),
-              },
-            },
-          ],
-        },
-        {
-          role: "tool",
-          tool_call_id: history.id,
-          content: "",
-        },
-      ];
-
-    return [
-      {
-        role: "assistant",
-        tool_calls: [
-          {
-            type: "function",
-            id: history.id,
-            function: {
-              name: history.operation.name,
-              arguments: JSON.stringify(history.arguments),
-            },
-          },
-        ],
-      },
-      {
-        role: "tool",
-        tool_call_id: history.id,
-        content: JSON.stringify({
-          function: {
-            protocol: history.operation.protocol,
-            description: history.operation.function.description,
-            parameters: history.operation.function.parameters,
-            output: history.operation.function.output,
-            ...(history.operation.protocol === "http"
-              ? {
-                  method: history.operation.function.method,
-                  path: history.operation.function.path,
-                }
-              : {}),
-          },
-          ...(history.operation.protocol === "http"
-            ? {
-                status: history.value.status,
-                data: history.value.body,
-              }
-            : {
-                value: history.value,
-              }),
-        }),
-      },
-    ];
-  };
-}
+import { ILlmSchema } from "@samchon/openapi";
+import OpenAI from "openai";
+
+import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
+
+export namespace ChatGptHistoryDecoder {
+  export const decode = <Model extends ILlmSchema.Model>(
+    history: AgenticaPrompt<Model>,
+  ): OpenAI.ChatCompletionMessageParam[] => {
+    // NO NEED TO DECODE DESCRIBE
+    if (history.type === "describe") return [];
+    else if (history.type === "text")
+      return [
+        {
+          role: history.role,
+          content: history.text,
+        },
+      ];
+    else if (history.type === "select" || history.type === "cancel")
+      return [
+        {
+          role: "assistant",
+          tool_calls: [
+            {
+              type: "function",
+              id: history.id,
+              function: {
+                name: `${history.type}Functions`,
+                arguments: JSON.stringify({
+                  functions: history.selections.map((s) => ({
+                    name: s.operation.function.name,
+                    reason: s.reason,
+                  })),
+                }),
+              },
+            },
+          ],
+        },
+        {
+          role: "tool",
+          tool_call_id: history.id,
+          content: "",
+        },
+      ];
+
+    return [
+      {
+        role: "assistant",
+        tool_calls: [
+          {
+            type: "function",
+            id: history.id,
+            function: {
+              name: history.operation.name,
+              arguments: JSON.stringify(history.arguments),
+            },
+          },
+        ],
+      },
+      {
+        role: "tool",
+        tool_call_id: history.id,
+        content: JSON.stringify({
+          function: {
+            protocol: history.operation.protocol,
+            description: history.operation.function.description,
+            parameters: history.operation.function.parameters,
+            output: history.operation.function.output,
+            ...(history.operation.protocol === "http"
+              ? {
+                  method: history.operation.function.method,
+                  path: history.operation.function.path,
+                }
+              : {}),
+          },
+          ...(history.operation.protocol === "http"
+            ? {
+                status: history.value.status,
+                data: history.value.body,
+              }
+            : {
+                value: history.value,
+              }),
+        }),
+      },
+    ];
+  };
+}
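For orientation, a minimal sketch of how this decoder is consumed: callers flatten every stored history into OpenAI messages before a completion request. `histories` below is a hypothetical variable, not something defined in this diff; the real caller in the next hunk does exactly this with ctx.histories.

// Sketch only, assuming histories: AgenticaPrompt<"chatgpt">[] is at hand.
// decode returns zero, one, or two messages per history, so flatten them.
const messages: OpenAI.ChatCompletionMessageParam[] = histories
  .map(ChatGptHistoryDecoder.decode)
  .flat();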
package/src/chatgpt/ChatGptInitializeFunctionAgent.ts
@@ -1,96 +1,96 @@
-import { ILlmFunction, ILlmSchema } from "@samchon/openapi";
-import OpenAI from "openai";
-import typia from "typia";
-
-import { AgenticaContext } from "../context/AgenticaContext";
-import { __IChatInitialApplication } from "../context/internal/__IChatInitialApplication";
-import { AgenticaDefaultPrompt } from "../internal/AgenticaDefaultPrompt";
-import { AgenticaSystemPrompt } from "../internal/AgenticaSystemPrompt";
-import { StreamUtil } from "../internal/StreamUtil";
-import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
-import { AgenticaTextPrompt } from "../prompts/AgenticaTextPrompt";
-import { ChatGptCompletionMessageUtil } from "./ChatGptCompletionMessageUtil";
-import { ChatGptHistoryDecoder } from "./ChatGptHistoryDecoder";
-
-export namespace ChatGptInitializeFunctionAgent {
-  export const execute = async <Model extends ILlmSchema.Model>(
-    ctx: AgenticaContext<Model>,
-  ): Promise<AgenticaPrompt<Model>[]> => {
-    //----
-    // EXECUTE CHATGPT API
-    //----
-    const completionStream = await ctx.request("initialize", {
-      messages: [
-        // COMMON SYSTEM PROMPT
-        {
-          role: "system",
-          content: AgenticaDefaultPrompt.write(ctx.config),
-        } satisfies OpenAI.ChatCompletionSystemMessageParam,
-        // PREVIOUS HISTORIES
-        ...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
-        // USER INPUT
-        {
-          role: "user",
-          content: ctx.prompt.text,
-        },
-        {
-          // SYSTEM PROMPT
-          role: "system",
-          content:
-            ctx.config?.systemPrompt?.initialize?.(ctx.histories) ??
-            AgenticaSystemPrompt.INITIALIZE,
-        },
-      ],
-      // GETTER FUNCTION
-      tools: [
-        {
-          type: "function",
-          function: {
-            name: FUNCTION.name,
-            description: FUNCTION.description,
-            parameters: FUNCTION.parameters as any,
-          },
-        },
-      ],
-      tool_choice: "auto",
-      parallel_tool_calls: false,
-    });
-
-    const chunks = await StreamUtil.readAll(completionStream);
-    const completion = ChatGptCompletionMessageUtil.merge(chunks);
-    //----
-    // PROCESS COMPLETION
-    //----
-    const prompts: AgenticaPrompt<Model>[] = [];
-    for (const choice of completion.choices) {
-      if (
-        choice.message.role === "assistant" &&
-        !!choice.message.content?.length
-      ) {
-        // @TODO this logic should call the dispatch function
-        prompts.push(
-          new AgenticaTextPrompt({
-            role: "assistant",
-            text: choice.message.content,
-          }),
-        );
-      }
-    }
-    if (
-      completion.choices.some(
-        (c) =>
-          !!c.message.tool_calls?.some(
-            (tc) =>
-              tc.type === "function" && tc.function.name === FUNCTION.name,
-          ),
-      )
-    )
-      await ctx.initialize();
-    return prompts;
-  };
-}
-
-const FUNCTION: ILlmFunction<"chatgpt"> = typia.llm.application<
-  __IChatInitialApplication,
-  "chatgpt"
->().functions[0]!;
+import { ILlmFunction, ILlmSchema } from "@samchon/openapi";
+import OpenAI from "openai";
+import typia from "typia";
+
+import { AgenticaContext } from "../context/AgenticaContext";
+import { __IChatInitialApplication } from "../context/internal/__IChatInitialApplication";
+import { AgenticaDefaultPrompt } from "../internal/AgenticaDefaultPrompt";
+import { AgenticaSystemPrompt } from "../internal/AgenticaSystemPrompt";
+import { StreamUtil } from "../internal/StreamUtil";
+import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
+import { AgenticaTextPrompt } from "../prompts/AgenticaTextPrompt";
+import { ChatGptCompletionMessageUtil } from "./ChatGptCompletionMessageUtil";
+import { ChatGptHistoryDecoder } from "./ChatGptHistoryDecoder";
+
+export namespace ChatGptInitializeFunctionAgent {
+  export const execute = async <Model extends ILlmSchema.Model>(
+    ctx: AgenticaContext<Model>,
+  ): Promise<AgenticaPrompt<Model>[]> => {
+    //----
+    // EXECUTE CHATGPT API
+    //----
+    const completionStream = await ctx.request("initialize", {
+      messages: [
+        // COMMON SYSTEM PROMPT
+        {
+          role: "system",
+          content: AgenticaDefaultPrompt.write(ctx.config),
+        } satisfies OpenAI.ChatCompletionSystemMessageParam,
+        // PREVIOUS HISTORIES
+        ...ctx.histories.map(ChatGptHistoryDecoder.decode).flat(),
+        // USER INPUT
+        {
+          role: "user",
+          content: ctx.prompt.text,
+        },
+        {
+          // SYSTEM PROMPT
+          role: "system",
+          content:
+            ctx.config?.systemPrompt?.initialize?.(ctx.histories) ??
+            AgenticaSystemPrompt.INITIALIZE,
+        },
+      ],
+      // GETTER FUNCTION
+      tools: [
+        {
+          type: "function",
+          function: {
+            name: FUNCTION.name,
+            description: FUNCTION.description,
+            parameters: FUNCTION.parameters as any,
+          },
+        },
+      ],
+      tool_choice: "auto",
+      parallel_tool_calls: false,
+    });
+
+    const chunks = await StreamUtil.readAll(completionStream);
+    const completion = ChatGptCompletionMessageUtil.merge(chunks);
+    //----
+    // PROCESS COMPLETION
+    //----
+    const prompts: AgenticaPrompt<Model>[] = [];
+    for (const choice of completion.choices) {
+      if (
+        choice.message.role === "assistant" &&
+        !!choice.message.content?.length
+      ) {
+        // @TODO this logic should call the dispatch function
+        prompts.push(
+          new AgenticaTextPrompt({
+            role: "assistant",
+            text: choice.message.content,
+          }),
+        );
+      }
+    }
+    if (
+      completion.choices.some(
+        (c) =>
+          !!c.message.tool_calls?.some(
+            (tc) =>
+              tc.type === "function" && tc.function.name === FUNCTION.name,
+          ),
+      )
+    )
+      await ctx.initialize();
+    return prompts;
+  };
+}
+
+const FUNCTION: ILlmFunction<"chatgpt"> = typia.llm.application<
+  __IChatInitialApplication,
+  "chatgpt"
+>().functions[0]!;
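A minimal usage sketch of the agent above, assuming an AgenticaContext<"chatgpt"> named ctx has already been built elsewhere (presumably by the Agentica class before dispatching through ChatGptAgent; that wiring is not shown in this diff):

// Sketch only: `ctx` is an assumption, not defined in this diff.
const prompts = await ChatGptInitializeFunctionAgent.execute(ctx);
// prompts holds any assistant text from the completion; ctx.initialize()
// has been awaited only if the model actually called the getter function.

The design choice visible here: execute gates ctx.initialize() on the model emitting a tool call whose name matches FUNCTION, so conversational turns that never touch the getter function leave the controllers uninitialized.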