@agentica/core 0.7.0-dev.20250224
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +464 -0
- package/lib/Agentica.d.ts +124 -0
- package/lib/Agentica.js +271 -0
- package/lib/Agentica.js.map +1 -0
- package/lib/chatgpt/ChatGptAgent.d.ts +6 -0
- package/lib/chatgpt/ChatGptAgent.js +59 -0
- package/lib/chatgpt/ChatGptAgent.js.map +1 -0
- package/lib/chatgpt/ChatGptCallFunctionAgent.d.ts +5 -0
- package/lib/chatgpt/ChatGptCallFunctionAgent.js +362 -0
- package/lib/chatgpt/ChatGptCallFunctionAgent.js.map +1 -0
- package/lib/chatgpt/ChatGptCancelFunctionAgent.d.ts +8 -0
- package/lib/chatgpt/ChatGptCancelFunctionAgent.js +359 -0
- package/lib/chatgpt/ChatGptCancelFunctionAgent.js.map +1 -0
- package/lib/chatgpt/ChatGptDescribeFunctionAgent.d.ts +5 -0
- package/lib/chatgpt/ChatGptDescribeFunctionAgent.js +56 -0
- package/lib/chatgpt/ChatGptDescribeFunctionAgent.js.map +1 -0
- package/lib/chatgpt/ChatGptHistoryDecoder.d.ts +5 -0
- package/lib/chatgpt/ChatGptHistoryDecoder.js +77 -0
- package/lib/chatgpt/ChatGptHistoryDecoder.js.map +1 -0
- package/lib/chatgpt/ChatGptInitializeFunctionAgent.d.ts +5 -0
- package/lib/chatgpt/ChatGptInitializeFunctionAgent.js +1883 -0
- package/lib/chatgpt/ChatGptInitializeFunctionAgent.js.map +1 -0
- package/lib/chatgpt/ChatGptSelectFunctionAgent.d.ts +5 -0
- package/lib/chatgpt/ChatGptSelectFunctionAgent.js +381 -0
- package/lib/chatgpt/ChatGptSelectFunctionAgent.js.map +1 -0
- package/lib/functional/createHttpLlmApplication.d.ts +33 -0
- package/lib/functional/createHttpLlmApplication.js +7766 -0
- package/lib/functional/createHttpLlmApplication.js.map +1 -0
- package/lib/index.d.ts +16 -0
- package/lib/index.js +35 -0
- package/lib/index.js.map +1 -0
- package/lib/index.mjs +10366 -0
- package/lib/index.mjs.map +1 -0
- package/lib/internal/AgenticaConstant.d.ts +4 -0
- package/lib/internal/AgenticaConstant.js +9 -0
- package/lib/internal/AgenticaConstant.js.map +1 -0
- package/lib/internal/AgenticaCostAggregator.d.ts +5 -0
- package/lib/internal/AgenticaCostAggregator.js +30 -0
- package/lib/internal/AgenticaCostAggregator.js.map +1 -0
- package/lib/internal/AgenticaDefaultPrompt.d.ts +4 -0
- package/lib/internal/AgenticaDefaultPrompt.js +32 -0
- package/lib/internal/AgenticaDefaultPrompt.js.map +1 -0
- package/lib/internal/AgenticaOperationComposer.d.ts +9 -0
- package/lib/internal/AgenticaOperationComposer.js +58 -0
- package/lib/internal/AgenticaOperationComposer.js.map +1 -0
- package/lib/internal/AgenticaPromptFactory.d.ts +6 -0
- package/lib/internal/AgenticaPromptFactory.js +9 -0
- package/lib/internal/AgenticaPromptFactory.js.map +1 -0
- package/lib/internal/AgenticaPromptTransformer.d.ts +9 -0
- package/lib/internal/AgenticaPromptTransformer.js +58 -0
- package/lib/internal/AgenticaPromptTransformer.js.map +1 -0
- package/lib/internal/AgenticaSystemPrompt.d.ts +8 -0
- package/lib/internal/AgenticaSystemPrompt.js +13 -0
- package/lib/internal/AgenticaSystemPrompt.js.map +1 -0
- package/lib/internal/MathUtil.d.ts +3 -0
- package/lib/internal/MathUtil.js +8 -0
- package/lib/internal/MathUtil.js.map +1 -0
- package/lib/internal/Singleton.d.ts +1 -0
- package/lib/internal/Singleton.js +23 -0
- package/lib/internal/Singleton.js.map +1 -0
- package/lib/internal/__map_take.d.ts +1 -0
- package/lib/internal/__map_take.js +16 -0
- package/lib/internal/__map_take.js.map +1 -0
- package/lib/structures/IAgenticaConfig.d.ts +112 -0
- package/lib/structures/IAgenticaConfig.js +3 -0
- package/lib/structures/IAgenticaConfig.js.map +1 -0
- package/lib/structures/IAgenticaContext.d.ts +106 -0
- package/lib/structures/IAgenticaContext.js +3 -0
- package/lib/structures/IAgenticaContext.js.map +1 -0
- package/lib/structures/IAgenticaController.d.ts +110 -0
- package/lib/structures/IAgenticaController.js +3 -0
- package/lib/structures/IAgenticaController.js.map +1 -0
- package/lib/structures/IAgenticaEvent.d.ts +191 -0
- package/lib/structures/IAgenticaEvent.js +3 -0
- package/lib/structures/IAgenticaEvent.js.map +1 -0
- package/lib/structures/IAgenticaExecutor.d.ts +144 -0
- package/lib/structures/IAgenticaExecutor.js +3 -0
- package/lib/structures/IAgenticaExecutor.js.map +1 -0
- package/lib/structures/IAgenticaOperation.d.ts +48 -0
- package/lib/structures/IAgenticaOperation.js +3 -0
- package/lib/structures/IAgenticaOperation.js.map +1 -0
- package/lib/structures/IAgenticaOperationCollection.d.ts +46 -0
- package/lib/structures/IAgenticaOperationCollection.js +3 -0
- package/lib/structures/IAgenticaOperationCollection.js.map +1 -0
- package/lib/structures/IAgenticaOperationSelection.d.ts +51 -0
- package/lib/structures/IAgenticaOperationSelection.js +3 -0
- package/lib/structures/IAgenticaOperationSelection.js.map +1 -0
- package/lib/structures/IAgenticaPrompt.d.ts +139 -0
- package/lib/structures/IAgenticaPrompt.js +3 -0
- package/lib/structures/IAgenticaPrompt.js.map +1 -0
- package/lib/structures/IAgenticaProps.d.ts +59 -0
- package/lib/structures/IAgenticaProps.js +3 -0
- package/lib/structures/IAgenticaProps.js.map +1 -0
- package/lib/structures/IAgenticaProvider.d.ts +41 -0
- package/lib/structures/IAgenticaProvider.js +3 -0
- package/lib/structures/IAgenticaProvider.js.map +1 -0
- package/lib/structures/IAgenticaSystemPrompt.d.ts +116 -0
- package/lib/structures/IAgenticaSystemPrompt.js +3 -0
- package/lib/structures/IAgenticaSystemPrompt.js.map +1 -0
- package/lib/structures/IAgenticaTokenUsage.d.ts +50 -0
- package/lib/structures/IAgenticaTokenUsage.js +3 -0
- package/lib/structures/IAgenticaTokenUsage.js.map +1 -0
- package/lib/structures/internal/__IChatCancelFunctionsApplication.d.ts +22 -0
- package/lib/structures/internal/__IChatCancelFunctionsApplication.js +3 -0
- package/lib/structures/internal/__IChatCancelFunctionsApplication.js.map +1 -0
- package/lib/structures/internal/__IChatFunctionReference.d.ts +20 -0
- package/lib/structures/internal/__IChatFunctionReference.js +3 -0
- package/lib/structures/internal/__IChatFunctionReference.js.map +1 -0
- package/lib/structures/internal/__IChatInitialApplication.d.ts +14 -0
- package/lib/structures/internal/__IChatInitialApplication.js +3 -0
- package/lib/structures/internal/__IChatInitialApplication.js.map +1 -0
- package/lib/structures/internal/__IChatSelectFunctionsApplication.d.ts +23 -0
- package/lib/structures/internal/__IChatSelectFunctionsApplication.js +3 -0
- package/lib/structures/internal/__IChatSelectFunctionsApplication.js.map +1 -0
- package/lib/typings/AgenticaSource.d.ts +1 -0
- package/lib/typings/AgenticaSource.js +3 -0
- package/lib/typings/AgenticaSource.js.map +1 -0
- package/package.json +74 -0
- package/prompts/cancel.md +5 -0
- package/prompts/common.md +3 -0
- package/prompts/describe.md +7 -0
- package/prompts/execute.md +7 -0
- package/prompts/initialize.md +3 -0
- package/prompts/select.md +7 -0
- package/src/Agentica.ts +322 -0
- package/src/chatgpt/ChatGptAgent.ts +71 -0
- package/src/chatgpt/ChatGptCallFunctionAgent.ts +445 -0
- package/src/chatgpt/ChatGptCancelFunctionAgent.ts +283 -0
- package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +51 -0
- package/src/chatgpt/ChatGptHistoryDecoder.ts +86 -0
- package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +88 -0
- package/src/chatgpt/ChatGptSelectFunctionAgent.ts +316 -0
- package/src/functional/createHttpLlmApplication.ts +63 -0
- package/src/index.ts +19 -0
- package/src/internal/AgenticaConstant.ts +4 -0
- package/src/internal/AgenticaCostAggregator.ts +35 -0
- package/src/internal/AgenticaDefaultPrompt.ts +39 -0
- package/src/internal/AgenticaOperationComposer.ts +82 -0
- package/src/internal/AgenticaPromptFactory.ts +30 -0
- package/src/internal/AgenticaPromptTransformer.ts +83 -0
- package/src/internal/AgenticaSystemPrompt.ts +14 -0
- package/src/internal/MathUtil.ts +3 -0
- package/src/internal/Singleton.ts +22 -0
- package/src/internal/__map_take.ts +15 -0
- package/src/structures/IAgenticaConfig.ts +121 -0
- package/src/structures/IAgenticaContext.ts +128 -0
- package/src/structures/IAgenticaController.ts +130 -0
- package/src/structures/IAgenticaEvent.ts +224 -0
- package/src/structures/IAgenticaExecutor.ts +152 -0
- package/src/structures/IAgenticaOperation.ts +64 -0
- package/src/structures/IAgenticaOperationCollection.ts +50 -0
- package/src/structures/IAgenticaOperationSelection.ts +69 -0
- package/src/structures/IAgenticaPrompt.ts +173 -0
- package/src/structures/IAgenticaProps.ts +64 -0
- package/src/structures/IAgenticaProvider.ts +45 -0
- package/src/structures/IAgenticaSystemPrompt.ts +122 -0
- package/src/structures/IAgenticaTokenUsage.ts +52 -0
- package/src/structures/internal/__IChatCancelFunctionsApplication.ts +23 -0
- package/src/structures/internal/__IChatFunctionReference.ts +21 -0
- package/src/structures/internal/__IChatInitialApplication.ts +15 -0
- package/src/structures/internal/__IChatSelectFunctionsApplication.ts +24 -0
- package/src/typings/AgenticaSource.ts +6 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Wrtn Technologies
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,464 @@
|
|
|
1
|
+
# `@agentica/core`
|
|
2
|
+

|
|
3
|
+
|
|
4
|
+
[](https://github.com/wrtnlabs/agentica/blob/master/LICENSE)
|
|
5
|
+
[](https://www.npmjs.com/package/@agentica/core)
|
|
6
|
+
[](https://www.npmjs.com/package/@agentica/core)
|
|
7
|
+
[](https://github.com/wrtnlabs/agentica/actions?query=workflow%3Abuild)
|
|
8
|
+
|
|
9
|
+
The simplest **Agentic AI Library**, specialized in **LLM Function Calling**.
|
|
10
|
+
|
|
11
|
+
`@agentica/core` is the simplest library specialized for LLM (Large Language Model) function calling. You can provide functions to call by *Swagger/OpenAPI* document or *TypeScript class type*, and it will make everything possible. *Super AI Chatbot* development, or *Multi Agent Orchestration*, all of them can be realized by the function calling.
|
|
12
|
+
|
|
13
|
+
For example, if you provide **Swagger document** of a Shopping Mall Server, `@agentica/core` will compose **Super AI Chatbot** application. In the chatbot application, customers can purchase products just by conversation texts. If you wanna automate the counseling or refunding process, you also can do it just by delivering the Swagger document.
|
|
14
|
+
|
|
15
|
+
Also, the LLM function calling strategy is effective for the **Multi-Agent Orchestration**, and it is easier to develop than any other way. You don't need to learn any complicated framework and its specific paradigms and patterns. Just connect them through a class, and deliver the **TypeScript class type**. `@agentica/core` will centralize and realize the multi-agent orchestration through function calling.
|
|
16
|
+
|
|
17
|
+
> https://github.com/user-attachments/assets/01604b53-aca4-41cb-91aa-3faf63549ea6
|
|
18
|
+
>
|
|
19
|
+
> Demonstration video of Shopping AI Chatbot
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
## How to Use
|
|
25
|
+
### Setup
|
|
26
|
+
```bash
|
|
27
|
+
npm install @agentica/core @samchon/openapi typia
|
|
28
|
+
npx typia setup
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
Install not only `@agentica/core`, but also [`@samchon/openapi`](https://github.com/samchon/openapi) and [`typia`](https://github.com/samchon/typia).
|
|
32
|
+
|
|
33
|
+
`@samchon/openapi` is an OpenAPI specification library which can convert Swagger/OpenAPI document to LLM function calling schema. And `typia` is a transformer (compiler) library which can compose LLM function calling schema from a TypeScript class type.
|
|
34
|
+
|
|
35
|
+
By the way, as `typia` is a transformer library analyzing TypeScript source code in the compilation level, it needs additional setup command `npx typia setup`. Also, if you're using a non-standard TypeScript compiler (not `tsc`) or developing the agent in the frontend environment, you have to setup [`@ryoppippi/unplugin-typia`](https://typia.io/docs/setup/#unplugin-typia) too.
|
|
36
|
+
|
|
37
|
+
### Chat with Backend Server
|
|
38
|
+
```typescript
|
|
39
|
+
import { IHttpLlmApplication, OpenApi } from "@samchon/openapi";
|
|
40
|
+
import { Agentica, createHttpLlmApplication } from "@agentica/core";
|
|
41
|
+
import OpenAI from "openai";
|
|
42
|
+
import { IValidation } from "typia";
|
|
43
|
+
|
|
44
|
+
const main = async (): Promise<void> => {
|
|
45
|
+
// LOAD SWAGGER DOCUMENT, AND CONVERT TO LLM APPLICATION SCHEMA
|
|
46
|
+
const application: IValidation<IHttpLlmApplication<"chatgpt">> =
|
|
47
|
+
createHttpLlmApplication({
|
|
48
|
+
model: "chatgpt",
|
|
49
|
+
document: OpenApi.convert(
|
|
50
|
+
await fetch("https://shopping-be.wrtn.ai/editor/swagger.json").then(
|
|
51
|
+
(r) => r.json()
|
|
52
|
+
)
|
|
53
|
+
),
|
|
54
|
+
});
|
|
55
|
+
if (application.success === false) {
|
|
56
|
+
console.error(application.errors);
|
|
57
|
+
throw new Error("Type error on the target swagger document");
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
// CREATE AN AGENT WITH THE APPLICATION
|
|
61
|
+
const agent: Agentica = new Agentica({
|
|
62
|
+
provider: {
|
|
63
|
+
type: "chatgpt",
|
|
64
|
+
model: "gpt-4o-mini",
|
|
65
|
+
api: new OpenAI({
|
|
66
|
+
apiKey: "YOUR_OPENAI_API_KEY",
|
|
67
|
+
}),
|
|
68
|
+
},
|
|
69
|
+
controllers: [
|
|
70
|
+
{
|
|
71
|
+
protocol: "http",
|
|
72
|
+
name: "shopping",
|
|
73
|
+
application: application.data,
|
|
74
|
+
connection: {
|
|
75
|
+
host: "https://shopping-be.wrtn.ai",
|
|
76
|
+
},
|
|
77
|
+
},
|
|
78
|
+
],
|
|
79
|
+
config: {
|
|
80
|
+
locale: "en-US",
|
|
81
|
+
},
|
|
82
|
+
});
|
|
83
|
+
|
|
84
|
+
// ADD EVENT LISTENERS
|
|
85
|
+
agent.on("select", async (select) => {
|
|
86
|
+
console.log("selected function", select.operation.function.name);
|
|
87
|
+
});
|
|
88
|
+
agent.on("execute", async (execute) => {
|
|
89
|
+
console.log("execute function", {
|
|
90
|
+
function: execute.operation.function.name,
|
|
91
|
+
arguments: execute.arguments,
|
|
92
|
+
value: execute.value,
|
|
93
|
+
});
|
|
94
|
+
});
|
|
95
|
+
|
|
96
|
+
// CONVERSATE TO AI CHATBOT
|
|
97
|
+
await agent.conversate("What you can do?");
|
|
98
|
+
};
|
|
99
|
+
main().catch(console.error);
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
Just load your swagger document, and put it into the `@agentica/core`.
|
|
103
|
+
|
|
104
|
+
Then you can start conversation with your backend server, and the API functions of the backend server would be automatically called. AI chatbot will analyze your conversation texts, and executes proper API functions by the LLM (Large Language Model) function calling feature.
|
|
105
|
+
|
|
106
|
+
From now on, every backend developer is also an AI developer.
|
|
107
|
+
|
|
108
|
+
### Chat with TypeScript Class
|
|
109
|
+
```typescript
|
|
110
|
+
import { Agentica } from "@agentica/core";
|
|
111
|
+
import typia, { tags } from "typia";
|
|
112
|
+
import OpenAI from "openai";
|
|
113
|
+
|
|
114
|
+
class BbsArticleService {
|
|
115
|
+
/**
|
|
116
|
+
* Create a new article.
|
|
117
|
+
*
|
|
118
|
+
* Writes a new article and archives it into the DB.
|
|
119
|
+
*
|
|
120
|
+
* @param props Properties of create function
|
|
121
|
+
* @returns Newly created article
|
|
122
|
+
*/
|
|
123
|
+
public async create(props: {
|
|
124
|
+
/**
|
|
125
|
+
* Information of the article to create
|
|
126
|
+
*/
|
|
127
|
+
input: IBbsArticle.ICreate;
|
|
128
|
+
}): Promise<IBbsArticle>;
|
|
129
|
+
|
|
130
|
+
/**
|
|
131
|
+
* Update an article.
|
|
132
|
+
*
|
|
133
|
+
* Updates an article with new content.
|
|
134
|
+
*
|
|
135
|
+
* @param props Properties of update function
|
|
136
|
+
* @param input New content to update
|
|
137
|
+
*/
|
|
138
|
+
public async update(props: {
|
|
139
|
+
/**
|
|
140
|
+
* Target article's {@link IBbsArticle.id}.
|
|
141
|
+
*/
|
|
142
|
+
id: string & tags.Format<"uuid">;
|
|
143
|
+
|
|
144
|
+
/**
|
|
145
|
+
* New content to update.
|
|
146
|
+
*/
|
|
147
|
+
input: IBbsArticle.IUpdate;
|
|
148
|
+
}): Promise<void>;
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
const main = async (): Promise<void> => {
|
|
152
|
+
const api: OpenAI = new OpenAI({
|
|
153
|
+
apiKey: "YOUR_OPENAI_API_KEY",
|
|
154
|
+
});
|
|
155
|
+
const agent: Agentica = new Agentica({
|
|
156
|
+
provider: {
|
|
157
|
+
type: "chatgpt",
|
|
158
|
+
model: "gpt-4o-mini",
|
|
159
|
+
api: new OpenAI({
|
|
160
|
+
apiKey: "YOUR_OPENAI_API_KEY",
|
|
161
|
+
}),
|
|
162
|
+
},
|
|
163
|
+
controllers: [
|
|
164
|
+
{
|
|
165
|
+
protocol: "class",
|
|
166
|
+
name: "bbsArticleService",
|
|
167
|
+
application: typia.llm.applicationOfValidate<
|
|
168
|
+
BbsArticleService,
|
|
169
|
+
"chatgpt"
|
|
170
|
+
>(),
|
|
171
|
+
execute: new BbsArticleService(),
|
|
172
|
+
},
|
|
173
|
+
],
|
|
174
|
+
});
|
|
175
|
+
await agent.conversate("I wanna write an article.");
|
|
176
|
+
};
|
|
177
|
+
main().catch(console.error);
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
You also can chat with a TypeScript class.
|
|
181
|
+
|
|
182
|
+
Just deliver the TypeScript type to the `@agentica/core`, and start conversation. Then `@agentica/core` will call the proper class functions by analyzing your conversation texts with LLM function calling feature.
|
|
183
|
+
|
|
184
|
+
From now on, every TypeScript classes you've developed can be the AI chatbot.
|
|
185
|
+
|
|
186
|
+
### Multi Agent Orchestration
|
|
187
|
+
```typescript
|
|
188
|
+
import { Agentica } from "@agentica/core";
|
|
189
|
+
import typia from "typia";
|
|
190
|
+
import OpenAI from "openai";
|
|
191
|
+
|
|
192
|
+
class OpenAIVectorStoreAgent {
|
|
193
|
+
/**
|
|
194
|
+
* Retrieve Vector DB with RAG.
|
|
195
|
+
*
|
|
196
|
+
* @param props Properties of Vector DB retrieval
|
|
197
|
+
*/
|
|
198
|
+
public query(props: {
|
|
199
|
+
/**
|
|
200
|
+
* Keywords to look up.
|
|
201
|
+
*
|
|
202
|
+
* Put all the keywords you want to look up. However, keywords
|
|
203
|
+
* should only be included in the core, and all ambiguous things
|
|
204
|
+
* should be excluded to achieve accurate results.
|
|
205
|
+
*/
|
|
206
|
+
keywords: string;
|
|
207
|
+
}): Promise<IVectorStoreQueryResult>;
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
const main = async (): Promise<void> => {
|
|
211
|
+
const api: OpenAI = new OpenAI({
|
|
212
|
+
apiKey: "YOUR_OPENAI_API_KEY",
|
|
213
|
+
});
|
|
214
|
+
const agent: Agentica = new Agentica({
|
|
215
|
+
provider: {
|
|
216
|
+
type: "chatgpt",
|
|
217
|
+
model: "gpt-4o-mini",
|
|
218
|
+
api: new OpenAI({
|
|
219
|
+
apiKey: "YOUR_OPENAI_API_KEY",
|
|
220
|
+
}),
|
|
221
|
+
},
|
|
222
|
+
controllers: [
|
|
223
|
+
{
|
|
224
|
+
protocol: "class",
|
|
225
|
+
name: "vectorStore",
|
|
226
|
+
application: typia.llm.applicationOfValidate<
|
|
227
|
+
OpenAIVectorStoreAgent,
|
|
228
|
+
"chatgpt"
|
|
229
|
+
>(),
|
|
230
|
+
execute: new OpenAIVectorStoreAgent({
|
|
231
|
+
api,
|
|
232
|
+
id: "YOUR_OPENAI_VECTOR_STORE_ID",
|
|
233
|
+
}),
|
|
234
|
+
},
|
|
235
|
+
],
|
|
236
|
+
});
|
|
237
|
+
await agent.conversate("I wanna research economic articles");
|
|
238
|
+
};
|
|
239
|
+
main().catch(console.error);
|
|
240
|
+
```
|
|
241
|
+
|
|
242
|
+
In the `@agentica/core`, you can implement multi-agent orchestration super easily.
|
|
243
|
+
|
|
244
|
+
Just develop a TypeScript class which contains agent feature like Vector Store, and just deliver the TypeScript class type to the `@agentica/core` like above. The `@agentica/core` will centralize and realize the multi-agent orchestration by LLM function calling strategy to the TypeScript class.
|
|
245
|
+
|
|
246
|
+
### WebSocket Communication
|
|
247
|
+
`@agentica/core` provides WebSocket interaction module.
|
|
248
|
+
|
|
249
|
+
The websocket interface module is following RPC (Remote Procedure Call) paradigm of the [TGrid](https://github.com/samchon/tgrid), so it is very easy to interact between frontend application and backend server of the AI agent.
|
|
250
|
+
|
|
251
|
+
```typescript
|
|
252
|
+
import {
|
|
253
|
+
IAgenticaRpcListener,
|
|
254
|
+
IAgenticaRpcService,
|
|
255
|
+
Agentica,
|
|
256
|
+
AgenticaRpcService,
|
|
257
|
+
} from "@agentica/core";
|
|
258
|
+
import { WebSocketServer } from "tgrid";
|
|
259
|
+
|
|
260
|
+
const server: WebSocketServer<
|
|
261
|
+
null,
|
|
262
|
+
IAgenticaRpcService,
|
|
263
|
+
IAgenticaRpcListener
|
|
264
|
+
> = new WebSocketServer();
|
|
265
|
+
await server.open(3001, async (acceptor) => {
|
|
266
|
+
await acceptor.accept(
|
|
267
|
+
new AgenticaRpcService({
|
|
268
|
+
agent: new Agentica({ ... }),
|
|
269
|
+
listener: acceptor.getDriver(),
|
|
270
|
+
}),
|
|
271
|
+
);
|
|
272
|
+
});
|
|
273
|
+
```
|
|
274
|
+
|
|
275
|
+
When developing backend server, wrap `Agentica` to `AgenticaRpcService`.
|
|
276
|
+
|
|
277
|
+
If you're developing WebSocket protocol backend server, create a new `Agentica` instance, and wrap it to the `AgenticaRpcService` class. And then open the websocket server like above code. The WebSocket server will call the client functions of the `IAgenticaRpcListener` remotely.
|
|
278
|
+
|
|
279
|
+
```typescript
|
|
280
|
+
import { IAgenticaRpcListener, IAgenticaRpcService } from "@agentica/core";
|
|
281
|
+
import { Driver, WebSocketConnector } from "tgrid";
|
|
282
|
+
|
|
283
|
+
const connector: WebSocketConnector<
|
|
284
|
+
null,
|
|
285
|
+
IAgenticaRpcListener,
|
|
286
|
+
IAgenticaRpcService
|
|
287
|
+
> = new WebSocketConnector(null, {
|
|
288
|
+
text: async (evt) => {
|
|
289
|
+
console.log(evt.role, evt.text);
|
|
290
|
+
},
|
|
291
|
+
describe: async (evt) => {
|
|
292
|
+
console.log("describer", evt.text);
|
|
293
|
+
},
|
|
294
|
+
});
|
|
295
|
+
await connector.connect("ws://localhost:3001");
|
|
296
|
+
|
|
297
|
+
const driver: Driver<IAgenticaRpcService> = connector.getDriver();
|
|
298
|
+
await driver.conversate("Hello, what you can do?");
|
|
299
|
+
```
|
|
300
|
+
|
|
301
|
+
When developing frontend application, define `IAgenticaRpcListener` instance.
|
|
302
|
+
|
|
303
|
+
Otherwise, if you're developing a WebSocket protocol client application, connect to the websocket backend server with its URL address, and provide an `IAgenticaRpcListener` instance for event listening.
|
|
304
|
+
|
|
305
|
+
And then call the backend server's function `IAgenticaRpcService.conversate()` remotely through the `Driver<IAgenticaRpcService>` wrapping. The backend server will call your `IAgenticaRpcListener` functions remotely through the RPC paradigm.
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
## Principles
|
|
310
|
+
### Agent Strategy
|
|
311
|
+
```mermaid
|
|
312
|
+
sequenceDiagram
|
|
313
|
+
actor User
|
|
314
|
+
actor Agent
|
|
315
|
+
participant Selector
|
|
316
|
+
participant Caller
|
|
317
|
+
participant Describer
|
|
318
|
+
activate User
|
|
319
|
+
User-->>Agent: Conversate:<br/>user says
|
|
320
|
+
activate Agent
|
|
321
|
+
Agent->>Selector: Deliver conversation text
|
|
322
|
+
activate Selector
|
|
323
|
+
deactivate User
|
|
324
|
+
Note over Selector: Select or remove candidate functions
|
|
325
|
+
alt No candidate
|
|
326
|
+
Selector->>Agent: Talk like plain ChatGPT
|
|
327
|
+
deactivate Selector
|
|
328
|
+
Agent->>User: Conversate:<br/>agent says
|
|
329
|
+
activate User
|
|
330
|
+
deactivate User
|
|
331
|
+
end
|
|
332
|
+
deactivate Agent
|
|
333
|
+
loop Candidate functions exist
|
|
334
|
+
activate Agent
|
|
335
|
+
Agent->>Caller: Deliver conversation text
|
|
336
|
+
activate Caller
|
|
337
|
+
alt Contexts are enough
|
|
338
|
+
Note over Caller: Call fulfilled functions
|
|
339
|
+
Caller->>Describer: Function call histories
|
|
340
|
+
deactivate Caller
|
|
341
|
+
activate Describer
|
|
342
|
+
Describer->>Agent: Describe function calls
|
|
343
|
+
deactivate Describer
|
|
344
|
+
Agent->>User: Conversate:<br/>agent describes
|
|
345
|
+
activate User
|
|
346
|
+
deactivate User
|
|
347
|
+
else Contexts are not enough
|
|
348
|
+
break
|
|
349
|
+
Caller->>Agent: Request more information
|
|
350
|
+
end
|
|
351
|
+
Agent->>User: Conversate:<br/>agent requests
|
|
352
|
+
activate User
|
|
353
|
+
deactivate User
|
|
354
|
+
end
|
|
355
|
+
deactivate Agent
|
|
356
|
+
end
|
|
357
|
+
```
|
|
358
|
+
|
|
359
|
+
When user says, `@agentica/core` delivers the conversation text to the `selector` agent, and let the `selector` agent to find (or cancel) candidate functions from the context. If the `selector` agent could not find any candidate function to call and there is not any candidate function previously selected either, the `selector` agent will work just like a plain ChatGPT.
|
|
360
|
+
|
|
361
|
+
And `@agentica/core` enters to a loop statement until the candidate functions to be empty. In the loop statement, `caller` agent tries to LLM function calling by analyzing the user's conversation text. If context is enough to compose arguments of candidate functions, the `caller` agent actually calls the target functions, and lets the `describer` agent explain the function calling results. Otherwise the context is not enough to compose arguments, `caller` agent requests more information to user.
|
|
362
|
+
|
|
363
|
+
Such LLM (Large Language Model) function calling strategy separating `selector`, `caller`, and `describer` is the key logic of `@agentica/core`.
|
|
364
|
+
|
|
365
|
+
### Validation Feedback
|
|
366
|
+
```typescript
|
|
367
|
+
import { FunctionCall } from "pseudo";
|
|
368
|
+
import { ILlmFunctionOfValidate, IValidation } from "typia";
|
|
369
|
+
|
|
370
|
+
export const correctFunctionCall = (p: {
|
|
371
|
+
call: FunctionCall;
|
|
372
|
+
functions: Array<ILlmFunctionOfValidate<"chatgpt">>;
|
|
373
|
+
retry: (reason: string, errors?: IValidation.IError[]) => Promise<unknown>;
|
|
374
|
+
}): Promise<unknown> => {
|
|
375
|
+
// FIND FUNCTION
|
|
376
|
+
const func: ILlmFunctionOfValidate<"chatgpt"> | undefined =
|
|
377
|
+
p.functions.find((f) => f.name === p.call.name);
|
|
378
|
+
if (func === undefined) {
|
|
379
|
+
// never happened in my experience
|
|
380
|
+
return p.retry(
|
|
381
|
+
"Unable to find the matched function name. Try it again.",
|
|
382
|
+
);
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
// VALIDATE
|
|
386
|
+
const result: IValidation<unknown> = func.validate(p.call.arguments);
|
|
387
|
+
if (result.success === false) {
|
|
388
|
+
// 1st trial: 50% (gpt-4o-mini in shopping mall chatbot)
|
|
389
|
+
// 2nd trial with validation feedback: 99%
|
|
390
|
+
// 3rd trial with validation feedback again: never failed
|
|
391
|
+
return p.retry(
|
|
392
|
+
"Type errors are detected. Correct it through validation errors",
|
|
393
|
+
{
|
|
394
|
+
errors: result.errors,
|
|
395
|
+
},
|
|
396
|
+
);
|
|
397
|
+
}
|
|
398
|
+
return result.data;
|
|
399
|
+
}
|
|
400
|
+
```
|
|
401
|
+
|
|
402
|
+
Is LLM function calling perfect?
|
|
403
|
+
|
|
404
|
+
The answer is no, and LLM (Large Language Model) providers like OpenAI make a lot of type-level mistakes when composing the arguments of the target function to call. Even though an LLM function calling schema has defined an `Array<string>` type, the LLM often fills it with just a `string` typed value.
|
|
405
|
+
|
|
406
|
+
Therefore, when developing an LLM function calling agent, a validation feedback process is essentially required. If the LLM makes a type-level mistake during arguments composition, the agent must feed back the most detailed validation errors, and let the LLM retry the function calling, referencing those validation errors.
|
|
407
|
+
|
|
408
|
+
For the validation feedback, `@agentica/core` utilizes the [`typia.validate<T>()`](https://typia.io/docs/validators/validate) and [`typia.llm.applicationOfValidate<Class, Model>()`](https://typia.io/docs/llm/application/#applicationofvalidate) functions. They construct validation logic by analyzing TypeScript source code and types at compile time, so they are more detailed and accurate than any other validators, as shown below.
|
|
409
|
+
|
|
410
|
+
Through such a validation feedback strategy combined with the `typia` runtime validator, `@agentica/core` has achieved the most ideal LLM function calling. In my experience, when using OpenAI's `gpt-4o-mini` model, it tends to construct invalid function calling arguments at the first trial about 50% of the time. However, when corrected through validation feedback with `typia`, the success rate soars to 99%. And I've never had a failure when trying validation feedback twice.
|
|
411
|
+
|
|
412
|
+
Components | `typia` | `TypeBox` | `ajv` | `io-ts` | `zod` | `C.V.`
|
|
413
|
+
-------------------------|--------|-----------|-------|---------|-------|------------------
|
|
414
|
+
**Easy to use** | ✅ | ❌ | ❌ | ❌ | ❌ | ❌
|
|
415
|
+
[Object (simple)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectSimple.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔
|
|
416
|
+
[Object (hierarchical)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectHierarchical.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔
|
|
417
|
+
[Object (recursive)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectRecursive.ts) | ✔ | ❌ | ✔ | ✔ | ✔ | ✔
|
|
418
|
+
[Object (union, implicit)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectUnionImplicit.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌
|
|
419
|
+
[Object (union, explicit)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectUnionExplicit.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ❌
|
|
420
|
+
[Object (additional tags)](https://github.com/samchon/typia/#comment-tags) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔
|
|
421
|
+
[Object (template literal types)](https://github.com/samchon/typia/blob/master/test/src/structures/TemplateUnion.ts) | ✔ | ✔ | ✔ | ❌ | ❌ | ❌
|
|
422
|
+
[Object (dynamic properties)](https://github.com/samchon/typia/blob/master/test/src/structures/DynamicTemplate.ts) | ✔ | ✔ | ✔ | ❌ | ❌ | ❌
|
|
423
|
+
[Array (rest tuple)](https://github.com/samchon/typia/blob/master/test/src/structures/TupleRestAtomic.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌
|
|
424
|
+
[Array (hierarchical)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayHierarchical.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔
|
|
425
|
+
[Array (recursive)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRecursive.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ❌
|
|
426
|
+
[Array (recursive, union)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRecursiveUnionExplicit.ts) | ✔ | ✔ | ❌ | ✔ | ✔ | ❌
|
|
427
|
+
[Array (R+U, implicit)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRecursiveUnionImplicit.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌
|
|
428
|
+
[Array (repeated)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRepeatedNullable.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌
|
|
429
|
+
[Array (repeated, union)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRepeatedUnionWithTuple.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌
|
|
430
|
+
[**Ultimate Union Type**](https://github.com/samchon/typia/blob/master/test/src/structures/UltimateUnion.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌
|
|
431
|
+
|
|
432
|
+
> `C.V.` means `class-validator`
|
|
433
|
+
|
|
434
|
+
### OpenAPI Specification
|
|
435
|
+
```mermaid
|
|
436
|
+
flowchart
|
|
437
|
+
subgraph "OpenAPI Specification"
|
|
438
|
+
v20("Swagger v2.0") --upgrades--> emended[["OpenAPI v3.1 (emended)"]]
|
|
439
|
+
v30("OpenAPI v3.0") --upgrades--> emended
|
|
440
|
+
v31("OpenAPI v3.1") --emends--> emended
|
|
441
|
+
end
|
|
442
|
+
subgraph "OpenAPI Generator"
|
|
443
|
+
emended --normalizes--> migration[["Migration Schema"]]
|
|
444
|
+
migration --"Artificial Intelligence"--> lfc{{"LLM Function Calling"}}
|
|
445
|
+
lfc --"OpenAI"--> chatgpt("ChatGPT")
|
|
446
|
+
lfc --"Anthropic"--> claude("Claude")
|
|
447
|
+
lfc --"Google"--> gemini("Gemini")
|
|
448
|
+
lfc --"Meta"--> llama("Llama")
|
|
449
|
+
end
|
|
450
|
+
```
|
|
451
|
+
|
|
452
|
+
`@agentica/core` obtains LLM function calling schemas from both Swagger/OpenAPI documents and TypeScript class types. A TypeScript class type can be converted to an LLM function calling schema by the [`typia.llm.applicationOfValidate<Class, Model>()`](https://typia.io/docs/llm/application#applicationofvalidate) function. Then how about the OpenAPI document? How can a Swagger document become an LLM function calling schema?
|
|
453
|
+
|
|
454
|
+
The secret is on the above diagram.
|
|
455
|
+
|
|
456
|
+
In the OpenAPI specification, there are three versions with different definitions. And even within the same version, there are too many ambiguous and duplicated expressions. To resolve these problems, [`@samchon/openapi`](https://github.com/samchon/openapi) transforms every OpenAPI document to the v3.1 emended specification. The `@samchon/openapi` emended v3.1 specification has removed every ambiguous and duplicated expression for clarity.
|
|
457
|
+
|
|
458
|
+
With the v3.1 emended OpenAPI document, `@samchon/openapi` converts it to a migration schema that is near to the function structure. And as the last step, the migration schema will be transformed to a specific LLM provider's function calling schema. LLM function calling schemas are composed like this way.
|
|
459
|
+
|
|
460
|
+
> **Why do not directly convert, but intermediate?**
|
|
461
|
+
>
|
|
462
|
+
> If converting directly from each version of the OpenAPI specification to each specific LLM's function calling schema, I would have to make many more converters, their number increasing by Cartesian product. With the current models, the number of converters would be 12 = 3 x 4.
|
|
463
|
+
>
|
|
464
|
+
> However, by defining an intermediate schema, the number of converters shrinks to a plus operation. With the current models, I just need to develop only (7 = 3 + 4) converters, and this is the reason why I've defined the intermediate specification. This way is economic.
|
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
import { IAgenticaConfig } from "./structures/IAgenticaConfig";
|
|
2
|
+
import { IAgenticaController } from "./structures/IAgenticaController";
|
|
3
|
+
import { IAgenticaEvent } from "./structures/IAgenticaEvent";
|
|
4
|
+
import { IAgenticaOperation } from "./structures/IAgenticaOperation";
|
|
5
|
+
import { IAgenticaPrompt } from "./structures/IAgenticaPrompt";
|
|
6
|
+
import { IAgenticaProps } from "./structures/IAgenticaProps";
|
|
7
|
+
import { IAgenticaProvider } from "./structures/IAgenticaProvider";
|
|
8
|
+
import { IAgenticaTokenUsage } from "./structures/IAgenticaTokenUsage";
|
|
9
|
+
/**
|
|
10
|
+
* Nestia A.I. chatbot agent.
|
|
11
|
+
*
|
|
12
|
+
* `Agentica` is a facade class for the super A.I. chatbot agent
|
|
13
|
+
 * which performs the {@link conversate user's conversation function}
|
|
14
|
+
* with LLM (Large Language Model) function calling and manages the
|
|
15
|
+
* {@link getPromptHistories prompt histories}.
|
|
16
|
+
*
|
|
17
|
+
* To understand and compose the `Agentica` class exactly, reference
|
|
18
|
+
* below types concentrating on the documentation comments please.
|
|
19
|
+
* Especially, you have to be careful about the {@link IAgenticaProps}
|
|
20
|
+
* type which is used in the {@link constructor} function.
|
|
21
|
+
*
|
|
22
|
+
* - Constructors
|
|
23
|
+
* - {@link IAgenticaProps}
|
|
24
|
+
* - {@link IAgenticaProvider}
|
|
25
|
+
* - {@link IAgenticaController}
|
|
26
|
+
* - {@link IAgenticaConfig}
|
|
27
|
+
* - {@link IAgenticaSystemPrompt}
|
|
28
|
+
* - Accessors
|
|
29
|
+
* - {@link IAgenticaOperation}
|
|
30
|
+
* - {@link IAgenticaPrompt}
|
|
31
|
+
* - {@link IAgenticaEvent}
|
|
32
|
+
* - {@link IAgenticaTokenUsage}
|
|
33
|
+
*
|
|
34
|
+
* @author Samchon
|
|
35
|
+
*/
|
|
36
|
+
export declare class Agentica {
|
|
37
|
+
private readonly props;
|
|
38
|
+
private readonly operations_;
|
|
39
|
+
private readonly stack_;
|
|
40
|
+
private readonly prompt_histories_;
|
|
41
|
+
private readonly listeners_;
|
|
42
|
+
private readonly token_usage_;
|
|
43
|
+
private ready_;
|
|
44
|
+
private readonly executor_;
|
|
45
|
+
/**
|
|
46
|
+
* Initializer constructor.
|
|
47
|
+
*
|
|
48
|
+
* @param props Properties to construct the agent
|
|
49
|
+
*/
|
|
50
|
+
constructor(props: IAgenticaProps);
|
|
51
|
+
/**
|
|
52
|
+
* Conversate with the A.I. chatbot.
|
|
53
|
+
*
|
|
54
|
+
* User talks to the A.I. chatbot with the content.
|
|
55
|
+
*
|
|
56
|
+
* When the user's conversation implies the A.I. chatbot to execute a
|
|
57
|
+
* function calling, the returned chat prompts will contain the
|
|
58
|
+
* function calling information like {@link IAgenticaPrompt.IExecute}.
|
|
59
|
+
*
|
|
60
|
+
* @param content The content to talk
|
|
61
|
+
* @returns List of newly created chat prompts
|
|
62
|
+
*/
|
|
63
|
+
conversate(content: string): Promise<IAgenticaPrompt[]>;
|
|
64
|
+
/**
|
|
65
|
+
* Get configuration.
|
|
66
|
+
*/
|
|
67
|
+
getConfig(): IAgenticaConfig | undefined;
|
|
68
|
+
/**
|
|
69
|
+
* Get LLM Provider.
|
|
70
|
+
*/
|
|
71
|
+
getProvider(): IAgenticaProvider;
|
|
72
|
+
/**
|
|
73
|
+
* Get controllers.
|
|
74
|
+
*
|
|
75
|
+
* Get list of controllers, which are the collection of functions that
|
|
76
|
+
* the "Super A.I. Chatbot" can execute.
|
|
77
|
+
*/
|
|
78
|
+
getControllers(): ReadonlyArray<IAgenticaController>;
|
|
79
|
+
/**
|
|
80
|
+
* Get operations.
|
|
81
|
+
*
|
|
82
|
+
* Get list of operations, which has capsuled the pair of controller
|
|
83
|
+
* and function from the {@link getControllers controllers}.
|
|
84
|
+
*
|
|
85
|
+
* @returns
|
|
86
|
+
*/
|
|
87
|
+
getOperations(): ReadonlyArray<IAgenticaOperation>;
|
|
88
|
+
/**
|
|
89
|
+
* Get the chatbot's prompt histories.
|
|
90
|
+
*
|
|
91
|
+
 * Get the list of chat prompts that the chatbot has conversated.
|
|
92
|
+
*
|
|
93
|
+
* @returns List of chat prompts
|
|
94
|
+
*/
|
|
95
|
+
getPromptHistories(): IAgenticaPrompt[];
|
|
96
|
+
/**
|
|
97
|
+
* Get token usage of the A.I. chatbot.
|
|
98
|
+
*
|
|
99
|
+
 * Entire token usage of the A.I. chatbot while conversating
|
|
100
|
+
* with the user by {@link conversate} method callings.
|
|
101
|
+
*
|
|
102
|
+
* @returns Cost of the A.I. chatbot
|
|
103
|
+
*/
|
|
104
|
+
getTokenUsage(): IAgenticaTokenUsage;
|
|
105
|
+
/**
|
|
106
|
+
* Add an event listener.
|
|
107
|
+
*
|
|
108
|
+
* Add an event listener to be called whenever the event is emitted.
|
|
109
|
+
*
|
|
110
|
+
* @param type Type of event
|
|
111
|
+
* @param listener Callback function to be called whenever the event is emitted
|
|
112
|
+
*/
|
|
113
|
+
on<Type extends IAgenticaEvent.Type>(type: Type, listener: (event: IAgenticaEvent.Mapper[Type]) => void | Promise<void>): void;
|
|
114
|
+
/**
|
|
115
|
+
* Erase an event listener.
|
|
116
|
+
*
|
|
117
|
+
* Erase an event listener to stop calling the callback function.
|
|
118
|
+
*
|
|
119
|
+
* @param type Type of event
|
|
120
|
+
* @param listener Callback function to erase
|
|
121
|
+
*/
|
|
122
|
+
off<Type extends IAgenticaEvent.Type>(type: Type, listener: (event: IAgenticaEvent.Mapper[Type]) => void | Promise<void>): void;
|
|
123
|
+
private dispatch;
|
|
124
|
+
}
|