@agentica/core 0.20.0 → 0.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +115 -413
  2. package/lib/Agentica.d.ts +2 -1
  3. package/lib/Agentica.js +15 -13
  4. package/lib/Agentica.js.map +1 -1
  5. package/lib/MicroAgentica.d.ts +2 -1
  6. package/lib/MicroAgentica.js +16 -11
  7. package/lib/MicroAgentica.js.map +1 -1
  8. package/lib/context/AgenticaContext.d.ts +4 -4
  9. package/lib/context/MicroAgenticaContext.d.ts +2 -2
  10. package/lib/context/internal/AgenticaOperationComposer.js +1 -8
  11. package/lib/context/internal/AgenticaOperationComposer.js.map +1 -1
  12. package/lib/events/AgenticaEvent.d.ts +3 -1
  13. package/lib/events/AgenticaTextEvent.d.ts +2 -2
  14. package/lib/events/AgenticaUserInputEvent.d.ts +10 -0
  15. package/lib/events/AgenticaUserInputEvent.js +3 -0
  16. package/lib/events/AgenticaUserInputEvent.js.map +1 -0
  17. package/lib/events/MicroAgenticaEvent.d.ts +3 -1
  18. package/lib/factory/events.d.ts +7 -3
  19. package/lib/factory/events.js +29 -4
  20. package/lib/factory/events.js.map +1 -1
  21. package/lib/factory/histories.d.ts +6 -3
  22. package/lib/factory/histories.js +59 -32
  23. package/lib/factory/histories.js.map +1 -1
  24. package/lib/functional/assertMcpController.js +0 -2
  25. package/lib/functional/assertMcpController.js.map +1 -1
  26. package/lib/histories/AgenticaHistory.d.ts +3 -1
  27. package/lib/histories/AgenticaTextHistory.d.ts +2 -2
  28. package/lib/histories/AgenticaUserInputHistory.d.ts +80 -0
  29. package/lib/histories/AgenticaUserInputHistory.js +3 -0
  30. package/lib/histories/AgenticaUserInputHistory.js.map +1 -0
  31. package/lib/histories/MicroAgenticaHistory.d.ts +2 -1
  32. package/lib/index.mjs +138 -105
  33. package/lib/index.mjs.map +1 -1
  34. package/lib/json/IAgenticaEventJson.d.ts +8 -1
  35. package/lib/json/IAgenticaHistoryJson.d.ts +15 -3
  36. package/lib/orchestrate/call.js +3 -17
  37. package/lib/orchestrate/call.js.map +1 -1
  38. package/lib/orchestrate/cancel.js +1 -1
  39. package/lib/orchestrate/cancel.js.map +1 -1
  40. package/lib/orchestrate/execute.js +13 -7
  41. package/lib/orchestrate/execute.js.map +1 -1
  42. package/lib/orchestrate/initialize.js +2 -6
  43. package/lib/orchestrate/initialize.js.map +1 -1
  44. package/lib/orchestrate/select.js +2 -6
  45. package/lib/orchestrate/select.js.map +1 -1
  46. package/lib/structures/IAgenticaExecutor.d.ts +10 -6
  47. package/lib/structures/IMicroAgenticaExecutor.d.ts +4 -1
  48. package/lib/transformers/AgenticaEventTransformer.js +0 -1
  49. package/lib/transformers/AgenticaEventTransformer.js.map +1 -1
  50. package/package.json +2 -2
  51. package/src/Agentica.ts +21 -18
  52. package/src/MicroAgentica.ts +20 -16
  53. package/src/context/AgenticaContext.ts +4 -4
  54. package/src/context/MicroAgenticaContext.ts +2 -2
  55. package/src/context/internal/AgenticaOperationComposer.ts +5 -6
  56. package/src/events/AgenticaEvent.ts +4 -1
  57. package/src/events/AgenticaTextEvent.ts +2 -4
  58. package/src/events/AgenticaUserInputEvent.ts +12 -0
  59. package/src/events/MicroAgenticaEvent.ts +4 -1
  60. package/src/factory/events.ts +26 -8
  61. package/src/factory/histories.ts +76 -43
  62. package/src/functional/assertMcpController.ts +1 -2
  63. package/src/histories/AgenticaHistory.ts +4 -1
  64. package/src/histories/AgenticaTextHistory.ts +2 -4
  65. package/src/histories/AgenticaUserInputHistory.ts +88 -0
  66. package/src/histories/MicroAgenticaHistory.ts +3 -1
  67. package/src/json/IAgenticaEventJson.ts +9 -1
  68. package/src/json/IAgenticaHistoryJson.ts +16 -4
  69. package/src/orchestrate/call.ts +15 -17
  70. package/src/orchestrate/cancel.ts +1 -1
  71. package/src/orchestrate/execute.ts +13 -7
  72. package/src/orchestrate/initialize.ts +2 -6
  73. package/src/orchestrate/select.ts +2 -7
  74. package/src/structures/IAgenticaExecutor.ts +16 -8
  75. package/src/structures/IMicroAgenticaExecutor.ts +10 -4
  76. package/src/transformers/AgenticaEventTransformer.ts +0 -1
package/README.md CHANGED
@@ -1,463 +1,165 @@
- # `@agentica/core`
+ # Agentica, AI Function Calling Framework

- ![agentica-conceptual-diagram](https://github.com/user-attachments/assets/d7ebbd1f-04d3-4b0d-9e2a-234e29dd6c57)
+ <!-- https://github.com/user-attachments/assets/5326cc59-5129-470d-abcb-c3f458b5c488 -->

- [![GitHub license](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/wrtnlabs/agentica/blob/master/LICENSE)
- [![npm version](https://img.shields.io/npm/v/@agentica/core.svg)](https://www.npmjs.com/package/@agentica/core)
- [![Downloads](https://img.shields.io/npm/dm/@agentica/core.svg)](https://www.npmjs.com/package/@agentica/core)
- [![Build Status](https://github.com/wrtnlabs/agentica/workflows/build/badge.svg)](https://github.com/wrtnlabs/agentica/actions?query=workflow%3Abuild)
-
- The simplest **Agentic AI** library, specialized in **LLM Function Calling**.
-
- Don't compose complicate agent graph or workflow, but just deliver **Swagger/OpenAPI** documents or **TypeScript class** types linearly to the `@agentica/core`. Then `@agentica/core` will do everything with the function calling.
-
- Look at the below demonstration, and feel how `@agentica/core` is easy and powerful.
-
- ```typescript
- import { Agentica } from "@agentica/core";
- import typia from "typia";
-
- const agent = new Agentica({
-   controllers: [
-     await fetch(
-       "https://shopping-be.wrtn.ai/editor/swagger.json",
-     ).then(r => r.json()),
-     typia.llm.application<ShoppingCounselor>(),
-     typia.llm.application<ShoppingPolicy>(),
-     typia.llm.application<ShoppingSearchRag>(),
-   ],
- });
- await agent.conversate("I wanna buy MacBook Pro");
- ```
-
- > https://github.com/user-attachments/assets/01604b53-aca4-41cb-91aa-3faf63549ea6
- >
- > Demonstration video of Shopping AI Chatbot
-
- ## How to Use
-
- ### Setup
-
- ```bash
- npm install @agentica/core @samchon/openapi typia
- npx typia setup
- ```
-
- Install not only `@agentica/core`, but also [`@samchon/openapi`](https://github.com/samchon/openapi) and [`typia`](https://github.com/samchon/typia).
-
- `@samchon/openapi` is an OpenAPI specification library which can convert Swagger/OpenAPI document to LLM function calling schema. And `typia` is a transformer (compiler) library which can compose LLM function calling schema from a TypeScript class type.
-
- By the way, as `typia` is a transformer library analyzing TypeScript source code in the compilation level, it needs additional setup command `npx typia setup`. Also, if you're not using non-standard TypeScript compiler (not `tsc`) or developing the agent in the frontend environment, you have to setup [`@ryoppippi/unplugin-typia`](https://typia.io/docs/setup/#unplugin-typia) too.
-
- ### Chat with Backend Server
-
- ```typescript
- import { Agentica, validateHttpLlmApplication } from "@agentica/core";
- import { IHttpLlmApplication } from "@samchon/openapi";
- import OpenAI from "openai";
- import { IValidation } from "typia";
-
- async function main(): Promise<void> {
-   // LOAD SWAGGER DOCUMENT, AND CONVERT TO LLM APPLICATION SCHEMA
-   const application: IValidation<IHttpLlmApplication<"chatgpt">>
-     = validateHttpLlmApplication({
-       model: "chatgpt",
-       document: await fetch("https://shopping-be.wrtn.ai/editor/swagger.json").then(
-         r => r.json()
-       ),
-     });
-   if (application.success === false) {
-     console.error(application.errors);
-     throw new Error("Type error on the target swagger document");
-   }
-
-   // CREATE AN AGENT WITH THE APPLICATION
-   const agent: Agentica<"chatgpt"> = new Agentica({
-     model: "chatgpt",
-     vendor: {
-       api: new OpenAI({
-         apiKey: "YOUR_OPENAI_API_KEY",
-       }),
-       model: "gpt-4o-mini",
-     },
-     controllers: [
-       {
-         protocol: "http",
-         name: "shopping",
-         application: application.data,
-         connection: {
-           host: "https://shopping-be.wrtn.ai",
-         },
-       },
-     ],
-     config: {
-       locale: "en-US",
-     },
-   });
-
-   // ADD EVENT LISTENERS
-   agent.on("select", async (select) => {
-     console.log("selected function", select.operation.function.name);
-   });
-   agent.on("execute", async (execute) => {
-     consoe.log("execute function", {
-       function: execute.operation.function.name,
-       arguments: execute.arguments,
-       value: execute.value,
-     });
-   });
-
-   // CONVERSATE TO AI CHATBOT
-   await agent.conversate("What you can do?");
- }
- main().catch(console.error);
- ```
-
- Just load your swagger document, and put it into the `@agentica/core`.
+ ![Logo](https://wrtnlabs.io/agentica/og.jpg?refresh)

- Then you can start conversation with your backend server, and the API functions of the backend server would be automatically called. AI chatbot will analyze your conversation texts, and executes proper API functions by the LLM (Large Language Model) function calling feature.
+ [![GitHub License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/wrtnlabs/agentica/blob/master/LICENSE)
+ [![NPM Version](https://img.shields.io/npm/v/@agentica/core.svg)](https://www.npmjs.com/package/@agentica/core)
+ [![NPM Downloads](https://img.shields.io/npm/dm/@agentica/core.svg)](https://www.npmjs.com/package/@agentica/core)
+ [![Build Status](https://github.com/wrtnlabs/agentica/workflows/build/badge.svg)](https://github.com/wrtnlabs/agentica/actions?query=workflow%3Abuild)
+ [![Guide Documents](https://img.shields.io/badge/Guide-Documents-forestgreen)](https://wrtnlabs.io/agentica/)
+ [![Discord Badge](https://dcbadge.limes.pink/api/server/https://discord.gg/aMhRmzkqCx?style=flat)](https://discord.gg/aMhRmzkqCx)

- From now on, every backend developer is also an AI developer.
+ Agentic AI framework specialized in AI Function Calling.

- ### Chat with TypeScript Class
+ Don't be afraid of AI agent development. Just list functions from three protocols below. This is everything you should do for AI agent development.

- ```typescript
- import { Agentica } from "@agentica/core";
- import OpenAI from "openai";
- import typia, { tags } from "typia";
-
- class BbsArticleService {
-   /**
-    * Create a new article.
-    *
-    * Writes a new article and archives it into the DB.
-    *
-    * @param props Properties of create function
-    * @returns Newly created article
-    */
-   public async create(props: {
-     /**
-      * Information of the article to create
-      */
-     input: IBbsArticle.ICreate;
-   }): Promise<IBbsArticle>;
-
-   /**
-    * Update an article.
-    *
-    * Updates an article with new content.
-    *
-    * @param props Properties of update function
-    * @param input New content to update
-    */
-   public async update(props: {
-     /**
-      * Target article's {@link IBbsArticle.id}.
-      */
-     id: string & tags.Format<"uuid">;
-
-     /**
-      * New content to update.
-      */
-     input: IBbsArticle.IUpdate;
-   }): Promise<void>;
- }
-
- async function main(): Promise<void> {
-   const api: OpenAI = new OpenAI({
-     apiKey: "YOUR_OPENAI_API_KEY",
-   });
-   const agent: Agentica<"chatgpt"> = new Agentica({
-     model: "chatgpt",
-     vendor: {
-       api: new OpenAI({
-         apiKey: "YOUR_OPENAI_API_KEY",
-       }),
-       model: "gpt-4o-mini",
-     },
-     controllers: [
-       {
-         protocol: "class",
-         name: "vectorStore",
-         application: typia.llm.application<
-           BbsArticleService,
-           "chatgpt"
-         >(),
-         execute: new BbsArticleService(),
-       },
-     ],
-   });
-   await agent.conversate("I wanna write an article.");
- }
- main().catch(console.error);
- ```
+ - TypeScript Class
+ - Swagger/OpenAPI Document
+ - MCP (Model Context Protocol) Server

- You also can chat with a TypeScript class.
+ Wanna make an e-commerce agent? Bring in e-commerce functions. Need a newspaper agent? Get API functions from the newspaper company. Just prepare any functions that you need, then it becomes an AI agent.

- Just deliver the TypeScript type to the `@agentica/core`, and start conversation. Then `@agentica/core` will call the proper class functions by analyzing your conversation texts with LLM function calling feature.
+ Are you a TypeScript developer? Then you're already an AI developer. Familiar with backend development? You're already well-versed in AI development. Anyone who can make functions can make AI agents.

- From now on, every TypeScript classes you've developed can be the AI chatbot.
-
- ### Multi Agent Orchestration
+ <!-- eslint-skip -->

  ```typescript
- import { Agentica } from "@agentica/core";
+ import { Agentica, assertHttpLlmApplication } from "@agentica/core";
  import OpenAI from "openai";
  import typia from "typia";

- class OpenAIVectorStoreAgent {
-   /**
-    * Retrieve Vector DB with RAG.
-    *
-    * @param props Properties of Vector DB retrievelance
-    */
-   public query(props: {
-     /**
-      * Keywords to look up.
-      *
-      * Put all the keywords you want to look up. However, keywords
-      * should only be included in the core, and all ambiguous things
-      * should be excluded to achieve accurate results.
-      */
-     keywords: string;
-   }): Promise<IVectorStoreQueryResult>;
- }
-
- async function main(): Promise<void> {
-   const api: OpenAI = new OpenAI({
-     apiKey: "YOUR_OPENAI_API_KEY",
-   });
-   const agent: Agentica<"chatgpt"> = new Agentica({
-     model: "chatgpt",
-     context: {
-       api: new OpenAI({
-         apiKey: "YOUR_OPENAI_API_KEY",
-       }),
-       model: "gpt-4o-mini",
-     },
-     controllers: [
-       {
-         protocol: "class",
-         name: "vectorStore",
-         application: typia.llm.application<
-           OpenAIVectorStoreAgent,
-           "chatgpt"
-         >(),
-         execute: new OpenAIVectorStoreAgent({
-           api,
-           id: "YOUR_OPENAI_VECTOR_STORE_ID",
-         }),
-       },
-     ],
-   });
-   await agent.conversate("I wanna research economic articles");
- }
- main().catch(console.error);
- ```
-
- In the `@agentica/core`, you can implement multi-agent orchestration super easily.
-
- Just develop a TypeScript class which contains agent feature like Vector Store, and just deliver the TypeScript class type to the `@agentica/core` like above. The `@agentica/core` will centralize and realize the multi-agent orchestration by LLM function calling strategy to the TypeScript class.
-
- ### If you want drastically improves function selection speed
-
- Use the [@agentica/pg-vector-selector](../pg-vector-selector/README.md)
-
- Just initialize and set the config
- when use this adapter, you should run the [connector-hive](https://github.com/wrtnlabs/connector-hive)
-
- ```typescript
- import { Agentica } from "@agentica/core";
- import { AgenticaPgVectorSelector } from "@agentica/pg-vector-selector";
- import typia from "typia";
-
- // Initialize with connector-hive server
- const selectorExecute = AgenticaPgVectorSelector.boot<"chatgpt">(
-   "https://your-connector-hive-server.com"
- );
+ import { MobileFileSystem } from "./services/MobileFileSystem";

  const agent = new Agentica({
-   model: "chatgpt",
    vendor: {
+     api: new OpenAI({ apiKey: "********" }),
      model: "gpt-4o-mini",
-     api: new OpenAI({
-       apiKey: process.env.CHATGPT_API_KEY,
-     }),
    },
    controllers: [
-     await fetch(
-       "https://shopping-be.wrtn.ai/editor/swagger.json",
-     ).then(r => r.json()),
-     typia.llm.application<ShoppingCounselor>(),
-     typia.llm.application<ShoppingPolicy>(),
-     typia.llm.application<ShoppingSearchRag>(),
+     // functions from TypeScript class
+     {
+       protocol: "http",
+       application: typia.llm.application<MobileFileSystem, "chatgpt">(),
+       execute: new MobileFileSystem(),
+     },
+     // functions from Swagger/OpenAPI
+     {
+       protocol: "http",
+       application: assertHttpLlmApplication({
+         model: "chatgpt",
+         document: await fetch(
+           "https://shopping-be.wrtn.ai/editor/swagger.json",
+         ).then(r => r.json()),
+       }),
+       connection: {
+         host: "https://shopping-be.wrtn.ai",
+         headers: { Authorization: "Bearer ********" },
+       },
+     },
    ],
-   config: {
-     executor: {
-       select: selectorExecute,
-     }
-   }
  });
  await agent.conversate("I wanna buy MacBook Pro");
  ```

- ## Principles
+ ## 📦 Setup

- ### Agent Strategy
-
- ```mermaid
- sequenceDiagram
-   actor User
-   actor Agent
-   participant Selector
-   participant Caller
-   participant Describer
-   activate User
-   User-->>Agent: Conversate:<br/>user says
-   activate Agent
-   Agent->>Selector: Deliver conversation text
-   activate Selector
-   deactivate User
-   Note over Selector: Select or remove candidate functions
-   alt No candidate
-     Selector->>Agent: Talk like plain ChatGPT
-     deactivate Selector
-     Agent->>User: Conversate:<br/>agent says
-     activate User
-     deactivate User
-   end
-   deactivate Agent
-   loop Candidate functions exist
-     activate Agent
-     Agent->>Caller: Deliver conversation text
-     activate Caller
-     alt Contexts are enough
-       Note over Caller: Call fulfilled functions
-       Caller->>Describer: Function call histories
-       deactivate Caller
-       activate Describer
-       Describer->>Agent: Describe function calls
-       deactivate Describer
-       Agent->>User: Conversate:<br/>agent describes
-       activate User
-       deactivate User
-     else Contexts are not enough
-       break
-         Caller->>Agent: Request more information
-       end
-       Agent->>User: Conversate:<br/>agent requests
-       activate User
-       deactivate User
-     end
-     deactivate Agent
-   end
+ ```bash
+ $ npx agentica start <directory>
+
+ ----------------------------------------
+  Agentica Setup Wizard
+ ----------------------------------------
+ ? Package Manager (use arrow keys)
+ > npm
+   pnpm
+   yarn (berry is not supported)
+ ? Project Type
+   NodeJS Agent Server
+ > NestJS Agent Server
+   React Client Application
+   Standalone Application
+ ? Embedded Controllers (multi-selectable)
+   (none)
+   Google Calendar
+   Google News
+ > Github
+   Reddit
+   Slack
+   ...
  ```

- When user says, `@agentica/core` delivers the conversation text to the `selector` agent, and let the `selector` agent to find (or cancel) candidate functions from the context. If the `selector` agent could not find any candidate function to call and there is not any candidate function previously selected either, the `selector` agent will work just like a plain ChatGPT.
+ The setup wizard helps you create a new project tailored to your needs.

- And `@agentica/core` enters to a loop statement until the candidate functions to be empty. In the loop statement, `caller` agent tries to LLM function calling by analyzing the user's conversation text. If context is enough to compose arguments of candidate functions, the `caller` agent actually calls the target functions, and let `decriber` agent to explain the function calling results. Otherwise the context is not enough to compose arguments, `caller` agent requests more information to user.
+ For reference, when selecting a project type, any option other than "Standalone Application" will implement the [WebSocket Protocol](https://wrtnlabs.io/agentica/docs/websocket/) for client-server communication.

- Such LLM (Large Language Model) function calling strategy separating `selector`, `caller`, and `describer` is the key logic of `@agentica/core`.
+ For comprehensive setup instructions, visit our [Getting Started](https://wrtnlabs.io/agentica/docs/) guide.

- ### Validation Feedback
-
- ```typescript
- import { FunctionCall } from "pseudo";
- import { ILlmFunction, IValidation } from "typia";
-
- export function correctFunctionCall(p: {
-   call: FunctionCall;
-   functions: Array<ILlmFunction<"chatgpt">>;
-   retry: (reason: string, errors?: IValidation.IError[]) => Promise<unknown>;
- }): Promise<unknown> {
-   // FIND FUNCTION
-   const func: ILlmFunction<"chatgpt"> | undefined
-     = p.functions.find(f => f.name === p.call.name);
-   if (func === undefined) {
-     // never happened in my experience
-     return p.retry(
-       "Unable to find the matched function name. Try it again.",
-     );
-   }
-
-   // VALIDATE
-   const result: IValidation<unknown> = func.validate(p.call.arguments);
-   if (result.success === false) {
-     // 1st trial: 50% (gpt-4o-mini in shopping mall chatbot)
-     // 2nd trial with validation feedback: 99%
-     // 3nd trial with validation feedback again: never have failed
-     return p.retry(
-       "Type errors are detected. Correct it through validation errors",
-       {
-         errors: result.errors,
-       },
-     );
-   }
-   return result.data;
- }
- ```
+ ## 💻 Playground

- Is LLM function calling perfect?
+ Experience Agentica firsthand through our [interactive playground](https://wrtnlabs.io/agentica/playground) before installing.

- The answer is not, and LLM (Large Language Model) vendors like OpenAI take a lot of type level mistakes when composing the arguments of the target function to call. Even though an LLM function calling schema has defined an `Array<string>` type, LLM often fills it just by a `string` typed value.
+ Our demonstrations showcase the power and simplicity of Agentica's function calling capabilities across different integration methods.

- Therefore, when developing an LLM function calling agent, the validation feedback process is essentially required. If LLM takes a type level mistake on arguments composition, the agent must feedback the most detailed validation errors, and let the LLM to retry the function calling referencing the validation errors.
+ - [TypeScript Class](https://wrtnlabs.io/agentica/playground/bbs)
+ - [Swagger/OpenAPI Document](https://wrtnlabs.io/agentica/playground/swagger)
+ - [Enterprise E-commerce Agent](https://wrtnlabs.io/agentica/playground/shopping)

- About the validation feedback, `@agentica/core` is utilizing [`typia.validate<T>()`](https://typia.io/docs/validators/validate) and [`typia.llm.application<Class, Model>()`](https://typia.io/docs/llm/application/#application) functions. They construct validation logic by analyzing TypeScript source codes and types in the compilation level, so that detailed and accurate than any other validators like below.
+ <!--
+ @todo this section would be changed after making tutorial playground
+ -->

- Such validation feedback strategy and combination with `typia` runtime validator, `@agentica/core` has achieved the most ideal LLM function calling. In my experience, when using OpenAI's `gpt-4o-mini` model, it tends to construct invalid function calling arguments at the first trial about 50% of the time. By the way, if correct it through validation feedback with `typia`, success rate soars to 99%. And I've never had a failure when trying validation feedback twice.
+ ## 📚 Documentation Resources

- | Components | `typia` | `TypeBox` | `ajv` | `io-ts` | `zod` | `C.V.` |
- | --------------------------------------------------------------------------------------------------------------------------- | ------- | --------- | ----- | ------- | ----- | ------ | --- |
- | **Easy to use** | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
- | [Object (simple)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectSimple.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
- | [Object (hierarchical)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectHierarchical.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
- | [Object (recursive)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectRecursive.ts) | ✔ | ❌ | ✔ | ✔ | ✔ | ✔ | ✔ |
- | [Object (union, implicit)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectUnionImplicit.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
- | [Object (union, explicit)](https://github.com/samchon/typia/blob/master/test/src/structures/ObjectUnionExplicit.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ❌ |
- | [Object (additional tags)](https://github.com/samchon/typia/#comment-tags) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
- | [Object (template literal types)](https://github.com/samchon/typia/blob/master/test/src/structures/TemplateUnion.ts) | ✔ | ✔ | ✔ | ❌ | ❌ | ❌ |
- | [Object (dynamic properties)](https://github.com/samchon/typia/blob/master/test/src/structures/DynamicTemplate.ts) | ✔ | ✔ | ✔ | ❌ | ❌ | ❌ |
- | [Array (rest tuple)](https://github.com/samchon/typia/blob/master/test/src/structures/TupleRestAtomic.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
- | [Array (hierarchical)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayHierarchical.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
- | [Array (recursive)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRecursive.ts) | ✔ | ✔ | ✔ | ✔ | ✔ | ❌ |
- | [Array (recursive, union)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRecursiveUnionExplicit.ts) | ✔ | ✔ | ❌ | ✔ | ✔ | ❌ |
- | [Array (R+U, implicit)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRecursiveUnionImplicit.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
- | [Array (repeated)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRepeatedNullable.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
- | [Array (repeated, union)](https://github.com/samchon/typia/blob/master/test/src/structures/ArrayRepeatedUnionWithTuple.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
- | [**Ultimate Union Type**](https://github.com/samchon/typia/blob/master/test/src/structures/UltimateUnion.ts) | ✅ | ❌ | ❌ | ❌ | ❌ | ❌ |
+ Find comprehensive resources at our [official website](https://wrtnlabs.io/agentica).

- > `C.V.` means `class-validator`
+ - [Home](https://wrtnlabs.io/agentica)
+ - [Guide Documents](https://wrtnlabs.io/agentica/docs)
+ - [Tutorial](https://wrtnlabs.io/agentica/tutorial)
+ - [API Documents](https://wrtnlabs.io/agentica/api)
+ - [Youtube](https://www.youtube.com/@wrtnlabs)
+ - [Paper](https://wrtnlabs.io/agentica/paper)

- ### OpenAPI Specification
+ ## 🌟 Why Agentica?

  ```mermaid
  flowchart
-   subgraph "OpenAPI Specification"
-     v20("Swagger v2.0") --upgrades--> emended[["OpenAPI v3.1 (emended)"]]
-     v30("OpenAPI v3.0") --upgrades--> emended
-     v31("OpenAPI v3.1") --emends--> emended
+   subgraph "JSON Schema Specification"
+     schemav4("JSON Schema v4 ~ v7") --upgrades--> emended[["OpenAPI v3.1 (emended)"]]
+     schema2910("JSON Schema 2019-03") --upgrades--> emended
+     schema2020("JSON Schema 2020-12") --emends--> emended
    end
-   subgraph "OpenAPI Generator"
-     emended --normalizes--> migration[["Migration Schema"]]
-     migration --"Artificial Intelligence"--> lfc{{"LLM Function Calling"}}
-     lfc --"OpenAI"--> chatgpt("ChatGPT")
-     lfc --"Anthropic"--> claude("Claude")
-     lfc --"Google"--> gemini("Gemini")
-     lfc --"Meta"--> llama("Llama")
+   subgraph "Agentica"
+     emended --"Artificial Intelligence"--> fc{{"AI Function Calling"}}
+     fc --"OpenAI"--> chatgpt("ChatGPT")
+     fc --"Google"--> gemini("Gemini")
+     fc --"Anthropic"--> claude("Claude")
+     fc --"High-Flyer"--> deepseek("DeepSeek")
+     fc --"Meta"--> llama("Llama")
+     chatgpt --"3.1"--> custom(["Custom JSON Schema"])
+     gemini --"3.0"--> custom(["Custom JSON Schema"])
+     claude --"3.1"--> standard(["Standard JSON Schema"])
+     deepseek --"3.1"--> standard
+     llama --"3.1"--> standard
    end
  ```

- `@agentica/core` obtains LLM function calling schemas from both Swagger/OpenAPI documents and TypeScript class types. The TypeScript class type can be converted to LLM function calling schema by [`typia.llm.application<Class, Model>()`](https://typia.io/docs/llm/application#application) function. Then how about OpenAPI document? How Swagger document can be LLM function calling schema.
+ Agentica enhances AI function calling by the following strategies:

- The secret is on the above diagram.
+ - [**Compiler Driven Development**](https://wrtnlabs.io/agentica/docs/concepts/compiler-driven-development): constructs function calling schema automatically by compiler skills without hand-writing.
+ - [**JSON Schema Conversion**](https://wrtnlabs.io/agentica/docs/core/vendor/#schema-specification): automatically handles specification differences between LLM vendors, ensuring seamless integration regardless of your chosen AI model.
+ - [**Validation Feedback**](https://wrtnlabs.io/agentica/docs/concepts/function-calling#validation-feedback): detects and corrects AI mistakes in argument composition, dramatically reducing errors and improving reliability.
+ - [**Selector Agent**](https://wrtnlabs.io/agentica/docs/concepts/function-calling#orchestration-strategy): filtering candidate functions to minimize context usage, optimize performance, and reduce token consumption.

- In the OpenAPI specification, there are three versions with different definitions. And even in the same version, there are too much ambiguous and duplicated expressions. To resolve these problems, [`@samchon/openapi`](https://github.com/samchon/openapi) is transforming every OpenAPI documents to v3.1 emended specification. The `@samchon/openapi`'s emended v3.1 specification has removed every ambiguous and duplicated expressions for clarity.
+ Thanks to these innovations, Agentica makes AI function calling easier, safer, and more accurate than before. Development becomes more intuitive since you only need to prepare functions relevant to your specific use case, and scaling your agent's capabilities is as simple as adding or removing functions.

- With the v3.1 emended OpenAPI document, `@samchon/openapi` converts it to a migration schema that is near to the function structure. And as the last step, the migration schema will be transformed to a specific LLM vendor's function calling schema. LLM function calling schemas are composed like this way.
+ In 2023, when OpenAI announced function calling, many predicted that function calling-driven AI development would become the mainstream. However, in reality, due to the difficulty and instability of function calling, the trend in AI development became agent workflow. Agent workflow, which is inflexible and must be created for specific purposes, has conquered the AI agent ecosystem.
+ By the way, as Agentica has resolved the difficulty and instability problems of function calling, the time has come to embrace function-driven AI development once again.

- > **Why do not directly convert, but intermediate?**
- >
- > If directly convert from each version of OpenAPI specification to specific LLM's function calling schema, I have to make much more converters increased by cartesian product. In current models, number of converters would be 12 = 3 x 4.
- >
- > However, if define intermediate schema, number of converters are shrunk to plus operation. In current models, I just need to develop only (7 = 3 + 4) converters, and this is the reason why I've defined intermediate specification. This way is economic.
+ | Type        | Workflow      | Vanilla Function Calling | Agentica Function Calling |
+ | ----------- | ------------- | ------------------------ | ------------------------- |
+ | Purpose     | Specific      | 🟢 General               | 🟢 General                |
+ | Difficulty  | ❌ Difficult  | ❌ Difficult             | 🟢 Easy                   |
+ | Stability   | 🟢 Stable     | Unstable                 | 🟢 Stable                 |
+ | Flexibility | ❌ Inflexible | 🟢 Flexible              | 🟢 Flexible               |
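Note that the new quick-start above registers both controllers with `protocol: "http"`, while the removed "Chat with TypeScript Class" section used `protocol: "class"` for class-based controllers. For comparison, a minimal sketch of that class-protocol shape, reassembled from the deleted lines; the import path and the `name` value are placeholders:

```typescript
import { Agentica } from "@agentica/core";
import OpenAI from "openai";
import typia from "typia";

// Placeholder path; BbsArticleService is the example class from the removed section.
import { BbsArticleService } from "./services/BbsArticleService";

const agent = new Agentica({
  model: "chatgpt",
  vendor: {
    api: new OpenAI({ apiKey: "********" }),
    model: "gpt-4o-mini",
  },
  controllers: [
    {
      protocol: "class", // class controllers execute methods on the given instance
      name: "bbs", // arbitrary controller name
      application: typia.llm.application<BbsArticleService, "chatgpt">(),
      execute: new BbsArticleService(),
    },
  ],
});
await agent.conversate("I wanna write an article.");
```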
package/lib/Agentica.d.ts CHANGED
@@ -2,6 +2,7 @@ import type { ILlmSchema } from "@samchon/openapi";
  import type { AgenticaOperation } from "./context/AgenticaOperation";
  import type { AgenticaEvent } from "./events/AgenticaEvent";
  import type { AgenticaHistory } from "./histories/AgenticaHistory";
+ import type { AgenticaUserInputHistory } from "./histories/AgenticaUserInputHistory";
  import type { IAgenticaConfig } from "./structures/IAgenticaConfig";
  import type { IAgenticaController } from "./structures/IAgenticaController";
  import type { IAgenticaProps } from "./structures/IAgenticaProps";
@@ -61,7 +62,7 @@ export declare class Agentica<Model extends ILlmSchema.Model> {
     * @param content The content to talk
     * @returns List of newly created chat prompts
     */
-   conversate(content: string): Promise<AgenticaHistory<Model>[]>;
+   conversate(content: string | AgenticaUserInputHistory.Contents | Array<AgenticaUserInputHistory.Contents>): Promise<AgenticaHistory<Model>[]>;
    /**
     * Get configuration.
     */
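The widened `conversate()` signature accepts three input shapes. A minimal usage sketch, assuming only the `{ type: "text", ... }` content variant that this diff itself exercises; other `AgenticaUserInputHistory.Contents` variants are not shown here:

```typescript
import { Agentica } from "@agentica/core";

declare const agent: Agentica<"chatgpt">;

// Plain string, as in 0.20.0: wrapped into a single text content internally.
await agent.conversate("I wanna buy MacBook Pro");

// Single content object: normalized to a one-element array.
await agent.conversate({ type: "text", text: "I wanna buy MacBook Pro" });

// Array of content objects: passed through as-is.
await agent.conversate([
  { type: "text", text: "Here are two candidates." },
  { type: "text", text: "Which one fits a developer better?" },
]);
```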
package/lib/Agentica.js CHANGED
@@ -13,8 +13,8 @@ exports.Agentica = void 0;
  const AgenticaTokenUsage_1 = require("./context/AgenticaTokenUsage");
  const AgenticaOperationComposer_1 = require("./context/internal/AgenticaOperationComposer");
  const AgenticaTokenUsageAggregator_1 = require("./context/internal/AgenticaTokenUsageAggregator");
+ const factory_1 = require("./factory");
  const events_1 = require("./factory/events");
- const histories_1 = require("./factory/histories");
  const execute_1 = require("./orchestrate/execute");
  const AgenticaHistoryTransformer_1 = require("./transformers/AgenticaHistoryTransformer");
  const __map_take_1 = require("./utils/__map_take");
@@ -103,23 +103,25 @@ class Agentica {
     */
    conversate(content) {
        return __awaiter(this, void 0, void 0, function* () {
-           const text = (0, histories_1.createTextHistory)({
-               role: "user",
-               text: content,
+           const prompt = (0, factory_1.createUserInputHistory)({
+               contents: Array.isArray(content)
+                   ? content
+                   : typeof content === "string"
+                       ? [{
+                               type: "text",
+                               text: content,
+                           }]
+                       : [content],
            });
-           this.dispatch((0, events_1.createTextEvent)({
-               role: "user",
-               stream: (0, StreamUtil_1.toAsyncGenerator)(content),
-               done: () => true,
-               get: () => content,
-               join: () => __awaiter(this, void 0, void 0, function* () { return Promise.resolve(content); }),
+           this.dispatch((0, events_1.createUserInputEvent)({
+               contents: prompt.contents,
            })).catch(() => { });
            const newbie = yield this.executor_(this.getContext({
-               prompt: text,
+               prompt,
                usage: this.token_usage_,
            }));
-           this.histories_.push(text, ...newbie);
-           return [text, ...newbie];
+           this.histories_.push(prompt, ...newbie);
+           return [prompt, ...newbie];
        });
    }
    /**
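The compiled body above normalizes all three accepted input shapes into a single contents array before dispatching the user-input event and recording the history. The same rule in plain TypeScript, as a sketch; `Contents` is reduced here to the text variant visible in this diff:

```typescript
// Reduced stand-in for AgenticaUserInputHistory.Contents; only the
// text variant appears in this diff, other variants are omitted.
type Contents = { type: "text"; text: string };

function normalizeContents(
  content: string | Contents | Array<Contents>,
): Array<Contents> {
  if (Array.isArray(content))
    return content; // already a list of contents
  if (typeof content === "string")
    return [{ type: "text", text: content }]; // wrap plain text
  return [content]; // single content object becomes a one-element list
}
```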