@agentica/core 0.12.2-dev.20250314 → 0.12.4

This diff reflects the content of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
Files changed (78)
  1. package/LICENSE +21 -21
  2. package/README.md +461 -461
  3. package/lib/context/AgenticaTokenUsage.d.ts +6 -6
  4. package/package.json +1 -1
  5. package/prompts/cancel.md +4 -4
  6. package/prompts/common.md +2 -2
  7. package/prompts/describe.md +6 -6
  8. package/prompts/execute.md +6 -6
  9. package/prompts/initialize.md +2 -2
  10. package/prompts/select.md +6 -6
  11. package/src/Agentica.ts +359 -359
  12. package/src/chatgpt/ChatGptAgent.ts +76 -76
  13. package/src/chatgpt/ChatGptCallFunctionAgent.ts +466 -466
  14. package/src/chatgpt/ChatGptCancelFunctionAgent.ts +280 -280
  15. package/src/chatgpt/ChatGptCompletionMessageUtil.ts +166 -166
  16. package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +122 -122
  17. package/src/chatgpt/ChatGptHistoryDecoder.ts +88 -88
  18. package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +96 -96
  19. package/src/chatgpt/ChatGptSelectFunctionAgent.ts +311 -311
  20. package/src/chatgpt/ChatGptUsageAggregator.ts +62 -62
  21. package/src/context/AgenticaCancelPrompt.ts +32 -32
  22. package/src/context/AgenticaClassOperation.ts +23 -23
  23. package/src/context/AgenticaContext.ts +130 -130
  24. package/src/context/AgenticaHttpOperation.ts +27 -27
  25. package/src/context/AgenticaOperation.ts +66 -66
  26. package/src/context/AgenticaOperationBase.ts +57 -57
  27. package/src/context/AgenticaOperationCollection.ts +52 -52
  28. package/src/context/AgenticaOperationSelection.ts +27 -27
  29. package/src/context/AgenticaTokenUsage.ts +170 -170
  30. package/src/context/internal/AgenticaTokenUsageAggregator.ts +66 -66
  31. package/src/context/internal/__IChatCancelFunctionsApplication.ts +23 -23
  32. package/src/context/internal/__IChatFunctionReference.ts +21 -21
  33. package/src/context/internal/__IChatInitialApplication.ts +15 -15
  34. package/src/context/internal/__IChatSelectFunctionsApplication.ts +24 -24
  35. package/src/events/AgenticaCallEvent.ts +36 -36
  36. package/src/events/AgenticaCancelEvent.ts +28 -28
  37. package/src/events/AgenticaDescribeEvent.ts +66 -66
  38. package/src/events/AgenticaEvent.ts +36 -36
  39. package/src/events/AgenticaEventBase.ts +7 -7
  40. package/src/events/AgenticaEventSource.ts +6 -6
  41. package/src/events/AgenticaExecuteEvent.ts +50 -50
  42. package/src/events/AgenticaInitializeEvent.ts +14 -14
  43. package/src/events/AgenticaRequestEvent.ts +45 -45
  44. package/src/events/AgenticaResponseEvent.ts +48 -48
  45. package/src/events/AgenticaSelectEvent.ts +37 -37
  46. package/src/events/AgenticaTextEvent.ts +62 -62
  47. package/src/functional/assertHttpLlmApplication.ts +55 -55
  48. package/src/functional/validateHttpLlmApplication.ts +66 -66
  49. package/src/index.ts +44 -44
  50. package/src/internal/AgenticaConstant.ts +4 -4
  51. package/src/internal/AgenticaDefaultPrompt.ts +43 -43
  52. package/src/internal/AgenticaOperationComposer.ts +96 -96
  53. package/src/internal/ByteArrayUtil.ts +5 -5
  54. package/src/internal/MPSCUtil.ts +111 -111
  55. package/src/internal/MathUtil.ts +3 -3
  56. package/src/internal/Singleton.ts +22 -22
  57. package/src/internal/StreamUtil.ts +64 -64
  58. package/src/internal/__map_take.ts +15 -15
  59. package/src/json/IAgenticaEventJson.ts +178 -178
  60. package/src/json/IAgenticaOperationJson.ts +36 -36
  61. package/src/json/IAgenticaOperationSelectionJson.ts +19 -19
  62. package/src/json/IAgenticaPromptJson.ts +130 -130
  63. package/src/json/IAgenticaTokenUsageJson.ts +107 -107
  64. package/src/prompts/AgenticaCancelPrompt.ts +32 -32
  65. package/src/prompts/AgenticaDescribePrompt.ts +41 -41
  66. package/src/prompts/AgenticaExecutePrompt.ts +52 -52
  67. package/src/prompts/AgenticaPrompt.ts +14 -14
  68. package/src/prompts/AgenticaPromptBase.ts +27 -27
  69. package/src/prompts/AgenticaSelectPrompt.ts +32 -32
  70. package/src/prompts/AgenticaTextPrompt.ts +31 -31
  71. package/src/structures/IAgenticaConfig.ts +123 -123
  72. package/src/structures/IAgenticaController.ts +133 -133
  73. package/src/structures/IAgenticaExecutor.ts +157 -157
  74. package/src/structures/IAgenticaProps.ts +69 -69
  75. package/src/structures/IAgenticaSystemPrompt.ts +125 -125
  76. package/src/structures/IAgenticaVendor.ts +39 -39
  77. package/src/transformers/AgenticaEventTransformer.ts +165 -165
  78. package/src/transformers/AgenticaPromptTransformer.ts +134 -134
package/src/structures/IAgenticaProps.ts
@@ -1,69 +1,69 @@
- import { ILlmSchema } from "@samchon/openapi";
-
- import { IAgenticaPromptJson } from "../json/IAgenticaPromptJson";
- import { IAgenticaConfig } from "./IAgenticaConfig";
- import { IAgenticaController } from "./IAgenticaController";
- import { IAgenticaVendor } from "./IAgenticaVendor";
-
- /**
-  * Properties of the Nestia Agent.
-  *
-  * `IAgenticaProps` is an interface that defines the properties
-  * of the {@link Agentica.constructor}. In the `IAgenticaProps`,
-  * there're everything to prepare to create a Super A.I. chatbot
-  * performing the LLM (Large Language Model) function calling.
-  *
-  * At first, you have to specify the LLM service {@link vendor} like
-  * OpenAI with its API key and client API. And then, you have to define
-  * the {@link controllers} serving the functions to call. The controllers
-  * are separated by two protocols; HTTP API and TypeScript class. At last,
-  * you can {@link config configure} the agent by setting the locale, timezone,
-  * and some of system prompts.
-  *
-  * Additionally, if you want to start from the previous A.I. chatbot
-  * session, you can accomplish it by assigning the previous prompt
-  * histories to the {@link histories} property.
-  *
-  * @author Samchon
-  */
- export interface IAgenticaProps<Model extends ILlmSchema.Model> {
-   /**
-    * LLM schema model.
-    */
-   model: Model;
-
-   /**
-    * LLM service vendor.
-    */
-   vendor: IAgenticaVendor;
-
-   /**
-    * Controllers serving functions to call.
-    */
-   controllers: IAgenticaController<Model>[];
-
-   /**
-    * Configuration of agent.
-    *
-    * Configuration of A.I. chatbot agent including the user's locale,
-    * timezone, and some of system prompts. Also, you can affect to the
-    * LLM function selecting/calling logic by configuring additional
-    * properties.
-    *
-    * If you don't configure this property, these values would be default.
-    *
-    * - `locale`: your system's locale and timezone
-    * - `timezone`: your system's timezone
-    * - `systemPrompt`: default prompts written in markdown
-    *   - https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts
-    */
-   config?: IAgenticaConfig<Model>;
-
-   /**
-    * Prompt histories.
-    *
-    * If you're starting the conversation from an existing session,
-    * assign the previouis prompt histories to this property.
-    */
-   histories?: IAgenticaPromptJson[];
- }
+ import { ILlmSchema } from "@samchon/openapi";
+
+ import { IAgenticaPromptJson } from "../json/IAgenticaPromptJson";
+ import { IAgenticaConfig } from "./IAgenticaConfig";
+ import { IAgenticaController } from "./IAgenticaController";
+ import { IAgenticaVendor } from "./IAgenticaVendor";
+
+ /**
+  * Properties of the Nestia Agent.
+  *
+  * `IAgenticaProps` is an interface that defines the properties
+  * of the {@link Agentica.constructor}. In the `IAgenticaProps`,
+  * there're everything to prepare to create a Super A.I. chatbot
+  * performing the LLM (Large Language Model) function calling.
+  *
+  * At first, you have to specify the LLM service {@link vendor} like
+  * OpenAI with its API key and client API. And then, you have to define
+  * the {@link controllers} serving the functions to call. The controllers
+  * are separated by two protocols; HTTP API and TypeScript class. At last,
+  * you can {@link config configure} the agent by setting the locale, timezone,
+  * and some of system prompts.
+  *
+  * Additionally, if you want to start from the previous A.I. chatbot
+  * session, you can accomplish it by assigning the previous prompt
+  * histories to the {@link histories} property.
+  *
+  * @author Samchon
+  */
+ export interface IAgenticaProps<Model extends ILlmSchema.Model> {
+   /**
+    * LLM schema model.
+    */
+   model: Model;
+
+   /**
+    * LLM service vendor.
+    */
+   vendor: IAgenticaVendor;
+
+   /**
+    * Controllers serving functions to call.
+    */
+   controllers: IAgenticaController<Model>[];
+
+   /**
+    * Configuration of agent.
+    *
+    * Configuration of A.I. chatbot agent including the user's locale,
+    * timezone, and some of system prompts. Also, you can affect to the
+    * LLM function selecting/calling logic by configuring additional
+    * properties.
+    *
+    * If you don't configure this property, these values would be default.
+    *
+    * - `locale`: your system's locale and timezone
+    * - `timezone`: your system's timezone
+    * - `systemPrompt`: default prompts written in markdown
+    *   - https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts
+    */
+   config?: IAgenticaConfig<Model>;
+
+   /**
+    * Prompt histories.
+    *
+    * If you're starting the conversation from an existing session,
+    * assign the previouis prompt histories to this property.
+    */
+   histories?: IAgenticaPromptJson[];
+ }
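For orientation, here is a minimal sketch of how these IAgenticaProps might be wired into the Agentica constructor. It is not taken from the package: the "chatgpt" schema model, the gpt-4o-mini vendor model, the locale/timezone values, and the empty controllers array are placeholder assumptions.

import OpenAI from "openai";
import { Agentica, IAgenticaProps } from "@agentica/core";

// Hypothetical wiring; every concrete value below is a placeholder.
const props: IAgenticaProps<"chatgpt"> = {
  model: "chatgpt", // LLM schema model from @samchon/openapi
  vendor: {
    api: new OpenAI({ apiKey: process.env.OPENAI_API_KEY }),
    model: "gpt-4o-mini", // assumed vendor model name
  },
  controllers: [], // HTTP or TypeScript class controllers would be listed here
  config: {
    locale: "en-US", // falls back to the system locale when omitted
    timezone: "America/New_York", // falls back to the system timezone when omitted
  },
  histories: [], // previous IAgenticaPromptJson records to resume a session
};

const agent: Agentica<"chatgpt"> = new Agentica(props);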
package/src/structures/IAgenticaSystemPrompt.ts
@@ -1,125 +1,125 @@
- import { ILlmSchema } from "@samchon/openapi";
-
- import { AgenticaExecutePrompt } from "../prompts/AgenticaExecutePrompt";
- import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
- import { IAgenticaConfig } from "./IAgenticaConfig";
-
- /**
-  * System prompt collection of the A.I. chatbot.
-  *
-  * `IAgenticaSystemPrompt` is a type represents a collection of system
-  * prompts that would be used by the A.I. chatbot of {@link Agentica}.
-  *
-  * You can customize the system prompt by configuring the
-  * {@link IAgenticaConfig.systemPrompt} property when creating a new
-  * {@link Agentica} instance.
-  *
-  * If you don't configure any system prompts, the default system prompts
-  * would be used which are written in the below directory as markdown
-  * documents.
-  *
-  * - https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts
-  *
-  * @author Samchon
-  */
- export interface IAgenticaSystemPrompt<Model extends ILlmSchema.Model> {
-   /**
-    * Common system prompt that would be used in every situation.
-    *
-    * @param config Configuration of the agent
-    * @returns The common system prompt
-    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/common.md
-    */
-   common?: (config?: IAgenticaConfig<Model> | undefined) => string;
-
-   /**
-    * Initialize system prompt.
-    *
-    * When the A.I. chatbot has not informed any functions to the agent
-    * yet because the user has not implied any function calling request yet,
-    * {@link Agentica} says that it is a circumstance that nothing has
-    * been initialized yet.
-    *
-    * In that case, the `initialize` system prompt would be used. You can
-    * customize the `initialize` system prompt by assigning this function
-    * with the given {@link AgenticaPrompt histories} parameter.
-    *
-    * @param histories Histories of the previous prompts
-    * @returns initialize system prompt
-    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/initialize.md
-    */
-   initialize?: (histories: AgenticaPrompt<Model>[]) => string;
-
-   /**
-    * Select system prompt.
-    *
-    * The {@link Agentica} has a process selecting some candidate
-    * functions to call by asking to the A.I. agent with the previous
-    * prompt histories.
-    *
-    * In that case, this `select` system prompt would be used. You can
-    * customize it by assigning this function with the given
-    * {@link AgenticaPrompt histories} parameter.
-    *
-    * Note that, the `"select"` means only the function selection. It does
-    * not contain the filling argument or executing the function. It
-    * literally contains only the selection process.
-    *
-    * @param histories Histories of the previous prompts
-    * @returns select system promopt
-    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/select.md
-    */
-   select?: (histories: AgenticaPrompt<Model>[]) => string;
-
-   /**
-    * Cancel system prompt.
-    *
-    * The {@link Agentica} has a process canceling some candidate
-    * functions to call by asking to the A.I. agent with the previous
-    * prompt histories.
-    *
-    * In that case, this `cancel` system prompt would be used. You can
-    * customize it by assigning this function with the given
-    * {@link AgenticaPrompt histories} parameter.
-    *
-    * @param histories Histories of the previous prompts
-    * @returns cancel system prompt
-    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/cancel.md
-    */
-   cancel?: (histories: AgenticaPrompt<Model>[]) => string;
-
-   /**
-    * Execute system prompt.
-    *
-    * The {@link Agentica} has a process filling the arguments of some
-    * selected candidate functions by the LLM (Large Language Model)
-    * function calling feature with the previous prompt histories, and
-    * executing the arguments filled function with validation feedback.
-    *
-    * In that case, this `execute` system prompt would be used. You can
-    * customize it by assigning this function with the given
-    * {@link AgenticaPrompt histories} parameter.
-    *
-    * @param histories Histories of the previous prompts
-    * @returns execute system prompt
-    * https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/execute.md
-    */
-   execute?: (histories: AgenticaPrompt<Model>[]) => string;
-
-   /**
-    * Describe system prompt.
-    *
-    * The {@link Agentica} has a process describing the return values of
-    * the executed functions by requesting to the A.I. agent with the
-    * previous prompt histories.
-    *
-    * In that case, this `describe` system prompt would be used. You can
-    * customize it by assigning this function with the given
-    * {@link AgenticaPrompt histories} parameter.
-    *
-    * @param histories Histories of the previous prompts
-    * @returns describe system prompt
-    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/describe.md
-    */
-   describe?: (histories: AgenticaExecutePrompt<Model>[]) => string;
- }
+ import { ILlmSchema } from "@samchon/openapi";
+
+ import { AgenticaExecutePrompt } from "../prompts/AgenticaExecutePrompt";
+ import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
+ import { IAgenticaConfig } from "./IAgenticaConfig";
+
+ /**
+  * System prompt collection of the A.I. chatbot.
+  *
+  * `IAgenticaSystemPrompt` is a type represents a collection of system
+  * prompts that would be used by the A.I. chatbot of {@link Agentica}.
+  *
+  * You can customize the system prompt by configuring the
+  * {@link IAgenticaConfig.systemPrompt} property when creating a new
+  * {@link Agentica} instance.
+  *
+  * If you don't configure any system prompts, the default system prompts
+  * would be used which are written in the below directory as markdown
+  * documents.
+  *
+  * - https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts
+  *
+  * @author Samchon
+  */
+ export interface IAgenticaSystemPrompt<Model extends ILlmSchema.Model> {
+   /**
+    * Common system prompt that would be used in every situation.
+    *
+    * @param config Configuration of the agent
+    * @returns The common system prompt
+    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/common.md
+    */
+   common?: (config?: IAgenticaConfig<Model> | undefined) => string;
+
+   /**
+    * Initialize system prompt.
+    *
+    * When the A.I. chatbot has not informed any functions to the agent
+    * yet because the user has not implied any function calling request yet,
+    * {@link Agentica} says that it is a circumstance that nothing has
+    * been initialized yet.
+    *
+    * In that case, the `initialize` system prompt would be used. You can
+    * customize the `initialize` system prompt by assigning this function
+    * with the given {@link AgenticaPrompt histories} parameter.
+    *
+    * @param histories Histories of the previous prompts
+    * @returns initialize system prompt
+    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/initialize.md
+    */
+   initialize?: (histories: AgenticaPrompt<Model>[]) => string;
+
+   /**
+    * Select system prompt.
+    *
+    * The {@link Agentica} has a process selecting some candidate
+    * functions to call by asking to the A.I. agent with the previous
+    * prompt histories.
+    *
+    * In that case, this `select` system prompt would be used. You can
+    * customize it by assigning this function with the given
+    * {@link AgenticaPrompt histories} parameter.
+    *
+    * Note that, the `"select"` means only the function selection. It does
+    * not contain the filling argument or executing the function. It
+    * literally contains only the selection process.
+    *
+    * @param histories Histories of the previous prompts
+    * @returns select system promopt
+    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/select.md
+    */
+   select?: (histories: AgenticaPrompt<Model>[]) => string;
+
+   /**
+    * Cancel system prompt.
+    *
+    * The {@link Agentica} has a process canceling some candidate
+    * functions to call by asking to the A.I. agent with the previous
+    * prompt histories.
+    *
+    * In that case, this `cancel` system prompt would be used. You can
+    * customize it by assigning this function with the given
+    * {@link AgenticaPrompt histories} parameter.
+    *
+    * @param histories Histories of the previous prompts
+    * @returns cancel system prompt
+    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/cancel.md
+    */
+   cancel?: (histories: AgenticaPrompt<Model>[]) => string;
+
+   /**
+    * Execute system prompt.
+    *
+    * The {@link Agentica} has a process filling the arguments of some
+    * selected candidate functions by the LLM (Large Language Model)
+    * function calling feature with the previous prompt histories, and
+    * executing the arguments filled function with validation feedback.
+    *
+    * In that case, this `execute` system prompt would be used. You can
+    * customize it by assigning this function with the given
+    * {@link AgenticaPrompt histories} parameter.
+    *
+    * @param histories Histories of the previous prompts
+    * @returns execute system prompt
+    * https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/execute.md
+    */
+   execute?: (histories: AgenticaPrompt<Model>[]) => string;
+
+   /**
+    * Describe system prompt.
+    *
+    * The {@link Agentica} has a process describing the return values of
+    * the executed functions by requesting to the A.I. agent with the
+    * previous prompt histories.
+    *
+    * In that case, this `describe` system prompt would be used. You can
+    * customize it by assigning this function with the given
+    * {@link AgenticaPrompt histories} parameter.
+    *
+    * @param histories Histories of the previous prompts
+    * @returns describe system prompt
+    * @default https://github.com/wrtnlabs/agentica/tree/main/packages/core/prompts/describe.md
+    */
+   describe?: (histories: AgenticaExecutePrompt<Model>[]) => string;
+ }
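As a rough illustration of customizing these prompts, the sketch below overrides two of the optional functions; the remaining ones keep falling back to the default markdown prompts. The prompt texts, the "chatgpt" model parameter, and the assumption that IAgenticaSystemPrompt is re-exported from the package root are illustrative only.

import { IAgenticaSystemPrompt } from "@agentica/core";

// Hypothetical overrides; properties left undefined use the defaults
// shipped under packages/core/prompts.
const systemPrompt: IAgenticaSystemPrompt<"chatgpt"> = {
  common: () => "You are an assistant for the Acme shopping mall.",
  select: (histories) =>
    [
      "Select only the functions strictly required by the last user message.",
      `This session already holds ${histories.length} prompt histories.`,
    ].join("\n"),
};

// The collection is then handed to the agent through its configuration,
// e.g. new Agentica({ ..., config: { systemPrompt } }).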
package/src/structures/IAgenticaVendor.ts
@@ -1,39 +1,39 @@
- import OpenAI from "openai";
-
- /**
-  * LLM service vendor for Nestia Chat.
-  *
-  * `IAgenticaVendor` is a type represents an LLM
-  * (Large Language Model) vendor of the {@link Agentica}.
-  *
-  * Currently, {@link Agentica} supports OpenAI SDK. However, it does
-  * not mean that you can use only OpenAI's GPT model in the
-  * {@link Agentica}. The OpenAI SDK is just a connection tool to the
-  * LLM vendor's API, and you can use other LLM vendors by configuring
-  * its `baseURL` and API key.
-  *
-  * Therefore, if you want to use another LLM vendor like Claude or
-  * Gemini, please configure the `baseURL` to the {@link api}, and
-  * set {@link IAgenticaController}'s schema model as "cluade" or
-  * "gemini".
-  *
-  * @author Samchon
-  */
- export interface IAgenticaVendor {
-   /**
-    * OpenAI API instance.
-    */
-   api: OpenAI;
-
-   /**
-    * Chat model to be used.
-    *
-    * `({}) & string` means to support third party hosting cloud(eg. openRouter, aws)
-    */
-   model: OpenAI.ChatModel | ({} & string);
-
-   /**
-    * Options for the request.
-    */
-   options?: OpenAI.RequestOptions | undefined;
- }
+ import OpenAI from "openai";
+
+ /**
+  * LLM service vendor for Nestia Chat.
+  *
+  * `IAgenticaVendor` is a type represents an LLM
+  * (Large Language Model) vendor of the {@link Agentica}.
+  *
+  * Currently, {@link Agentica} supports OpenAI SDK. However, it does
+  * not mean that you can use only OpenAI's GPT model in the
+  * {@link Agentica}. The OpenAI SDK is just a connection tool to the
+  * LLM vendor's API, and you can use other LLM vendors by configuring
+  * its `baseURL` and API key.
+  *
+  * Therefore, if you want to use another LLM vendor like Claude or
+  * Gemini, please configure the `baseURL` to the {@link api}, and
+  * set {@link IAgenticaController}'s schema model as "cluade" or
+  * "gemini".
+  *
+  * @author Samchon
+  */
+ export interface IAgenticaVendor {
+   /**
+    * OpenAI API instance.
+    */
+   api: OpenAI;
+
+   /**
+    * Chat model to be used.
+    *
+    * `({}) & string` means to support third party hosting cloud(eg. openRouter, aws)
+    */
+   model: OpenAI.ChatModel | ({} & string);
+
+   /**
+    * Options for the request.
+    */
+   options?: OpenAI.RequestOptions | undefined;
+ }
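To make the vendor comment above concrete, here is a sketch of pointing the OpenAI SDK at a third-party host through `baseURL`, as the documentation suggests. The OpenRouter endpoint, environment variable, and model identifier are assumptions for illustration, not values from the package.

import OpenAI from "openai";
import { IAgenticaVendor } from "@agentica/core";

// Hypothetical vendor routed to a non-OpenAI host via the OpenAI SDK.
const vendor: IAgenticaVendor = {
  api: new OpenAI({
    apiKey: process.env.OPENROUTER_API_KEY, // assumed environment variable
    baseURL: "https://openrouter.ai/api/v1", // assumed third-party endpoint
  }),
  // `({} & string)` keeps autocompletion for OpenAI.ChatModel while still
  // accepting arbitrary identifiers from other hosts.
  model: "anthropic/claude-3.5-sonnet",
};

When routing to Claude or Gemini this way, the controllers' schema model should be set to "claude" or "gemini" accordingly, as the interface comment notes.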