@agentica/core 0.15.6 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (195)
  1. package/lib/Agentica.d.ts +21 -21
  2. package/lib/Agentica.js +27 -27
  3. package/lib/Agentica.js.map +1 -1
  4. package/lib/MicroAgentica.d.ts +115 -0
  5. package/lib/MicroAgentica.js +279 -0
  6. package/lib/MicroAgentica.js.map +1 -0
  7. package/lib/constants/AgenticaDefaultPrompt.d.ts +2 -1
  8. package/lib/constants/AgenticaDefaultPrompt.js +7 -5
  9. package/lib/constants/AgenticaDefaultPrompt.js.map +1 -1
  10. package/lib/context/AgenticaCancelPrompt.d.ts +3 -3
  11. package/lib/context/AgenticaContext.d.ts +12 -12
  12. package/lib/context/AgenticaOperationCollection.d.ts +1 -1
  13. package/lib/context/MicroAgenticaContext.d.ts +75 -0
  14. package/lib/{prompts/AgenticaCancelPrompt.js → context/MicroAgenticaContext.js} +1 -1
  15. package/lib/context/MicroAgenticaContext.js.map +1 -0
  16. package/lib/context/internal/AgenticaOperationComposer.d.ts +2 -1
  17. package/lib/context/internal/AgenticaOperationComposer.js +3 -2
  18. package/lib/context/internal/AgenticaOperationComposer.js.map +1 -1
  19. package/lib/context/internal/isAgenticaContext.d.ts +1 -0
  20. package/lib/context/internal/isAgenticaContext.js +10 -0
  21. package/lib/context/internal/isAgenticaContext.js.map +1 -0
  22. package/lib/events/AgenticaDescribeEvent.d.ts +4 -4
  23. package/lib/events/AgenticaEvent.d.ts +1 -0
  24. package/lib/events/AgenticaExecuteEvent.d.ts +5 -5
  25. package/lib/events/AgenticaSelectEvent.d.ts +2 -2
  26. package/lib/events/AgenticaTextEvent.d.ts +2 -2
  27. package/lib/events/MicroAgenticaEvent.d.ts +32 -0
  28. package/lib/{prompts/AgenticaTextPrompt.js → events/MicroAgenticaEvent.js} +1 -1
  29. package/lib/events/MicroAgenticaEvent.js.map +1 -0
  30. package/lib/factory/events.d.ts +2 -2
  31. package/lib/factory/events.js +5 -5
  32. package/lib/factory/events.js.map +1 -1
  33. package/lib/factory/histories.d.ts +33 -0
  34. package/lib/factory/{prompts.js → histories.js} +13 -13
  35. package/lib/factory/histories.js.map +1 -0
  36. package/lib/factory/index.d.ts +1 -1
  37. package/lib/factory/index.js +1 -1
  38. package/lib/factory/index.js.map +1 -1
  39. package/lib/functional/assertHttpLlmApplication.js +577 -387
  40. package/lib/functional/assertHttpLlmApplication.js.map +1 -1
  41. package/lib/functional/validateHttpLlmApplication.js +500 -348
  42. package/lib/functional/validateHttpLlmApplication.js.map +1 -1
  43. package/lib/histories/AgenticaCancelHistory.d.ts +8 -0
  44. package/lib/{prompts/AgenticaExecutePrompt.js → histories/AgenticaCancelHistory.js} +1 -1
  45. package/lib/histories/AgenticaCancelHistory.js.map +1 -0
  46. package/lib/histories/AgenticaDescribeHistory.d.ts +16 -0
  47. package/lib/histories/AgenticaDescribeHistory.js +3 -0
  48. package/lib/histories/AgenticaDescribeHistory.js.map +1 -0
  49. package/lib/{prompts/AgenticaExecutePrompt.d.ts → histories/AgenticaExecuteHistory.d.ts} +5 -5
  50. package/lib/{prompts/AgenticaDescribePrompt.js → histories/AgenticaExecuteHistory.js} +1 -1
  51. package/lib/histories/AgenticaExecuteHistory.js.map +1 -0
  52. package/lib/histories/AgenticaHistory.d.ts +17 -0
  53. package/lib/{prompts/AgenticaPrompt.js → histories/AgenticaHistory.js} +1 -1
  54. package/lib/histories/AgenticaHistory.js.map +1 -0
  55. package/lib/{prompts/AgenticaPromptBase.d.ts → histories/AgenticaHistoryBase.d.ts} +4 -4
  56. package/lib/{json/IAgenticaPromptJson.js → histories/AgenticaHistoryBase.js} +1 -1
  57. package/lib/histories/AgenticaHistoryBase.js.map +1 -0
  58. package/lib/histories/AgenticaSelectHistory.d.ts +8 -0
  59. package/lib/histories/AgenticaSelectHistory.js +3 -0
  60. package/lib/histories/AgenticaSelectHistory.js.map +1 -0
  61. package/lib/histories/AgenticaTextHistory.d.ts +6 -0
  62. package/lib/{prompts/AgenticaPromptBase.js → histories/AgenticaTextHistory.js} +1 -1
  63. package/lib/histories/AgenticaTextHistory.js.map +1 -0
  64. package/lib/histories/MicroAgenticaHistory.d.ts +13 -0
  65. package/lib/histories/MicroAgenticaHistory.js +3 -0
  66. package/lib/histories/MicroAgenticaHistory.js.map +1 -0
  67. package/lib/index.d.ts +12 -7
  68. package/lib/index.js +12 -7
  69. package/lib/index.js.map +1 -1
  70. package/lib/index.mjs +1393 -902
  71. package/lib/index.mjs.map +1 -1
  72. package/lib/json/IAgenticaEventJson.d.ts +6 -5
  73. package/lib/json/{IAgenticaPromptJson.d.ts → IAgenticaHistoryJson.d.ts} +8 -8
  74. package/lib/json/IAgenticaHistoryJson.js +3 -0
  75. package/lib/json/IAgenticaHistoryJson.js.map +1 -0
  76. package/lib/json/IAgenticaOperationSelectionJson.d.ts +7 -1
  77. package/lib/json/IMicroAgenticaEventJson.d.ts +13 -0
  78. package/lib/json/IMicroAgenticaEventJson.js +3 -0
  79. package/lib/json/IMicroAgenticaEventJson.js.map +1 -0
  80. package/lib/json/IMicroAgenticaHistoryJson.d.ts +19 -0
  81. package/lib/json/IMicroAgenticaHistoryJson.js +3 -0
  82. package/lib/json/IMicroAgenticaHistoryJson.js.map +1 -0
  83. package/lib/orchestrate/call.d.ts +4 -2
  84. package/lib/orchestrate/call.js +50 -41
  85. package/lib/orchestrate/call.js.map +1 -1
  86. package/lib/orchestrate/cancel.js +4 -4
  87. package/lib/orchestrate/cancel.js.map +1 -1
  88. package/lib/orchestrate/describe.d.ts +4 -3
  89. package/lib/orchestrate/describe.js +3 -3
  90. package/lib/orchestrate/describe.js.map +1 -1
  91. package/lib/orchestrate/execute.d.ts +2 -2
  92. package/lib/orchestrate/execute.js +1 -1
  93. package/lib/orchestrate/execute.js.map +1 -1
  94. package/lib/orchestrate/initialize.d.ts +2 -2
  95. package/lib/orchestrate/initialize.js +3 -3
  96. package/lib/orchestrate/initialize.js.map +1 -1
  97. package/lib/orchestrate/internal/selectFunction.d.ts +1 -5
  98. package/lib/orchestrate/internal/selectFunction.js +3 -0
  99. package/lib/orchestrate/internal/selectFunction.js.map +1 -1
  100. package/lib/orchestrate/select.d.ts +2 -2
  101. package/lib/orchestrate/select.js +5 -5
  102. package/lib/orchestrate/select.js.map +1 -1
  103. package/lib/structures/IAgenticaConfig.d.ts +6 -5
  104. package/lib/structures/IAgenticaController.d.ts +1 -1
  105. package/lib/structures/IAgenticaExecutor.d.ts +11 -9
  106. package/lib/structures/IAgenticaProps.d.ts +3 -3
  107. package/lib/structures/IAgenticaSystemPrompt.d.ts +13 -13
  108. package/lib/structures/IAgenticaVendor.d.ts +1 -1
  109. package/lib/structures/IMicroAgenticaConfig.d.ts +74 -0
  110. package/lib/structures/IMicroAgenticaConfig.js +3 -0
  111. package/lib/structures/IMicroAgenticaConfig.js.map +1 -0
  112. package/lib/structures/IMicroAgenticaExecutor.d.ts +57 -0
  113. package/lib/structures/IMicroAgenticaExecutor.js +3 -0
  114. package/lib/structures/IMicroAgenticaExecutor.js.map +1 -0
  115. package/lib/structures/IMicroAgenticaProps.d.ts +63 -0
  116. package/lib/structures/IMicroAgenticaProps.js +3 -0
  117. package/lib/structures/IMicroAgenticaProps.js.map +1 -0
  118. package/lib/structures/IMicroAgenticaSystemPrompt.d.ts +66 -0
  119. package/lib/structures/IMicroAgenticaSystemPrompt.js +3 -0
  120. package/lib/structures/IMicroAgenticaSystemPrompt.js.map +1 -0
  121. package/lib/transformers/AgenticaPromptTransformer.d.ts +19 -19
  122. package/lib/transformers/AgenticaPromptTransformer.js +27 -27
  123. package/lib/transformers/AgenticaPromptTransformer.js.map +1 -1
  124. package/package.json +7 -7
  125. package/src/Agentica.ts +34 -34
  126. package/src/MicroAgentica.ts +337 -0
  127. package/src/constants/AgenticaDefaultPrompt.ts +7 -3
  128. package/src/context/AgenticaCancelPrompt.ts +3 -3
  129. package/src/context/AgenticaContext.ts +12 -12
  130. package/src/context/AgenticaOperationCollection.ts +1 -1
  131. package/src/context/MicroAgenticaContext.ts +95 -0
  132. package/src/context/internal/AgenticaOperationComposer.ts +6 -3
  133. package/src/context/internal/isAgenticaContext.ts +13 -0
  134. package/src/events/AgenticaDescribeEvent.ts +4 -4
  135. package/src/events/AgenticaEvent.ts +6 -0
  136. package/src/events/AgenticaExecuteEvent.ts +5 -5
  137. package/src/events/AgenticaSelectEvent.ts +2 -2
  138. package/src/events/AgenticaTextEvent.ts +2 -2
  139. package/src/events/MicroAgenticaEvent.ts +41 -0
  140. package/src/factory/events.ts +8 -8
  141. package/src/factory/{prompts.ts → histories.ts} +18 -18
  142. package/src/factory/index.ts +1 -1
  143. package/src/histories/AgenticaCancelHistory.ts +13 -0
  144. package/src/histories/AgenticaDescribeHistory.ts +22 -0
  145. package/src/{prompts/AgenticaExecutePrompt.ts → histories/AgenticaExecuteHistory.ts} +7 -7
  146. package/src/histories/AgenticaHistory.ts +25 -0
  147. package/src/{prompts/AgenticaPromptBase.ts → histories/AgenticaHistoryBase.ts} +4 -4
  148. package/src/histories/AgenticaSelectHistory.ts +13 -0
  149. package/src/histories/AgenticaTextHistory.ts +10 -0
  150. package/src/histories/MicroAgenticaHistory.ts +18 -0
  151. package/src/index.ts +18 -15
  152. package/src/json/IAgenticaEventJson.ts +6 -5
  153. package/src/json/{IAgenticaPromptJson.ts → IAgenticaHistoryJson.ts} +13 -13
  154. package/src/json/IAgenticaOperationSelectionJson.ts +8 -1
  155. package/src/json/IMicroAgenticaEventJson.ts +21 -0
  156. package/src/json/IMicroAgenticaHistoryJson.ts +23 -0
  157. package/src/orchestrate/call.ts +83 -59
  158. package/src/orchestrate/cancel.ts +4 -4
  159. package/src/orchestrate/describe.ts +11 -7
  160. package/src/orchestrate/execute.ts +7 -7
  161. package/src/orchestrate/initialize.ts +6 -6
  162. package/src/orchestrate/internal/selectFunction.ts +3 -0
  163. package/src/orchestrate/select.ts +13 -13
  164. package/src/structures/IAgenticaConfig.ts +6 -5
  165. package/src/structures/IAgenticaController.ts +1 -1
  166. package/src/structures/IAgenticaExecutor.ts +12 -10
  167. package/src/structures/IAgenticaProps.ts +3 -3
  168. package/src/structures/IAgenticaSystemPrompt.ts +13 -13
  169. package/src/structures/IAgenticaVendor.ts +1 -1
  170. package/src/structures/IMicroAgenticaConfig.ts +82 -0
  171. package/src/structures/IMicroAgenticaExecutor.ts +63 -0
  172. package/src/structures/IMicroAgenticaProps.ts +70 -0
  173. package/src/structures/IMicroAgenticaSystemPrompt.ts +71 -0
  174. package/src/transformers/AgenticaPromptTransformer.ts +46 -46
  175. package/lib/factory/prompts.d.ts +0 -33
  176. package/lib/factory/prompts.js.map +0 -1
  177. package/lib/json/IAgenticaPromptJson.js.map +0 -1
  178. package/lib/prompts/AgenticaCancelPrompt.d.ts +0 -8
  179. package/lib/prompts/AgenticaCancelPrompt.js.map +0 -1
  180. package/lib/prompts/AgenticaDescribePrompt.d.ts +0 -16
  181. package/lib/prompts/AgenticaDescribePrompt.js.map +0 -1
  182. package/lib/prompts/AgenticaExecutePrompt.js.map +0 -1
  183. package/lib/prompts/AgenticaPrompt.d.ts +0 -17
  184. package/lib/prompts/AgenticaPrompt.js.map +0 -1
  185. package/lib/prompts/AgenticaPromptBase.js.map +0 -1
  186. package/lib/prompts/AgenticaSelectPrompt.d.ts +0 -8
  187. package/lib/prompts/AgenticaSelectPrompt.js +0 -3
  188. package/lib/prompts/AgenticaSelectPrompt.js.map +0 -1
  189. package/lib/prompts/AgenticaTextPrompt.d.ts +0 -6
  190. package/lib/prompts/AgenticaTextPrompt.js.map +0 -1
  191. package/src/prompts/AgenticaCancelPrompt.ts +0 -13
  192. package/src/prompts/AgenticaDescribePrompt.ts +0 -22
  193. package/src/prompts/AgenticaPrompt.ts +0 -25
  194. package/src/prompts/AgenticaSelectPrompt.ts +0 -13
  195. package/src/prompts/AgenticaTextPrompt.ts +0 -10
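
The recurring change across the diffs below is the rename of the prompts/*Prompt types and factories to histories/*History, alongside the new MicroAgentica agent and its Micro* structures. A hedged migration sketch for downstream imports follows — it assumes the renamed types are re-exported from the package root, which the src/index.ts change suggests but this diff does not show:

// Before (0.15.6):
// import type { AgenticaPrompt } from "@agentica/core";

// After (0.16.0) — assumes the renamed type is re-exported from the package root:
import type { AgenticaHistory } from "@agentica/core";

// A downstream signature that tracked conversation records changes accordingly:
export function archive(histories: AgenticaHistory<"chatgpt">[]): void {
  console.log(`archiving ${histories.length} histories`);
}
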
package/src/orchestrate/call.ts

@@ -10,63 +10,71 @@ import type { IValidation } from "typia";
  import {
  ChatGptTypeChecker,
  HttpLlm,
+ LlmTypeCheckerV3_1,
  } from "@samchon/openapi";

  import type { AgenticaCancelPrompt } from "../context/AgenticaCancelPrompt";
  import type { AgenticaContext } from "../context/AgenticaContext";
  import type { AgenticaOperation } from "../context/AgenticaOperation";
+ import type { MicroAgenticaContext } from "../context/MicroAgenticaContext";
  import type { AgenticaCallEvent } from "../events/AgenticaCallEvent";
- import type { AgenticaExecutePrompt } from "../prompts/AgenticaExecutePrompt";
- import type { AgenticaPrompt } from "../prompts/AgenticaPrompt";
- import type { AgenticaTextPrompt } from "../prompts/AgenticaTextPrompt";
+ import type { AgenticaExecuteHistory } from "../histories/AgenticaExecuteHistory";
+ import type { AgenticaHistory } from "../histories/AgenticaHistory";
+ import type { AgenticaTextHistory } from "../histories/AgenticaTextHistory";
+ import type { MicroAgenticaHistory } from "../histories/MicroAgenticaHistory";

  import { AgenticaConstant } from "../constants/AgenticaConstant";
  import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
  import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
+ import { isAgenticaContext } from "../context/internal/isAgenticaContext";
  import { createCallEvent, createCancelEvent, createExecuteEvent, createTextEvent, createValidateEvent } from "../factory/events";
+ import { createCancelHistory, createExecuteHistory, createTextHistory, decodeHistory } from "../factory/histories";
  import { createOperationSelection } from "../factory/operations";
- import { createCancelPrompt, createExecutePrompt, createTextPrompt, decodePrompt } from "../factory/prompts";
  import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
  import { StreamUtil } from "../utils/StreamUtil";

  import { cancelFunction } from "./internal/cancelFunction";

- export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>): Promise<AgenticaPrompt<Model>[]> {
+ export async function call<Model extends ILlmSchema.Model>(
+ ctx: AgenticaContext<Model> | MicroAgenticaContext<Model>,
+ operations: AgenticaOperation<Model>[],
+ ): Promise<AgenticaHistory<Model>[]> {
  // ----
  // EXECUTE CHATGPT API
  // ----
  const completionStream = await ctx.request("call", {
  messages: [
- // COMMON SYSTEM PROMPT
- {
- role: "system",
- content: AgenticaDefaultPrompt.write(ctx.config),
- } satisfies OpenAI.ChatCompletionSystemMessageParam,
- // PREVIOUS HISTORIES
- ...ctx.histories.map(decodePrompt).flat(),
- // USER INPUT
- {
- role: "user",
- content: ctx.prompt.text,
- },
- // SYSTEM PROMPT
- {
+ // COMMON SYSTEM PROMPT
+ {
+ role: "system",
+ content: AgenticaDefaultPrompt.write(ctx.config),
+ } satisfies OpenAI.ChatCompletionSystemMessageParam,
+ // PREVIOUS HISTORIES
+ ...ctx.histories.map(decodeHistory).flat(),
+ // USER INPUT
+ {
+ role: "user",
+ content: ctx.prompt.text,
+ },
+ // SYSTEM PROMPT
+ ...(ctx.config?.systemPrompt?.execute === null
+ ? []
+ : [{
  role: "system",
- content:
- ctx.config?.systemPrompt?.execute?.(ctx.histories)
+ content: ctx.config?.systemPrompt?.execute?.(ctx.histories as MicroAgenticaHistory<Model>[])
  ?? AgenticaSystemPrompt.EXECUTE,
- },
+ } satisfies OpenAI.ChatCompletionSystemMessageParam]),
  ],
  // STACKED FUNCTIONS
- tools: ctx.stack.map(
+ tools: operations.map(
  s =>
  ({
  type: "function",
  function: {
- name: s.operation.name,
- description: s.operation.function.description,
- parameters: (s.operation.function.separated !== undefined
- ? (s.operation.function.separated.llm
+ name: s.name,
+ description: s.function.description,
+ parameters: (s.function.separated !== undefined
+ ? (s.function.separated.llm
  ?? ({
  type: "object",
  properties: {},
@@ -74,7 +82,7 @@ export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<
  additionalProperties: false,
  $defs: {},
  } satisfies IChatGptSchema.IParameters))
- : s.operation.function.parameters) as Record<string, any>,
+ : s.function.parameters) as Record<string, any>,
  },
  }) as OpenAI.ChatCompletionTool,
  ),
@@ -88,9 +96,9 @@ export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<
  const closures: Array<
  () => Promise<
  Array<
- | AgenticaExecutePrompt<Model>
+ | AgenticaExecuteHistory<Model>
  | AgenticaCancelPrompt<Model>
- | AgenticaTextPrompt
+ | AgenticaTextHistory
  >
  >
  > = [];
@@ -108,7 +116,7 @@ export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<
  }
  closures.push(
  async (): Promise<
- [AgenticaExecutePrompt<Model>, AgenticaCancelPrompt<Model>]
+ [AgenticaExecuteHistory<Model>, AgenticaCancelPrompt<Model>]
  > => {
  const call: AgenticaCallEvent<Model> = createCallEvent({
  id: tc.id,
@@ -124,7 +132,7 @@ export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<
  }
  await ctx.dispatch(call);

- const execute: AgenticaExecutePrompt<Model> = await propagate(
+ const execute: AgenticaExecuteHistory<Model> = await propagate(
  ctx,
  call,
  0,
@@ -138,21 +146,23 @@ export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<
  }),
  );

- await cancelFunction(ctx, {
- name: call.operation.name,
- reason: "completed",
- });
- void ctx.dispatch(
- createCancelEvent({
- selection: createOperationSelection({
- operation: call.operation,
- reason: "complete",
+ if (isAgenticaContext(ctx)) {
+ await cancelFunction(ctx, {
+ name: call.operation.name,
+ reason: "completed",
+ });
+ void ctx.dispatch(
+ createCancelEvent({
+ selection: createOperationSelection({
+ operation: call.operation,
+ reason: "complete",
+ }),
  }),
- }),
- );
+ );
+ }
  return [
  execute,
- createCancelPrompt({
+ createCancelHistory({
  id: call.id,
  selections: [
  createOperationSelection({
@@ -172,7 +182,7 @@ export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<
  && choice.message.content.length > 0
  ) {
  closures.push(async () => {
- const value: AgenticaTextPrompt = createTextPrompt({
+ const value: AgenticaTextHistory = createTextHistory({
  role: "assistant",
  text: choice.message.content!,
  });
@@ -192,7 +202,11 @@ export async function call<Model extends ILlmSchema.Model>(ctx: AgenticaContext<
  return (await Promise.all(closures.map(async fn => fn()))).flat();
  }

- async function propagate<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>, call: AgenticaCallEvent<Model>, retry: number): Promise<AgenticaExecutePrompt<Model>> {
+ async function propagate<Model extends ILlmSchema.Model>(
+ ctx: AgenticaContext<Model> | MicroAgenticaContext<Model>,
+ call: AgenticaCallEvent<Model>,
+ retry: number,
+ ): Promise<AgenticaExecuteHistory<Model>> {
  if (call.operation.protocol === "http") {
  // ----
  // HTTP PROTOCOL
@@ -210,7 +224,7 @@ async function propagate<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Mo
  }),
  );
  if (retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)) {
- const trial: AgenticaExecutePrompt<Model> | null = await correct(
+ const trial: AgenticaExecuteHistory<Model> | null = await correct(
  ctx,
  call,
  retry,
@@ -236,7 +250,7 @@ async function propagate<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Mo
  (success === false
  ? await correct(ctx, call, retry, response.body)
  : null)
- ?? createExecutePrompt({
+ ?? createExecuteHistory({
  operation: call.operation,
  id: call.id,
  arguments: call.arguments,
@@ -246,7 +260,7 @@ async function propagate<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Mo
  }
  catch (error) {
  // DISPATCH ERROR
- return createExecutePrompt({
+ return createExecuteHistory({
  operation: call.operation,
  id: call.id,
  arguments: call.arguments,
@@ -285,7 +299,7 @@ async function propagate<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Mo
  (retry++ < (ctx.config?.retry ?? AgenticaConstant.RETRY)
  ? await correct(ctx, call, retry, check.errors)
  : null)
- ?? createExecutePrompt({
+ ?? createExecuteHistory({
  id: call.id,
  operation: call.operation,
  arguments: call.arguments,
@@ -300,7 +314,7 @@ async function propagate<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Mo
  // EXECUTE FUNCTION
  try {
  const value = await executeClassOperation(call.operation, call.arguments);
- return createExecutePrompt({
+ return createExecuteHistory({
  id: call.id,
  operation: call.operation,
  arguments: call.arguments,
@@ -308,7 +322,7 @@ async function propagate<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Mo
  });
  }
  catch (error) {
- return createExecutePrompt({
+ return createExecuteHistory({
  id: call.id,
  operation: call.operation,
  arguments: call.arguments,
@@ -358,7 +372,12 @@ async function executeClassOperation<Model extends ILlmSchema.Model>(operation:
  return ((execute as Record<string, unknown>)[operation.function.name] as (...args: unknown[]) => Promise<unknown>)(operationArguments);
  }

- async function correct<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>, call: AgenticaCallEvent<Model>, retry: number, error: unknown): Promise<AgenticaExecutePrompt<Model> | null> {
+ async function correct<Model extends ILlmSchema.Model>(
+ ctx: AgenticaContext<Model> | MicroAgenticaContext<Model>,
+ call: AgenticaCallEvent<Model>,
+ retry: number,
+ error: unknown,
+ ): Promise<AgenticaExecuteHistory<Model> | null> {
  // ----
  // EXECUTE CHATGPT API
  // ----
@@ -370,19 +389,22 @@ async function correct<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Mode
  content: AgenticaDefaultPrompt.write(ctx.config),
  } satisfies OpenAI.ChatCompletionSystemMessageParam,
  // PREVIOUS HISTORIES
- ...ctx.histories.map(decodePrompt).flat(),
+ ...ctx.histories.map(decodeHistory).flat(),
  // USER INPUT
  {
  role: "user",
  content: ctx.prompt.text,
  },
  // TYPE CORRECTION
- {
- role: "system",
- content:
- ctx.config?.systemPrompt?.execute?.(ctx.histories)
+ ...(ctx.config?.systemPrompt?.execute === null
+ ? []
+ : [{
+ role: "system",
+ content:
+ ctx.config?.systemPrompt?.execute?.(ctx.histories as MicroAgenticaHistory<Model>[])
  ?? AgenticaSystemPrompt.EXECUTE,
- },
+ } satisfies OpenAI.ChatCompletionSystemMessageParam]
+ ),
  {
  role: "assistant",
  tool_calls: [
@@ -495,5 +517,7 @@ function isObject($defs: Record<string, IChatGptSchema>, schema: IChatGptSchema)
  && isObject($defs, $defs[schema.$ref.split("/").at(-1)!]!))
  || (ChatGptTypeChecker.isAnyOf(schema)
  && schema.anyOf.every(schema => isObject($defs, schema)))
+ || (LlmTypeCheckerV3_1.isOneOf(schema)
+ && schema.oneOf.every(schema => isObject($defs, schema)))
  );
  }
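
In the call.ts hunks above, the tools array is now built directly from the AgenticaOperation values passed in by the caller (s.name, s.function) instead of from ctx.stack selections (s.operation.name). A standalone sketch of that mapping follows; the import path is an assumption, and the empty-parameters fallback is abbreviated relative to the original:

import type { ILlmSchema } from "@samchon/openapi";
import type OpenAI from "openai";
import type { AgenticaOperation } from "@agentica/core"; // export path assumed

// Sketch of the operation -> OpenAI tool mapping used by the new call():
// prefer the LLM-side schema when the parameters were separated between the
// LLM and human sides, otherwise pass the full parameters schema through.
export function toTool<Model extends ILlmSchema.Model>(
  operation: AgenticaOperation<Model>,
): OpenAI.ChatCompletionTool {
  return {
    type: "function",
    function: {
      name: operation.name,
      description: operation.function.description,
      parameters: (operation.function.separated !== undefined
        ? operation.function.separated.llm
          ?? { type: "object", properties: {}, required: [], additionalProperties: false, $defs: {} }
        : operation.function.parameters) as Record<string, any>,
    },
  };
}
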
package/src/orchestrate/cancel.ts

@@ -16,7 +16,7 @@ import type { AgenticaEvent } from "../events/AgenticaEvent";
  import { AgenticaConstant } from "../constants/AgenticaConstant";
  import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
  import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
- import { createCancelPrompt, decodePrompt } from "../factory/prompts";
+ import { createCancelHistory, decodeHistory } from "../factory/histories";
  import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
  import { StreamUtil } from "../utils/StreamUtil";

@@ -78,7 +78,7 @@ export async function cancel<Model extends ILlmSchema.Model>(ctx: AgenticaContex
  }

  // RE-COLLECT SELECT FUNCTION EVENTS
- const collection: AgenticaCancelPrompt<Model> = createCancelPrompt({
+ const collection: AgenticaCancelPrompt<Model> = createCancelHistory({
  id: v4(),
  selections: [],
  });
@@ -137,7 +137,7 @@ async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>,
  ),
  },
  // PREVIOUS HISTORIES
- ...ctx.histories.map(decodePrompt).flat(),
+ ...ctx.histories.map(decodeHistory).flat(),
  // USER INPUT
  {
  role: "user",
@@ -225,7 +225,7 @@ async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>,
  continue;
  }

- const collection: AgenticaCancelPrompt<Model> = createCancelPrompt({
+ const collection: AgenticaCancelPrompt<Model> = createCancelHistory({
  id: tc.id,
  selections: [],
  });
package/src/orchestrate/describe.ts

@@ -2,18 +2,22 @@ import type { ILlmSchema } from "@samchon/openapi";
  import type OpenAI from "openai";

  import type { AgenticaContext } from "../context/AgenticaContext";
- import type { AgenticaDescribePrompt } from "../prompts/AgenticaDescribePrompt";
- import type { AgenticaExecutePrompt } from "../prompts/AgenticaExecutePrompt";
+ import type { MicroAgenticaContext } from "../context/MicroAgenticaContext";
+ import type { AgenticaDescribeHistory } from "../histories/AgenticaDescribeHistory";
+ import type { AgenticaExecuteHistory } from "../histories/AgenticaExecuteHistory";

  import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
  import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
  import { createDescribeEvent } from "../factory/events";
- import { createDescribePrompt, decodePrompt } from "../factory/prompts";
+ import { createDescribeHistory, decodeHistory } from "../factory/histories";
  import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
  import { MPSC } from "../utils/MPSC";
  import { StreamUtil } from "../utils/StreamUtil";

- export async function describe<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>, histories: AgenticaExecutePrompt<Model>[]): Promise<AgenticaDescribePrompt<Model>[]> {
+ export async function describe<Model extends ILlmSchema.Model>(
+ ctx: AgenticaContext<Model> | MicroAgenticaContext<Model>,
+ histories: AgenticaExecuteHistory<Model>[],
+ ): Promise<AgenticaDescribeHistory<Model>[]> {
  if (histories.length === 0) {
  return [];
  }
@@ -26,7 +30,7 @@ export async function describe<Model extends ILlmSchema.Cont
  content: AgenticaDefaultPrompt.write(ctx.config),
  } satisfies OpenAI.ChatCompletionSystemMessageParam,
  // FUNCTION CALLING HISTORIES
- ...histories.map(decodePrompt).flat(),
+ ...histories.map(decodeHistory).flat(),
  // SYSTEM PROMPT
  {
  role: "system",
@@ -105,7 +109,7 @@ export async function describe<Model extends ILlmSchema.Cont
  if (completion == null) {
  throw new Error("No completion received");
  }
- const descriptions: AgenticaDescribePrompt<Model>[] = completion.choices
+ const descriptions: AgenticaDescribeHistory<Model>[] = completion.choices
  .map(choice =>
  choice.message.role === "assistant"
  ? choice.message.content
@@ -114,7 +118,7 @@ export async function describe<Model extends ILlmSchema.Cont
  .filter(str => str !== null)
  .map(
  content =>
- createDescribePrompt({
+ createDescribeHistory({
  executes: histories,
  text: content,
  }),
package/src/orchestrate/execute.ts

@@ -1,8 +1,8 @@
  import type { ILlmSchema } from "@samchon/openapi";

  import type { AgenticaContext } from "../context/AgenticaContext";
- import type { AgenticaExecutePrompt } from "../prompts/AgenticaExecutePrompt";
- import type { AgenticaPrompt } from "../prompts/AgenticaPrompt";
+ import type { AgenticaExecuteHistory } from "../histories/AgenticaExecuteHistory";
+ import type { AgenticaHistory } from "../histories/AgenticaHistory";
  import type { IAgenticaExecutor } from "../structures/IAgenticaExecutor";

  import { call } from "./call";
@@ -13,8 +13,8 @@ import { cancelFunction } from "./internal/cancelFunction";
  import { select } from "./select";

  export function execute<Model extends ILlmSchema.Model>(executor: Partial<IAgenticaExecutor<Model>> | null) {
- return async (ctx: AgenticaContext<Model>): Promise<AgenticaPrompt<Model>[]> => {
- const histories: AgenticaPrompt<Model>[] = [];
+ return async (ctx: AgenticaContext<Model>): Promise<AgenticaHistory<Model>[]> => {
+ const histories: AgenticaHistory<Model>[] = [];

  // FUNCTIONS ARE NOT LISTED YET
  if (ctx.ready() === false) {
@@ -55,13 +55,13 @@ export function execute<Model extends ILlmSchema.Model>(executor: Partial<IAgent
  // FUNCTION CALLING LOOP
  while (true) {
  // EXECUTE FUNCTIONS
- const prompts: AgenticaPrompt<Model>[] = await (
+ const prompts: AgenticaHistory<Model>[] = await (
  executor?.call ?? call
- )(ctx);
+ )(ctx, ctx.stack.map(s => s.operation));
  histories.push(...prompts);

  // EXPLAIN RETURN VALUES
- const executes: AgenticaExecutePrompt<Model>[] = prompts.filter(
+ const executes: AgenticaExecuteHistory<Model>[] = prompts.filter(
  prompt => prompt.type === "execute",
  );
  for (const e of executes) {
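
The loop above now invokes (executor?.call ?? call)(ctx, ctx.stack.map(s => s.operation)), so a custom call executor receives the stacked operations explicitly. A minimal sketch of a wrapper built on that assumption; the CallExecutor alias and the root-level exports are hypothetical stand-ins rather than the published IAgenticaExecutor typing:

import type { ILlmSchema } from "@samchon/openapi";
import type {
  AgenticaContext,
  AgenticaHistory,
  AgenticaOperation,
} from "@agentica/core"; // export paths assumed, not verified against the published typings

// Assumed shape of the "call" step after this release: the orchestration loop
// now passes the context plus the currently stacked operations.
type CallExecutor<Model extends ILlmSchema.Model> = (
  ctx: AgenticaContext<Model>,
  operations: AgenticaOperation<Model>[],
) => Promise<AgenticaHistory<Model>[]>;

// Wrap any call executor to log the candidate function names before delegating.
export function withCallLogging<Model extends ILlmSchema.Model>(
  inner: CallExecutor<Model>,
): CallExecutor<Model> {
  return async (ctx, operations) => {
    console.log("function calling over:", operations.map(op => op.name));
    return inner(ctx, operations);
  };
}

Such a wrapper would be supplied through IAgenticaConfig.executor.call, provided the published IAgenticaExecutor.call member matches this two-argument shape.
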
package/src/orchestrate/initialize.ts

@@ -5,12 +5,12 @@ import typia from "typia";

  import type { AgenticaContext } from "../context/AgenticaContext";
  import type { __IChatInitialApplication } from "../context/internal/__IChatInitialApplication";
- import type { AgenticaPrompt } from "../prompts/AgenticaPrompt";
+ import type { AgenticaHistory } from "../histories/AgenticaHistory";

  import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
  import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
  import { createTextEvent } from "../factory/events";
- import { createTextPrompt, decodePrompt } from "../factory/prompts";
+ import { createTextHistory, decodeHistory } from "../factory/histories";
  import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
  import { MPSC } from "../utils/MPSC";
  import { StreamUtil } from "../utils/StreamUtil";
@@ -20,7 +20,7 @@ const FUNCTION: ILlmFunction<"chatgpt"> = typia.llm.application<
  "chatgpt"
  >().functions[0]!;

- export async function initialize<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>): Promise<AgenticaPrompt<Model>[]> {
+ export async function initialize<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>): Promise<AgenticaHistory<Model>[]> {
  // ----
  // EXECUTE CHATGPT API
  // ----
@@ -32,7 +32,7 @@ export async function initialize<Model extends ILlmSchema.Model>(ctx: AgenticaCo
  content: AgenticaDefaultPrompt.write(ctx.config),
  } satisfies OpenAI.ChatCompletionSystemMessageParam,
  // PREVIOUS HISTORIES
- ...ctx.histories.map(decodePrompt).flat(),
+ ...ctx.histories.map(decodeHistory).flat(),
  // USER INPUT
  {
  role: "user",
@@ -137,14 +137,14 @@ export async function initialize<Model extends ILlmSchema.Model>(ctx: AgenticaCo
  // ----
  // PROCESS COMPLETION
  // ----
- const prompts: AgenticaPrompt<Model>[] = [];
+ const prompts: AgenticaHistory<Model>[] = [];
  for (const choice of completion.choices) {
  if (
  choice.message.role === "assistant"
  && choice.message.content != null
  ) {
  prompts.push(
- createTextPrompt({
+ createTextHistory({
  role: "assistant",
  text: choice.message.content,
  }),
package/src/orchestrate/internal/selectFunction.ts

@@ -8,6 +8,9 @@ import type { __IChatFunctionReference } from "../../context/internal/__IChatFun
  import { createSelectEvent } from "../../factory/events";
  import { createOperationSelection } from "../../factory/operations";

+ /**
+ * @internal
+ */
  export async function selectFunction<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>, reference: __IChatFunctionReference): Promise<AgenticaOperation<Model> | null> {
  const operation: AgenticaOperation<Model> | undefined
  = ctx.operations.flat.get(reference.name);
package/src/orchestrate/select.ts

@@ -11,16 +11,16 @@ import type { AgenticaOperationSelection } from "../context/AgenticaOperationSel
  import type { __IChatFunctionReference } from "../context/internal/__IChatFunctionReference";
  import type { __IChatSelectFunctionsApplication } from "../context/internal/__IChatSelectFunctionsApplication";
  import type { AgenticaEvent } from "../events/AgenticaEvent";
- import type { AgenticaPrompt } from "../prompts/AgenticaPrompt";
- import type { AgenticaSelectPrompt } from "../prompts/AgenticaSelectPrompt";
- import type { AgenticaTextPrompt } from "../prompts/AgenticaTextPrompt";
+ import type { AgenticaHistory } from "../histories/AgenticaHistory";
+ import type { AgenticaSelectHistory } from "../histories/AgenticaSelectHistory";
+ import type { AgenticaTextHistory } from "../histories/AgenticaTextHistory";

  import { AgenticaConstant } from "../constants/AgenticaConstant";
  import { AgenticaDefaultPrompt } from "../constants/AgenticaDefaultPrompt";
  import { AgenticaSystemPrompt } from "../constants/AgenticaSystemPrompt";
  import { createTextEvent } from "../factory/events";
+ import { createSelectHistory, createTextHistory, decodeHistory } from "../factory/histories";
  import { createOperationSelection } from "../factory/operations";
- import { createSelectPrompt, createTextPrompt, decodePrompt } from "../factory/prompts";
  import { ChatGptCompletionMessageUtil } from "../utils/ChatGptCompletionMessageUtil";
  import { StreamUtil } from "../utils/StreamUtil";

@@ -37,7 +37,7 @@ interface IFailure {
  validation: IValidation.IFailure;
  }

- export async function select<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>): Promise<AgenticaPrompt<Model>[]> {
+ export async function select<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>): Promise<AgenticaHistory<Model>[]> {
  if (ctx.operations.divided === undefined) {
  return step(ctx, ctx.operations.array, 0);
  }
@@ -45,7 +45,7 @@ export async function select<Model extends ILlmSchema.Model>(ctx: AgenticaContex
  const stacks: AgenticaOperationSelection<Model>[][]
  = ctx.operations.divided.map(() => []);
  const events: AgenticaEvent<Model>[] = [];
- const prompts: AgenticaPrompt<Model>[][] = await Promise.all(
+ const prompts: AgenticaHistory<Model>[][] = await Promise.all(
  ctx.operations.divided.map(async (operations, i) =>
  step(
  {
@@ -82,7 +82,7 @@ export async function select<Model extends ILlmSchema.Model>(ctx: AgenticaContex
  }

  // RE-COLLECT SELECT FUNCTION EVENTS
- const collection: AgenticaSelectPrompt<Model> = createSelectPrompt({
+ const collection: AgenticaSelectHistory<Model> = createSelectHistory({
  id: v4(),
  selections: [],
  });
@@ -98,7 +98,7 @@ export async function select<Model extends ILlmSchema.Model>(ctx: AgenticaContex
  return [collection];
  }

- async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>, operations: AgenticaOperation<Model>[], retry: number, failures?: IFailure[]): Promise<AgenticaPrompt<Model>[]> {
+ async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>, operations: AgenticaOperation<Model>[], retry: number, failures?: IFailure[]): Promise<AgenticaHistory<Model>[]> {
  // ----
  // EXECUTE CHATGPT API
  // ----
@@ -141,7 +141,7 @@ async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>,
  ),
  },
  // PREVIOUS HISTORIES
- ...ctx.histories.map(decodePrompt).flat(),
+ ...ctx.histories.map(decodeHistory).flat(),
  // USER INPUT
  {
  role: "user",
@@ -209,7 +209,7 @@ async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>,
  // ----
  // PROCESS COMPLETION
  // ----
- const prompts: AgenticaPrompt<Model>[] = [];
+ const prompts: AgenticaHistory<Model>[] = [];
  for (const choice of completion.choices) {
  // TOOL CALLING HANDLER
  if (choice.message.tool_calls != null) {
@@ -227,8 +227,8 @@ async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>,
  continue;
  }

- const collection: AgenticaSelectPrompt<Model>
- = createSelectPrompt({
+ const collection: AgenticaSelectHistory<Model>
+ = createSelectHistory({
  id: tc.id,
  selections: [],
  });
@@ -259,7 +259,7 @@ async function step<Model extends ILlmSchema.Model>(ctx: AgenticaContext<Model>,
  choice.message.role === "assistant"
  && choice.message.content != null
  ) {
- const text: AgenticaTextPrompt = createTextPrompt({
+ const text: AgenticaTextHistory = createTextHistory({
  role: "assistant",
  text: choice.message.content,
  });
package/src/structures/IAgenticaConfig.ts

@@ -1,21 +1,22 @@
  import type { ILlmSchema } from "@samchon/openapi";

  import type { AgenticaContext } from "../context/AgenticaContext";
- import type { AgenticaPrompt } from "../prompts/AgenticaPrompt";
+ import type { AgenticaHistory } from "../histories/AgenticaHistory";

  import type { IAgenticaExecutor } from "./IAgenticaExecutor";
  import type { IAgenticaSystemPrompt } from "./IAgenticaSystemPrompt";

  /**
- * Configuration for Nestia Agent.
+ * Configuration for Agentic Agent.
  *
  * `IAgenticaConfig` is an interface that defines the configuration
  * properties of the {@link Agentica}. With this configuration, you
- * can set the user's locale, timezone, and some of system prompts.
+ * can set the user's {@link locale}, {@link timezone}, and some of
+ * {@link systemPrompt system prompts}.
  *
  * Also, you can affect to the LLM function selecing/calling logic by
  * configuring additional properties. For an example, if you configure the
- * {@link capacity} property, the A.I. chatbot will divide the functions
+ * {@link capacity} property, the AI chatbot will divide the functions
  * into the several groups with the configured capacity and select proper
  * functions to call by operating the multiple LLM function selecting
  * agents parallelly.
@@ -40,7 +41,7 @@ export interface IAgenticaConfig<Model extends ILlmSchema.Model> {
  */
  executor?:
  | Partial<IAgenticaExecutor<Model>>
- | ((ctx: AgenticaContext<Model>) => Promise<AgenticaPrompt<Model>[]>);
+ | ((ctx: AgenticaContext<Model>) => Promise<AgenticaHistory<Model>[]>);

  /**
  * System prompt messages.
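
Taken together with the new systemPrompt?.execute === null branches in orchestrate/call.ts, the configuration hunks above suggest that execute can now be set to null to drop the EXECUTE system prompt entirely. A hedged configuration sketch, assuming the updated IAgenticaSystemPrompt actually permits null and that IAgenticaConfig is importable from the package root:

import type { IAgenticaConfig } from "@agentica/core"; // export path assumed

// Hedged sketch: per the null checks in orchestrate/call.ts, a null value
// appears to suppress the EXECUTE system prompt instead of falling back to
// AgenticaSystemPrompt.EXECUTE. Whether the type accepts null is assumed here.
const config: IAgenticaConfig<"chatgpt"> = {
  systemPrompt: {
    execute: null,
  },
};
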
package/src/structures/IAgenticaController.ts

@@ -9,7 +9,7 @@ import type {
  } from "@samchon/openapi";

  /**
- * Controller of the Nestia Agent.
+ * Controller of the Agentica Agent.
  *
  * `IAgenticaController` is a type represents a controller of the
  * {@link Agentica}, which serves a set of functions to be called