@agentica/core 0.39.0 → 0.41.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/lib/Agentica.js +1 -1
  2. package/lib/Agentica.js.map +1 -1
  3. package/lib/MicroAgentica.js +1 -1
  4. package/lib/MicroAgentica.js.map +1 -1
  5. package/lib/context/AgenticaContext.d.ts +2 -1
  6. package/lib/context/AgenticaContextRequestResult.d.ts +12 -0
  7. package/lib/context/AgenticaContextRequestResult.js +3 -0
  8. package/lib/context/AgenticaContextRequestResult.js.map +1 -0
  9. package/lib/context/MicroAgenticaContext.d.ts +2 -1
  10. package/lib/events/AgenticaRequestEvent.d.ts +11 -4
  11. package/lib/events/AgenticaResponseEvent.d.ts +14 -22
  12. package/lib/factory/events.d.ts +7 -5
  13. package/lib/factory/events.js +3 -1
  14. package/lib/factory/events.js.map +1 -1
  15. package/lib/index.mjs +107 -24
  16. package/lib/index.mjs.map +1 -1
  17. package/lib/orchestrate/call.js +12 -5
  18. package/lib/orchestrate/call.js.map +1 -1
  19. package/lib/orchestrate/cancel.js +7 -3
  20. package/lib/orchestrate/cancel.js.map +1 -1
  21. package/lib/orchestrate/describe.js +16 -3
  22. package/lib/orchestrate/describe.js.map +1 -1
  23. package/lib/orchestrate/initialize.js +15 -3
  24. package/lib/orchestrate/initialize.js.map +1 -1
  25. package/lib/orchestrate/select.js +17 -5
  26. package/lib/orchestrate/select.js.map +1 -1
  27. package/lib/structures/IAgenticaConfig.d.ts +2 -54
  28. package/lib/structures/IAgenticaConfigBase.d.ts +82 -0
  29. package/lib/structures/IAgenticaConfigBase.js +3 -0
  30. package/lib/structures/IAgenticaConfigBase.js.map +1 -0
  31. package/lib/structures/IMicroAgenticaConfig.d.ts +2 -54
  32. package/lib/utils/ChatGptCompletionMessageUtil.spec.js +3 -3
  33. package/lib/utils/ChatGptCompletionMessageUtil.spec.js.map +1 -1
  34. package/lib/utils/request.d.ts +3 -2
  35. package/lib/utils/request.js +40 -9
  36. package/lib/utils/request.js.map +1 -1
  37. package/package.json +5 -5
  38. package/src/Agentica.ts +2 -2
  39. package/src/MicroAgentica.ts +2 -2
  40. package/src/context/AgenticaContext.ts +2 -1
  41. package/src/context/AgenticaContextRequestResult.ts +14 -0
  42. package/src/context/MicroAgenticaContext.ts +2 -1
  43. package/src/events/AgenticaRequestEvent.ts +21 -4
  44. package/src/events/AgenticaResponseEvent.ts +24 -25
  45. package/src/factory/events.ts +18 -8
  46. package/src/orchestrate/call.ts +15 -5
  47. package/src/orchestrate/cancel.ts +7 -3
  48. package/src/orchestrate/describe.ts +16 -2
  49. package/src/orchestrate/initialize.ts +15 -2
  50. package/src/orchestrate/select.ts +15 -2
  51. package/src/structures/IAgenticaConfig.ts +2 -58
  52. package/src/structures/IAgenticaConfigBase.ts +87 -0
  53. package/src/structures/IMicroAgenticaConfig.ts +2 -58
  54. package/src/utils/ChatGptCompletionMessageUtil.spec.ts +3 -3
  55. package/src/utils/request.ts +43 -10

package/src/structures/IAgenticaConfig.ts

@@ -1,5 +1,6 @@
 import type { AgenticaContext } from "../context/AgenticaContext";
 
+import type { IAgenticaConfigBase } from "./IAgenticaConfigBase";
 import type { IAgenticaExecutor } from "./IAgenticaExecutor";
 import type { IAgenticaSystemPrompt } from "./IAgenticaSystemPrompt";
 
@@ -20,7 +21,7 @@ import type { IAgenticaSystemPrompt } from "./IAgenticaSystemPrompt";
  *
  * @author Samchon
  */
-export interface IAgenticaConfig {
+export interface IAgenticaConfig extends IAgenticaConfigBase {
   /**
    * Agent executor.
    *
@@ -47,45 +48,6 @@ export interface IAgenticaConfig {
    */
   systemPrompt?: IAgenticaSystemPrompt;
 
-  /**
-   * Locale of the A.I. chatbot.
-   *
-   * If you configure this property, the A.I. chatbot will conversate with
-   * the given locale. You can get the locale value by
-   *
-   * - Browser: `navigator.language`
-   * - NodeJS: `process.env.LANG.split(".")[0]`
-   *
-   * @default your_locale
-   */
-  locale?: string;
-
-  /**
-   * Timezone of the A.I. chatbot.
-   *
-   * If you configure this property, the A.I. chatbot will consider the
-   * given timezone. You can get the timezone value by
-   * `Intl.DateTimeFormat().resolvedOptions().timeZone`.
-   *
-   * @default your_timezone
-   */
-  timezone?: string;
-
-  /**
-   * Retry count.
-   *
-   * If LLM function calling composed arguments are invalid,
-   * the A.I. chatbot will retry to call the function with
-   * the modified arguments.
-   *
-   * By the way, if you configure it to 0 or 1, the A.I. chatbot
-   * will not retry the LLM function calling for correcting the
-   * arguments.
-   *
-   * @default 3
-   */
-  retry?: number;
-
   /**
    * Capacity of the LLM function selecting.
    *
@@ -118,22 +80,4 @@ export interface IAgenticaConfig {
    * @default true
    */
   eliticism?: boolean;
-
-  /**
-   * Backoff strategy.
-   *
-   * If OpenAI SDK fails to connect LLM API Server, this Backoff factor
-   * would be used to retry for the next connection.
-   *
-   * If the function returns `true`, the retry would be stopped.
-   * Otherwise, the retry would be continued.
-   *
-   * @default (props) => throw props.error
-   * @returns {number} The number of milliseconds to wait before the next retry
-   * @throws {Error} If the function want to stop the retry, you can throw an error
-   */
-  backoffStrategy?: (props: {
-    count: number;
-    error: unknown;
-  }) => number;
 }
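
The `backoffStrategy` contract documented above (and relocated into the base interface below) reads as: return the number of milliseconds to wait before the next connection attempt, or throw to stop retrying. A minimal sketch of a strategy honoring that contract — the attempt cap and the exponential delay are illustrative choices, not library defaults:

```ts
// Hedged sketch of a backoffStrategy implementation.
const backoffStrategy = (props: { count: number; error: unknown }): number => {
  if (props.count >= 3) {
    throw props.error; // throwing stops the retry loop, mirroring the documented default
  }
  return 2 ** props.count * 1_000; // wait 1s, 2s, 4s before the next attempt
};
```
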
package/src/structures/IAgenticaConfigBase.ts

@@ -0,0 +1,87 @@
+/**
+ * Configuration for Agentic Agent.
+ *
+ * `IAgenticaConfig` is an interface that defines the configuration
+ * properties of the {@link Agentica}. With this configuration, you
+ * can set the user's {@link locale}, {@link timezone}, and some of
+ * {@link systemPrompt system prompts}.
+ *
+ * Also, you can affect to the LLM function selecing/calling logic by
+ * configuring additional properties. For an example, if you configure the
+ * {@link capacity} property, the AI chatbot will divide the functions
+ * into the several groups with the configured capacity and select proper
+ * functions to call by operating the multiple LLM function selecting
+ * agents parallelly.
+ *
+ * @author sunrabbit123
+ */
+export interface IAgenticaConfigBase {
+
+  /**
+   * Locale of the A.I. chatbot.
+   *
+   * If you configure this property, the A.I. chatbot will conversate with
+   * the given locale. You can get the locale value by
+   *
+   * - Browser: `navigator.language`
+   * - NodeJS: `process.env.LANG.split(".")[0]`
+   *
+   * @default your_locale
+   */
+  locale?: string;
+
+  /**
+   * Timezone of the A.I. chatbot.
+   *
+   * If you configure this property, the A.I. chatbot will consider the
+   * given timezone. You can get the timezone value by
+   * `Intl.DateTimeFormat().resolvedOptions().timeZone`.
+   *
+   * @default your_timezone
+   */
+  timezone?: string;
+
+  /**
+   * Retry count.
+   *
+   * If LLM function calling composed arguments are invalid,
+   * the A.I. chatbot will retry to call the function with
+   * the modified arguments.
+   *
+   * By the way, if you configure it to 0 or 1, the A.I. chatbot
+   * will not retry the LLM function calling for correcting the
+   * arguments.
+   *
+   * @default 3
+   */
+  retry?: number;
+
+  /**
+   * Backoff strategy.
+   *
+   * If OpenAI SDK fails to connect LLM API Server, this Backoff factor
+   * would be used to retry for the next connection.
+   *
+   * If the function returns `true`, the retry would be stopped.
+   * Otherwise, the retry would be continued.
+   *
+   * @default (props) => throw props.error
+   * @returns {number} The number of milliseconds to wait before the next retry
+   * @throws {Error} If the function want to stop the retry, you can throw an error
+   */
+  backoffStrategy?: (props: {
+    count: number;
+    error: unknown;
+  }) => number;
+
+  /**
+   * Whether to enable streaming.
+   *
+   * If you set this property to `true`, the A.I. chatbot will enable streaming.
+   *
+   * If you set this property to `false`, the A.I. chatbot will not enable streaming.
+   *
+   * @default true
+   */
+  stream?: boolean;
+}
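
Taken together, the relocated base fields plus the new `stream` flag (default `true`) can be combined as below. This is a hypothetical object, assuming the new interface is re-exported from the package root like the existing config interfaces:

```ts
import type { IAgenticaConfigBase } from "@agentica/core";

// Hypothetical config exercising every field of the new base interface.
const base: IAgenticaConfigBase = {
  locale: "en-US",         // e.g. navigator.language in a browser
  timezone: "Asia/Seoul",  // Intl.DateTimeFormat().resolvedOptions().timeZone
  retry: 3,                // re-validate malformed function-calling arguments up to 3 times
  stream: false,           // new in this release: opt out of streamed completions
  backoffStrategy: ({ count, error }) => {
    if (count >= 5) throw error; // give up after five connection failures
    return 500 * (count + 1);    // linear backoff in milliseconds
  },
};
```
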
package/src/structures/IMicroAgenticaConfig.ts

@@ -1,3 +1,4 @@
+import type { IAgenticaConfigBase } from "./IAgenticaConfigBase";
 import type { IMicroAgenticaExecutor } from "./IMicroAgenticaExecutor";
 import type { IMicroAgenticaSystemPrompt } from "./IMicroAgenticaSystemPrompt";
 
@@ -11,7 +12,7 @@ import type { IMicroAgenticaSystemPrompt } from "./IMicroAgenticaSystemPrompt";
  *
  * @author Samchon
  */
-export interface IMicroAgenticaConfig {
+export interface IMicroAgenticaConfig extends IAgenticaConfigBase {
   /**
    * Agent executor.
    *
@@ -39,45 +40,6 @@ export interface IMicroAgenticaConfig {
    */
   systemPrompt?: IMicroAgenticaSystemPrompt;
 
-  /**
-   * Locale of the A.I. chatbot.
-   *
-   * If you configure this property, the A.I. chatbot will conversate with
-   * the given locale. You can get the locale value by
-   *
-   * - Browser: `navigator.language`
-   * - NodeJS: `process.env.LANG.split(".")[0]`
-   *
-   * @default your_locale
-   */
-  locale?: string;
-
-  /**
-   * Timezone of the A.I. chatbot.
-   *
-   * If you configure this property, the A.I. chatbot will consider the
-   * given timezone. You can get the timezone value by
-   * `Intl.DateTimeFormat().resolvedOptions().timeZone`.
-   *
-   * @default your_timezone
-   */
-  timezone?: string;
-
-  /**
-   * Retry count.
-   *
-   * If LLM function calling composed arguments are invalid,
-   * the A.I. chatbot will retry to call the function with
-   * the modified arguments.
-   *
-   * By the way, if you configure it to 0 or 1, the A.I. chatbot
-   * will not retry the LLM function calling for correcting the
-   * arguments.
-   *
-   * @default 3
-   */
-  retry?: number;
-
   /**
    * Whether to throw an exception when execution fails.
    *
@@ -91,22 +53,4 @@ export interface IMicroAgenticaConfig {
    * @default true
    */
   throw?: boolean;
-
-  /**
-   * Backoff strategy.
-   *
-   * If OpenAI SDK fails to connect LLM API Server, this Backoff factor
-   * would be used to retry for the next connection.
-   *
-   * If the function returns `true`, the retry would be stopped.
-   * Otherwise, the retry would be continued.
-   *
-   * @default (props) => throw props.error
-   * @returns {number} The number of milliseconds to wait before the next retry
-   * @throws {Error} If the function want to stop the retry, you can throw an error
-   */
-  backoffStrategy?: (props: {
-    count: number;
-    error: unknown;
-  }) => number;
 }
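
Since `IMicroAgenticaConfig` now extends the same base, the inherited options sit next to the micro-agent-specific `throw` flag. A rough sketch, assuming the type is importable from the package root and that the properties not shown in this hunk (such as the executor) remain optional:

```ts
import type { IMicroAgenticaConfig } from "@agentica/core";

// Illustrative micro-agent config: `throw` is declared on IMicroAgenticaConfig itself,
// while `retry` and `stream` are inherited from IAgenticaConfigBase.
const microConfig: IMicroAgenticaConfig = {
  throw: false, // report failed executions instead of throwing
  retry: 2,
  stream: true,
};
```
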
package/src/utils/ChatGptCompletionMessageUtil.spec.ts

@@ -42,9 +42,9 @@ describe("chatGptCompletionMessageUtil", () => {
   });
 
   it("should handle invalid JSON", () => {
-    expect(() => {
-      ChatGptCompletionMessageUtil.transformCompletionChunk("invalid json");
-    }).toThrow();
+    const result = ChatGptCompletionMessageUtil.transformCompletionChunk("invalid json");
+    // https://github.com/wrtnlabs/agentica/pull/464/files
+    expect(result).toEqual("invalid json");
   });
 });
 
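
The updated test encodes a behavior change: `transformCompletionChunk` no longer throws on malformed input but passes the raw string through. A simplified sketch of that defensive-parse pattern in isolation — not the library's actual implementation, which also handles other chunk shapes:

```ts
// Illustration of the pass-through-on-parse-failure behavior the test asserts.
function transformCompletionChunk(raw: string): unknown {
  try {
    return JSON.parse(raw); // valid JSON becomes a chunk object
  } catch {
    return raw; // invalid JSON is returned unchanged instead of throwing
  }
}
```
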
 

package/src/utils/request.ts

@@ -2,6 +2,7 @@ import type OpenAI from "openai";
 
 import { v4 } from "uuid";
 
+import type { AgenticaContextRequestResult } from "../context/AgenticaContextRequestResult";
 import type { AgenticaTokenUsage } from "../context/AgenticaTokenUsage";
 import type { AgenticaEventSource, AgenticaRequestEvent, AgenticaResponseEvent } from "../events";
 import type { IAgenticaConfig, IAgenticaVendor, IMicroAgenticaConfig } from "../structures";
@@ -12,7 +13,7 @@ import { createRequestEvent } from "../factory";
 import { ChatGptCompletionMessageUtil } from "./ChatGptCompletionMessageUtil";
 import { streamDefaultReaderToAsyncGenerator, StreamUtil } from "./StreamUtil";
 
-export function getChatCompletionWithStreamingFunction(props: {
+export function getChatCompletionFunction(props: {
   vendor: IAgenticaVendor;
   config?: IAgenticaConfig | IMicroAgenticaConfig;
   dispatch: (event: AgenticaRequestEvent | AgenticaResponseEvent) => Promise<void>;
@@ -21,17 +22,18 @@ export function getChatCompletionWithStreamingFunction(props: {
 }) {
   return async (
     source: AgenticaEventSource,
-    body: Omit<OpenAI.ChatCompletionCreateParamsStreaming, "model" | "stream">,
-  ) => {
+    body: Omit<OpenAI.ChatCompletionCreateParamsStreaming | OpenAI.ChatCompletionCreateParamsNonStreaming, "model">,
+  ): Promise<AgenticaContextRequestResult> => {
+    const streamOptions = props.config?.stream === true || props.config?.stream === undefined
+      ? { stream: true, stream_options: { include_usage: true } }
+      : { stream: false };
     const event: AgenticaRequestEvent = createRequestEvent({
       source,
+      stream: streamOptions.stream,
       body: {
         ...body,
         model: props.vendor.model,
-        stream: true,
-        stream_options: {
-          include_usage: true,
-        },
+        ...streamOptions,
       },
       options: {
         ...props.vendor.options,
@@ -61,6 +63,32 @@ export function getChatCompletionWithStreamingFunction(props: {
       }
     })();
 
+    if ("toReadableStream" in completion === false) {
+      if (completion.usage != null) {
+        AgenticaTokenUsageAggregator.aggregate({
+          kind: source,
+          completionUsage: completion.usage,
+          usage: props.usage,
+        });
+      }
+      void props.dispatch({
+        id: v4(),
+        type: "response",
+        request_id: event.id,
+        source,
+        stream: false,
+        body: event.body as OpenAI.ChatCompletionCreateParamsNonStreaming,
+        completion,
+        options: event.options,
+        join: async () => completion,
+        created_at: new Date().toISOString(),
+      }).catch(() => {});
+      return {
+        type: "none-stream",
+        value: completion,
+      };
+    }
+
     const [streamForEvent, temporaryStream] = StreamUtil.transform(
       completion.toReadableStream() as ReadableStream<Uint8Array>,
       value =>
@@ -93,8 +121,9 @@ export function getChatCompletionWithStreamingFunction(props: {
       type: "response",
       request_id: event.id,
       source,
-      stream: streamDefaultReaderToAsyncGenerator(streamForStream.getReader(), props.abortSignal),
-      body: event.body,
+      stream: true,
+      body: event.body as OpenAI.ChatCompletionCreateParamsStreaming,
+      completion: streamDefaultReaderToAsyncGenerator(streamForStream.getReader(), props.abortSignal),
       options: event.options,
       join: async () => {
         const chunks = await StreamUtil.readAll(streamForJoin, props.abortSignal);
@@ -102,6 +131,10 @@ export function getChatCompletionWithStreamingFunction(props: {
       },
       created_at: new Date().toISOString(),
     }).catch(() => {});
-    return streamForReturn;
+
+    return {
+      type: "stream",
+      value: streamForReturn,
+    };
   };
 }
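
The renamed `getChatCompletionFunction` now resolves to an `AgenticaContextRequestResult` discriminated union instead of a bare stream, so callers must branch on `type`. A rough sketch of consuming it: the union shape is inferred from the two `return` statements above (the real definition lives in `src/context/AgenticaContextRequestResult.ts`, not shown in this diff), and the element types plus the `mergeChunks` parameter are assumptions for illustration:

```ts
import type OpenAI from "openai";

// Union shape inferred from the two return paths in the diff above.
type AgenticaContextRequestResult =
  | { type: "stream"; value: ReadableStream<OpenAI.ChatCompletionChunk> }
  | { type: "none-stream"; value: OpenAI.ChatCompletion };

// Hypothetical consumer that always ends up with a full completion.
async function resolveCompletion(
  result: AgenticaContextRequestResult,
  mergeChunks: (chunks: OpenAI.ChatCompletionChunk[]) => OpenAI.ChatCompletion, // assumed helper
): Promise<OpenAI.ChatCompletion> {
  if (result.type === "none-stream") {
    return result.value; // non-streaming mode: the vendor already returned a complete response
  }
  // streaming mode: drain the stream, then fold the chunks into one completion
  const chunks: OpenAI.ChatCompletionChunk[] = [];
  const reader = result.value.getReader();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    chunks.push(value);
  }
  return mergeChunks(chunks);
}
```
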