@agentica/core 0.8.3-dev.20250227 → 0.8.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/LICENSE +21 -21
  2. package/README.md +404 -404
  3. package/package.json +1 -1
  4. package/prompts/cancel.md +4 -4
  5. package/prompts/common.md +2 -2
  6. package/prompts/describe.md +6 -6
  7. package/prompts/execute.md +6 -6
  8. package/prompts/initialize.md +2 -2
  9. package/prompts/select.md +6 -6
  10. package/src/Agentica.ts +318 -318
  11. package/src/chatgpt/ChatGptAgent.ts +71 -71
  12. package/src/chatgpt/ChatGptCallFunctionAgent.ts +445 -445
  13. package/src/chatgpt/ChatGptCancelFunctionAgent.ts +283 -283
  14. package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +51 -51
  15. package/src/chatgpt/ChatGptHistoryDecoder.ts +87 -87
  16. package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +88 -88
  17. package/src/chatgpt/ChatGptSelectFunctionAgent.ts +318 -318
  18. package/src/functional/createHttpLlmApplication.ts +63 -63
  19. package/src/index.ts +19 -19
  20. package/src/internal/AgenticaConstant.ts +4 -4
  21. package/src/internal/AgenticaDefaultPrompt.ts +39 -39
  22. package/src/internal/AgenticaOperationComposer.ts +82 -82
  23. package/src/internal/AgenticaPromptFactory.ts +30 -30
  24. package/src/internal/AgenticaPromptTransformer.ts +83 -83
  25. package/src/internal/AgenticaTokenUsageAggregator.ts +115 -115
  26. package/src/internal/MathUtil.ts +3 -3
  27. package/src/internal/Singleton.ts +22 -22
  28. package/src/internal/__map_take.ts +15 -15
  29. package/src/structures/IAgenticaConfig.ts +121 -121
  30. package/src/structures/IAgenticaContext.ts +128 -128
  31. package/src/structures/IAgenticaController.ts +130 -130
  32. package/src/structures/IAgenticaEvent.ts +224 -224
  33. package/src/structures/IAgenticaExecutor.ts +152 -152
  34. package/src/structures/IAgenticaOperation.ts +64 -64
  35. package/src/structures/IAgenticaOperationCollection.ts +50 -50
  36. package/src/structures/IAgenticaOperationSelection.ts +69 -69
  37. package/src/structures/IAgenticaPrompt.ts +173 -173
  38. package/src/structures/IAgenticaProps.ts +64 -64
  39. package/src/structures/IAgenticaProvider.ts +45 -45
  40. package/src/structures/IAgenticaSystemPrompt.ts +122 -122
  41. package/src/structures/IAgenticaTokenUsage.ts +107 -107
  42. package/src/structures/internal/__IChatCancelFunctionsApplication.ts +23 -23
  43. package/src/structures/internal/__IChatFunctionReference.ts +21 -21
  44. package/src/structures/internal/__IChatInitialApplication.ts +15 -15
  45. package/src/structures/internal/__IChatSelectFunctionsApplication.ts +24 -24
  46. package/src/typings/AgenticaSource.ts +6 -6
package/src/internal/AgenticaTokenUsageAggregator.ts
@@ -1,115 +1,115 @@
- import OpenAI from "openai";
-
- import { IAgenticaTokenUsage } from "../structures/IAgenticaTokenUsage";
-
- export namespace AgenticaTokenUsageAggregator {
-   export const aggregate = (props: {
-     kind: Exclude<keyof IAgenticaTokenUsage, "aggregate">;
-     completion: OpenAI.ChatCompletion;
-     usage: IAgenticaTokenUsage;
-   }): void => {
-     if (!props.completion.usage) return;
-
-     //----
-     // COMPONENT
-     //----
-     const component: IAgenticaTokenUsage.IComponent = props.usage[props.kind];
-
-     // TOTAL
-     component.total += props.completion.usage.total_tokens;
-
-     // PROMPT
-     component.input.total += props.completion.usage.prompt_tokens;
-       props.completion.usage.prompt_tokens_details?.audio_tokens ?? 0;
-     component.input.cached +=
-       props.completion.usage.prompt_tokens_details?.cached_tokens ?? 0;
-
-     // COMPLETION
-     component.output.total += props.completion.usage.total_tokens;
-     component.output.accepted_prediction +=
-       props.completion.usage.completion_tokens_details
-         ?.accepted_prediction_tokens ?? 0;
-     component.output.reasoning +=
-       props.completion.usage.completion_tokens_details?.reasoning_tokens ?? 0;
-     component.output.rejected_prediction +=
-       props.completion.usage.completion_tokens_details
-         ?.rejected_prediction_tokens ?? 0;
-
-     //----
-     // RE-AGGREGATE
-     //----
-     const sum = (getter: (comp: IAgenticaTokenUsage.IComponent) => number) =>
-       Object.entries(props.usage)
-         .filter(([key]) => key !== "aggregate")
-         .map(([_, comp]) => getter(comp))
-         .reduce((a, b) => a + b, 0);
-     const aggregate: IAgenticaTokenUsage.IComponent = props.usage.aggregate;
-     aggregate.total = sum((comp) => comp.total);
-     aggregate.input.total = sum((comp) => comp.input.total);
-     aggregate.input.cached = sum((comp) => comp.input.cached);
-     aggregate.output.total = sum((comp) => comp.output.total);
-     aggregate.output.reasoning = sum((comp) => comp.output.reasoning);
-     aggregate.output.accepted_prediction = sum(
-       (comp) => comp.output.accepted_prediction,
-     );
-     aggregate.output.rejected_prediction = sum(
-       (comp) => comp.output.rejected_prediction,
-     );
-   };
-
-   export const plus = (
-     x: IAgenticaTokenUsage,
-     y: IAgenticaTokenUsage,
-   ): IAgenticaTokenUsage => {
-     const component = (
-       a: IAgenticaTokenUsage.IComponent,
-       b: IAgenticaTokenUsage.IComponent,
-     ): IAgenticaTokenUsage.IComponent => ({
-       total: a.total + b.total,
-       input: {
-         total: a.input.total + b.input.total,
-         cached: a.input.cached + b.input.cached,
-       },
-       output: {
-         total: a.output.total + b.output.total,
-         reasoning: a.output.reasoning + b.output.reasoning,
-         accepted_prediction:
-           a.output.accepted_prediction + b.output.accepted_prediction,
-         rejected_prediction:
-           a.output.rejected_prediction + b.output.rejected_prediction,
-       },
-     });
-     return {
-       aggregate: component(x.aggregate, y.aggregate),
-       initialize: component(x.initialize, y.initialize),
-       select: component(x.select, y.select),
-       cancel: component(x.cancel, y.cancel),
-       call: component(x.call, y.call),
-       describe: component(x.describe, y.describe),
-     };
-   };
-
-   export const zero = (): IAgenticaTokenUsage => {
-     const component = (): IAgenticaTokenUsage.IComponent => ({
-       total: 0,
-       input: {
-         total: 0,
-         cached: 0,
-       },
-       output: {
-         total: 0,
-         reasoning: 0,
-         accepted_prediction: 0,
-         rejected_prediction: 0,
-       },
-     });
-     return {
-       aggregate: component(),
-       initialize: component(),
-       select: component(),
-       cancel: component(),
-       call: component(),
-       describe: component(),
-     };
-   };
- }
+ import OpenAI from "openai";
+
+ import { IAgenticaTokenUsage } from "../structures/IAgenticaTokenUsage";
+
+ export namespace AgenticaTokenUsageAggregator {
+   export const aggregate = (props: {
+     kind: Exclude<keyof IAgenticaTokenUsage, "aggregate">;
+     completion: OpenAI.ChatCompletion;
+     usage: IAgenticaTokenUsage;
+   }): void => {
+     if (!props.completion.usage) return;
+
+     //----
+     // COMPONENT
+     //----
+     const component: IAgenticaTokenUsage.IComponent = props.usage[props.kind];
+
+     // TOTAL
+     component.total += props.completion.usage.total_tokens;
+
+     // PROMPT
+     component.input.total += props.completion.usage.prompt_tokens;
+       props.completion.usage.prompt_tokens_details?.audio_tokens ?? 0;
+     component.input.cached +=
+       props.completion.usage.prompt_tokens_details?.cached_tokens ?? 0;
+
+     // COMPLETION
+     component.output.total += props.completion.usage.total_tokens;
+     component.output.accepted_prediction +=
+       props.completion.usage.completion_tokens_details
+         ?.accepted_prediction_tokens ?? 0;
+     component.output.reasoning +=
+       props.completion.usage.completion_tokens_details?.reasoning_tokens ?? 0;
+     component.output.rejected_prediction +=
+       props.completion.usage.completion_tokens_details
+         ?.rejected_prediction_tokens ?? 0;
+
+     //----
+     // RE-AGGREGATE
+     //----
+     const sum = (getter: (comp: IAgenticaTokenUsage.IComponent) => number) =>
+       Object.entries(props.usage)
+         .filter(([key]) => key !== "aggregate")
+         .map(([_, comp]) => getter(comp))
+         .reduce((a, b) => a + b, 0);
+     const aggregate: IAgenticaTokenUsage.IComponent = props.usage.aggregate;
+     aggregate.total = sum((comp) => comp.total);
+     aggregate.input.total = sum((comp) => comp.input.total);
+     aggregate.input.cached = sum((comp) => comp.input.cached);
+     aggregate.output.total = sum((comp) => comp.output.total);
+     aggregate.output.reasoning = sum((comp) => comp.output.reasoning);
+     aggregate.output.accepted_prediction = sum(
+       (comp) => comp.output.accepted_prediction,
+     );
+     aggregate.output.rejected_prediction = sum(
+       (comp) => comp.output.rejected_prediction,
+     );
+   };
+
+   export const plus = (
+     x: IAgenticaTokenUsage,
+     y: IAgenticaTokenUsage,
+   ): IAgenticaTokenUsage => {
+     const component = (
+       a: IAgenticaTokenUsage.IComponent,
+       b: IAgenticaTokenUsage.IComponent,
+     ): IAgenticaTokenUsage.IComponent => ({
+       total: a.total + b.total,
+       input: {
+         total: a.input.total + b.input.total,
+         cached: a.input.cached + b.input.cached,
+       },
+       output: {
+         total: a.output.total + b.output.total,
+         reasoning: a.output.reasoning + b.output.reasoning,
+         accepted_prediction:
+           a.output.accepted_prediction + b.output.accepted_prediction,
+         rejected_prediction:
+           a.output.rejected_prediction + b.output.rejected_prediction,
+       },
+     });
+     return {
+       aggregate: component(x.aggregate, y.aggregate),
+       initialize: component(x.initialize, y.initialize),
+       select: component(x.select, y.select),
+       cancel: component(x.cancel, y.cancel),
+       call: component(x.call, y.call),
+       describe: component(x.describe, y.describe),
+     };
+   };
+
+   export const zero = (): IAgenticaTokenUsage => {
+     const component = (): IAgenticaTokenUsage.IComponent => ({
+       total: 0,
+       input: {
+         total: 0,
+         cached: 0,
+       },
+       output: {
+         total: 0,
+         reasoning: 0,
+         accepted_prediction: 0,
+         rejected_prediction: 0,
+       },
+     });
+     return {
+       aggregate: component(),
+       initialize: component(),
+       select: component(),
+       cancel: component(),
+       call: component(),
+       describe: component(),
+     };
+   };
+ }
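For orientation (not part of the diff), here is a minimal sketch of how the unchanged aggregator above might be driven. The relative import paths are illustrative and assume code placed next to package/src; since the module lives under src/internal, it may not be part of the package's public exports.

```ts
import OpenAI from "openai";

import { AgenticaTokenUsageAggregator } from "./internal/AgenticaTokenUsageAggregator";
import { IAgenticaTokenUsage } from "./structures/IAgenticaTokenUsage";

// Start from an all-zero record, then fold each ChatCompletion's usage
// into the bucket ("initialize" | "select" | "cancel" | "call" | "describe")
// of the internal agent that produced it.
const usage: IAgenticaTokenUsage = AgenticaTokenUsageAggregator.zero();

const onCompletion = (
  kind: Exclude<keyof IAgenticaTokenUsage, "aggregate">,
  completion: OpenAI.ChatCompletion,
): void => AgenticaTokenUsageAggregator.aggregate({ kind, completion, usage });

// Two usage records (e.g. from two separate sessions) can be merged field by field.
const merged: IAgenticaTokenUsage = AgenticaTokenUsageAggregator.plus(
  usage,
  AgenticaTokenUsageAggregator.zero(),
);
```

Note that `kind` excludes `"aggregate"`: the aggregate bucket is never written directly, but is recomputed from the other buckets by the RE-AGGREGATE block on every call.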
package/src/internal/MathUtil.ts
@@ -1,3 +1,3 @@
- export namespace MathUtil {
-   export const round = (value: number): number => Math.floor(value * 100) / 100;
- }
+ export namespace MathUtil {
+   export const round = (value: number): number => Math.floor(value * 100) / 100;
+ }
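One behavioral note on the unchanged helper above (an observation, not a change in this release): `MathUtil.round` floors to two decimal places rather than rounding to the nearest value.

```ts
// Illustrative only; the import path assumes code placed next to package/src.
import { MathUtil } from "./internal/MathUtil";

MathUtil.round(3.14159); // 3.14
MathUtil.round(2.999);   // 2.99 — floored to two decimals, not rounded to nearest
```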
package/src/internal/Singleton.ts
@@ -1,22 +1,22 @@
- /**
-  * @internal
-  */
- export class Singleton<T, Args extends any[] = []> {
-   private readonly closure_: (...args: Args) => T;
-   private value_: T | object;
-
-   public constructor(closure: (...args: Args) => T) {
-     this.closure_ = closure;
-     this.value_ = NOT_MOUNTED_YET;
-   }
-
-   public get(...args: Args): T {
-     if (this.value_ === NOT_MOUNTED_YET) this.value_ = this.closure_(...args);
-     return this.value_ as T;
-   }
- }
-
- /**
-  * @internal
-  */
- const NOT_MOUNTED_YET = {};
+ /**
+  * @internal
+  */
+ export class Singleton<T, Args extends any[] = []> {
+   private readonly closure_: (...args: Args) => T;
+   private value_: T | object;
+
+   public constructor(closure: (...args: Args) => T) {
+     this.closure_ = closure;
+     this.value_ = NOT_MOUNTED_YET;
+   }
+
+   public get(...args: Args): T {
+     if (this.value_ === NOT_MOUNTED_YET) this.value_ = this.closure_(...args);
+     return this.value_ as T;
+   }
+ }
+
+ /**
+  * @internal
+  */
+ const NOT_MOUNTED_YET = {};
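For orientation, the unchanged `Singleton` helper defers its closure until the first `get()` call and caches the result; later calls return the cached value and ignore their arguments. A minimal sketch (the import path is illustrative, assuming code placed next to package/src):

```ts
import { Singleton } from "./internal/Singleton";

let calls = 0;
const greeting = new Singleton((name: string) => `hello ${name} (#${++calls})`);

greeting.get("agentica"); // "hello agentica (#1)" — the closure runs on first access
greeting.get("ignored");  // "hello agentica (#1)" — cached; later arguments are ignored
```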
package/src/internal/__map_take.ts
@@ -1,15 +1,15 @@
- /**
-  * @internal
-  */
- export const __map_take = <Key, T>(
-   dict: Map<Key, T>,
-   key: Key,
-   generator: () => T,
- ): T => {
-   const oldbie: T | undefined = dict.get(key);
-   if (oldbie) return oldbie;
-
-   const value: T = generator();
-   dict.set(key, value);
-   return value;
- };
+ /**
+  * @internal
+  */
+ export const __map_take = <Key, T>(
+   dict: Map<Key, T>,
+   key: Key,
+   generator: () => T,
+ ): T => {
+   const oldbie: T | undefined = dict.get(key);
+   if (oldbie) return oldbie;
+
+   const value: T = generator();
+   dict.set(key, value);
+   return value;
+ };
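`__map_take` is a get-or-create helper for `Map`: it returns the cached entry if one exists, otherwise it runs the generator and stores the result. Because it checks `if (oldbie)` rather than `dict.has(key)`, a falsy cached value such as `0` or `""` would be regenerated. A minimal sketch (illustrative import path, assuming code placed next to package/src):

```ts
import { __map_take } from "./internal/__map_take";

const selections = new Map<string, string[]>();

// Returns the existing array for "bbs_articles", or creates it on first access.
const reasons: string[] = __map_take(selections, "bbs_articles", () => []);
reasons.push("user asked to list articles");
```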
package/src/structures/IAgenticaConfig.ts
@@ -1,121 +1,121 @@
- import { IAgenticaContext } from "./IAgenticaContext";
- import { IAgenticaExecutor } from "./IAgenticaExecutor";
- import { IAgenticaPrompt } from "./IAgenticaPrompt";
- import { IAgenticaSystemPrompt } from "./IAgenticaSystemPrompt";
-
- /**
-  * Configuration for Nestia Agent.
-  *
-  * `IAgenticaConfig` is an interface that defines the configuration
-  * properties of the {@link Agentica}. With this configuration, you
-  * can set the user's locale, timezone, and some of system prompts.
-  *
-  * Also, you can affect to the LLM function selecing/calling logic by
-  * configuring additional properties. For an example, if you configure the
-  * {@link capacity} property, the A.I. chatbot will divide the functions
-  * into the several groups with the configured capacity and select proper
-  * functions to call by operating the multiple LLM function selecting
-  * agents parallelly.
-  *
-  * @author Samchon
-  */
- export interface IAgenticaConfig {
-   /**
-    * Locale of the A.I. chatbot.
-    *
-    * If you configure this property, the A.I. chatbot will conversate with
-    * the given locale. You can get the locale value by
-    *
-    * - Browser: `navigator.language`
-    * - NodeJS: `process.env.LANG.split(".")[0]`
-    *
-    * @default your_locale
-    */
-   locale?: string;
-
-   /**
-    * Timezone of the A.I. chatbot.
-    *
-    * If you configure this property, the A.I. chatbot will consider the
-    * given timezone. You can get the timezone value by
-    * `Intl.DateTimeFormat().resolvedOptions().timeZone`.
-    *
-    * @default your_timezone
-    */
-   timezone?: string;
-
-   /**
-    * Retry count.
-    *
-    * If LLM function calling composed arguments are invalid,
-    * the A.I. chatbot will retry to call the function with
-    * the modified arguments.
-    *
-    * By the way, if you configure it to 0 or 1, the A.I. chatbot
-    * will not retry the LLM function calling for correcting the
-    * arguments.
-    *
-    * @default 3
-    */
-   retry?: number;
-
-   /**
-    * Capacity of the LLM function selecting.
-    *
-    * When the A.I. chatbot selects a proper function to call, if the
-    * number of functions registered in the
-    * {@link IAgenticaProps.applications} is too much greater,
-    * the A.I. chatbot often fallen into the hallucination.
-    *
-    * In that case, if you configure this property value, `Agentica`
-    * will divide the functions into the several groups with the configured
-    * capacity and select proper functions to call by operating the multiple
-    * LLM function selecting agents parallelly.
-    *
-    * @default 100
-    */
-   capacity?: number;
-
-   /**
-    * Eliticism for the LLM function selecting.
-    *
-    * If you configure {@link capacity}, the A.I. chatbot will complete
-    * the candidate functions to call which are selected by the multiple
-    * LLM function selecting agents.
-    *
-    * Otherwise you configure this property as `false`, the A.I. chatbot
-    * will not complete the candidate functions to call and just accept
-    * every candidate functions to call which are selected by the multiple
-    * LLM function selecting agents.
-    *
-    * @default true
-    */
-   eliticism?: boolean;
-
-   /**
-    * System prompt messages.
-    *
-    * System prompt messages if you want to customize the system prompt
-    * messages for each situation.
-    */
-   systemPrompt?: IAgenticaSystemPrompt;
-
-   /**
-    * Agent executor.
-    *
-    * Executor function of Agentic AI's iteration plan to internal agents
-    * running by the {@link Agentica.conversate} function.
-    *
-    * If you want to customize the agent execution plan, you can do it
-    * by assigning you logic function of entire or partial to this property.
-    * When customizing it, it would better to reference the
-    * {@link ChatGptAgent.execute} function.
-    *
-    * @param ctx Context of the agent
-    * @returns Lit of prompts generated by the executor
-    * @default ChatGptAgent.execute
-    */
-   executor?:
-     | Partial<IAgenticaExecutor>
-     | ((ctx: IAgenticaContext) => Promise<IAgenticaPrompt[]>);
- }
+ import { IAgenticaContext } from "./IAgenticaContext";
+ import { IAgenticaExecutor } from "./IAgenticaExecutor";
+ import { IAgenticaPrompt } from "./IAgenticaPrompt";
+ import { IAgenticaSystemPrompt } from "./IAgenticaSystemPrompt";
+
+ /**
+  * Configuration for Nestia Agent.
+  *
+  * `IAgenticaConfig` is an interface that defines the configuration
+  * properties of the {@link Agentica}. With this configuration, you
+  * can set the user's locale, timezone, and some of system prompts.
+  *
+  * Also, you can affect to the LLM function selecing/calling logic by
+  * configuring additional properties. For an example, if you configure the
+  * {@link capacity} property, the A.I. chatbot will divide the functions
+  * into the several groups with the configured capacity and select proper
+  * functions to call by operating the multiple LLM function selecting
+  * agents parallelly.
+  *
+  * @author Samchon
+  */
+ export interface IAgenticaConfig {
+   /**
+    * Locale of the A.I. chatbot.
+    *
+    * If you configure this property, the A.I. chatbot will conversate with
+    * the given locale. You can get the locale value by
+    *
+    * - Browser: `navigator.language`
+    * - NodeJS: `process.env.LANG.split(".")[0]`
+    *
+    * @default your_locale
+    */
+   locale?: string;
+
+   /**
+    * Timezone of the A.I. chatbot.
+    *
+    * If you configure this property, the A.I. chatbot will consider the
+    * given timezone. You can get the timezone value by
+    * `Intl.DateTimeFormat().resolvedOptions().timeZone`.
+    *
+    * @default your_timezone
+    */
+   timezone?: string;
+
+   /**
+    * Retry count.
+    *
+    * If LLM function calling composed arguments are invalid,
+    * the A.I. chatbot will retry to call the function with
+    * the modified arguments.
+    *
+    * By the way, if you configure it to 0 or 1, the A.I. chatbot
+    * will not retry the LLM function calling for correcting the
+    * arguments.
+    *
+    * @default 3
+    */
+   retry?: number;
+
+   /**
+    * Capacity of the LLM function selecting.
+    *
+    * When the A.I. chatbot selects a proper function to call, if the
+    * number of functions registered in the
+    * {@link IAgenticaProps.applications} is too much greater,
+    * the A.I. chatbot often fallen into the hallucination.
+    *
+    * In that case, if you configure this property value, `Agentica`
+    * will divide the functions into the several groups with the configured
+    * capacity and select proper functions to call by operating the multiple
+    * LLM function selecting agents parallelly.
+    *
+    * @default 100
+    */
+   capacity?: number;
+
+   /**
+    * Eliticism for the LLM function selecting.
+    *
+    * If you configure {@link capacity}, the A.I. chatbot will complete
+    * the candidate functions to call which are selected by the multiple
+    * LLM function selecting agents.
+    *
+    * Otherwise you configure this property as `false`, the A.I. chatbot
+    * will not complete the candidate functions to call and just accept
+    * every candidate functions to call which are selected by the multiple
+    * LLM function selecting agents.
+    *
+    * @default true
+    */
+   eliticism?: boolean;
+
+   /**
+    * System prompt messages.
+    *
+    * System prompt messages if you want to customize the system prompt
+    * messages for each situation.
+    */
+   systemPrompt?: IAgenticaSystemPrompt;
+
+   /**
+    * Agent executor.
+    *
+    * Executor function of Agentic AI's iteration plan to internal agents
+    * running by the {@link Agentica.conversate} function.
+    *
+    * If you want to customize the agent execution plan, you can do it
+    * by assigning you logic function of entire or partial to this property.
+    * When customizing it, it would better to reference the
+    * {@link ChatGptAgent.execute} function.
+    *
+    * @param ctx Context of the agent
+    * @returns Lit of prompts generated by the executor
+    * @default ChatGptAgent.execute
+    */
+   executor?:
+     | Partial<IAgenticaExecutor>
+     | ((ctx: IAgenticaContext) => Promise<IAgenticaPrompt[]>);
+ }
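The configuration interface is republished unchanged in this release; for reference, a configuration object exercising the documented properties could look like the following sketch (all values are illustrative, and the import path assumes code placed next to package/src):

```ts
import { IAgenticaConfig } from "./structures/IAgenticaConfig";

const config: IAgenticaConfig = {
  locale: "en-US",                                            // e.g. navigator.language in a browser
  timezone: Intl.DateTimeFormat().resolvedOptions().timeZone, // e.g. "Asia/Seoul"
  retry: 3,        // re-compose invalid function-calling arguments up to 3 times
  capacity: 100,   // split registered functions into groups of 100 for parallel selection
  eliticism: true, // per the doc above, narrow the candidates chosen by the parallel selectors
  // systemPrompt and executor omitted: the default prompts and ChatGptAgent.execute are used
};
```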