@agentica/core 0.10.1-dev.20250302 → 0.10.1
This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/LICENSE +21 -21
- package/README.md +419 -419
- package/package.json +1 -1
- package/prompts/cancel.md +4 -4
- package/prompts/common.md +2 -2
- package/prompts/describe.md +6 -6
- package/prompts/execute.md +6 -6
- package/prompts/initialize.md +2 -2
- package/prompts/select.md +6 -6
- package/src/Agentica.ts +323 -323
- package/src/chatgpt/ChatGptAgent.ts +75 -75
- package/src/chatgpt/ChatGptCallFunctionAgent.ts +464 -464
- package/src/chatgpt/ChatGptCancelFunctionAgent.ts +287 -287
- package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +52 -52
- package/src/chatgpt/ChatGptHistoryDecoder.ts +88 -88
- package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +88 -88
- package/src/chatgpt/ChatGptSelectFunctionAgent.ts +319 -319
- package/src/functional/createHttpLlmApplication.ts +63 -63
- package/src/index.ts +19 -19
- package/src/internal/AgenticaConstant.ts +4 -4
- package/src/internal/AgenticaDefaultPrompt.ts +43 -43
- package/src/internal/AgenticaOperationComposer.ts +87 -87
- package/src/internal/AgenticaPromptFactory.ts +32 -32
- package/src/internal/AgenticaPromptTransformer.ts +86 -86
- package/src/internal/AgenticaTokenUsageAggregator.ts +115 -115
- package/src/internal/MathUtil.ts +3 -3
- package/src/internal/Singleton.ts +22 -22
- package/src/internal/__map_take.ts +15 -15
- package/src/structures/IAgenticaConfig.ts +123 -123
- package/src/structures/IAgenticaContext.ts +129 -129
- package/src/structures/IAgenticaController.ts +133 -133
- package/src/structures/IAgenticaEvent.ts +229 -229
- package/src/structures/IAgenticaExecutor.ts +156 -156
- package/src/structures/IAgenticaOperation.ts +63 -63
- package/src/structures/IAgenticaOperationCollection.ts +52 -52
- package/src/structures/IAgenticaOperationSelection.ts +68 -68
- package/src/structures/IAgenticaPrompt.ts +182 -182
- package/src/structures/IAgenticaProps.ts +70 -70
- package/src/structures/IAgenticaSystemPrompt.ts +124 -124
- package/src/structures/IAgenticaTokenUsage.ts +107 -107
- package/src/structures/IAgenticaVendor.ts +39 -39
- package/src/structures/internal/__IChatCancelFunctionsApplication.ts +23 -23
- package/src/structures/internal/__IChatFunctionReference.ts +21 -21
- package/src/structures/internal/__IChatInitialApplication.ts +15 -15
- package/src/structures/internal/__IChatSelectFunctionsApplication.ts +24 -24
- package/src/typings/AgenticaSource.ts +6 -6
In every hunk below, the removed (`-`) and added (`+`) sides are identical, so each file's content is shown once.

`package/src/structures/IAgenticaSystemPrompt.ts` (@@ -1,124 +1,124 @@)

```ts
import { ILlmSchema } from "@samchon/openapi";

import { IAgenticaConfig } from "./IAgenticaConfig";
import { IAgenticaPrompt } from "./IAgenticaPrompt";

/**
 * System prompt collection of the A.I. chatbot.
 *
 * `IAgenticaSystemPrompt` is a type that represents a collection of system
 * prompts used by the A.I. chatbot of {@link Agentica}.
 *
 * You can customize the system prompts by configuring the
 * {@link IAgenticaConfig.systemPrompt} property when creating a new
 * {@link Agentica} instance.
 *
 * If you don't configure any system prompts, the default system prompts
 * are used, which are written as markdown documents in the below directory.
 *
 * - https://github.com/samchon/nestia/tree/master/packages/agent/prompts
 *
 * @author Samchon
 */
export interface IAgenticaSystemPrompt<Model extends ILlmSchema.Model> {
  /**
   * Common system prompt that would be used in every situation.
   *
   * @param config Configuration of the agent
   * @returns The common system prompt
   * @default https://github.com/samchon/nestia/blob/master/packages/agent/prompts/common.md
   */
  common?: (config?: IAgenticaConfig<Model> | undefined) => string;

  /**
   * Initialize system prompt.
   *
   * When the A.I. chatbot has not been informed of any functions yet,
   * because the user has not implied any function calling request yet,
   * {@link Agentica} says that it is a circumstance where nothing has
   * been initialized yet.
   *
   * In that case, the `initialize` system prompt would be used. You can
   * customize the `initialize` system prompt by assigning this function
   * with the given {@link IAgenticaPrompt histories} parameter.
   *
   * @param histories Histories of the previous prompts
   * @returns Initialize system prompt
   * @default https://github.com/samchon/nestia/blob/master/packages/agent/prompts/initialize.md
   */
  initialize?: (histories: IAgenticaPrompt<Model>[]) => string;

  /**
   * Select system prompt.
   *
   * {@link Agentica} has a process of selecting some candidate
   * functions to call by asking the A.I. agent with the previous
   * prompt histories.
   *
   * In that case, this `select` system prompt would be used. You can
   * customize it by assigning this function with the given
   * {@link IAgenticaPrompt histories} parameter.
   *
   * Note that `"select"` means only the function selection. It does
   * not include filling arguments or executing the function; it
   * covers only the selection process.
   *
   * @param histories Histories of the previous prompts
   * @returns Select system prompt
   * @default https://github.com/samchon/nestia/blob/master/packages/agent/prompts/select.md
   */
  select?: (histories: IAgenticaPrompt<Model>[]) => string;

  /**
   * Cancel system prompt.
   *
   * {@link Agentica} has a process of canceling some candidate
   * functions to call by asking the A.I. agent with the previous
   * prompt histories.
   *
   * In that case, this `cancel` system prompt would be used. You can
   * customize it by assigning this function with the given
   * {@link IAgenticaPrompt histories} parameter.
   *
   * @param histories Histories of the previous prompts
   * @returns Cancel system prompt
   * @default https://github.com/samchon/nestia/blob/master/packages/agent/prompts/cancel.md
   */
  cancel?: (histories: IAgenticaPrompt<Model>[]) => string;

  /**
   * Execute system prompt.
   *
   * {@link Agentica} has a process of filling the arguments of the
   * selected candidate functions through the LLM (Large Language Model)
   * function calling feature with the previous prompt histories, and of
   * executing the argument-filled functions with validation feedback.
   *
   * In that case, this `execute` system prompt would be used. You can
   * customize it by assigning this function with the given
   * {@link IAgenticaPrompt histories} parameter.
   *
   * @param histories Histories of the previous prompts
   * @returns Execute system prompt
   * @default https://github.com/samchon/nestia/blob/master/packages/agent/prompts/execute.md
   */
  execute?: (histories: IAgenticaPrompt<Model>[]) => string;

  /**
   * Describe system prompt.
   *
   * {@link Agentica} has a process of describing the return values of
   * the executed functions by requesting the A.I. agent with the
   * previous prompt histories.
   *
   * In that case, this `describe` system prompt would be used. You can
   * customize it by assigning this function with the given
   * {@link IAgenticaPrompt histories} parameter.
   *
   * @param histories Histories of the previous prompts
   * @returns Describe system prompt
   * @default https://github.com/samchon/nestia/blob/master/packages/agent/prompts/describe.md
   */
  describe?: (histories: IAgenticaPrompt.IExecute<Model>[]) => string;
}
```
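For illustration, a minimal sketch of what a custom prompt set could look like. It assumes `IAgenticaSystemPrompt` is re-exported from the package root (as the other structure types appear to be), and the prompt texts themselves are placeholders, not the package's defaults:

```ts
import { IAgenticaSystemPrompt } from "@agentica/core"; // assumed re-export path

// Hypothetical custom prompts; they would be passed through
// `IAgenticaConfig.systemPrompt` when constructing an Agentica instance.
const systemPrompt: IAgenticaSystemPrompt<"chatgpt"> = {
  select: (histories) =>
    [
      "Select only the functions that directly satisfy the user's request.",
      `Consider the ${histories.length} previous prompt histories before deciding.`,
    ].join("\n"),
  describe: (executes) =>
    `Summarize the results of the ${executes.length} executed function(s) in markdown.`,
};
```

Any property left unset falls back to the corresponding default markdown document linked in the `@default` tags above.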
`package/src/structures/IAgenticaTokenUsage.ts` (@@ -1,107 +1,107 @@)

```ts
/**
 * Token usage information from the A.I. chatbot.
 *
 * `IAgenticaTokenUsage` is a structure representing the token usage
 * information from the {@link Agentica} class. You can get the token
 * usage information by calling the {@link Agentica.getTokenUsage} method.
 *
 * For reference, `IAgenticaTokenUsage` provides only the token usage
 * information and does not contain any price or cost information,
 * because the price or cost can change for the reasons below.
 *
 * - Type of {@link IAgenticaProps.vendor LLM vendor}
 * - {@link IAgenticaVendor.model} in the LLM vendor
 * - A policy change by the LLM vendor company
 *
 * @author Samchon
 */
export interface IAgenticaTokenUsage {
  /**
   * Aggregated token usage.
   */
  aggregate: IAgenticaTokenUsage.IComponent;

  /**
   * Token usage of the initializer agent.
   */
  initialize: IAgenticaTokenUsage.IComponent;

  /**
   * Token usage of the function selector agent.
   */
  select: IAgenticaTokenUsage.IComponent;

  /**
   * Token usage of the function canceler agent.
   */
  cancel: IAgenticaTokenUsage.IComponent;

  /**
   * Token usage of the function caller agent.
   */
  call: IAgenticaTokenUsage.IComponent;

  /**
   * Token usage of the function calling describer agent.
   */
  describe: IAgenticaTokenUsage.IComponent;
}
export namespace IAgenticaTokenUsage {
  export interface IComponent {
    /**
     * Total token usage.
     */
    total: number;

    /**
     * Detailed input token usage.
     */
    input: IInput;

    /**
     * Detailed output token usage.
     */
    output: IOutput;
  }

  /**
   * Detailed input token usage.
   */
  export interface IInput {
    /**
     * Total amount of input token usage.
     */
    total: number;

    /**
     * Cached token usage.
     */
    cached: number;
  }

  /**
   * Detailed output token usage.
   */
  export interface IOutput {
    /**
     * Total amount of output token usage.
     */
    total: number;

    /**
     * Reasoning token usage.
     */
    reasoning: number;

    /**
     * Accepted prediction token usage.
     */
    accepted_prediction: number;

    /**
     * Rejected prediction token usage.
     */
    rejected_prediction: number;
  }
}
```
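As a usage illustration, a small helper that formats an `IAgenticaTokenUsage` value, such as one returned by `Agentica.getTokenUsage()` as documented above. The root import path is an assumption; everything else follows directly from the interface:

```ts
import { IAgenticaTokenUsage } from "@agentica/core"; // assumed re-export path

// Format one usage component (total / input / output breakdown).
function formatComponent(
  name: string,
  c: IAgenticaTokenUsage.IComponent,
): string {
  return (
    `${name}: total=${c.total}, ` +
    `input=${c.input.total} (cached ${c.input.cached}), ` +
    `output=${c.output.total} (reasoning ${c.output.reasoning})`
  );
}

// Summarize the aggregate and the per-agent components.
export function summarizeTokenUsage(usage: IAgenticaTokenUsage): string {
  return [
    formatComponent("aggregate", usage.aggregate),
    formatComponent("initialize", usage.initialize),
    formatComponent("select", usage.select),
    formatComponent("cancel", usage.cancel),
    formatComponent("call", usage.call),
    formatComponent("describe", usage.describe),
  ].join("\n");
}
```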
`package/src/structures/IAgenticaVendor.ts` (@@ -1,39 +1,39 @@)

```ts
import OpenAI from "openai";

/**
 * LLM service vendor for Nestia Chat.
 *
 * `IAgenticaVendor` is a type that represents an LLM
 * (Large Language Model) vendor of the {@link Agentica}.
 *
 * Currently, {@link Agentica} supports the OpenAI SDK. However, this does
 * not mean that you can use only OpenAI's GPT models in
 * {@link Agentica}. The OpenAI SDK is just a connection tool to the
 * LLM vendor's API, and you can use other LLM vendors by configuring
 * its `baseURL` and API key.
 *
 * Therefore, if you want to use another LLM vendor like Claude or
 * Gemini, configure the `baseURL` of the {@link api}, and set the
 * {@link IAgenticaController}'s schema model to `"claude"` or
 * `"gemini"`.
 *
 * @author Samchon
 */
export interface IAgenticaVendor {
  /**
   * OpenAI API instance.
   */
  api: OpenAI;

  /**
   * Chat model to be used.
   *
   * `({} & string)` allows arbitrary model names, supporting third-party
   * hosting clouds (e.g., OpenRouter, AWS).
   */
  model: OpenAI.ChatModel | ({} & string);

  /**
   * Options for the request.
   */
  options?: OpenAI.RequestOptions | undefined;
}
```
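To illustrate the `baseURL` remark above, here is a sketch of a vendor configuration routed through an OpenAI-compatible third-party host. The endpoint, environment variable, and model id are placeholder examples, not values prescribed by the package:

```ts
import OpenAI from "openai";
import { IAgenticaVendor } from "@agentica/core"; // assumed re-export path

// The OpenAI SDK acts only as the HTTP client; pointing `baseURL` at an
// OpenAI-compatible endpoint lets other vendors' models be used.
const vendor: IAgenticaVendor = {
  api: new OpenAI({
    apiKey: process.env.OPENROUTER_API_KEY ?? "", // placeholder credential
    baseURL: "https://openrouter.ai/api/v1", // example third-party host
  }),
  // `({} & string)` admits arbitrary model ids beyond OpenAI.ChatModel.
  model: "anthropic/claude-3.5-sonnet",
};
```

The matching schema model (`"claude"`, `"gemini"`, and so on) would then be set on the controller side, as the comment above notes.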
`package/src/structures/internal/__IChatCancelFunctionsApplication.ts` (@@ -1,23 +1,23 @@)

```ts
import { __IChatFunctionReference } from "./__IChatFunctionReference";

export interface __IChatCancelFunctionsApplication {
  /**
   * Cancel a function from the candidate list to call.
   *
   * If you, the A.I. agent, have understood from the conversation that the
   * user wants to cancel some candidate functions to call, please cancel
   * them through this function.
   *
   * Also, when you find a function that has been selected by candidate
   * pooling, cancel it by calling this function. For reference, candidate
   * pooling means that the user wants only one function to call, but the A.I.
   * agent selects multiple candidate functions because it can't pin down a
   * single one due to a lack of specificity or homogeneity among the
   * candidate functions.
   *
   * Additionally, if you want to cancel the same function multiple times,
   * you can do so by listing the same function name multiple times in the
   * `functions` property.
   *
   * @param props Properties of the function
   */
  cancelFunctions(props: __IChatFunctionReference.IProps): Promise<void>;
}
```
`package/src/structures/internal/__IChatFunctionReference.ts` (@@ -1,21 +1,21 @@)

```ts
export interface __IChatFunctionReference {
  /**
   * The reason for the function selection.
   *
   * Just write the reason why you've determined to select this function.
   */
  reason: string;

  /**
   * Name of the target function to call.
   */
  name: string;
}
export namespace __IChatFunctionReference {
  export interface IProps {
    /**
     * List of target functions.
     */
    functions: __IChatFunctionReference[];
  }
}
```
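For example, an argument object conforming to `__IChatFunctionReference.IProps`, the parameter shape of `cancelFunctions()` above, might look as follows. The function name is a hypothetical placeholder:

```ts
// Hypothetical payload for cancelFunctions(); each entry names a candidate
// function and the reasoning behind touching it.
const props = {
  functions: [
    {
      name: "shoppings_sale_index", // placeholder function name
      reason: "The user asked to browse the products currently on sale.",
    },
  ],
};
```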
`package/src/structures/internal/__IChatInitialApplication.ts` (@@ -1,15 +1,15 @@)

```ts
import { IHttpLlmFunction } from "@samchon/openapi";

export interface __IChatInitialApplication {
  /**
   * Get the list of API functions.
   *
   * If the user seems to be requesting some function calling other than this
   * one, call this `getApiFunctions()` to get the list of candidate API
   * functions provided by this application.
   *
   * Also, if the user just wants to list every remote API function that can
   * be called from the backend server, utilize this function too.
   */
  getApiFunctions({}): Promise<Array<IHttpLlmFunction<"chatgpt">>>;
}
```
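A minimal, purely illustrative sketch of an object satisfying this interface by serving a pre-computed candidate list; how the package actually wires this internally is not shown in this diff, so the factory below is hypothetical:

```ts
import { IHttpLlmFunction } from "@samchon/openapi";

// Hypothetical factory: returns an object whose getApiFunctions() resolves to
// a fixed list of candidate API functions, matching the signature above.
const makeInitialApplication = (functions: IHttpLlmFunction<"chatgpt">[]) => ({
  getApiFunctions: async (): Promise<Array<IHttpLlmFunction<"chatgpt">>> =>
    functions,
});
```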