@agentica/core 0.12.2-dev.20250314 → 0.12.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +461 -461
- package/lib/context/AgenticaTokenUsage.d.ts +6 -6
- package/package.json +1 -1
- package/prompts/cancel.md +4 -4
- package/prompts/common.md +2 -2
- package/prompts/describe.md +6 -6
- package/prompts/execute.md +6 -6
- package/prompts/initialize.md +2 -2
- package/prompts/select.md +6 -6
- package/src/Agentica.ts +359 -359
- package/src/chatgpt/ChatGptAgent.ts +76 -76
- package/src/chatgpt/ChatGptCallFunctionAgent.ts +466 -466
- package/src/chatgpt/ChatGptCancelFunctionAgent.ts +280 -280
- package/src/chatgpt/ChatGptCompletionMessageUtil.ts +166 -166
- package/src/chatgpt/ChatGptDescribeFunctionAgent.ts +122 -122
- package/src/chatgpt/ChatGptHistoryDecoder.ts +88 -88
- package/src/chatgpt/ChatGptInitializeFunctionAgent.ts +96 -96
- package/src/chatgpt/ChatGptSelectFunctionAgent.ts +311 -311
- package/src/chatgpt/ChatGptUsageAggregator.ts +62 -62
- package/src/context/AgenticaCancelPrompt.ts +32 -32
- package/src/context/AgenticaClassOperation.ts +23 -23
- package/src/context/AgenticaContext.ts +130 -130
- package/src/context/AgenticaHttpOperation.ts +27 -27
- package/src/context/AgenticaOperation.ts +66 -66
- package/src/context/AgenticaOperationBase.ts +57 -57
- package/src/context/AgenticaOperationCollection.ts +52 -52
- package/src/context/AgenticaOperationSelection.ts +27 -27
- package/src/context/AgenticaTokenUsage.ts +170 -170
- package/src/context/internal/AgenticaTokenUsageAggregator.ts +66 -66
- package/src/context/internal/__IChatCancelFunctionsApplication.ts +23 -23
- package/src/context/internal/__IChatFunctionReference.ts +21 -21
- package/src/context/internal/__IChatInitialApplication.ts +15 -15
- package/src/context/internal/__IChatSelectFunctionsApplication.ts +24 -24
- package/src/events/AgenticaCallEvent.ts +36 -36
- package/src/events/AgenticaCancelEvent.ts +28 -28
- package/src/events/AgenticaDescribeEvent.ts +66 -66
- package/src/events/AgenticaEvent.ts +36 -36
- package/src/events/AgenticaEventBase.ts +7 -7
- package/src/events/AgenticaEventSource.ts +6 -6
- package/src/events/AgenticaExecuteEvent.ts +50 -50
- package/src/events/AgenticaInitializeEvent.ts +14 -14
- package/src/events/AgenticaRequestEvent.ts +45 -45
- package/src/events/AgenticaResponseEvent.ts +48 -48
- package/src/events/AgenticaSelectEvent.ts +37 -37
- package/src/events/AgenticaTextEvent.ts +62 -62
- package/src/functional/assertHttpLlmApplication.ts +55 -55
- package/src/functional/validateHttpLlmApplication.ts +66 -66
- package/src/index.ts +44 -44
- package/src/internal/AgenticaConstant.ts +4 -4
- package/src/internal/AgenticaDefaultPrompt.ts +43 -43
- package/src/internal/AgenticaOperationComposer.ts +96 -96
- package/src/internal/ByteArrayUtil.ts +5 -5
- package/src/internal/MPSCUtil.ts +111 -111
- package/src/internal/MathUtil.ts +3 -3
- package/src/internal/Singleton.ts +22 -22
- package/src/internal/StreamUtil.ts +64 -64
- package/src/internal/__map_take.ts +15 -15
- package/src/json/IAgenticaEventJson.ts +178 -178
- package/src/json/IAgenticaOperationJson.ts +36 -36
- package/src/json/IAgenticaOperationSelectionJson.ts +19 -19
- package/src/json/IAgenticaPromptJson.ts +130 -130
- package/src/json/IAgenticaTokenUsageJson.ts +107 -107
- package/src/prompts/AgenticaCancelPrompt.ts +32 -32
- package/src/prompts/AgenticaDescribePrompt.ts +41 -41
- package/src/prompts/AgenticaExecutePrompt.ts +52 -52
- package/src/prompts/AgenticaPrompt.ts +14 -14
- package/src/prompts/AgenticaPromptBase.ts +27 -27
- package/src/prompts/AgenticaSelectPrompt.ts +32 -32
- package/src/prompts/AgenticaTextPrompt.ts +31 -31
- package/src/structures/IAgenticaConfig.ts +123 -123
- package/src/structures/IAgenticaController.ts +133 -133
- package/src/structures/IAgenticaExecutor.ts +157 -157
- package/src/structures/IAgenticaProps.ts +69 -69
- package/src/structures/IAgenticaSystemPrompt.ts +125 -125
- package/src/structures/IAgenticaVendor.ts +39 -39
- package/src/transformers/AgenticaEventTransformer.ts +165 -165
- package/src/transformers/AgenticaPromptTransformer.ts +134 -134

package/src/prompts/AgenticaExecutePrompt.ts

@@ -1,52 +1,52 @@
import { IHttpResponse, ILlmSchema } from "@samchon/openapi";

import { AgenticaOperation } from "../context/AgenticaOperation";
import { IAgenticaPromptJson } from "../json/IAgenticaPromptJson";
import { AgenticaPromptBase } from "./AgenticaPromptBase";

export class AgenticaExecutePrompt<
  Model extends ILlmSchema.Model,
  Protocol extends "http" | "class" = any,
> extends AgenticaPromptBase<"execute", IAgenticaPromptJson.IExecute> {
  public readonly id: string;
  public readonly operation: Protocol extends "http"
    ? AgenticaOperation.Http<Model>
    : Protocol extends "class"
      ? AgenticaOperation.Class<Model>
      : AgenticaOperation<Model>;
  public readonly arguments: Record<string, any>;
  public readonly value: Protocol extends "http" ? IHttpResponse : any;

  public constructor(props: AgenticaExecutePrompt.IProps<Model, Protocol>) {
    super("execute");
    this.id = props.id;
    this.operation = props.operation;
    this.arguments = props.arguments;
    this.value = props.value;
  }

  public toJSON(): IAgenticaPromptJson.IExecute {
    return {
      type: this.type,
      id: this.id,
      operation: this.operation.toJSON(),
      arguments: this.arguments,
      value: this.value,
    };
  }
}
export namespace AgenticaExecutePrompt {
  export interface IProps<
    Model extends ILlmSchema.Model,
    Protocol extends "http" | "class" = any,
  > {
    id: string;
    operation: Protocol extends "http"
      ? AgenticaOperation.Http<Model>
      : Protocol extends "class"
        ? AgenticaOperation.Class<Model>
        : AgenticaOperation<Model>;
    arguments: Record<string, any>;
    value: Protocol extends "http" ? IHttpResponse : any;
  }
}
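
As a quick reference for the conditional typing above, here is a minimal sketch (not part of the package) showing how the `Protocol` parameter narrows the `value` member; only members that appear in this hunk are used, and the helper name is invented for illustration.

```ts
import { IHttpResponse, ILlmSchema } from "@samchon/openapi";

import { AgenticaExecutePrompt } from "./AgenticaExecutePrompt";

// Illustrative helper (not part of the package): when Protocol is "http",
// the conditional type of `value` resolves to IHttpResponse, so the raw
// HTTP response of the executed operation can be returned as-is.
export function getHttpResult<Model extends ILlmSchema.Model>(
  prompt: AgenticaExecutePrompt<Model, "http">,
): IHttpResponse {
  return prompt.value;
}
```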

package/src/prompts/AgenticaPrompt.ts

@@ -1,14 +1,14 @@
import { ILlmSchema } from "@samchon/openapi";

import { AgenticaCancelPrompt } from "../context/AgenticaCancelPrompt";
import { AgenticaDescribePrompt } from "./AgenticaDescribePrompt";
import { AgenticaExecutePrompt } from "./AgenticaExecutePrompt";
import { AgenticaSelectPrompt } from "./AgenticaSelectPrompt";
import { AgenticaTextPrompt } from "./AgenticaTextPrompt";

export type AgenticaPrompt<Model extends ILlmSchema.Model> =
  | AgenticaCancelPrompt<Model>
  | AgenticaDescribePrompt<Model>
  | AgenticaExecutePrompt<Model>
  | AgenticaSelectPrompt<Model>
  | AgenticaTextPrompt;
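
Since every member of this union extends `AgenticaPromptBase` (see the next file), it can be narrowed through the `type` discriminator. The sketch below handles only the `"execute"`, `"select"`, and `"text"` discriminators that appear in this diff; the helper itself is illustrative rather than part of the package.

```ts
import { ILlmSchema } from "@samchon/openapi";

import { AgenticaPrompt } from "./AgenticaPrompt";

// Illustrative helper (not part of the package): summarize a prompt by
// switching on its `type` discriminator.
export function summarizePrompt<Model extends ILlmSchema.Model>(
  prompt: AgenticaPrompt<Model>,
): string {
  switch (prompt.type) {
    case "text":
      return `[${prompt.role}] ${prompt.text}`;
    case "execute":
      return `function call completed (id: ${prompt.id})`;
    case "select":
      return `${prompt.selections.length} operation(s) selected`;
    default:
      // The "cancel" and "describe" variants are not detailed in this diff.
      return prompt.type;
  }
}
```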

package/src/prompts/AgenticaPromptBase.ts

@@ -1,27 +1,27 @@
/**
 * Base class for all prompts in Agentica.
 *
 * `AgenticaPromptBase` is a base class for every prompt classes
 * in Agentica. It is generated by {@link Agentica.conversate} function,
 * and used for restoring the previous conversation history when
 * constructing the {@link Agentica} instance.
 *
 * @template Type Discriminator type
 * @template Json Primitive type of the prompt
 * @author Samchon
 */
export abstract class AgenticaPromptBase<
  Type extends string,
  Json extends { type: Type },
> {
  /**
   * Discriminator type.
   */
  public readonly type: Type;

  protected constructor(type: Type) {
    this.type = type;
  }

  public abstract toJSON(): Json;
}
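
To make the contract concrete, the sketch below defines a hypothetical subclass; `NotePrompt`, its `"note"` discriminator, and `INotePromptJson` are all invented for illustration and rely only on the base-class members shown above (the protected constructor and the abstract `toJSON()`).

```ts
import { AgenticaPromptBase } from "./AgenticaPromptBase";

// Hypothetical JSON shape, satisfying the `Json extends { type: Type }` constraint.
interface INotePromptJson {
  type: "note";
  note: string;
}

// Hypothetical subclass: forward the discriminator to the protected
// constructor and implement the abstract toJSON() member.
export class NotePrompt extends AgenticaPromptBase<"note", INotePromptJson> {
  public constructor(public readonly note: string) {
    super("note");
  }

  public toJSON(): INotePromptJson {
    return { type: this.type, note: this.note };
  }
}
```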

package/src/prompts/AgenticaSelectPrompt.ts

@@ -1,32 +1,32 @@
import { ILlmSchema } from "@samchon/openapi";

import { AgenticaOperationSelection } from "../context/AgenticaOperationSelection";
import { IAgenticaPromptJson } from "../json/IAgenticaPromptJson";
import { AgenticaPromptBase } from "./AgenticaPromptBase";

export class AgenticaSelectPrompt<
  Model extends ILlmSchema.Model,
> extends AgenticaPromptBase<"select", IAgenticaPromptJson.ISelect> {
  public readonly id: string;
  public readonly selections: AgenticaOperationSelection<Model>[];

  public constructor(props: AgenticaSelectPrompt.IProps<Model>) {
    super("select");
    this.id = props.id;
    this.selections = props.selections;
  }

  public toJSON(): IAgenticaPromptJson.ISelect {
    return {
      type: this.type,
      id: this.id,
      selections: this.selections.map((s) => s.toJSON()),
    };
  }
}
export namespace AgenticaSelectPrompt {
  export interface IProps<Model extends ILlmSchema.Model> {
    id: string;
    selections: AgenticaOperationSelection<Model>[];
  }
}
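
A small consumption sketch for the class above; the prompt instance is assumed to come from Agentica's selector agent, since constructing `AgenticaOperationSelection` by hand is not covered in this diff, and the helper name is invented for illustration.

```ts
import { ILlmSchema } from "@samchon/openapi";

import { IAgenticaPromptJson } from "../json/IAgenticaPromptJson";
import { AgenticaSelectPrompt } from "./AgenticaSelectPrompt";

// Illustrative helper (not part of the package): log and serialize a
// select prompt using only the members declared above.
export function archiveSelection<Model extends ILlmSchema.Model>(
  prompt: AgenticaSelectPrompt<Model>,
): IAgenticaPromptJson.ISelect {
  console.log(`select prompt ${prompt.id}: ${prompt.selections.length} selection(s)`);
  return prompt.toJSON();
}
```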

package/src/prompts/AgenticaTextPrompt.ts

@@ -1,31 +1,31 @@
import { IAgenticaPromptJson } from "../json/IAgenticaPromptJson";
import { AgenticaPromptBase } from "./AgenticaPromptBase";

export class AgenticaTextPrompt<
  Role extends "assistant" | "user" = "assistant" | "user",
> extends AgenticaPromptBase<"text", IAgenticaPromptJson.IText> {
  public readonly role: Role;
  public readonly text: string;

  public constructor(props: AgenticaTextPrompt.IProps<Role>) {
    super("text");
    this.role = props.role;
    this.text = props.text;
  }

  public toJSON(): IAgenticaPromptJson.IText<Role> {
    return {
      type: this.type,
      role: this.role,
      text: this.text,
    };
  }
}
export namespace AgenticaTextPrompt {
  export interface IProps<
    Role extends "assistant" | "user" = "assistant" | "user",
  > {
    role: Role;
    text: string;
  }
}
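
A brief usage sketch of the class above, constructing a user text prompt and serializing it; everything referenced here appears in this hunk, while the example text itself is arbitrary.

```ts
import { AgenticaTextPrompt } from "./AgenticaTextPrompt";

// Construct a user-authored text prompt; Role is inferred as "user".
const greeting = new AgenticaTextPrompt({
  role: "user",
  text: "Hello, Agentica!",
});

// toJSON() yields { type: "text", role: "user", text: "Hello, Agentica!" }.
console.log(JSON.stringify(greeting.toJSON()));
```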

package/src/structures/IAgenticaConfig.ts

@@ -1,123 +1,123 @@
import { ILlmSchema } from "@samchon/openapi";

import { AgenticaContext } from "../context/AgenticaContext";
import { AgenticaPrompt } from "../prompts/AgenticaPrompt";
import { IAgenticaExecutor } from "./IAgenticaExecutor";
import { IAgenticaSystemPrompt } from "./IAgenticaSystemPrompt";

/**
 * Configuration for Nestia Agent.
 *
 * `IAgenticaConfig` is an interface that defines the configuration
 * properties of the {@link Agentica}. With this configuration, you
 * can set the user's locale, timezone, and some of system prompts.
 *
 * Also, you can affect to the LLM function selecing/calling logic by
 * configuring additional properties. For an example, if you configure the
 * {@link capacity} property, the A.I. chatbot will divide the functions
 * into the several groups with the configured capacity and select proper
 * functions to call by operating the multiple LLM function selecting
 * agents parallelly.
 *
 * @author Samchon
 */
export interface IAgenticaConfig<Model extends ILlmSchema.Model> {
  /**
   * Agent executor.
   *
   * Executor function of Agentic AI's iteration plan to internal agents
   * running by the {@link Agentica.conversate} function.
   *
   * If you want to customize the agent execution plan, you can do it
   * by assigning you logic function of entire or partial to this property.
   * When customizing it, it would better to reference the
   * {@link ChatGptAgent.execute} function.
   *
   * @param ctx Context of the agent
   * @returns Lit of prompts generated by the executor
   * @default ChatGptAgent.execute
   */
  executor?:
    | Partial<IAgenticaExecutor<Model>>
    | ((ctx: AgenticaContext<Model>) => Promise<AgenticaPrompt<Model>[]>);

  /**
   * System prompt messages.
   *
   * System prompt messages if you want to customize the system prompt
   * messages for each situation.
   */
  systemPrompt?: IAgenticaSystemPrompt<Model>;

  /**
   * Locale of the A.I. chatbot.
   *
   * If you configure this property, the A.I. chatbot will conversate with
   * the given locale. You can get the locale value by
   *
   * - Browser: `navigator.language`
   * - NodeJS: `process.env.LANG.split(".")[0]`
   *
   * @default your_locale
   */
  locale?: string;

  /**
   * Timezone of the A.I. chatbot.
   *
   * If you configure this property, the A.I. chatbot will consider the
   * given timezone. You can get the timezone value by
   * `Intl.DateTimeFormat().resolvedOptions().timeZone`.
   *
   * @default your_timezone
   */
  timezone?: string;

  /**
   * Retry count.
   *
   * If LLM function calling composed arguments are invalid,
   * the A.I. chatbot will retry to call the function with
   * the modified arguments.
   *
   * By the way, if you configure it to 0 or 1, the A.I. chatbot
   * will not retry the LLM function calling for correcting the
   * arguments.
   *
   * @default 3
   */
  retry?: number;

  /**
   * Capacity of the LLM function selecting.
   *
   * When the A.I. chatbot selects a proper function to call, if the
   * number of functions registered in the
   * {@link IAgenticaProps.applications} is too much greater,
   * the A.I. chatbot often fallen into the hallucination.
   *
   * In that case, if you configure this property value, `Agentica`
   * will divide the functions into the several groups with the configured
   * capacity and select proper functions to call by operating the multiple
   * LLM function selecting agents parallelly.
   *
   * @default 100
   */
  capacity?: number;

  /**
   * Eliticism for the LLM function selecting.
   *
   * If you configure {@link capacity}, the A.I. chatbot will complete
   * the candidate functions to call which are selected by the multiple
   * LLM function selecting agents.
   *
   * Otherwise you configure this property as `false`, the A.I. chatbot
   * will not complete the candidate functions to call and just accept
   * every candidate functions to call which are selected by the multiple
   * LLM function selecting agents.
   *
   * @default true
   */
  eliticism?: boolean;
}
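
To tie the options together, here is a configuration sketch that uses only the properties declared above; the values echo the documented defaults, the locale and timezone lookups follow the JSDoc hints (a NodeJS environment with `@types/node` is assumed, and the `"en-US"` fallback is arbitrary), and the factory function itself is illustrative.

```ts
import { ILlmSchema } from "@samchon/openapi";

import { IAgenticaConfig } from "./IAgenticaConfig";

// Illustrative factory (not part of the package): build a configuration
// object with the documented defaults spelled out explicitly.
export function createConfig<Model extends ILlmSchema.Model>(): IAgenticaConfig<Model> {
  return {
    locale: process.env.LANG?.split(".")[0] ?? "en-US", // NodeJS hint from the JSDoc
    timezone: Intl.DateTimeFormat().resolvedOptions().timeZone,
    retry: 3, // documented default
    capacity: 100, // documented default
    eliticism: true, // documented default
  };
}
```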