@aigne/core 1.0.9 → 1.0.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/cjs/agent.d.ts +70 -16
- package/lib/cjs/agent.js +68 -16
- package/lib/cjs/context.d.ts +1 -1
- package/lib/cjs/definitions/data-schema.d.ts +61 -0
- package/lib/cjs/definitions/memory.d.ts +18 -5
- package/lib/cjs/definitions/open-api.d.ts +3 -3
- package/lib/cjs/definitions/preload.d.ts +43 -0
- package/lib/cjs/definitions/preload.js +34 -0
- package/lib/cjs/function-agent.d.ts +15 -56
- package/lib/cjs/function-agent.js +9 -6
- package/lib/cjs/index.d.ts +1 -1
- package/lib/cjs/index.js +1 -1
- package/lib/cjs/llm-agent.d.ts +17 -60
- package/lib/cjs/llm-agent.js +8 -5
- package/lib/cjs/llm-decision-agent.d.ts +31 -51
- package/lib/cjs/llm-decision-agent.js +30 -13
- package/lib/cjs/llm-model.d.ts +4 -4
- package/lib/cjs/llm-models/openai-llm-model.d.ts +4 -1
- package/lib/cjs/llm-models/openai-llm-model.js +14 -14
- package/lib/cjs/memorable.d.ts +21 -13
- package/lib/cjs/memorable.js +2 -2
- package/lib/cjs/open-api-agent.d.ts +9 -38
- package/lib/cjs/open-api-agent.js +12 -6
- package/lib/cjs/pipeline-agent.d.ts +14 -37
- package/lib/cjs/pipeline-agent.js +10 -10
- package/lib/cjs/runnable.d.ts +7 -24
- package/lib/cjs/sandbox-function-agent.d.ts +10 -38
- package/lib/cjs/sandbox-function-agent.js +9 -5
- package/lib/cjs/sandbox-function-runner.d.ts +8 -17
- package/lib/cjs/sandbox-function-runner.js +1 -0
- package/lib/cjs/utils/index.d.ts +1 -0
- package/lib/cjs/utils/index.js +1 -0
- package/lib/cjs/utils/logger.d.ts +3 -0
- package/lib/cjs/utils/logger.js +17 -0
- package/lib/cjs/utils/message-utils.d.ts +1 -1
- package/lib/cjs/utils/message-utils.js +1 -4
- package/lib/cjs/utils/open-api-parameter.js +3 -6
- package/lib/cjs/utils/runnable-type.d.ts +5 -3
- package/lib/cjs/utils/structured-output-schema.d.ts +2 -2
- package/lib/cjs/utils/type-utils.d.ts +1 -0
- package/lib/cjs/{logger.js → utils/type-utils.js} +0 -2
- package/lib/dts/agent.d.ts +70 -16
- package/lib/dts/context.d.ts +1 -1
- package/lib/dts/definitions/data-schema.d.ts +61 -0
- package/lib/dts/definitions/memory.d.ts +18 -5
- package/lib/dts/definitions/open-api.d.ts +3 -3
- package/lib/dts/definitions/preload.d.ts +43 -0
- package/lib/dts/function-agent.d.ts +15 -56
- package/lib/dts/index.d.ts +1 -1
- package/lib/dts/llm-agent.d.ts +17 -60
- package/lib/dts/llm-decision-agent.d.ts +31 -51
- package/lib/dts/llm-model.d.ts +4 -4
- package/lib/dts/llm-models/openai-llm-model.d.ts +4 -1
- package/lib/dts/memorable.d.ts +21 -13
- package/lib/dts/open-api-agent.d.ts +9 -38
- package/lib/dts/pipeline-agent.d.ts +14 -37
- package/lib/dts/runnable.d.ts +7 -24
- package/lib/dts/sandbox-function-agent.d.ts +10 -38
- package/lib/dts/sandbox-function-runner.d.ts +8 -17
- package/lib/dts/utils/index.d.ts +1 -0
- package/lib/dts/utils/logger.d.ts +3 -0
- package/lib/dts/utils/message-utils.d.ts +1 -1
- package/lib/dts/utils/runnable-type.d.ts +5 -3
- package/lib/dts/utils/structured-output-schema.d.ts +2 -2
- package/lib/dts/utils/type-utils.d.ts +1 -0
- package/lib/esm/agent.d.ts +70 -16
- package/lib/esm/agent.js +64 -9
- package/lib/esm/context.d.ts +1 -1
- package/lib/esm/definitions/data-schema.d.ts +61 -0
- package/lib/esm/definitions/memory.d.ts +18 -5
- package/lib/esm/definitions/open-api.d.ts +3 -3
- package/lib/esm/definitions/preload.d.ts +43 -0
- package/lib/esm/definitions/preload.js +31 -0
- package/lib/esm/function-agent.d.ts +15 -56
- package/lib/esm/function-agent.js +9 -6
- package/lib/esm/index.d.ts +1 -1
- package/lib/esm/index.js +1 -1
- package/lib/esm/llm-agent.d.ts +17 -60
- package/lib/esm/llm-agent.js +8 -5
- package/lib/esm/llm-decision-agent.d.ts +31 -51
- package/lib/esm/llm-decision-agent.js +32 -15
- package/lib/esm/llm-model.d.ts +4 -4
- package/lib/esm/llm-models/openai-llm-model.d.ts +4 -1
- package/lib/esm/llm-models/openai-llm-model.js +14 -14
- package/lib/esm/memorable.d.ts +21 -13
- package/lib/esm/memorable.js +2 -2
- package/lib/esm/open-api-agent.d.ts +9 -38
- package/lib/esm/open-api-agent.js +11 -5
- package/lib/esm/pipeline-agent.d.ts +14 -37
- package/lib/esm/pipeline-agent.js +9 -6
- package/lib/esm/runnable.d.ts +7 -24
- package/lib/esm/sandbox-function-agent.d.ts +10 -38
- package/lib/esm/sandbox-function-agent.js +9 -5
- package/lib/esm/sandbox-function-runner.d.ts +8 -17
- package/lib/esm/sandbox-function-runner.js +2 -1
- package/lib/esm/utils/index.d.ts +1 -0
- package/lib/esm/utils/index.js +1 -0
- package/lib/esm/utils/logger.d.ts +3 -0
- package/lib/esm/utils/logger.js +14 -0
- package/lib/esm/utils/message-utils.d.ts +1 -1
- package/lib/esm/utils/message-utils.js +1 -4
- package/lib/esm/utils/open-api-parameter.js +1 -1
- package/lib/esm/utils/runnable-type.d.ts +5 -3
- package/lib/esm/utils/structured-output-schema.d.ts +2 -2
- package/lib/esm/utils/type-utils.d.ts +1 -0
- package/lib/esm/utils/type-utils.js +1 -0
- package/package.json +4 -4
- package/lib/cjs/definitions/data-type-schema.d.ts +0 -42
- package/lib/cjs/logger.d.ts +0 -2
- package/lib/dts/definitions/data-type-schema.d.ts +0 -42
- package/lib/dts/logger.d.ts +0 -2
- package/lib/esm/definitions/data-type-schema.d.ts +0 -42
- package/lib/esm/logger.d.ts +0 -2
- package/lib/esm/logger.js +0 -2
- /package/lib/cjs/definitions/{data-type-schema.js → data-schema.js} +0 -0
- /package/lib/esm/definitions/{data-type-schema.js → data-schema.js} +0 -0
package/lib/cjs/llm-agent.d.ts
CHANGED
@@ -1,23 +1,15 @@
-import { Agent, type AgentProcessOptions } from "./agent";
+import { Agent, type AgentDefinition, type AgentMemories, type AgentPreloads, type AgentProcessInput, type AgentProcessOptions, type CreateAgentInputSchema, type CreateAgentMemoriesSchema, type CreateAgentMemoriesType, type CreateAgentOptions, type CreateAgentOutputSchema, type CreateAgentPreloadsSchema, type CreateAgentPreloadsType } from "./agent";
 import type { Context, ContextState } from "./context";
-import { type
-import { type CreateRunnableMemory } from "./definitions/memory";
+import { type SchemaToType } from "./definitions/data-schema";
 import type { LLMModel, LLMModelInputMessage, LLMModelInputs } from "./llm-model";
-import type {
-import type { RunnableDefinition } from "./runnable";
+import type { RunnableInput, RunnableOutput } from "./runnable";
 import { OrderedRecord } from "./utils/ordered-map";
-export declare class LLMAgent<I extends {
-[name: string]: any;
-} = {}, O extends {
-[name: string]: any;
-} = {}, Memories extends {
-[name: string]: MemoryItemWithScore[];
-} = {}, State extends ContextState = ContextState> extends Agent<I, O, Memories, State> {
+export declare class LLMAgent<I extends RunnableInput = RunnableInput, O extends RunnableOutput = RunnableOutput, State extends ContextState = ContextState, Preloads extends AgentPreloads = AgentPreloads, Memories extends AgentMemories = AgentMemories> extends Agent<I, O, State, Preloads, Memories> {
 definition: LLMAgentDefinition;
 model?: LLMModel | undefined;
 static create: typeof create;
 constructor(definition: LLMAgentDefinition, context?: Context<State>, model?: LLMModel | undefined);
-process(input: I, options: AgentProcessOptions
+process(input: AgentProcessInput<I, Preloads, Memories>, options: AgentProcessOptions): AsyncGenerator<{
 $text: string | undefined;
 delta?: undefined;
 } | {
@@ -27,7 +19,7 @@ export declare class LLMAgent<I extends {
 private runWithStructuredOutput;
 private runWithTextOutput;
 }
-export interface LLMAgentDefinition extends
+export interface LLMAgentDefinition extends AgentDefinition {
 type: "llm_agent";
 primaryMemoryId?: string;
 messages?: OrderedRecord<LLMModelInputMessage & {
@@ -36,32 +28,19 @@ export interface LLMAgentDefinition extends RunnableDefinition {
 modelOptions?: LLMModelInputs["modelOptions"];
 }
 /**
-*
+* Create LLMAgent definition.
+* @param options Options to create LLMAgent.
+* @returns LLMAgent definition.
 */
-
-[name: string]: DataTypeSchema;
-}, O extends {
-[name: string]: DataTypeSchema;
-}, Memories extends {
-[name: string]: CreateRunnableMemory<I>;
-}, State extends ContextState> {
-context?: Context<State>;
-/**
-* Agent name, used to identify the agent.
-*/
-name?: string;
-/**
-* Input variables for this agent.
-*/
-inputs: I;
+declare function create<I extends CreateAgentInputSchema, O extends CreateAgentOutputSchema, State extends ContextState, Preloads extends CreateAgentPreloadsSchema<I>, Memories extends CreateAgentMemoriesSchema<I, {
 /**
-*
+* Whether this memory is primary? Primary memory will be passed as messages to LLM chat model,
+* otherwise, it will be placed in a system message.
+*
+* Only one primary memory is allowed.
 */
-
-
-* Memories to be used in this agent.
-*/
-memories?: Memories;
+primary?: boolean;
+}>>(options: CreateAgentOptions<I, O, State, Preloads, Memories> & {
 /**
 * Options for LLM chat model.
 */
@@ -70,27 +49,5 @@ export interface CreateLLMAgentOptions<I extends {
 * Messages to be passed to LLM chat model.
 */
 messages?: LLMModelInputMessage[];
-}
-/**
-* Create LLMAgent definition.
-* @param options Options to create LLMAgent.
-* @returns LLMAgent definition.
-*/
-declare function create<I extends {
-[name: string]: DataTypeSchema;
-}, O extends {
-[name: string]: DataTypeSchema;
-}, Memories extends {
-[name: string]: CreateRunnableMemory<I> & {
-/**
-* Whether this memory is primary? Primary memory will be passed as messages to LLM chat model,
-* otherwise, it will be placed in a system message.
-*
-* Only one primary memory is allowed.
-*/
-primary?: boolean;
-};
-}, State extends ContextState>({ context, ...options }: CreateLLMAgentOptions<I, O, Memories, State>): LLMAgent<SchemaMapType<I>, SchemaMapType<O>, {
-[name in keyof Memories]: MemorableSearchOutput<Memories[name]["memory"]>;
-}, State>;
+}): LLMAgent<SchemaToType<I>, SchemaToType<O>, State, CreateAgentPreloadsType<I, Preloads>, CreateAgentMemoriesType<I, Memories>>;
 export {};
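Taken together, the typing change collapses the old `CreateLLMAgentOptions` interface into a single `create(options)` signature whose preloads and memories are derived from the input schema. A minimal usage sketch of the new shape follows; the schema-literal format for `inputs`/`outputs`, the package-root exports, and the memory instance are assumptions, since `data-schema.d.ts` and the memory implementations are not part of this excerpt.

```ts
import { LLMAgent } from "@aigne/core";

// Hypothetical placeholders: a concrete Memorable implementation and a Context
// instance would come from the application; their construction is not shown in this diff.
declare const chatHistory: any;
declare const context: any;

const translator = LLMAgent.create({
  name: "translator",
  // Assumed schema-literal shape; the real format is defined in definitions/data-schema.
  inputs: { text: { type: "string", required: true } },
  outputs: { translation: { type: "string" } },
  memories: {
    // `primary: true` sends this memory to the model as chat messages rather than
    // folding it into a system message; only one primary memory is allowed.
    history: { memory: chatHistory, primary: true },
  },
  messages: [{ role: "user", content: "Translate to French: {{text}}" }],
  context, // now read as options.context inside create (see the .js diff below)
});
```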
package/lib/cjs/llm-agent.js
CHANGED
@@ -17,8 +17,9 @@ const nanoid_1 = require("nanoid");
 const tsyringe_1 = require("tsyringe");
 const agent_1 = require("./agent");
 const constants_1 = require("./constants");
-const
+const data_schema_1 = require("./definitions/data-schema");
 const memory_1 = require("./definitions/memory");
+const preload_1 = require("./definitions/preload");
 const message_utils_1 = require("./utils/message-utils");
 const mustache_utils_1 = require("./utils/mustache-utils");
 const ordered_map_1 = require("./utils/ordered-map");
@@ -104,10 +105,11 @@ exports.LLMAgent = LLMAgent = __decorate([
 * @param options Options to create LLMAgent.
 * @returns LLMAgent definition.
 */
-function create(
+function create(options) {
 const agentId = options.name || (0, nanoid_1.nanoid)();
-const inputs = (0,
-const outputs = (0,
+const inputs = (0, data_schema_1.schemaToDataType)(options.inputs);
+const outputs = (0, data_schema_1.schemaToDataType)(options.outputs);
+const preloads = (0, preload_1.preloadCreatorsToPreloads)(inputs, options.preloads);
 const memories = (0, memory_1.toRunnableMemories)(agentId, inputs, options.memories ?? {});
 const primaryMemoryNames = Object.entries(options.memories ?? {})
 .filter(([, i]) => i.primary)
@@ -126,9 +128,10 @@ function create({ context, ...options }) {
 type: "llm_agent",
 inputs,
 outputs,
+preloads,
 primaryMemoryId: primaryMemoryNames?.at(0),
 memories,
 modelOptions: options.modelOptions,
 messages,
-}, context);
+}, options.context);
 }
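The compiled factory now receives the whole options object (`create(options)` instead of destructuring `{ context, ...options }`), builds `preloads` from the inputs, and resolves the primary memory from the creators. The primary-memory resolution reduces to the following standalone sketch; the helper name is mine, and the logic mirrors the `.filter(...)` and `.at(0)` chain in the compiled code.

```ts
// Simplified rendering of how the factory picks definition.primaryMemoryId:
// the first memory creator flagged `primary: true` wins.
type MemoryCreator = { primary?: boolean };

function pickPrimaryMemoryId(memories: Record<string, MemoryCreator>): string | undefined {
  return Object.entries(memories)
    .filter(([, creator]) => creator.primary)
    .map(([name]) => name)
    .at(0);
}

// pickPrimaryMemoryId({ scratch: {}, history: { primary: true } }) => "history"
```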
package/lib/cjs/llm-decision-agent.d.ts
CHANGED
@@ -1,64 +1,41 @@
-import { Agent, type AgentProcessOptions } from "./agent";
+import { Agent, type AgentDefinition, type AgentMemories, type AgentPreloads, type AgentProcessInput, type AgentProcessOptions, type CreateAgentMemoriesSchema, type CreateAgentMemoriesType, type CreateAgentOptions, type CreateAgentPreloadsSchema, type CreateAgentPreloadsType } from "./agent";
 import type { Context, ContextState } from "./context";
-import type {
-import {
-import type {
-import type { LLMModel } from "./llm-model";
-import type {
-import type { Runnable, RunnableDefinition } from "./runnable";
+import type { TypeToSchema } from "./definitions/data-schema";
+import type { BindAgentInput, BoundAgent, OmitBoundAgentInput } from "./definitions/preload";
+import type { LLMAgentDefinition } from "./llm-agent";
+import type { LLMModel, LLMModelInputMessage, LLMModelInputs } from "./llm-model";
+import type { RunnableInput, RunnableOutput } from "./runnable";
 import { OrderedRecord } from "./utils";
-import type {
-
-export declare class LLMDecisionAgent<I extends {
-[name: string]: any;
-} = {}, O extends {
-[name: string]: any;
-} = {}, Memories extends {
-[name: string]: MemoryItemWithScore[];
-} = {}, State extends ContextState = ContextState> extends Agent<I, O, Memories, State> {
+import type { ExtractRunnableInputTypeIntersection, ExtractRunnableOutputType } from "./utils/runnable-type";
+export declare class LLMDecisionAgent<I extends RunnableInput = RunnableInput, O extends RunnableOutput = RunnableOutput, State extends ContextState = ContextState, Preloads extends AgentPreloads = AgentPreloads, Memories extends AgentMemories = AgentMemories> extends Agent<I, O, State, Preloads, Memories> {
 definition: LLMDecisionAgentDefinition;
 model?: LLMModel | undefined;
 static create: typeof create;
 constructor(definition: LLMDecisionAgentDefinition, context?: Context<State>, model?: LLMModel | undefined);
-process(input: I, options: AgentProcessOptions<Memories>): Promise<import("./runnable").RunnableResponse<O>>;
+process(input: AgentProcessInput<I, Preloads, Memories>, options: AgentProcessOptions<Preloads, Memories>): Promise<import("./runnable").RunnableResponse<O>>;
 }
-
-
-
-
-
-
-
-
-
-}>, O extends UnionToIntersection<ExtractRunnableOutputType<Case["runnable"]>, {
-[name: string]: DataTypeSchema;
-}>, Memories extends {
-[name: string]: CreateRunnableMemory<I>;
-}, State extends ContextState> extends Pick<CreateLLMAgentOptions<I, O, Memories, State>, "name" | "memories" | "messages" | "modelOptions"> {
-context: Context<State>;
+declare function create<Case extends BoundAgent, InputType extends ExtractRunnableInputTypeIntersection<Case["runnable"]>, OutputType extends ExtractRunnableOutputType<Case["runnable"]>, State extends ContextState, Preloads extends CreateAgentPreloadsSchema<TypeToSchema<InputType>>, Memories extends CreateAgentMemoriesSchema<TypeToSchema<InputType>, {
+/**
+* Whether this memory is primary? Primary memory will be passed as messages to LLM chat model,
+* otherwise, it will be placed in a system message.
+*
+* Only one primary memory is allowed.
+*/
+primary?: boolean;
+}>>(options: Omit<CreateAgentOptions<never, never, State, Preloads, Memories>, "inputs" | "outputs"> & {
 cases: {
 [name: string]: Case;
 };
-
-
-
-
-
-
-
-
-
-
-*
-* Only one primary memory is allowed.
-*/
-primary?: boolean;
-};
-}, State extends ContextState>({ context, ...options }: CreateLLMDecisionAgentOptions<Case, I, O, Memories, State>): LLMDecisionAgent<UnionToIntersection<ExtractRunnableInputType<Case["runnable"]>, {}>, ExtractRunnableOutputType<Case["runnable"]>, {
-[name in keyof Memories]: MemorableSearchOutput<Memories[name]["memory"]>;
-}, State>;
-export interface LLMDecisionAgentDefinition extends RunnableDefinition, Pick<LLMAgentDefinition, "modelOptions" | "messages" | "primaryMemoryId"> {
+/**
+* Options for LLM chat model.
+*/
+modelOptions?: LLMModelInputs["modelOptions"];
+/**
+* Messages to be passed to LLM chat model.
+*/
+messages?: LLMModelInputMessage[];
+}): LLMDecisionAgent<OmitBoundAgentInput<Case, "ai">, OutputType, State, CreateAgentPreloadsType<TypeToSchema<InputType>, Preloads>, CreateAgentMemoriesType<TypeToSchema<InputType>, Memories>>;
+export interface LLMDecisionAgentDefinition extends AgentDefinition, Pick<LLMAgentDefinition, "modelOptions" | "messages" | "primaryMemoryId"> {
 type: "llm_decision_agent";
 cases?: OrderedRecord<LLMDecisionCase>;
 }
@@ -69,5 +46,8 @@ export interface LLMDecisionCase {
 runnable?: {
 id?: string;
 };
+input?: {
+[inputId: string]: BindAgentInput;
+};
 }
 export {};
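The new `create` overload types the decision agent against `BoundAgent` cases: each case wraps a runnable, and inputs bound with `from: "ai"` are filled by the model's tool-call arguments and dropped from the caller-facing input type (`OmitBoundAgentInput<Case, "ai">`). A hedged usage sketch, with placeholder runnables and an assumed package-root export:

```ts
import { LLMDecisionAgent } from "@aigne/core";

// Hypothetical case runnables; any agents created elsewhere in the app would do.
declare const weatherAgent: any;
declare const newsAgent: any;

const router = LLMDecisionAgent.create({
  name: "router",
  cases: {
    weather: {
      description: "Answer questions about the weather",
      runnable: weatherAgent,
      // Bound "from: ai": the model supplies `city` via tool-call arguments,
      // so callers no longer pass it themselves.
      input: { city: { from: "ai" } },
    },
    news: {
      description: "Fetch the latest headlines",
      runnable: newsAgent,
    },
  },
});
```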
package/lib/cjs/llm-decision-agent.js
CHANGED
@@ -20,6 +20,7 @@ const constants_1 = require("./constants");
 const memory_1 = require("./definitions/memory");
 const utils_1 = require("./utils");
 const message_utils_1 = require("./utils/message-utils");
+const structured_output_schema_1 = require("./utils/structured-output-schema");
 let LLMDecisionAgent = class LLMDecisionAgent extends agent_1.Agent {
 definition;
 model;
@@ -45,19 +46,23 @@ let LLMDecisionAgent = class LLMDecisionAgent extends agent_1.Agent {
 const name = t.name || runnable.name;
 if (!name)
 throw new Error("Case name is required");
-return {
+return { ...t, name, runnable };
 }));
 const llmInputs = {
 messages: messagesWithMemory,
 modelOptions: definition.modelOptions,
 tools: cases.map((t) => {
-//
+// Filter inputs that are bound from AI
+const inputsFromAI = utils_1.OrderedRecord.fromArray(utils_1.OrderedRecord.filter(t.runnable.definition.inputs, (i) => t.input?.[i.id]?.from === "ai"));
+const parameters = inputsFromAI.$indexes.length > 0
+? (0, structured_output_schema_1.outputsToJsonSchema)(inputsFromAI)
+: {};
 return {
 type: "function",
 function: {
 name: t.name,
 description: t.description,
-parameters
+parameters,
 },
 };
 }),
@@ -65,14 +70,16 @@ let LLMDecisionAgent = class LLMDecisionAgent extends agent_1.Agent {
 };
 const { toolCalls } = await model.run(llmInputs);
 // TODO: support run multiple calls
-const functionNameToCall = toolCalls?.[0]?.function
+const { name: functionNameToCall, arguments: args } = toolCalls?.[0]?.function ?? {};
 if (!functionNameToCall)
 throw new Error("No any runnable called");
 const caseToCall = cases.find((i) => i.name === functionNameToCall);
 if (!caseToCall)
 throw new Error("Case not found");
+// Prepare arguments generated by LLM model
+const llmArgs = args ? JSON.parse(args) : {};
 // TODO: check result structure and omit undefined values
-const output = await caseToCall.runnable.run(input, { stream: true });
+const output = await caseToCall.runnable.run({ ...input, ...llmArgs }, { stream: true });
 return (0, utils_1.extractOutputsFromRunnableOutput)(output, ({ $text, ...json }) => this.updateMemories([
 ...originalMessages,
 {
@@ -90,14 +97,24 @@ exports.LLMDecisionAgent = LLMDecisionAgent = __decorate([
 __param(2, (0, tsyringe_1.inject)(constants_1.TYPES.llmModel)),
 __metadata("design:paramtypes", [Object, Object, Function])
 ], LLMDecisionAgent);
-function create(
+function create(options) {
 const agentId = options.name || (0, nanoid_1.nanoid)();
-const cases = utils_1.OrderedRecord.fromArray(Object.entries(options.cases).map(([name, c]) =>
-
-
-
-
-
+const cases = utils_1.OrderedRecord.fromArray(Object.entries(options.cases).map(([name, c]) => {
+const bindInputs = Object.entries(c.input ?? {});
+return {
+id: (0, nanoid_1.nanoid)(),
+name: name || c.runnable.name,
+description: c.description,
+runnable: { id: c.runnable.id },
+input: Object.fromEntries(bindInputs.map(([inputName, v]) => {
+const input = c.runnable.definition.inputs[inputName] ||
+utils_1.OrderedRecord.find(c.runnable.definition.inputs, (i) => i.name === inputName);
+if (!input)
+throw new Error(`Input ${inputName} not found`);
+return [input.id, v];
+})),
+};
+}));
 const inputs = utils_1.OrderedRecord.merge(...Object.values(options.cases).map((i) => i.runnable.definition.inputs));
 const outputs = utils_1.OrderedRecord.fromArray(utils_1.OrderedRecord.map(utils_1.OrderedRecord.merge(...Object.values(options.cases).map((i) => i.runnable.definition.outputs)), (o) => ({ ...o, required: false })));
 const memories = (0, memory_1.toRunnableMemories)(agentId, inputs, options.memories ?? {});
@@ -123,5 +140,5 @@ function create({ context, ...options }) {
 memories,
 modelOptions: options.modelOptions,
 cases,
-}, context);
+}, options.context);
 }
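At run time the added code destructures the first tool call, parses its JSON `arguments`, and merges them over the caller input before invoking the selected case. Reduced to a standalone helper (names simplified; this is an illustration, not the library's API):

```ts
// Mirrors the new selection/merge step: the first tool call names the case to run,
// and its JSON arguments are spread over the caller-provided input.
type ToolCall = { function?: { name?: string; arguments?: string } };

function resolveCaseCall<I extends object>(input: I, toolCalls: ToolCall[] | undefined) {
  const { name, arguments: args } = toolCalls?.[0]?.function ?? {};
  if (!name) throw new Error("No any runnable called");
  const llmArgs = args ? JSON.parse(args) : {};
  return { caseName: name, input: { ...input, ...llmArgs } };
}

// resolveCaseCall({ locale: "en" }, [{ function: { name: "weather", arguments: '{"city":"Paris"}' } }])
// => { caseName: "weather", input: { locale: "en", city: "Paris" } }
```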
package/lib/cjs/llm-model.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { Agent } from "./agent";
 import type { Context } from "./context";
 export type Role = "system" | "user" | "assistant" | "tool";
-export
+export type LLMModelInputs = {
 messages: LLMModelInputMessage[];
 responseFormat?: {
 type: "text";
@@ -23,7 +23,7 @@ export interface LLMModelInputs {
 };
 };
 modelOptions?: LLMModelOptions;
-}
+};
 export interface LLMModelInputMessage {
 role: Role;
 content: string | ({
@@ -60,7 +60,7 @@ export interface LLMModelOptions {
 frequencyPenalty?: number;
 presencePenalty?: number;
 }
-export
+export type LLMModelOutputs = {
 $text?: string | null;
 toolCalls?: {
 id?: string;
@@ -70,7 +70,7 @@ export interface LLMModelOutputs {
 arguments?: string;
 };
 }[];
-}
+};
 export declare abstract class LLMModel extends Agent<LLMModelInputs, LLMModelOutputs> {
 constructor(context?: Context);
 }
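With `LLMModelInputs` and `LLMModelOutputs` now published as type aliases rather than interfaces, values can be written against them directly. A small sketch using only the fields visible in this diff; the package-root export of these types and the exact `responseFormat` union are assumptions:

```ts
import type { LLMModelInputs, LLMModelOutputs } from "@aigne/core";

const inputs: LLMModelInputs = {
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "Say hi" },
  ],
  responseFormat: { type: "text" },
  modelOptions: { frequencyPenalty: 0, presencePenalty: 0 },
};

const outputs: LLMModelOutputs = {
  $text: "Hi there!",
  toolCalls: [],
};
```

The switch from `interface` to `type` presumably matters for structural assignability: object type aliases satisfy index-signature constraints such as the `RunnableInput`/`RunnableOutput` bounds used elsewhere in this release, which interfaces do not.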
package/lib/cjs/llm-models/openai-llm-model.d.ts
CHANGED
@@ -8,7 +8,9 @@ export declare class OpenaiLLMModel extends LLMModel {
 private client;
 setApiKey(apiKey: string): void;
 process(input: LLMModelInputs): AsyncGenerator<{
-$text: string
+$text: string;
+delta?: undefined;
+} | {
 delta: {
 toolCalls: {
 id?: string;
@@ -19,5 +21,6 @@ export declare class OpenaiLLMModel extends LLMModel {
 };
 }[];
 };
+$text?: undefined;
 }, void, unknown>;
 }
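The added `delta?: undefined` and `$text?: undefined` members make the yielded union discriminable with a plain property check, so a consumer can separate streamed text from the trailing tool-call delta. A consumption sketch; the chunk type is restated locally rather than imported:

```ts
type OpenAIStreamChunk =
  | { $text: string; delta?: undefined }
  | { delta: { toolCalls: unknown[] }; $text?: undefined };

async function collectText(stream: AsyncGenerator<OpenAIStreamChunk>) {
  let text = "";
  for await (const chunk of stream) {
    if (chunk.$text !== undefined) {
      text += chunk.$text; // streamed content tokens
    } else {
      console.log("tool calls:", chunk.delta.toolCalls); // accumulated tool-call delta
    }
  }
  return text;
}
```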
package/lib/cjs/llm-models/openai-llm-model.js
CHANGED
@@ -43,22 +43,22 @@ class OpenaiLLMModel extends llm_model_1.LLMModel {
 const toolCalls = [];
 for await (const chunk of res) {
 const choice = chunk.choices?.[0];
-
-
-
-
-
-
-
-
-
-
+if (choice?.delta.tool_calls?.length) {
+for (const call of choice.delta.tool_calls) {
+const tool = toolCalls[call.index] ?? { id: call.id || (0, nanoid_1.nanoid)() };
+toolCalls[call.index] = tool;
+if (call.type)
+tool.type = call.type;
+tool.function ??= {};
+tool.function.name =
+(tool.function.name || "") + (call.function?.name || "");
+tool.function.arguments = (tool.function.arguments || "").concat(call.function?.arguments || "");
+}
 }
-
-$text: choice
-delta: { toolCalls },
-};
+if (choice?.delta.content)
+yield { $text: choice.delta.content };
 }
+yield { delta: { toolCalls } };
 }
 }
 exports.OpenaiLLMModel = OpenaiLLMModel;
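The rewritten loop accumulates partial tool calls keyed by `index`, concatenating name and argument fragments as they stream in, and only yields the assembled list after the stream ends. The same logic as a standalone helper (the library's fallback to a generated nanoid for missing ids is elided here):

```ts
type ToolCallChunk = {
  index: number;
  id?: string;
  type?: "function";
  function?: { name?: string; arguments?: string };
};

type AssembledToolCall = {
  id?: string;
  type?: "function";
  function?: { name?: string; arguments?: string };
};

function accumulateToolCalls(toolCalls: AssembledToolCall[], deltas: ToolCallChunk[]): void {
  for (const call of deltas) {
    // Reuse the partially built call at this index, or start a new one.
    const tool = toolCalls[call.index] ?? { id: call.id };
    toolCalls[call.index] = tool;
    if (call.type) tool.type = call.type;
    tool.function ??= {};
    // Name and argument strings arrive in fragments and are concatenated.
    tool.function.name = (tool.function.name || "") + (call.function?.name || "");
    tool.function.arguments = (tool.function.arguments || "").concat(call.function?.arguments || "");
  }
}
```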
package/lib/cjs/memorable.d.ts
CHANGED
@@ -1,8 +1,7 @@
+import type { Context } from "./context";
 import type { LLMModelInputMessage } from "./llm-model";
 import { Runnable } from "./runnable";
-export
-[key: string]: any;
-}
+export type MemoryMetadata = Record<string, unknown>;
 export type MemoryActionItem<T> = {
 event: "add";
 id: string;
@@ -31,7 +30,7 @@ export interface MemoryItem<T> {
 memory: T;
 metadata: MemoryMetadata;
 }
-export interface MemoryItemWithScore<T =
+export interface MemoryItemWithScore<T = unknown> extends MemoryItem<T> {
 score: number;
 }
 export type MemoryMessage = LLMModelInputMessage;
@@ -110,13 +109,19 @@ export type MemoryActions<T> = {
 } | {
 action: "delete";
 inputs: {
-filter: string | string[] | Record<string,
+filter: string | string[] | Record<string, unknown>;
+};
+outputs: {
+[name: string]: never;
 };
-outputs: {};
 } | {
 action: "reset";
-inputs: {
-
+inputs: {
+[name: string]: never;
+};
+outputs: {
+[name: string]: never;
+};
 };
 export interface SortItem {
 field: string;
@@ -124,7 +129,7 @@ export interface SortItem {
 }
 export type MemorySortOptions = SortItem | SortItem[];
 export declare abstract class Memorable<T, C = undefined> extends Runnable<MemoryActions<T>, MemoryActions<T>["outputs"]> {
-constructor();
+constructor(context?: Context);
 abstract runner?: MemoryRunner<T, C>;
 abstract add(messages: Extract<MemoryActions<T>, {
 action: "add";
@@ -169,15 +174,18 @@ export declare abstract class Memorable<T, C = undefined> extends Runnable<Memor
 }>["outputs"]>;
 abstract reset(): Promise<void>;
 }
-export
+export type MemoryRunnerInput<C = undefined> = {
 messages: MemoryMessage[];
 userId?: string;
 sessionId?: string;
 metadata?: MemoryMetadata;
 filter?: MemoryMetadata;
 customData: C;
-}
-export
+};
+export type MemoryRunnerOutput<T> = {
+actions: MemoryActionItem<T>[];
+};
+export declare abstract class MemoryRunner<T, C = undefined> extends Runnable<MemoryRunnerInput<C>, MemoryRunnerOutput<T>> {
 constructor(name: string);
 }
-export type MemorableSearchOutput<T extends Memorable<
+export type MemorableSearchOutput<T extends Memorable<unknown>> = Awaited<ReturnType<T["search"]>>["results"];
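With the runner input/output shapes now exported as named aliases (`MemoryRunnerInput`, `MemoryRunnerOutput`), custom memory plumbing can be typed against them directly. A small sketch; the package-root export of these aliases is an assumption:

```ts
import type { MemoryRunnerInput, MemoryRunnerOutput } from "@aigne/core";

// Type a custom extraction step against the newly named runner I/O shapes.
async function runExtraction<T>(
  input: MemoryRunnerInput, // messages, userId?, sessionId?, metadata?, filter?, customData
  extract: (messages: MemoryRunnerInput["messages"]) => Promise<MemoryRunnerOutput<T>["actions"]>,
): Promise<MemoryRunnerOutput<T>> {
  return { actions: await extract(input.messages) };
}
```

The `Memorable` base constructor now also accepts an optional `Context`, which the compiled class forwards to `Runnable` (see `memorable.js` below).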
package/lib/cjs/memorable.js
CHANGED
@@ -5,14 +5,14 @@ const lodash_1 = require("lodash");
 const runnable_1 = require("./runnable");
 const utils_1 = require("./utils");
 class Memorable extends runnable_1.Runnable {
-constructor() {
+constructor(context) {
 super({
 id: "memory",
 type: "memory",
 name: "Memory",
 inputs: utils_1.OrderedRecord.fromArray([]),
 outputs: utils_1.OrderedRecord.fromArray([]),
-});
+}, context);
 }
 }
 exports.Memorable = Memorable;
package/lib/cjs/open-api-agent.d.ts
CHANGED
@@ -1,55 +1,26 @@
-import { Agent } from "./agent";
+import { Agent, type AgentDefinition, type AgentMemories, type AgentPreloads, type AgentProcessInput, type CreateAgentInputSchema, type CreateAgentMemoriesSchema, type CreateAgentMemoriesType, type CreateAgentOptions, type CreateAgentOutputSchema, type CreateAgentPreloadsSchema, type CreateAgentPreloadsType } from "./agent";
 import type { Context, ContextState } from "./context";
-import { type
-import type {
-import type {
-import type { MemorableSearchOutput, MemoryItemWithScore } from "./memorable";
-import type { RunnableDefinition } from "./runnable";
+import { type SchemaToType } from "./definitions/data-schema";
+import type { AuthConfig, FetchRequest, HTTPMethod, OpenAPIDataType } from "./definitions/open-api";
+import type { RunnableInput, RunnableOutput } from "./runnable";
 import type { OrderedRecord } from "./utils";
-export declare class OpenAPIAgent<I extends {
-[name: string]: any;
-} = {}, O extends {
-[name: string]: any;
-} = {}, Memories extends {
-[name: string]: MemoryItemWithScore[];
-} = {}, State extends ContextState = ContextState> extends Agent<I, O, Memories, State> {
+export declare class OpenAPIAgent<I extends RunnableInput = RunnableInput, O extends RunnableOutput = RunnableOutput, State extends ContextState = ContextState, Preloads extends AgentPreloads = AgentPreloads, Memories extends AgentMemories = AgentMemories> extends Agent<I, O, State, Preloads, Memories> {
 definition: OpenAPIAgentDefinition;
 static create: typeof create;
 constructor(definition: OpenAPIAgentDefinition, context?: Context<State>);
-process(input: I): Promise<O>;
+process(input: AgentProcessInput<I, Preloads, Memories>): Promise<O>;
 fetch<T>(request: FetchRequest): Promise<T>;
 }
-export interface OpenAPIAgentDefinition extends
+export interface OpenAPIAgentDefinition extends AgentDefinition {
 type: "open_api_agent";
 inputs: OrderedRecord<OpenAPIDataType>;
 url: string;
 method?: HTTPMethod;
 auth?: AuthConfig;
 }
-
-[name: string]: OpenAPIDataTypeSchema;
-}, O extends {
-[name: string]: DataTypeSchema;
-}, Memories extends {
-[name: string]: CreateRunnableMemory<I>;
-}, State extends ContextState> {
-context?: Context<State>;
-id?: string;
-name?: string;
-inputs: I;
-outputs: O;
-memories?: Memories;
+declare function create<I extends CreateAgentInputSchema, O extends CreateAgentOutputSchema, State extends ContextState, Preloads extends CreateAgentPreloadsSchema<I>, Memories extends CreateAgentMemoriesSchema<I>>(options: CreateAgentOptions<I, O, State, Preloads, Memories> & {
 url: string;
 method?: HTTPMethod;
 auth?: AuthConfig;
-}
-declare function create<I extends {
-[name: string]: OpenAPIDataTypeSchema;
-}, O extends {
-[name: string]: DataTypeSchema;
-}, Memories extends {
-[name: string]: CreateRunnableMemory<I>;
-}, State extends ContextState>({ context, ...options }: CreateOpenAPIAgentOptions<I, O, Memories, State>): OpenAPIAgent<SchemaMapType<I>, SchemaMapType<O>, {
-[name in keyof Memories]: MemorableSearchOutput<Memories[name]["memory"]>;
-}, State>;
+}): OpenAPIAgent<SchemaToType<I>, SchemaToType<O>, State, CreateAgentPreloadsType<I, Preloads>, CreateAgentMemoriesType<I, Memories>>;
 export {};
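`OpenAPIAgent.create` now takes the shared `CreateAgentOptions` plus the HTTP-specific `url`, `method`, and `auth` fields. A hedged sketch; the input/output schema literals and the exact `HTTPMethod`/`AuthConfig` values are assumptions, since `definitions/open-api` is not shown in this excerpt:

```ts
import { OpenAPIAgent } from "@aigne/core";

const getWeather = OpenAPIAgent.create({
  name: "get-weather",
  url: "https://api.example.com/weather", // placeholder endpoint
  // Assumed schema-literal shape; real parameter schemas come from definitions/open-api.
  inputs: { city: { type: "string", required: true } },
  outputs: { forecast: { type: "string" } },
});
```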
package/lib/cjs/open-api-agent.js
CHANGED
@@ -17,7 +17,9 @@ const nanoid_1 = require("nanoid");
 const tsyringe_1 = require("tsyringe");
 const agent_1 = require("./agent");
 const constants_1 = require("./constants");
-const
+const data_schema_1 = require("./definitions/data-schema");
+const memory_1 = require("./definitions/memory");
+const preload_1 = require("./definitions/preload");
 const fetch_open_api_1 = require("./utils/fetch-open-api");
 const open_api_parameter_1 = require("./utils/open-api-parameter");
 let OpenAPIAgent = class OpenAPIAgent extends agent_1.Agent {
@@ -45,18 +47,22 @@ exports.OpenAPIAgent = OpenAPIAgent = __decorate([
 __param(1, (0, tsyringe_1.inject)(constants_1.TYPES.context)),
 __metadata("design:paramtypes", [Object, Object])
 ], OpenAPIAgent);
-function create(
-const agentId = options.
-const inputs = (0,
-const outputs = (0,
+function create(options) {
+const agentId = options.name || (0, nanoid_1.nanoid)();
+const inputs = (0, data_schema_1.schemaToDataType)(options.inputs);
+const outputs = (0, data_schema_1.schemaToDataType)(options.outputs);
+const preloads = (0, preload_1.preloadCreatorsToPreloads)(inputs, options.preloads);
+const memories = (0, memory_1.toRunnableMemories)(agentId, inputs, options.memories || {});
 return new OpenAPIAgent({
 id: agentId,
 name: options.name,
 type: "open_api_agent",
 inputs,
+preloads,
+memories,
 outputs,
 url: options.url,
 method: options.method,
 auth: options.auth,
-}, context);
+}, options.context);
 }