langchain 0.0.182 → 0.0.184
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/agents/agent.cjs +19 -13
- package/dist/agents/agent.d.ts +16 -17
- package/dist/agents/agent.js +17 -11
- package/dist/agents/executor.d.ts +10 -16
- package/dist/agents/toolkits/aws_sfn.d.ts +1 -4
- package/dist/agents/toolkits/conversational_retrieval/openai_functions.d.ts +1 -1
- package/dist/agents/toolkits/json/json.d.ts +1 -4
- package/dist/agents/toolkits/openapi/openapi.d.ts +1 -4
- package/dist/agents/toolkits/sql/sql.d.ts +1 -4
- package/dist/agents/toolkits/vectorstore/vectorstore.d.ts +2 -8
- package/dist/agents/types.d.ts +5 -5
- package/dist/chat_models/googlepalm.cjs +19 -1
- package/dist/chat_models/googlepalm.d.ts +5 -1
- package/dist/chat_models/googlepalm.js +20 -2
- package/dist/experimental/openai_assistant/index.cjs +221 -0
- package/dist/experimental/openai_assistant/index.d.ts +36 -0
- package/dist/experimental/openai_assistant/index.js +217 -0
- package/dist/experimental/openai_assistant/schema.cjs +2 -0
- package/dist/experimental/openai_assistant/schema.d.ts +12 -0
- package/dist/experimental/openai_assistant/schema.js +1 -0
- package/dist/load/import_map.cjs +2 -1
- package/dist/load/import_map.d.ts +1 -0
- package/dist/load/import_map.js +1 -0
- package/dist/tools/convert_to_openai.cjs +12 -1
- package/dist/tools/convert_to_openai.d.ts +1 -0
- package/dist/tools/convert_to_openai.js +10 -0
- package/dist/vectorstores/pinecone.cjs +4 -1
- package/dist/vectorstores/pinecone.d.ts +2 -1
- package/dist/vectorstores/pinecone.js +4 -1
- package/experimental/openai_assistant.cjs +1 -0
- package/experimental/openai_assistant.d.ts +1 -0
- package/experimental/openai_assistant.js +1 -0
- package/package.json +10 -2
package/dist/agents/agent.cjs
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Agent = exports.LLMSingleActionAgent = exports.
+exports.Agent = exports.LLMSingleActionAgent = exports.RunnableAgent = exports.BaseMultiActionAgent = exports.BaseSingleActionAgent = exports.BaseAgent = void 0;
 const serializable_js_1 = require("../load/serializable.cjs");
 /**
  * Error class for parse errors in LangChain. Contains information about
@@ -66,12 +66,26 @@ class BaseSingleActionAgent extends BaseAgent {
     }
 }
 exports.BaseSingleActionAgent = BaseSingleActionAgent;
+/**
+ * Abstract base class for multi-action agents in LangChain. Extends the
+ * BaseAgent class and provides additional functionality specific to
+ * multi-action agents.
+ */
+class BaseMultiActionAgent extends BaseAgent {
+    _agentActionType() {
+        return "multi";
+    }
+}
+exports.BaseMultiActionAgent = BaseMultiActionAgent;
+function isAgentAction(input) {
+    return !Array.isArray(input) && input?.tool !== undefined;
+}
 /**
  * Class representing a single action agent which accepts runnables.
  * Extends the BaseSingleActionAgent class and provides methods for
  * planning agent actions with runnables.
  */
-class RunnableAgent extends
+class RunnableAgent extends BaseMultiActionAgent {
     get inputKeys() {
         return [];
     }
@@ -110,21 +124,13 @@ class RunnableAgent extends BaseSingleActionAgent {
             callbacks: callbackManager,
             runName: "RunnableAgent",
         });
+        if (isAgentAction(output)) {
+            return [output];
+        }
         return output;
     }
 }
 exports.RunnableAgent = RunnableAgent;
-/**
- * Abstract base class for multi-action agents in LangChain. Extends the
- * BaseAgent class and provides additional functionality specific to
- * multi-action agents.
- */
-class BaseMultiActionAgent extends BaseAgent {
-    _agentActionType() {
-        return "multi";
-    }
-}
-exports.BaseMultiActionAgent = BaseMultiActionAgent;
 /**
  * Class representing a single action agent using a LLMChain in LangChain.
  * Extends the BaseSingleActionAgent class and provides methods for
package/dist/agents/agent.d.ts
CHANGED
@@ -55,23 +55,6 @@ export declare abstract class BaseSingleActionAgent extends BaseAgent {
      */
     abstract plan(steps: AgentStep[], inputs: ChainValues, callbackManager?: CallbackManager): Promise<AgentAction | AgentFinish>;
 }
-/**
- * Class representing a single action agent which accepts runnables.
- * Extends the BaseSingleActionAgent class and provides methods for
- * planning agent actions with runnables.
- */
-export declare class RunnableAgent<RunInput extends ChainValues & {
-    agent_scratchpad?: string | BaseMessage[];
-    stop?: string[];
-}, RunOutput extends AgentAction | AgentFinish> extends BaseSingleActionAgent {
-    protected lc_runnable: boolean;
-    lc_namespace: string[];
-    runnable: Runnable<RunInput, RunOutput>;
-    stop?: string[];
-    get inputKeys(): string[];
-    constructor(fields: RunnableAgentInput<RunInput, RunOutput>);
-    plan(steps: AgentStep[], inputs: RunInput, callbackManager?: CallbackManager): Promise<AgentAction | AgentFinish>;
-}
 /**
  * Abstract base class for multi-action agents in LangChain. Extends the
  * BaseAgent class and provides additional functionality specific to
@@ -90,6 +73,22 @@ export declare abstract class BaseMultiActionAgent extends BaseAgent {
      */
     abstract plan(steps: AgentStep[], inputs: ChainValues, callbackManager?: CallbackManager): Promise<AgentAction[] | AgentFinish>;
 }
+/**
+ * Class representing a single action agent which accepts runnables.
+ * Extends the BaseSingleActionAgent class and provides methods for
+ * planning agent actions with runnables.
+ */
+export declare class RunnableAgent extends BaseMultiActionAgent {
+    protected lc_runnable: boolean;
+    lc_namespace: string[];
+    runnable: Runnable<ChainValues & {
+        steps: AgentStep[];
+    }, AgentAction[] | AgentAction | AgentFinish>;
+    stop?: string[];
+    get inputKeys(): string[];
+    constructor(fields: RunnableAgentInput);
+    plan(steps: AgentStep[], inputs: ChainValues, callbackManager?: CallbackManager): Promise<AgentAction[] | AgentFinish>;
+}
 /**
  * Interface for input data for creating a LLMSingleActionAgent.
  */
package/dist/agents/agent.js
CHANGED
@@ -61,12 +61,25 @@ export class BaseSingleActionAgent extends BaseAgent {
         return "single";
     }
 }
+/**
+ * Abstract base class for multi-action agents in LangChain. Extends the
+ * BaseAgent class and provides additional functionality specific to
+ * multi-action agents.
+ */
+export class BaseMultiActionAgent extends BaseAgent {
+    _agentActionType() {
+        return "multi";
+    }
+}
+function isAgentAction(input) {
+    return !Array.isArray(input) && input?.tool !== undefined;
+}
 /**
  * Class representing a single action agent which accepts runnables.
  * Extends the BaseSingleActionAgent class and provides methods for
  * planning agent actions with runnables.
  */
-export class RunnableAgent extends
+export class RunnableAgent extends BaseMultiActionAgent {
     get inputKeys() {
         return [];
     }
@@ -105,19 +118,12 @@ export class RunnableAgent extends BaseSingleActionAgent {
             callbacks: callbackManager,
             runName: "RunnableAgent",
         });
+        if (isAgentAction(output)) {
+            return [output];
+        }
         return output;
     }
 }
-/**
- * Abstract base class for multi-action agents in LangChain. Extends the
- * BaseAgent class and provides additional functionality specific to
- * multi-action agents.
- */
-export class BaseMultiActionAgent extends BaseAgent {
-    _agentActionType() {
-        return "multi";
-    }
-}
 /**
  * Class representing a single action agent using a LLMChain in LangChain.
  * Extends the BaseSingleActionAgent class and provides methods for
package/dist/agents/executor.d.ts
CHANGED
@@ -2,7 +2,7 @@ import { BaseChain, ChainInputs } from "../chains/base.js";
 import { BaseMultiActionAgent, BaseSingleActionAgent } from "./agent.js";
 import { StoppingMethod } from "./types.js";
 import { SerializedLLMChain } from "../chains/serde.js";
-import { AgentAction, AgentFinish,
+import { AgentAction, AgentFinish, AgentStep, ChainValues } from "../schema/index.js";
 import { CallbackManagerForChainRun } from "../callbacks/manager.js";
 import { OutputParserException } from "../schema/output_parser.js";
 import { StructuredTool, Tool, ToolInputParsingException } from "../tools/base.js";
@@ -15,17 +15,17 @@ type ExtractToolType<T> = T extends {
  * AgentExecutor. It extends ChainInputs and includes additional
  * properties specific to agent execution.
  */
-export interface AgentExecutorInput
-
-
-}
-    agent: BaseSingleActionAgent | BaseMultiActionAgent | Runnable<RunInput, RunOutput>;
+export interface AgentExecutorInput extends ChainInputs {
+    agent: BaseSingleActionAgent | BaseMultiActionAgent | Runnable<ChainValues & {
+        steps?: AgentStep[];
+    }, AgentAction[] | AgentAction | AgentFinish>;
     tools: ExtractToolType<this["agent"]>[];
     returnIntermediateSteps?: boolean;
     maxIterations?: number;
     earlyStoppingMethod?: StoppingMethod;
     handleParsingErrors?: boolean | string | ((e: OutputParserException | ToolInputParsingException) => string);
 }
+export type AgentExecutorOutput = ChainValues;
 /**
  * Tool that just returns the query.
  * Used for exception tracking.
@@ -39,10 +39,7 @@ export declare class ExceptionTool extends Tool {
  * A chain managing an agent using tools.
  * @augments BaseChain
  */
-export declare class AgentExecutor
-    agent_scratchpad?: string | BaseMessage[];
-    stop?: string[];
-} = any, RunOutput extends AgentAction | AgentFinish = any> extends BaseChain {
+export declare class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
     static lc_name(): string;
     get lc_namespace(): string[];
     agent: BaseSingleActionAgent | BaseMultiActionAgent;
@@ -63,12 +60,9 @@ export declare class AgentExecutor<RunInput extends ChainValues & {
     handleParsingErrors: boolean | string | ((e: OutputParserException | ToolInputParsingException) => string);
     get inputKeys(): string[];
     get outputKeys(): string[];
-    constructor(input: AgentExecutorInput
+    constructor(input: AgentExecutorInput);
     /** Create from agent and a list of tools. */
-    static fromAgentAndTools
-    agent_scratchpad?: string | BaseMessage[];
-    stop?: string[];
-    }, RunOutput extends AgentAction | AgentFinish>(fields: AgentExecutorInput<RunInput, RunOutput>): AgentExecutor<RunInput, RunOutput>;
+    static fromAgentAndTools(fields: AgentExecutorInput): AgentExecutor;
     /**
      * Method that checks if the agent execution should continue based on the
      * number of iterations.
@@ -77,7 +71,7 @@ export declare class AgentExecutor<RunInput extends ChainValues & {
      */
     private shouldContinue;
     /** @ignore */
-    _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise<
+    _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise<AgentExecutorOutput>;
     _chainType(): "agent_executor";
     serialize(): SerializedLLMChain;
 }
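Since AgentExecutor and its input type are no longer generic, a runnable-backed agent plugs in through the same plain input shape. A minimal sketch (not taken from the package docs), assuming myRunnableAgent is any Runnable that maps chain values plus intermediate steps to AgentAction, AgentAction[], or AgentFinish, and that tools is an existing tool list:

    import { AgentExecutor } from "langchain/agents";

    // Assumed to be defined elsewhere: a Runnable agent
    // (e.g. prompt.pipe(model).pipe(outputParser)) and the tools it may call.
    declare const myRunnableAgent: any;
    declare const tools: any[];

    // fromAgentAndTools wraps the runnable in the new multi-action RunnableAgent internally.
    const executor = AgentExecutor.fromAgentAndTools({ agent: myRunnableAgent, tools });
    const result = await executor.call({ input: "What is 2 + 2?" });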
package/dist/agents/toolkits/aws_sfn.d.ts
CHANGED
@@ -30,7 +30,4 @@ export declare class AWSSfnToolkit extends Toolkit {
     asl: string;
     constructor(args: AWSSfnToolkitArgs & SfnConfig);
 }
-export declare function createAWSSfnAgent(llm: BaseLanguageModel, toolkit: AWSSfnToolkit, args?: AWSSfnCreatePromptArgs): AgentExecutor
-    agent_scratchpad?: string | import("../../schema/index.js").BaseMessage[] | undefined;
-    stop?: string[] | undefined;
-}, import("../../schema/index.js").AgentAction | import("../../schema/index.js").AgentFinish>;
+export declare function createAWSSfnAgent(llm: BaseLanguageModel, toolkit: AWSSfnToolkit, args?: AWSSfnCreatePromptArgs): AgentExecutor;
package/dist/agents/toolkits/conversational_retrieval/openai_functions.d.ts
CHANGED
@@ -17,4 +17,4 @@ export type ConversationalRetrievalAgentOptions = {
  * @param options Optional ConversationalRetrievalAgentOptions to customize the agent.
  * @returns A Promise that resolves to an initialized AgentExecutor.
  */
-export declare function createConversationalRetrievalAgent(llm: ChatOpenAI, tools: StructuredTool[], options?: ConversationalRetrievalAgentOptions): Promise<import("../../executor.js").AgentExecutor
+export declare function createConversationalRetrievalAgent(llm: ChatOpenAI, tools: StructuredTool[], options?: ConversationalRetrievalAgentOptions): Promise<import("../../executor.js").AgentExecutor>;
package/dist/agents/toolkits/json/json.d.ts
CHANGED
@@ -24,7 +24,4 @@ export declare class JsonToolkit extends Toolkit {
  * @param args Optional prompt arguments used to create the JSON agent.
  * @returns An AgentExecutor for executing the created JSON agent with the tools.
  */
-export declare function createJsonAgent(llm: BaseLanguageModel, toolkit: JsonToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor
-    agent_scratchpad?: string | import("../../../schema/index.js").BaseMessage[] | undefined;
-    stop?: string[] | undefined;
-}, import("../../../schema/index.js").AgentAction | import("../../../schema/index.js").AgentFinish>;
+export declare function createJsonAgent(llm: BaseLanguageModel, toolkit: JsonToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;
package/dist/agents/toolkits/openapi/openapi.d.ts
CHANGED
@@ -41,7 +41,4 @@ export declare class OpenApiToolkit extends RequestsToolkit {
  *
  * @link See https://js.langchain.com/docs/security for more information.
  */
-export declare function createOpenApiAgent(llm: BaseLanguageModel, openApiToolkit: OpenApiToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor
-    agent_scratchpad?: string | import("../../../schema/index.js").BaseMessage[] | undefined;
-    stop?: string[] | undefined;
-}, import("../../../schema/index.js").AgentAction | import("../../../schema/index.js").AgentFinish>;
+export declare function createOpenApiAgent(llm: BaseLanguageModel, openApiToolkit: OpenApiToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;
package/dist/agents/toolkits/sql/sql.d.ts
CHANGED
@@ -22,7 +22,4 @@ export declare class SqlToolkit extends Toolkit {
     dialect: string;
     constructor(db: SqlDatabase, llm?: BaseLanguageModel);
 }
-export declare function createSqlAgent(llm: BaseLanguageModel, toolkit: SqlToolkit, args?: SqlCreatePromptArgs): AgentExecutor
-    agent_scratchpad?: string | import("../../../schema/index.js").BaseMessage[] | undefined;
-    stop?: string[] | undefined;
-}, import("../../../schema/index.js").AgentAction | import("../../../schema/index.js").AgentFinish>;
+export declare function createSqlAgent(llm: BaseLanguageModel, toolkit: SqlToolkit, args?: SqlCreatePromptArgs): AgentExecutor;
package/dist/agents/toolkits/vectorstore/vectorstore.d.ts
CHANGED
@@ -34,11 +34,5 @@ export declare class VectorStoreRouterToolkit extends Toolkit {
     llm: BaseLanguageModel;
     constructor(vectorStoreInfos: VectorStoreInfo[], llm: BaseLanguageModel);
 }
-export declare function createVectorStoreAgent(llm: BaseLanguageModel, toolkit: VectorStoreToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor
-
-    stop?: string[] | undefined;
-}, import("../../../schema/index.js").AgentAction | import("../../../schema/index.js").AgentFinish>;
-export declare function createVectorStoreRouterAgent(llm: BaseLanguageModel, toolkit: VectorStoreRouterToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor<import("../../../schema/index.js").ChainValues & {
-    agent_scratchpad?: string | import("../../../schema/index.js").BaseMessage[] | undefined;
-    stop?: string[] | undefined;
-}, import("../../../schema/index.js").AgentAction | import("../../../schema/index.js").AgentFinish>;
+export declare function createVectorStoreAgent(llm: BaseLanguageModel, toolkit: VectorStoreToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;
+export declare function createVectorStoreRouterAgent(llm: BaseLanguageModel, toolkit: VectorStoreRouterToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;
package/dist/agents/types.d.ts
CHANGED
@@ -17,11 +17,11 @@ export interface AgentInput {
  * Interface defining the input for creating an agent that uses runnables.
  * It includes the Runnable instance, and an optional list of stop strings.
  */
-export interface RunnableAgentInput
-
-
-
-
+export interface RunnableAgentInput {
+    runnable: Runnable<ChainValues & {
+        agent_scratchpad?: string | BaseMessage[];
+        stop?: string[];
+    }, AgentAction[] | AgentAction | AgentFinish>;
     stop?: string[];
 }
 /**
package/dist/chat_models/googlepalm.cjs
CHANGED
@@ -88,7 +88,25 @@ class ChatGooglePaLM extends base_js_1.BaseChatModel {
         if (this.topK && this.topK < 0) {
             throw new Error("`topK` must be a positive integer");
         }
-        this.examples =
+        this.examples =
+            fields?.examples?.map((example) => {
+                if (((0, index_js_1.isBaseMessage)(example.input) &&
+                    typeof example.input.content !== "string") ||
+                    ((0, index_js_1.isBaseMessage)(example.output) &&
+                        typeof example.output.content !== "string")) {
+                    throw new Error("GooglePaLM example messages may only have string content.");
+                }
+                return {
+                    input: {
+                        ...example.input,
+                        content: example.input?.content,
+                    },
+                    output: {
+                        ...example.output,
+                        content: example.output?.content,
+                    },
+                };
+            }) ?? this.examples;
         this.apiKey =
             fields?.apiKey ?? (0, env_js_1.getEnvironmentVariable)("GOOGLE_PALM_API_KEY");
         if (!this.apiKey) {
package/dist/chat_models/googlepalm.d.ts
CHANGED
@@ -2,6 +2,10 @@ import type { protos } from "@google-ai/generativelanguage";
 import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
 import { BaseMessage, ChatResult } from "../schema/index.js";
 import { BaseChatModel, BaseChatModelParams } from "./base.js";
+export type BaseMessageExamplePair = {
+    input: BaseMessage;
+    output: BaseMessage;
+};
 /**
  * An interface defining the input to the ChatGooglePaLM class.
  */
@@ -47,7 +51,7 @@ export interface GooglePaLMChatInput extends BaseChatModelParams {
      * Note: The default value varies by model
      */
     topK?: number;
-    examples?: protos.google.ai.generativelanguage.v1beta2.IExample[];
+    examples?: protos.google.ai.generativelanguage.v1beta2.IExample[] | BaseMessageExamplePair[];
     /**
     * Google Palm API key to use
     */
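A minimal sketch (not from the package docs) of the new BaseMessageExamplePair form of examples, assuming the GOOGLE_PALM_API_KEY environment variable is set:

    import { ChatGooglePaLM } from "langchain/chat_models/googlepalm";
    import { AIMessage, HumanMessage } from "langchain/schema";

    // Examples may now be plain BaseMessage pairs; the constructor converts them
    // and throws if any example message has non-string content.
    const model = new ChatGooglePaLM({
        examples: [
            { input: new HumanMessage("What is 2 + 2?"), output: new AIMessage("4") },
        ],
    });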
package/dist/chat_models/googlepalm.js
CHANGED
@@ -1,6 +1,6 @@
 import { DiscussServiceClient } from "@google-ai/generativelanguage";
 import { GoogleAuth } from "google-auth-library";
-import { AIMessage, ChatMessage, } from "../schema/index.js";
+import { AIMessage, ChatMessage, isBaseMessage, } from "../schema/index.js";
 import { getEnvironmentVariable } from "../util/env.js";
 import { BaseChatModel } from "./base.js";
 function getMessageAuthor(message) {
@@ -85,7 +85,25 @@ export class ChatGooglePaLM extends BaseChatModel {
         if (this.topK && this.topK < 0) {
             throw new Error("`topK` must be a positive integer");
         }
-        this.examples =
+        this.examples =
+            fields?.examples?.map((example) => {
+                if ((isBaseMessage(example.input) &&
+                    typeof example.input.content !== "string") ||
+                    (isBaseMessage(example.output) &&
+                        typeof example.output.content !== "string")) {
+                    throw new Error("GooglePaLM example messages may only have string content.");
+                }
+                return {
+                    input: {
+                        ...example.input,
+                        content: example.input?.content,
+                    },
+                    output: {
+                        ...example.output,
+                        content: example.output?.content,
+                    },
+                };
+            }) ?? this.examples;
         this.apiKey =
             fields?.apiKey ?? getEnvironmentVariable("GOOGLE_PALM_API_KEY");
         if (!this.apiKey) {
package/dist/experimental/openai_assistant/index.cjs
ADDED
@@ -0,0 +1,221 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OpenAIAssistantRunnable = void 0;
+const openai_1 = require("openai");
+const base_js_1 = require("../../schema/runnable/base.cjs");
+const time_js_1 = require("../../util/time.cjs");
+const base_js_2 = require("../../tools/base.cjs");
+const convert_to_openai_js_1 = require("../../tools/convert_to_openai.cjs");
+class OpenAIAssistantRunnable extends base_js_1.Runnable {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "experimental", "openai_assistant"]
+        });
+        Object.defineProperty(this, "client", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "assistantId", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "pollIntervalMs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1000
+        });
+        Object.defineProperty(this, "asAgent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.client = fields.client ?? new openai_1.OpenAI(fields?.clientOptions);
+        this.assistantId = fields.assistantId;
+        this.asAgent = fields.asAgent ?? this.asAgent;
+    }
+    static async createAssistant({ model, name, instructions, tools, client, clientOptions, asAgent, pollIntervalMs, }) {
+        const formattedTools = tools?.map((tool) => {
+            // eslint-disable-next-line no-instanceof/no-instanceof
+            if (tool instanceof base_js_2.StructuredTool) {
+                return (0, convert_to_openai_js_1.formatToOpenAIAssistantTool)(tool);
+            }
+            return tool;
+        }) ?? [];
+        const oaiClient = client ?? new openai_1.OpenAI(clientOptions);
+        const assistant = await oaiClient.beta.assistants.create({
+            name,
+            instructions,
+            tools: formattedTools,
+            model,
+        });
+        return new this({
+            client: oaiClient,
+            assistantId: assistant.id,
+            asAgent,
+            pollIntervalMs,
+        });
+    }
+    async invoke(input, _options) {
+        let run;
+        if (this.asAgent && input.steps && input.steps.length > 0) {
+            const parsedStepsInput = await this._parseStepsInput(input);
+            run = await this.client.beta.threads.runs.submitToolOutputs(parsedStepsInput.threadId, parsedStepsInput.runId, {
+                tool_outputs: parsedStepsInput.toolOutputs,
+            });
+        }
+        else if (!("threadId" in input)) {
+            const thread = {
+                messages: [
+                    {
+                        role: "user",
+                        content: input.content,
+                        file_ids: input.fileIds,
+                        metadata: input.messagesMetadata,
+                    },
+                ],
+                metadata: input.threadMetadata,
+            };
+            run = await this._createThreadAndRun({
+                ...input,
+                thread,
+            });
+        }
+        else if (!("runId" in input)) {
+            await this.client.beta.threads.messages.create(input.threadId, {
+                content: input.content,
+                role: "user",
+                file_ids: input.file_ids,
+                metadata: input.messagesMetadata,
+            });
+            run = await this._createRun(input);
+        }
+        else {
+            // Submitting tool outputs to an existing run, outside the AgentExecutor
+            // framework.
+            run = await this.client.beta.threads.runs.submitToolOutputs(input.runId, input.threadId, {
+                tool_outputs: input.toolOutputs,
+            });
+        }
+        return this._getResponse(run.id, run.thread_id);
+    }
+    async _parseStepsInput(input) {
+        const { action: { runId, threadId }, } = input.steps[input.steps.length - 1];
+        const run = await this._waitForRun(runId, threadId);
+        const toolCalls = run.required_action?.submit_tool_outputs.tool_calls;
+        if (!toolCalls) {
+            return input;
+        }
+        const toolOutputs = toolCalls.flatMap((toolCall) => {
+            const matchedAction = input.steps.find((step) => step.action.toolCallId === toolCall.id);
+            return matchedAction
+                ? [
+                    {
+                        output: matchedAction.observation,
+                        tool_call_id: matchedAction.action.toolCallId,
+                    },
+                ]
+                : [];
+        });
+        return { toolOutputs, runId, threadId };
+    }
+    async _createRun({ instructions, model, tools, metadata, threadId, }) {
+        const run = this.client.beta.threads.runs.create(threadId, {
+            assistant_id: this.assistantId,
+            instructions,
+            model,
+            tools,
+            metadata,
+        });
+        return run;
+    }
+    async _createThreadAndRun(input) {
+        const params = [
+            "instructions",
+            "model",
+            "tools",
+            "run_metadata",
+        ]
+            .filter((key) => key in input)
+            .reduce((obj, key) => {
+            const newObj = obj;
+            newObj[key] = input[key];
+            return newObj;
+        }, {});
+        const run = this.client.beta.threads.createAndRun({
+            ...params,
+            thread: input.thread,
+            assistant_id: this.assistantId,
+        });
+        return run;
+    }
+    async _waitForRun(runId, threadId) {
+        let inProgress = true;
+        let run = {};
+        while (inProgress) {
+            run = await this.client.beta.threads.runs.retrieve(threadId, runId);
+            inProgress = ["in_progress", "queued"].includes(run.status);
+            if (inProgress) {
+                await (0, time_js_1.sleep)(this.pollIntervalMs);
+            }
+        }
+        return run;
+    }
+    async _getResponse(runId, threadId) {
+        const run = await this._waitForRun(runId, threadId);
+        if (run.status === "completed") {
+            const messages = await this.client.beta.threads.messages.list(threadId, {
+                order: "asc",
+            });
+            const newMessages = messages.data.filter((msg) => msg.run_id === runId);
+            if (!this.asAgent) {
+                return newMessages;
+            }
+            const answer = newMessages.flatMap((msg) => msg.content);
+            if (answer.every((item) => item.type === "text")) {
+                const answerString = answer
+                    .map((item) => item.type === "text" && item.text.value)
+                    .join("\n");
+                return {
+                    returnValues: {
+                        output: answerString,
+                    },
+                    log: "",
+                    runId,
+                    threadId,
+                };
+            }
+        }
+        else if (run.status === "requires_action") {
+            if (!this.asAgent) {
+                return run.required_action?.submit_tool_outputs.tool_calls ?? [];
+            }
+            const actions = [];
+            run.required_action?.submit_tool_outputs.tool_calls.forEach((item) => {
+                const functionCall = item.function;
+                const args = JSON.parse(functionCall.arguments);
+                actions.push({
+                    tool: functionCall.name,
+                    toolInput: args,
+                    toolCallId: item.id,
+                    log: "",
+                    runId,
+                    threadId,
+                });
+            });
+            return actions;
+        }
+        const runInfo = JSON.stringify(run, null, 2);
+        throw new Error(`Unexpected run status ${run.status}.\nFull run info:\n\n${runInfo}`);
+    }
+}
+exports.OpenAIAssistantRunnable = OpenAIAssistantRunnable;
package/dist/experimental/openai_assistant/index.d.ts
ADDED
@@ -0,0 +1,36 @@
+import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
+import { Runnable } from "../../schema/runnable/base.js";
+import type { RunnableConfig } from "../../schema/runnable/config.js";
+import type { OpenAIAssistantFinish, OpenAIAssistantAction, OpenAIToolType } from "./schema.js";
+import { StructuredTool } from "../../tools/base.js";
+type ThreadMessage = OpenAIClient.Beta.Threads.ThreadMessage;
+type RequiredActionFunctionToolCall = OpenAIClient.Beta.Threads.RequiredActionFunctionToolCall;
+type ExtractRunOutput<AsAgent extends boolean | undefined> = AsAgent extends true ? OpenAIAssistantFinish | OpenAIAssistantAction[] : ThreadMessage[] | RequiredActionFunctionToolCall[];
+export type OpenAIAssistantRunnableInput<AsAgent extends boolean | undefined = undefined> = {
+    client?: OpenAIClient;
+    clientOptions?: ClientOptions;
+    assistantId: string;
+    pollIntervalMs?: number;
+    asAgent?: AsAgent;
+};
+export declare class OpenAIAssistantRunnable<AsAgent extends boolean | undefined, RunInput extends Record<string, any> = Record<string, any>> extends Runnable<RunInput, ExtractRunOutput<AsAgent>> {
+    lc_namespace: string[];
+    private client;
+    assistantId: string;
+    pollIntervalMs: number;
+    asAgent?: AsAgent;
+    constructor(fields: OpenAIAssistantRunnableInput<AsAgent>);
+    static createAssistant<AsAgent extends boolean>({ model, name, instructions, tools, client, clientOptions, asAgent, pollIntervalMs, }: Omit<OpenAIAssistantRunnableInput<AsAgent>, "assistantId"> & {
+        model: string;
+        name?: string;
+        instructions?: string;
+        tools?: OpenAIToolType | Array<StructuredTool>;
+    }): Promise<OpenAIAssistantRunnable<AsAgent, Record<string, any>>>;
+    invoke(input: RunInput, _options?: RunnableConfig): Promise<ExtractRunOutput<AsAgent>>;
+    private _parseStepsInput;
+    private _createRun;
+    private _createThreadAndRun;
+    private _waitForRun;
+    private _getResponse;
+}
+export {};
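A minimal sketch (not from the package docs) of the new experimental entrypoint; the model name and prompt text are placeholders, and OPENAI_API_KEY is assumed to be set:

    import { OpenAIAssistantRunnable } from "langchain/experimental/openai_assistant";

    // Creates an Assistant through the OpenAI Beta API and wraps it as a Runnable.
    const assistant = await OpenAIAssistantRunnable.createAssistant({
        model: "gpt-4-1106-preview",
        name: "Math helper",
        instructions: "You are a helpful math tutor.",
    });
    // With no threadId in the input, a new thread is created and the run is polled
    // (every pollIntervalMs) until it completes; the run's messages are returned.
    const messages = await assistant.invoke({ content: "What is 7 * 6?" });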
package/dist/experimental/openai_assistant/index.js
ADDED
@@ -0,0 +1,217 @@
+import { OpenAI as OpenAIClient } from "openai";
+import { Runnable } from "../../schema/runnable/base.js";
+import { sleep } from "../../util/time.js";
+import { StructuredTool } from "../../tools/base.js";
+import { formatToOpenAIAssistantTool } from "../../tools/convert_to_openai.js";
+export class OpenAIAssistantRunnable extends Runnable {
+    constructor(fields) {
+        super(fields);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "experimental", "openai_assistant"]
+        });
+        Object.defineProperty(this, "client", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "assistantId", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "pollIntervalMs", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: 1000
+        });
+        Object.defineProperty(this, "asAgent", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.client = fields.client ?? new OpenAIClient(fields?.clientOptions);
+        this.assistantId = fields.assistantId;
+        this.asAgent = fields.asAgent ?? this.asAgent;
+    }
+    static async createAssistant({ model, name, instructions, tools, client, clientOptions, asAgent, pollIntervalMs, }) {
+        const formattedTools = tools?.map((tool) => {
+            // eslint-disable-next-line no-instanceof/no-instanceof
+            if (tool instanceof StructuredTool) {
+                return formatToOpenAIAssistantTool(tool);
+            }
+            return tool;
+        }) ?? [];
+        const oaiClient = client ?? new OpenAIClient(clientOptions);
+        const assistant = await oaiClient.beta.assistants.create({
+            name,
+            instructions,
+            tools: formattedTools,
+            model,
+        });
+        return new this({
+            client: oaiClient,
+            assistantId: assistant.id,
+            asAgent,
+            pollIntervalMs,
+        });
+    }
+    async invoke(input, _options) {
+        let run;
+        if (this.asAgent && input.steps && input.steps.length > 0) {
+            const parsedStepsInput = await this._parseStepsInput(input);
+            run = await this.client.beta.threads.runs.submitToolOutputs(parsedStepsInput.threadId, parsedStepsInput.runId, {
+                tool_outputs: parsedStepsInput.toolOutputs,
+            });
+        }
+        else if (!("threadId" in input)) {
+            const thread = {
+                messages: [
+                    {
+                        role: "user",
+                        content: input.content,
+                        file_ids: input.fileIds,
+                        metadata: input.messagesMetadata,
+                    },
+                ],
+                metadata: input.threadMetadata,
+            };
+            run = await this._createThreadAndRun({
+                ...input,
+                thread,
+            });
+        }
+        else if (!("runId" in input)) {
+            await this.client.beta.threads.messages.create(input.threadId, {
+                content: input.content,
+                role: "user",
+                file_ids: input.file_ids,
+                metadata: input.messagesMetadata,
+            });
+            run = await this._createRun(input);
+        }
+        else {
+            // Submitting tool outputs to an existing run, outside the AgentExecutor
+            // framework.
+            run = await this.client.beta.threads.runs.submitToolOutputs(input.runId, input.threadId, {
+                tool_outputs: input.toolOutputs,
+            });
+        }
+        return this._getResponse(run.id, run.thread_id);
+    }
+    async _parseStepsInput(input) {
+        const { action: { runId, threadId }, } = input.steps[input.steps.length - 1];
+        const run = await this._waitForRun(runId, threadId);
+        const toolCalls = run.required_action?.submit_tool_outputs.tool_calls;
+        if (!toolCalls) {
+            return input;
+        }
+        const toolOutputs = toolCalls.flatMap((toolCall) => {
+            const matchedAction = input.steps.find((step) => step.action.toolCallId === toolCall.id);
+            return matchedAction
+                ? [
+                    {
+                        output: matchedAction.observation,
+                        tool_call_id: matchedAction.action.toolCallId,
+                    },
+                ]
+                : [];
+        });
+        return { toolOutputs, runId, threadId };
+    }
+    async _createRun({ instructions, model, tools, metadata, threadId, }) {
+        const run = this.client.beta.threads.runs.create(threadId, {
+            assistant_id: this.assistantId,
+            instructions,
+            model,
+            tools,
+            metadata,
+        });
+        return run;
+    }
+    async _createThreadAndRun(input) {
+        const params = [
+            "instructions",
+            "model",
+            "tools",
+            "run_metadata",
+        ]
+            .filter((key) => key in input)
+            .reduce((obj, key) => {
+            const newObj = obj;
+            newObj[key] = input[key];
+            return newObj;
+        }, {});
+        const run = this.client.beta.threads.createAndRun({
+            ...params,
+            thread: input.thread,
+            assistant_id: this.assistantId,
+        });
+        return run;
+    }
+    async _waitForRun(runId, threadId) {
+        let inProgress = true;
+        let run = {};
+        while (inProgress) {
+            run = await this.client.beta.threads.runs.retrieve(threadId, runId);
+            inProgress = ["in_progress", "queued"].includes(run.status);
+            if (inProgress) {
+                await sleep(this.pollIntervalMs);
+            }
+        }
+        return run;
+    }
+    async _getResponse(runId, threadId) {
+        const run = await this._waitForRun(runId, threadId);
+        if (run.status === "completed") {
+            const messages = await this.client.beta.threads.messages.list(threadId, {
+                order: "asc",
+            });
+            const newMessages = messages.data.filter((msg) => msg.run_id === runId);
+            if (!this.asAgent) {
+                return newMessages;
+            }
+            const answer = newMessages.flatMap((msg) => msg.content);
+            if (answer.every((item) => item.type === "text")) {
+                const answerString = answer
+                    .map((item) => item.type === "text" && item.text.value)
+                    .join("\n");
+                return {
+                    returnValues: {
+                        output: answerString,
+                    },
+                    log: "",
+                    runId,
+                    threadId,
+                };
+            }
+        }
+        else if (run.status === "requires_action") {
+            if (!this.asAgent) {
+                return run.required_action?.submit_tool_outputs.tool_calls ?? [];
+            }
+            const actions = [];
+            run.required_action?.submit_tool_outputs.tool_calls.forEach((item) => {
+                const functionCall = item.function;
+                const args = JSON.parse(functionCall.arguments);
+                actions.push({
+                    tool: functionCall.name,
+                    toolInput: args,
+                    toolCallId: item.id,
+                    log: "",
+                    runId,
+                    threadId,
+                });
+            });
+            return actions;
+        }
+        const runInfo = JSON.stringify(run, null, 2);
+        throw new Error(`Unexpected run status ${run.status}.\nFull run info:\n\n${runInfo}`);
+    }
+}
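As a rough sketch of the agent mode (assumed wiring, not taken from the package docs): with asAgent: true the runnable emits OpenAIAssistantAction[] / OpenAIAssistantFinish, which the now non-generic AgentExecutor can drive; tools is assumed to be an array of StructuredTool instances defined elsewhere.

    import { AgentExecutor } from "langchain/agents";
    import { OpenAIAssistantRunnable } from "langchain/experimental/openai_assistant";

    declare const tools: any[]; // assumed: StructuredTool instances

    // StructuredTools are converted with formatToOpenAIAssistantTool before the
    // assistant is created; tool calls then round-trip through the executor.
    const agent = await OpenAIAssistantRunnable.createAssistant({
        model: "gpt-4-1106-preview",
        instructions: "Use the provided tools to answer the question.",
        tools,
        asAgent: true,
    });
    const executor = AgentExecutor.fromAgentAndTools({ agent, tools });
    const result = await executor.invoke({ content: "What is 13 * 17?" });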
package/dist/experimental/openai_assistant/schema.d.ts
ADDED
@@ -0,0 +1,12 @@
+import type { OpenAI as OpenAIClient } from "openai";
+import type { AgentFinish, AgentAction } from "../../schema/index.js";
+export type OpenAIAssistantFinish = AgentFinish & {
+    runId: string;
+    threadId: string;
+};
+export type OpenAIAssistantAction = AgentAction & {
+    toolCallId: string;
+    runId: string;
+    threadId: string;
+};
+export type OpenAIToolType = Array<OpenAIClient.Beta.AssistantCreateParams.AssistantToolsCode | OpenAIClient.Beta.AssistantCreateParams.AssistantToolsRetrieval | OpenAIClient.Beta.AssistantCreateParams.AssistantToolsFunction>;
package/dist/experimental/openai_assistant/schema.js
ADDED
@@ -0,0 +1 @@
+export {};
package/dist/load/import_map.cjs
CHANGED
@@ -25,7 +25,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
 };
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat_models__cloudflare_workersai = exports.chat_models__anthropic = exports.chat_models__openai = exports.chat_models__base = exports.document_transformers__openai_functions = exports.document_loaders__web__sort_xyz_blockchain = exports.document_loaders__web__serpapi = exports.document_loaders__web__searchapi = exports.document_loaders__base = exports.document = exports.memory = exports.text_splitter = exports.vectorstores__xata = exports.vectorstores__vectara = exports.vectorstores__prisma = exports.vectorstores__memory = exports.vectorstores__base = exports.prompts = exports.llms__fake = exports.llms__yandex = exports.llms__fireworks = exports.llms__ollama = exports.llms__cloudflare_workersai = exports.llms__aleph_alpha = exports.llms__ai21 = exports.llms__openai = exports.llms__base = exports.embeddings__voyage = exports.embeddings__minimax = exports.embeddings__openai = exports.embeddings__ollama = exports.embeddings__fake = exports.embeddings__cache_backed = exports.embeddings__base = exports.chains__openai_functions = exports.chains__combine_documents__reduce = exports.chains = exports.tools__render = exports.tools = exports.base_language = exports.agents__openai__output_parser = exports.agents__xml__output_parser = exports.agents__react__output_parser = exports.agents__format_scratchpad__log_to_message = exports.agents__format_scratchpad__xml = exports.agents__format_scratchpad__log = exports.agents__format_scratchpad = exports.agents__toolkits = exports.agents = exports.load__serializable = void 0;
-exports.runnables__remote = exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = void 0;
+exports.runnables__remote = exports.evaluation = exports.experimental__chains__violation_of_expectations = exports.experimental__chat_models__bittensor = exports.experimental__plan_and_execute = exports.experimental__generative_agents = exports.experimental__babyagi = exports.experimental__openai_assistant = exports.experimental__autogpt = exports.util__time = exports.util__math = exports.util__document = exports.storage__in_memory = exports.storage__encoder_backed = exports.stores__message__in_memory = exports.stores__file__in_memory = exports.stores__doc__in_memory = exports.cache = exports.retrievers__vespa = exports.retrievers__score_threshold = exports.retrievers__hyde = exports.retrievers__document_compressors__embeddings_filter = exports.retrievers__document_compressors__chain_extract = exports.retrievers__time_weighted = exports.retrievers__tavily_search_api = exports.retrievers__parent_document = exports.retrievers__multi_vector = exports.retrievers__multi_query = exports.retrievers__document_compressors = exports.retrievers__contextual_compression = exports.retrievers__databerry = exports.retrievers__chaindesk = exports.retrievers__remote = exports.output_parsers = exports.callbacks = exports.schema__storage = exports.schema__runnable = exports.schema__retriever = exports.schema__query_constructor = exports.schema__prompt_template = exports.schema__output_parser = exports.schema__document = exports.schema = exports.chat_models__fake = exports.chat_models__yandex = exports.chat_models__minimax = exports.chat_models__ollama = exports.chat_models__baiduwenxin = exports.chat_models__fireworks = void 0;
 exports.load__serializable = __importStar(require("../load/serializable.cjs"));
 exports.agents = __importStar(require("../agents/index.cjs"));
 exports.agents__toolkits = __importStar(require("../agents/toolkits/index.cjs"));
@@ -117,6 +117,7 @@ exports.util__document = __importStar(require("../util/document.cjs"));
 exports.util__math = __importStar(require("../util/math.cjs"));
 exports.util__time = __importStar(require("../util/time.cjs"));
 exports.experimental__autogpt = __importStar(require("../experimental/autogpt/index.cjs"));
+exports.experimental__openai_assistant = __importStar(require("../experimental/openai_assistant/index.cjs"));
 exports.experimental__babyagi = __importStar(require("../experimental/babyagi/index.cjs"));
 exports.experimental__generative_agents = __importStar(require("../experimental/generative_agents/index.cjs"));
 exports.experimental__plan_and_execute = __importStar(require("../experimental/plan_and_execute/index.cjs"));
package/dist/load/import_map.d.ts
CHANGED
@@ -89,6 +89,7 @@ export * as util__document from "../util/document.js";
 export * as util__math from "../util/math.js";
 export * as util__time from "../util/time.js";
 export * as experimental__autogpt from "../experimental/autogpt/index.js";
+export * as experimental__openai_assistant from "../experimental/openai_assistant/index.js";
 export * as experimental__babyagi from "../experimental/babyagi/index.js";
 export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
 export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
package/dist/load/import_map.js
CHANGED
@@ -90,6 +90,7 @@ export * as util__document from "../util/document.js";
 export * as util__math from "../util/math.js";
 export * as util__time from "../util/time.js";
 export * as experimental__autogpt from "../experimental/autogpt/index.js";
+export * as experimental__openai_assistant from "../experimental/openai_assistant/index.js";
 export * as experimental__babyagi from "../experimental/babyagi/index.js";
 export * as experimental__generative_agents from "../experimental/generative_agents/index.js";
 export * as experimental__plan_and_execute from "../experimental/plan_and_execute/index.js";
package/dist/tools/convert_to_openai.cjs
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.formatToOpenAITool = exports.formatToOpenAIFunction = void 0;
+exports.formatToOpenAIAssistantTool = exports.formatToOpenAITool = exports.formatToOpenAIFunction = void 0;
 const zod_to_json_schema_1 = require("zod-to-json-schema");
 /**
  * Formats a `StructuredTool` instance into a format that is compatible
@@ -27,3 +27,14 @@ function formatToOpenAITool(tool) {
     };
 }
 exports.formatToOpenAITool = formatToOpenAITool;
+function formatToOpenAIAssistantTool(tool) {
+    return {
+        type: "function",
+        function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: (0, zod_to_json_schema_1.zodToJsonSchema)(tool.schema),
+        },
+    };
+}
+exports.formatToOpenAIAssistantTool = formatToOpenAIAssistantTool;
package/dist/tools/convert_to_openai.d.ts
CHANGED
@@ -8,3 +8,4 @@ import { StructuredTool } from "./base.js";
  */
 export declare function formatToOpenAIFunction(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionCreateParams.Function;
 export declare function formatToOpenAITool(tool: StructuredTool): OpenAIClient.Chat.ChatCompletionTool;
+export declare function formatToOpenAIAssistantTool(tool: StructuredTool): OpenAIClient.Beta.AssistantCreateParams.AssistantToolsFunction;
package/dist/tools/convert_to_openai.js
CHANGED
@@ -22,3 +22,13 @@ export function formatToOpenAITool(tool) {
         },
     };
 }
+export function formatToOpenAIAssistantTool(tool) {
+    return {
+        type: "function",
+        function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: zodToJsonSchema(tool.schema),
+        },
+    };
+}
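A hedged sketch of the new helper; the deep import path below mirrors the dist layout and is an assumption rather than a documented entrypoint:

    import { z } from "zod";
    import { DynamicStructuredTool } from "langchain/tools";
    // Assumed import path; the helper lives in dist/tools/convert_to_openai.
    import { formatToOpenAIAssistantTool } from "langchain/tools/convert_to_openai";

    const adder = new DynamicStructuredTool({
        name: "add",
        description: "Add two numbers together.",
        schema: z.object({ a: z.number(), b: z.number() }),
        func: async ({ a, b }) => String(a + b),
    });

    // Produces { type: "function", function: { name, description, parameters } },
    // with parameters derived from the zod schema via zodToJsonSchema.
    const assistantTool = formatToOpenAIAssistantTool(adder);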
package/dist/vectorstores/pinecone.cjs
CHANGED
@@ -151,7 +151,7 @@ class PineconeStore extends base_js_1.VectorStore {
      * @returns Promise that resolves when the delete operation is complete.
      */
     async delete(params) {
-        const { deleteAll, ids } = params;
+        const { deleteAll, ids, filter } = params;
         const namespace = this.pineconeIndex.namespace(this.namespace ?? "");
         if (deleteAll) {
             await namespace.deleteAll();
@@ -163,6 +163,9 @@ class PineconeStore extends base_js_1.VectorStore {
                 await namespace.deleteMany(batchIds);
             }
         }
+        else if (filter) {
+            await namespace.deleteMany(filter);
+        }
         else {
             throw new Error("Either ids or delete_all must be provided.");
         }
package/dist/vectorstores/pinecone.d.ts
CHANGED
@@ -12,11 +12,12 @@ export interface PineconeLibArgs extends AsyncCallerParams {
 }
 /**
  * Type that defines the parameters for the delete operation in the
- * PineconeStore class. It includes ids, deleteAll flag, and namespace.
+ * PineconeStore class. It includes ids, filter, deleteAll flag, and namespace.
  */
 export type PineconeDeleteParams = {
     ids?: string[];
     deleteAll?: boolean;
+    filter?: object;
     namespace?: string;
 };
 /**
package/dist/vectorstores/pinecone.js
CHANGED
@@ -122,7 +122,7 @@ export class PineconeStore extends VectorStore {
      * @returns Promise that resolves when the delete operation is complete.
      */
     async delete(params) {
-        const { deleteAll, ids } = params;
+        const { deleteAll, ids, filter } = params;
         const namespace = this.pineconeIndex.namespace(this.namespace ?? "");
         if (deleteAll) {
             await namespace.deleteAll();
@@ -134,6 +134,9 @@ export class PineconeStore extends VectorStore {
                 await namespace.deleteMany(batchIds);
             }
         }
+        else if (filter) {
+            await namespace.deleteMany(filter);
+        }
        else {
            throw new Error("Either ids or delete_all must be provided.");
        }
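A minimal sketch of the new filter-based delete (not from the package docs); vectorStore is assumed to be an existing PineconeStore, and the metadata key and Pinecone filter syntax are placeholders:

    import type { PineconeStore } from "langchain/vectorstores/pinecone";

    declare const vectorStore: PineconeStore; // assumed to be initialized elsewhere

    // Deletes every vector in the namespace whose metadata matches the filter,
    // instead of passing explicit ids or wiping the namespace with deleteAll.
    await vectorStore.delete({
        filter: { source: { $eq: "docs/guide.md" } },
    });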
package/experimental/openai_assistant.cjs
ADDED
@@ -0,0 +1 @@
+module.exports = require('../dist/experimental/openai_assistant/index.cjs');
package/experimental/openai_assistant.d.ts
ADDED
@@ -0,0 +1 @@
+export * from '../dist/experimental/openai_assistant/index.js'
package/experimental/openai_assistant.js
ADDED
@@ -0,0 +1 @@
+export * from '../dist/experimental/openai_assistant/index.js'
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "langchain",
-  "version": "0.0.
+  "version": "0.0.184",
   "description": "Typescript bindings for langchain",
   "type": "module",
   "engines": {
@@ -754,6 +754,9 @@
     "experimental/autogpt.cjs",
     "experimental/autogpt.js",
     "experimental/autogpt.d.ts",
+    "experimental/openai_assistant.cjs",
+    "experimental/openai_assistant.js",
+    "experimental/openai_assistant.d.ts",
     "experimental/babyagi.cjs",
     "experimental/babyagi.js",
     "experimental/babyagi.d.ts",
@@ -800,7 +803,7 @@
     "build:esm": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist/ && rimraf dist/tests dist/**/tests",
     "build:cjs": "NODE_OPTIONS=--max-old-space-size=4096 tsc --outDir dist-cjs/ -p tsconfig.cjs.json && node scripts/move-cjs-to-dist.js && rimraf dist-cjs",
     "build:watch": "node scripts/create-entrypoints.js && tsc --outDir dist/ --watch",
-    "build:scripts": "node scripts/create-entrypoints.js && node scripts/check-tree-shaking.js",
+    "build:scripts": "node scripts/create-entrypoints.js && node scripts/check-tree-shaking.js && node scripts/generate-docs-llm-compatibility-table",
    "lint": "NODE_OPTIONS=--max-old-space-size=4096 eslint src && dpdm --exit-code circular:1 --no-warning --no-tree src/*.ts src/**/*.ts",
    "lint:fix": "yarn lint --fix",
    "precommit": "lint-staged",
@@ -2628,6 +2631,11 @@
      "import": "./experimental/autogpt.js",
      "require": "./experimental/autogpt.cjs"
    },
+    "./experimental/openai_assistant": {
+      "types": "./experimental/openai_assistant.d.ts",
+      "import": "./experimental/openai_assistant.js",
+      "require": "./experimental/openai_assistant.cjs"
+    },
    "./experimental/babyagi": {
      "types": "./experimental/babyagi.d.ts",
      "import": "./experimental/babyagi.js",