langchain 0.0.175 → 0.0.177
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chat_models/bedrock.cjs +25 -4
- package/dist/chat_models/bedrock.d.ts +2 -1
- package/dist/chat_models/bedrock.js +25 -4
- package/dist/chat_models/googlevertexai/common.cjs +46 -7
- package/dist/chat_models/googlevertexai/common.d.ts +7 -2
- package/dist/chat_models/googlevertexai/common.js +47 -8
- package/dist/chat_models/googlevertexai/index.cjs +4 -3
- package/dist/chat_models/googlevertexai/index.js +4 -3
- package/dist/chat_models/googlevertexai/web.cjs +2 -1
- package/dist/chat_models/googlevertexai/web.js +2 -1
- package/dist/chat_models/llama_cpp.cjs +31 -79
- package/dist/chat_models/llama_cpp.d.ts +15 -58
- package/dist/chat_models/llama_cpp.js +32 -80
- package/dist/chat_models/openai.cjs +91 -6
- package/dist/chat_models/openai.d.ts +10 -0
- package/dist/chat_models/openai.js +91 -6
- package/dist/embeddings/googlevertexai.cjs +1 -1
- package/dist/embeddings/googlevertexai.js +1 -1
- package/dist/embeddings/hf.cjs +10 -1
- package/dist/embeddings/hf.d.ts +4 -2
- package/dist/embeddings/hf.js +10 -1
- package/dist/embeddings/llama_cpp.cjs +67 -0
- package/dist/embeddings/llama_cpp.d.ts +26 -0
- package/dist/embeddings/llama_cpp.js +63 -0
- package/dist/embeddings/ollama.cjs +7 -1
- package/dist/embeddings/ollama.js +7 -1
- package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts +2 -2
- package/dist/experimental/multimodal_embeddings/googlevertexai.cjs +1 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts +2 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.js +2 -2
- package/dist/experimental/plan_and_execute/agent_executor.cjs +7 -4
- package/dist/experimental/plan_and_execute/agent_executor.d.ts +4 -3
- package/dist/experimental/plan_and_execute/agent_executor.js +8 -5
- package/dist/experimental/plan_and_execute/prompt.cjs +25 -9
- package/dist/experimental/plan_and_execute/prompt.d.ts +9 -1
- package/dist/experimental/plan_and_execute/prompt.js +23 -8
- package/dist/llms/bedrock.cjs +25 -3
- package/dist/llms/bedrock.d.ts +2 -1
- package/dist/llms/bedrock.js +25 -3
- package/dist/llms/googlevertexai/common.cjs +46 -13
- package/dist/llms/googlevertexai/common.d.ts +8 -3
- package/dist/llms/googlevertexai/common.js +46 -13
- package/dist/llms/googlevertexai/index.cjs +4 -3
- package/dist/llms/googlevertexai/index.js +4 -3
- package/dist/llms/googlevertexai/web.cjs +2 -1
- package/dist/llms/googlevertexai/web.js +2 -1
- package/dist/llms/hf.cjs +10 -1
- package/dist/llms/hf.d.ts +3 -0
- package/dist/llms/hf.js +10 -1
- package/dist/llms/llama_cpp.cjs +25 -65
- package/dist/llms/llama_cpp.d.ts +7 -43
- package/dist/llms/llama_cpp.js +25 -65
- package/dist/load/import_constants.cjs +1 -0
- package/dist/load/import_constants.js +1 -0
- package/dist/prompts/few_shot.cjs +162 -1
- package/dist/prompts/few_shot.d.ts +90 -2
- package/dist/prompts/few_shot.js +160 -0
- package/dist/prompts/index.cjs +2 -1
- package/dist/prompts/index.d.ts +1 -1
- package/dist/prompts/index.js +1 -1
- package/dist/retrievers/zep.cjs +26 -3
- package/dist/retrievers/zep.d.ts +11 -2
- package/dist/retrievers/zep.js +26 -3
- package/dist/types/googlevertexai-types.d.ts +12 -10
- package/dist/util/bedrock.d.ts +2 -0
- package/dist/util/googlevertexai-connection.cjs +298 -10
- package/dist/util/googlevertexai-connection.d.ts +76 -7
- package/dist/util/googlevertexai-connection.js +294 -9
- package/dist/util/googlevertexai-gauth.cjs +36 -0
- package/dist/util/googlevertexai-gauth.d.ts +8 -0
- package/dist/util/googlevertexai-gauth.js +32 -0
- package/dist/util/googlevertexai-webauth.cjs +38 -2
- package/dist/util/googlevertexai-webauth.d.ts +2 -6
- package/dist/util/googlevertexai-webauth.js +38 -2
- package/dist/util/llama_cpp.cjs +34 -0
- package/dist/util/llama_cpp.d.ts +46 -0
- package/dist/util/llama_cpp.js +28 -0
- package/dist/util/openai-format-fndef.cjs +81 -0
- package/dist/util/openai-format-fndef.d.ts +44 -0
- package/dist/util/openai-format-fndef.js +77 -0
- package/dist/util/openapi.d.ts +2 -2
- package/dist/vectorstores/googlevertexai.d.ts +4 -4
- package/dist/vectorstores/pinecone.cjs +5 -5
- package/dist/vectorstores/pinecone.d.ts +2 -2
- package/dist/vectorstores/pinecone.js +5 -5
- package/embeddings/llama_cpp.cjs +1 -0
- package/embeddings/llama_cpp.d.ts +1 -0
- package/embeddings/llama_cpp.js +1 -0
- package/package.json +13 -5
package/dist/embeddings/hf.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { HfInference } from "@huggingface/inference";
+import { HfInference, HfInferenceEndpoint } from "@huggingface/inference";
 import { Embeddings, EmbeddingsParams } from "./base.js";
 /**
  * Interface that extends EmbeddingsParams and defines additional
@@ -7,6 +7,7 @@ import { Embeddings, EmbeddingsParams } from "./base.js";
 export interface HuggingFaceInferenceEmbeddingsParams extends EmbeddingsParams {
     apiKey?: string;
     model?: string;
+    endpointUrl?: string;
 }
 /**
  * Class that extends the Embeddings class and provides methods for
@@ -16,7 +17,8 @@ export interface HuggingFaceInferenceEmbeddingsParams extends EmbeddingsParams {
 export declare class HuggingFaceInferenceEmbeddings extends Embeddings implements HuggingFaceInferenceEmbeddingsParams {
     apiKey?: string;
     model: string;
-
+    endpointUrl?: string;
+    client: HfInference | HfInferenceEndpoint;
     constructor(fields?: HuggingFaceInferenceEmbeddingsParams);
     _embed(texts: string[]): Promise<number[][]>;
     /**
package/dist/embeddings/hf.js
CHANGED
@@ -21,6 +21,12 @@ export class HuggingFaceInferenceEmbeddings extends Embeddings {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "endpointUrl", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         Object.defineProperty(this, "client", {
             enumerable: true,
             configurable: true,
@@ -31,7 +37,10 @@ export class HuggingFaceInferenceEmbeddings extends Embeddings {
                 fields?.model ?? "sentence-transformers/distilbert-base-nli-mean-tokens";
         this.apiKey =
             fields?.apiKey ?? getEnvironmentVariable("HUGGINGFACEHUB_API_KEY");
-        this.
+        this.endpointUrl = fields?.endpointUrl;
+        this.client = this.endpointUrl
+            ? new HfInference(this.apiKey).endpoint(this.endpointUrl)
+            : new HfInference(this.apiKey);
     }
     async _embed(texts) {
         // replace newlines, which can negatively affect performance.
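Usage sketch for the new endpointUrl option on HuggingFaceInferenceEmbeddings (not part of the diff; the endpoint URL below is a placeholder):

    import { HuggingFaceInferenceEmbeddings } from "langchain/embeddings/hf";

    // When endpointUrl is set, the client targets a dedicated Hugging Face
    // Inference Endpoint instead of the shared hosted Inference API.
    const embeddings = new HuggingFaceInferenceEmbeddings({
      apiKey: process.env.HUGGINGFACEHUB_API_KEY, // or rely on the env var directly
      endpointUrl: "https://YOUR-ENDPOINT.endpoints.huggingface.cloud", // placeholder
    });
    const vector = await embeddings.embedQuery("Hello world");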
package/dist/embeddings/llama_cpp.cjs
ADDED
@@ -0,0 +1,67 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.LlamaCppEmbeddings = void 0;
+const llama_cpp_js_1 = require("../util/llama_cpp.cjs");
+const base_js_1 = require("./base.cjs");
+class LlamaCppEmbeddings extends base_js_1.Embeddings {
+    constructor(inputs) {
+        super(inputs);
+        Object.defineProperty(this, "_model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "_context", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const _inputs = inputs;
+        _inputs.embedding = true;
+        this._model = (0, llama_cpp_js_1.createLlamaModel)(_inputs);
+        this._context = (0, llama_cpp_js_1.createLlamaContext)(this._model, _inputs);
+    }
+    /**
+     * Generates embeddings for an array of texts.
+     * @param texts - An array of strings to generate embeddings for.
+     * @returns A Promise that resolves to an array of embeddings.
+     */
+    async embedDocuments(texts) {
+        const tokensArray = [];
+        for (const text of texts) {
+            const encodings = await this.caller.call(() => new Promise((resolve) => {
+                resolve(this._context.encode(text));
+            }));
+            tokensArray.push(encodings);
+        }
+        const embeddings = [];
+        for (const tokens of tokensArray) {
+            const embedArray = [];
+            for (let i = 0; i < tokens.length; i += 1) {
+                const nToken = +tokens[i];
+                embedArray.push(nToken);
+            }
+            embeddings.push(embedArray);
+        }
+        return embeddings;
+    }
+    /**
+     * Generates an embedding for a single text.
+     * @param text - A string to generate an embedding for.
+     * @returns A Promise that resolves to an array of numbers representing the embedding.
+     */
+    async embedQuery(text) {
+        const tokens = [];
+        const encodings = await this.caller.call(() => new Promise((resolve) => {
+            resolve(this._context.encode(text));
+        }));
+        for (let i = 0; i < encodings.length; i += 1) {
+            const token = +encodings[i];
+            tokens.push(token);
+        }
+        return tokens;
+    }
+}
+exports.LlamaCppEmbeddings = LlamaCppEmbeddings;
package/dist/embeddings/llama_cpp.d.ts
ADDED
@@ -0,0 +1,26 @@
+import { LlamaModel, LlamaContext } from "node-llama-cpp";
+import { LlamaBaseCppInputs } from "../util/llama_cpp.js";
+import { Embeddings, EmbeddingsParams } from "./base.js";
+/**
+ * Note that the modelPath is the only required parameter. For testing you
+ * can set this in the environment variable `LLAMA_PATH`.
+ */
+export interface LlamaCppEmbeddingsParams extends LlamaBaseCppInputs, EmbeddingsParams {
+}
+export declare class LlamaCppEmbeddings extends Embeddings {
+    _model: LlamaModel;
+    _context: LlamaContext;
+    constructor(inputs: LlamaCppEmbeddingsParams);
+    /**
+     * Generates embeddings for an array of texts.
+     * @param texts - An array of strings to generate embeddings for.
+     * @returns A Promise that resolves to an array of embeddings.
+     */
+    embedDocuments(texts: string[]): Promise<number[][]>;
+    /**
+     * Generates an embedding for a single text.
+     * @param text - A string to generate an embedding for.
+     * @returns A Promise that resolves to an array of numbers representing the embedding.
+     */
+    embedQuery(text: string): Promise<number[]>;
+}
package/dist/embeddings/llama_cpp.js
ADDED
@@ -0,0 +1,63 @@
+import { createLlamaModel, createLlamaContext, } from "../util/llama_cpp.js";
+import { Embeddings } from "./base.js";
+export class LlamaCppEmbeddings extends Embeddings {
+    constructor(inputs) {
+        super(inputs);
+        Object.defineProperty(this, "_model", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "_context", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        const _inputs = inputs;
+        _inputs.embedding = true;
+        this._model = createLlamaModel(_inputs);
+        this._context = createLlamaContext(this._model, _inputs);
+    }
+    /**
+     * Generates embeddings for an array of texts.
+     * @param texts - An array of strings to generate embeddings for.
+     * @returns A Promise that resolves to an array of embeddings.
+     */
+    async embedDocuments(texts) {
+        const tokensArray = [];
+        for (const text of texts) {
+            const encodings = await this.caller.call(() => new Promise((resolve) => {
+                resolve(this._context.encode(text));
+            }));
+            tokensArray.push(encodings);
+        }
+        const embeddings = [];
+        for (const tokens of tokensArray) {
+            const embedArray = [];
+            for (let i = 0; i < tokens.length; i += 1) {
+                const nToken = +tokens[i];
+                embedArray.push(nToken);
+            }
+            embeddings.push(embedArray);
+        }
+        return embeddings;
+    }
+    /**
+     * Generates an embedding for a single text.
+     * @param text - A string to generate an embedding for.
+     * @returns A Promise that resolves to an array of numbers representing the embedding.
+     */
+    async embedQuery(text) {
+        const tokens = [];
+        const encodings = await this.caller.call(() => new Promise((resolve) => {
+            resolve(this._context.encode(text));
+        }));
+        for (let i = 0; i < encodings.length; i += 1) {
+            const token = +encodings[i];
+            tokens.push(token);
+        }
+        return tokens;
+    }
+}
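Usage sketch for the new LlamaCppEmbeddings class (illustrative; assumes node-llama-cpp is installed and the model path points at a local GGUF/GGML file):

    import { LlamaCppEmbeddings } from "langchain/embeddings/llama_cpp";

    // modelPath is the only required parameter (see llama_cpp.d.ts above);
    // the path here is a placeholder.
    const embeddings = new LlamaCppEmbeddings({ modelPath: "/path/to/your/model.gguf" });
    const docVectors = await embeddings.embedDocuments(["First document", "Second document"]);
    const queryVector = await embeddings.embedQuery("First document");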
package/dist/embeddings/ollama.cjs
CHANGED
@@ -81,7 +81,13 @@ class OllamaEmbeddings extends base_js_1.Embeddings {
     }
     async _request(prompt) {
         const { model, baseUrl, requestOptions } = this;
-
+        let formattedBaseUrl = baseUrl;
+        if (formattedBaseUrl.startsWith("http://localhost:")) {
+            // Node 18 has issues with resolving "localhost"
+            // See https://github.com/node-fetch/node-fetch/issues/1624
+            formattedBaseUrl = formattedBaseUrl.replace("http://localhost:", "http://127.0.0.1:");
+        }
+        const response = await fetch(`${formattedBaseUrl}/api/embeddings`, {
             method: "POST",
             headers: { "Content-Type": "application/json" },
             body: JSON.stringify({
package/dist/embeddings/ollama.js
CHANGED
@@ -78,7 +78,13 @@ export class OllamaEmbeddings extends Embeddings {
     }
     async _request(prompt) {
         const { model, baseUrl, requestOptions } = this;
-
+        let formattedBaseUrl = baseUrl;
+        if (formattedBaseUrl.startsWith("http://localhost:")) {
+            // Node 18 has issues with resolving "localhost"
+            // See https://github.com/node-fetch/node-fetch/issues/1624
+            formattedBaseUrl = formattedBaseUrl.replace("http://localhost:", "http://127.0.0.1:");
+        }
+        const response = await fetch(`${formattedBaseUrl}/api/embeddings`, {
             method: "POST",
             headers: { "Content-Type": "application/json" },
             body: JSON.stringify({
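Usage sketch showing the effect of the localhost rewrite above (illustrative; the model name is an assumption):

    import { OllamaEmbeddings } from "langchain/embeddings/ollama";

    // A baseUrl such as "http://localhost:11434" is now rewritten internally to
    // "http://127.0.0.1:11434" before POSTing to /api/embeddings, to avoid the
    // Node 18 fetch issue with resolving "localhost".
    const embeddings = new OllamaEmbeddings({ model: "llama2", baseUrl: "http://localhost:11434" });
    const vector = await embeddings.embedQuery("Hello world");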
package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts
CHANGED
@@ -4,7 +4,7 @@ import { GoogleAuthOptions } from "google-auth-library";
 import { PromptTemplate } from "../../../prompts/index.js";
 import { BaseLanguageModel } from "../../../base_language/index.js";
 import { AsyncCaller, AsyncCallerCallOptions } from "../../../util/async_caller.js";
-import { GoogleResponse, GoogleVertexAIConnectionParams } from "../../../types/googlevertexai-types.js";
+import { GoogleAbstractedClientOpsMethod, GoogleResponse, GoogleVertexAIConnectionParams } from "../../../types/googlevertexai-types.js";
 import { GoogleConnection } from "../../../util/googlevertexai-connection.js";
 /**
  * Configuration that allows us to load or pull a prompt that has been created
@@ -134,7 +134,7 @@ export declare class DriveFileReadConnection extends GoogleConnection<DriveCallO
     fileId: string;
     constructor(fields: DriveFileReadParams, caller: AsyncCaller);
     buildUrl(): Promise<string>;
-    buildMethod():
+    buildMethod(): GoogleAbstractedClientOpsMethod;
     request(options?: DriveCallOptions): Promise<DriveFileMakerSuiteResponse>;
 }
 export interface CacheEntry {
package/dist/experimental/multimodal_embeddings/googlevertexai.cjs
CHANGED
@@ -54,7 +54,7 @@ class GoogleVertexAIMultimodalEmbeddings extends base_js_1.Embeddings {
      * @returns An array of media embeddings.
      */
     responseToEmbeddings(response) {
-        return response
+        return (response?.data).predictions.map((r) => ({
             text: r.textEmbedding,
             image: r.imageEmbedding,
         }));
package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts
CHANGED
@@ -1,7 +1,8 @@
 /// <reference types="node" resolution-mode="require"/>
 import { GoogleAuthOptions } from "google-auth-library";
 import { Embeddings, EmbeddingsParams } from "../../embeddings/base.js";
-import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction
+import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction } from "../../types/googlevertexai-types.js";
+import { GoogleVertexAILLMResponse } from "../../util/googlevertexai-connection.js";
 /**
  * Parameters for the GoogleVertexAIMultimodalEmbeddings class, extending
  * both EmbeddingsParams and GoogleVertexAIConnectionParams.
package/dist/experimental/multimodal_embeddings/googlevertexai.js
CHANGED
@@ -1,6 +1,6 @@
 import { GoogleAuth } from "google-auth-library";
 import { Embeddings } from "../../embeddings/base.js";
-import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
+import { GoogleVertexAILLMConnection, } from "../../util/googlevertexai-connection.js";
 /**
  * Class for generating embeddings for text and images using Google's
  * Vertex AI. It extends the Embeddings base class and implements the
@@ -51,7 +51,7 @@ export class GoogleVertexAIMultimodalEmbeddings extends Embeddings {
      * @returns An array of media embeddings.
      */
     responseToEmbeddings(response) {
-        return response
+        return (response?.data).predictions.map((r) => ({
             text: r.textEmbedding,
             image: r.imageEmbedding,
         }));
package/dist/experimental/plan_and_execute/agent_executor.cjs
CHANGED
@@ -69,10 +69,10 @@ class PlanAndExecuteAgentExecutor extends base_js_1.BaseChain {
      * @param llm The Large Language Model (LLM) used to generate responses.
      * @returns A new LLMPlanner instance.
      */
-    static getDefaultPlanner({ llm }) {
+    static async getDefaultPlanner({ llm, tools, }) {
         const plannerLlmChain = new llm_chain_js_1.LLMChain({
             llm,
-            prompt: prompt_js_1.
+            prompt: await (0, prompt_js_1.getPlannerChatPrompt)(tools),
         });
         return new base_js_2.LLMPlanner(plannerLlmChain, new outputParser_js_1.PlanOutputParser());
     }
@@ -104,9 +104,12 @@ class PlanAndExecuteAgentExecutor extends base_js_1.BaseChain {
      * @param humanMessageTemplate The template for human messages. If not provided, a default template is used.
      * @returns A new PlanAndExecuteAgentExecutor instance.
      */
-    static fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
+    static async fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
         const executor = new PlanAndExecuteAgentExecutor({
-            planner: PlanAndExecuteAgentExecutor.getDefaultPlanner({
+            planner: await PlanAndExecuteAgentExecutor.getDefaultPlanner({
+                llm,
+                tools,
+            }),
             stepExecutor: PlanAndExecuteAgentExecutor.getDefaultStepExecutor({
                 llm,
                 tools,
package/dist/experimental/plan_and_execute/agent_executor.d.ts
CHANGED
@@ -40,9 +40,10 @@ export declare class PlanAndExecuteAgentExecutor extends BaseChain {
      * @param llm The Large Language Model (LLM) used to generate responses.
      * @returns A new LLMPlanner instance.
      */
-    static getDefaultPlanner({ llm }: {
+    static getDefaultPlanner({ llm, tools, }: {
         llm: BaseLanguageModel;
-
+        tools: Tool[];
+    }): Promise<LLMPlanner>;
     /**
      * Static method that returns a default step executor for the agent. It
      * creates a new ChatAgent from a given LLM and a set of tools, and uses
@@ -71,7 +72,7 @@ export declare class PlanAndExecuteAgentExecutor extends BaseChain {
         llm: BaseLanguageModel;
         tools: Tool[];
         humanMessageTemplate?: string;
-    } & Omit<PlanAndExecuteAgentExecutorInput, "planner" | "stepExecutor">): PlanAndExecuteAgentExecutor
+    } & Omit<PlanAndExecuteAgentExecutorInput, "planner" | "stepExecutor">): Promise<PlanAndExecuteAgentExecutor>;
     /** @ignore */
     _call(inputs: ChainValues, runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
     _chainType(): "agent_executor";
package/dist/experimental/plan_and_execute/agent_executor.js
CHANGED
@@ -1,7 +1,7 @@
 import { BaseChain } from "../../chains/base.js";
 import { ListStepContainer, LLMPlanner, ChainStepExecutor, } from "./base.js";
 import { AgentExecutor } from "../../agents/executor.js";
-import { DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE,
+import { DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE, getPlannerChatPrompt, } from "./prompt.js";
 import { LLMChain } from "../../chains/llm_chain.js";
 import { PlanOutputParser } from "./outputParser.js";
 import { ChatAgent } from "../../agents/chat/index.js";
@@ -66,10 +66,10 @@ export class PlanAndExecuteAgentExecutor extends BaseChain {
      * @param llm The Large Language Model (LLM) used to generate responses.
      * @returns A new LLMPlanner instance.
      */
-    static getDefaultPlanner({ llm }) {
+    static async getDefaultPlanner({ llm, tools, }) {
         const plannerLlmChain = new LLMChain({
             llm,
-            prompt:
+            prompt: await getPlannerChatPrompt(tools),
         });
         return new LLMPlanner(plannerLlmChain, new PlanOutputParser());
     }
@@ -101,9 +101,12 @@ export class PlanAndExecuteAgentExecutor extends BaseChain {
      * @param humanMessageTemplate The template for human messages. If not provided, a default template is used.
      * @returns A new PlanAndExecuteAgentExecutor instance.
      */
-    static fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
+    static async fromLLMAndTools({ llm, tools, humanMessageTemplate, }) {
         const executor = new PlanAndExecuteAgentExecutor({
-            planner: PlanAndExecuteAgentExecutor.getDefaultPlanner({
+            planner: await PlanAndExecuteAgentExecutor.getDefaultPlanner({
+                llm,
+                tools,
+            }),
             stepExecutor: PlanAndExecuteAgentExecutor.getDefaultStepExecutor({
                 llm,
                 tools,
package/dist/experimental/plan_and_execute/prompt.cjs
CHANGED
@@ -1,23 +1,22 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.
+exports.getPlannerChatPrompt = exports.DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = void 0;
 const chat_js_1 = require("../../prompts/chat.cjs");
 exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `Let's first understand the problem and devise a plan to solve the problem.`,
     `Please output the plan starting with the header "Plan:"`,
-    `
+    `followed by a numbered list of steps.`,
     `Please make the plan the minimum number of steps required`,
     `to answer the query or complete the task accurately and precisely.`,
-    `
-
+    `You have a set of tools at your disposal to help you with this task:`,
+    "",
+    "{toolStrings}",
+    "",
+    `You must consider these tools when coming up with your plan.`,
+    `If the task is a question, the final step in the plan must be the following: "Given the above steps taken,`,
     `please respond to the original query."`,
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
-exports.PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromMessages([
-    /* #__PURE__ */ chat_js_1.SystemMessagePromptTemplate.fromTemplate(exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
-    /* #__PURE__ */ chat_js_1.HumanMessagePromptTemplate.fromTemplate(`{input}`),
-]);
 exports.DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = `Previous steps: {previous_steps}
 
 Current objective: {current_step}
@@ -25,3 +24,20 @@ Current objective: {current_step}
 {agent_scratchpad}
 
 You may extract and combine relevant data from your previous steps when responding to me.`;
+/**
+ * Add the tool descriptions to the planning system prompt in
+ * order to get a better suited plan that makes efficient use
+ * of the tools
+ * @param tools the tools available to the `planner`
+ * @returns
+ */
+const getPlannerChatPrompt = async (tools) => {
+    const toolStrings = tools
+        .map((tool) => `${tool.name}: ${tool.description}`)
+        .join("\n");
+    return /* #__PURE__ */ chat_js_1.ChatPromptTemplate.fromMessages([
+        chat_js_1.SystemMessagePromptTemplate.fromTemplate(exports.PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
+        chat_js_1.HumanMessagePromptTemplate.fromTemplate(`{input}`),
+    ]).partial({ toolStrings });
+};
+exports.getPlannerChatPrompt = getPlannerChatPrompt;
package/dist/experimental/plan_and_execute/prompt.d.ts
CHANGED
@@ -1,4 +1,12 @@
 import { ChatPromptTemplate } from "../../prompts/chat.js";
+import { Tool } from "../../tools/base.js";
 export declare const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE: string;
-export declare const PLANNER_CHAT_PROMPT: ChatPromptTemplate<any, any>;
 export declare const DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = "Previous steps: {previous_steps}\n\nCurrent objective: {current_step}\n\n{agent_scratchpad}\n\nYou may extract and combine relevant data from your previous steps when responding to me.";
+/**
+ * Add the tool descriptions to the planning system prompt in
+ * order to get a better suited plan that makes efficient use
+ * of the tools
+ * @param tools the tools available to the `planner`
+ * @returns
+ */
+export declare const getPlannerChatPrompt: (tools: Tool[]) => Promise<ChatPromptTemplate<import("../../schema/index.js").InputValues<string>, any>>;
package/dist/experimental/plan_and_execute/prompt.js
CHANGED
@@ -2,19 +2,18 @@ import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemp
 export const PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE = [
     `Let's first understand the problem and devise a plan to solve the problem.`,
     `Please output the plan starting with the header "Plan:"`,
-    `
+    `followed by a numbered list of steps.`,
     `Please make the plan the minimum number of steps required`,
     `to answer the query or complete the task accurately and precisely.`,
-    `
-
+    `You have a set of tools at your disposal to help you with this task:`,
+    "",
+    "{toolStrings}",
+    "",
+    `You must consider these tools when coming up with your plan.`,
+    `If the task is a question, the final step in the plan must be the following: "Given the above steps taken,`,
     `please respond to the original query."`,
     `At the end of your plan, say "<END_OF_PLAN>"`,
 ].join(" ");
-export const PLANNER_CHAT_PROMPT =
-/* #__PURE__ */ ChatPromptTemplate.fromMessages([
-    /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
-    /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(`{input}`),
-]);
 export const DEFAULT_STEP_EXECUTOR_HUMAN_CHAT_MESSAGE_TEMPLATE = `Previous steps: {previous_steps}
 
 Current objective: {current_step}
@@ -22,3 +21,19 @@ Current objective: {current_step}
 {agent_scratchpad}
 
 You may extract and combine relevant data from your previous steps when responding to me.`;
+/**
+ * Add the tool descriptions to the planning system prompt in
+ * order to get a better suited plan that makes efficient use
+ * of the tools
+ * @param tools the tools available to the `planner`
+ * @returns
+ */
+export const getPlannerChatPrompt = async (tools) => {
+    const toolStrings = tools
+        .map((tool) => `${tool.name}: ${tool.description}`)
+        .join("\n");
+    return /* #__PURE__ */ ChatPromptTemplate.fromMessages([
+        SystemMessagePromptTemplate.fromTemplate(PLANNER_SYSTEM_PROMPT_MESSAGE_TEMPLATE),
+        HumanMessagePromptTemplate.fromTemplate(`{input}`),
+    ]).partial({ toolStrings });
+};
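Usage sketch for the now-asynchronous plan-and-execute factory (illustrative; the model and tool choices are assumptions, not part of the diff):

    import { PlanAndExecuteAgentExecutor } from "langchain/experimental/plan_and_execute";
    import { ChatOpenAI } from "langchain/chat_models/openai";
    import { Calculator } from "langchain/tools/calculator";

    // fromLLMAndTools now returns a Promise because the planner prompt is built from
    // the tools' names and descriptions (via getPlannerChatPrompt), so await it.
    const executor = await PlanAndExecuteAgentExecutor.fromLLMAndTools({
      llm: new ChatOpenAI({ temperature: 0 }),
      tools: [new Calculator()],
    });
    const result = await executor.call({ input: "What is 3 to the power of 5?" });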
package/dist/llms/bedrock.cjs
CHANGED
@@ -92,6 +92,12 @@ class Bedrock extends base_js_1.LLM {
             writable: true,
             value: new eventstream_codec_1.EventStreamCodec(util_utf8_1.toUtf8, util_utf8_1.fromUtf8)
         });
+        Object.defineProperty(this, "streaming", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
         this.model = fields?.model ?? this.model;
         const allowedModels = ["ai21", "anthropic", "amazon"];
         if (!allowedModels.includes(this.model.split(".")[0])) {
@@ -109,6 +115,7 @@ class Bedrock extends base_js_1.LLM {
         this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
         this.stopSequences = fields?.stopSequences;
         this.modelKwargs = fields?.modelKwargs;
+        this.streaming = fields?.streaming ?? this.streaming;
     }
     /** Call out to Bedrock service model.
         Arguments:
@@ -120,10 +127,23 @@ class Bedrock extends base_js_1.LLM {
         Example:
             response = model.call("Tell me a joke.")
     */
-    async _call(prompt, options) {
+    async _call(prompt, options, runManager) {
         const service = "bedrock-runtime";
         const endpointHost = this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
         const provider = this.model.split(".")[0];
+        if (this.streaming) {
+            const stream = this._streamResponseChunks(prompt, options, runManager);
+            let finalResult;
+            for await (const chunk of stream) {
+                if (finalResult === undefined) {
+                    finalResult = chunk;
+                }
+                else {
+                    finalResult = finalResult.concat(chunk);
+                }
+            }
+            return finalResult?.text ?? "";
+        }
         const response = await this._signedFetch(prompt, options, {
             bedrockMethod: "invoke",
             endpointHost,
@@ -204,7 +224,8 @@ class Bedrock extends base_js_1.LLM {
                     text,
                     generationInfo: {},
                 });
-
+                // eslint-disable-next-line no-void
+                void runManager?.handleLLMNewToken(text);
             }
         }
     }
@@ -215,7 +236,8 @@ class Bedrock extends base_js_1.LLM {
                     text,
                     generationInfo: {},
                 });
-
+                // eslint-disable-next-line no-void
+                void runManager?.handleLLMNewToken(text);
             }
         }
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
package/dist/llms/bedrock.d.ts
CHANGED
@@ -23,6 +23,7 @@ export declare class Bedrock extends LLM implements BaseBedrockInput {
     stopSequences?: string[];
     modelKwargs?: Record<string, unknown>;
     codec: EventStreamCodec;
+    streaming: boolean;
     get lc_secrets(): {
         [key: string]: string;
     } | undefined;
@@ -39,7 +40,7 @@ export declare class Bedrock extends LLM implements BaseBedrockInput {
         Example:
             response = model.call("Tell me a joke.")
     */
-    _call(prompt: string, options: this["ParsedCallOptions"]): Promise<string>;
+    _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;
     _signedFetch(prompt: string, options: this["ParsedCallOptions"], fields: {
         bedrockMethod: "invoke" | "invoke-with-response-stream";
         endpointHost: string;
package/dist/llms/bedrock.js
CHANGED
@@ -89,6 +89,12 @@ export class Bedrock extends LLM {
             writable: true,
             value: new EventStreamCodec(toUtf8, fromUtf8)
         });
+        Object.defineProperty(this, "streaming", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
         this.model = fields?.model ?? this.model;
         const allowedModels = ["ai21", "anthropic", "amazon"];
         if (!allowedModels.includes(this.model.split(".")[0])) {
@@ -106,6 +112,7 @@ export class Bedrock extends LLM {
         this.endpointHost = fields?.endpointHost ?? fields?.endpointUrl;
         this.stopSequences = fields?.stopSequences;
         this.modelKwargs = fields?.modelKwargs;
+        this.streaming = fields?.streaming ?? this.streaming;
     }
     /** Call out to Bedrock service model.
         Arguments:
@@ -117,10 +124,23 @@ export class Bedrock extends LLM {
         Example:
            response = model.call("Tell me a joke.")
     */
-    async _call(prompt, options) {
+    async _call(prompt, options, runManager) {
         const service = "bedrock-runtime";
         const endpointHost = this.endpointHost ?? `${service}.${this.region}.amazonaws.com`;
         const provider = this.model.split(".")[0];
+        if (this.streaming) {
+            const stream = this._streamResponseChunks(prompt, options, runManager);
+            let finalResult;
+            for await (const chunk of stream) {
+                if (finalResult === undefined) {
+                    finalResult = chunk;
+                }
+                else {
+                    finalResult = finalResult.concat(chunk);
+                }
+            }
+            return finalResult?.text ?? "";
+        }
         const response = await this._signedFetch(prompt, options, {
             bedrockMethod: "invoke",
             endpointHost,
@@ -201,7 +221,8 @@ export class Bedrock extends LLM {
                     text,
                     generationInfo: {},
                 });
-
+                // eslint-disable-next-line no-void
+                void runManager?.handleLLMNewToken(text);
             }
         }
     }
@@ -212,7 +233,8 @@ export class Bedrock extends LLM {
                     text,
                     generationInfo: {},
                 });
-
+                // eslint-disable-next-line no-void
+                void runManager?.handleLLMNewToken(text);
             }
         }
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
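Usage sketch for the new Bedrock streaming option (illustrative; the model id, region, and callback wiring are assumptions, and AWS credentials are expected to come from the usual environment/credential chain):

    import { Bedrock } from "langchain/llms/bedrock";

    const model = new Bedrock({
      model: "anthropic.claude-v2", // placeholder model id
      region: "us-east-1",
      streaming: true,
    });
    // With streaming enabled, _call consumes the response stream and forwards each
    // token to handleLLMNewToken before returning the concatenated text.
    const text = await model.call("Tell me a joke.", undefined, [
      {
        handleLLMNewToken(token: string) {
          process.stdout.write(token);
        },
      },
    ]);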