langchain 0.0.146 → 0.0.148
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/chat_models/googlevertexai/web.cjs +1 -0
- package/chat_models/googlevertexai/web.d.ts +1 -0
- package/chat_models/googlevertexai/web.js +1 -0
- package/chat_models/googlevertexai.cjs +1 -1
- package/chat_models/googlevertexai.d.ts +1 -1
- package/chat_models/googlevertexai.js +1 -1
- package/dist/base_language/index.cjs +2 -2
- package/dist/base_language/index.d.ts +2 -1
- package/dist/base_language/index.js +1 -1
- package/dist/chains/base.d.ts +1 -1
- package/dist/chains/constitutional_ai/constitutional_principle.cjs +272 -1
- package/dist/chains/constitutional_ai/constitutional_principle.js +272 -1
- package/dist/chains/openai_functions/openapi.cjs +32 -27
- package/dist/chains/openai_functions/openapi.d.ts +9 -0
- package/dist/chains/openai_functions/openapi.js +31 -27
- package/dist/chat_models/base.d.ts +1 -1
- package/dist/chat_models/{googlevertexai.cjs → googlevertexai/common.cjs} +14 -26
- package/dist/chat_models/{googlevertexai.d.ts → googlevertexai/common.d.ts} +13 -22
- package/dist/chat_models/{googlevertexai.js → googlevertexai/common.js} +12 -24
- package/dist/chat_models/googlevertexai/index.cjs +36 -0
- package/dist/chat_models/googlevertexai/index.d.ts +21 -0
- package/dist/chat_models/googlevertexai/index.js +31 -0
- package/dist/chat_models/googlevertexai/web.cjs +33 -0
- package/dist/chat_models/googlevertexai/web.d.ts +19 -0
- package/dist/chat_models/googlevertexai/web.js +28 -0
- package/dist/chat_models/openai.cjs +1 -1
- package/dist/chat_models/openai.js +1 -1
- package/dist/document_loaders/web/notionapi.cjs +93 -70
- package/dist/document_loaders/web/notionapi.d.ts +33 -1
- package/dist/document_loaders/web/notionapi.js +89 -71
- package/dist/embeddings/googlevertexai.cjs +5 -1
- package/dist/embeddings/googlevertexai.d.ts +2 -1
- package/dist/embeddings/googlevertexai.js +5 -1
- package/dist/evaluation/agents/index.cjs +17 -0
- package/dist/evaluation/agents/index.d.ts +1 -0
- package/dist/evaluation/agents/index.js +1 -0
- package/dist/evaluation/agents/prompt.cjs +132 -0
- package/dist/evaluation/agents/prompt.d.ts +6 -0
- package/dist/evaluation/agents/prompt.js +129 -0
- package/dist/evaluation/agents/trajectory.cjs +189 -0
- package/dist/evaluation/agents/trajectory.d.ts +54 -0
- package/dist/evaluation/agents/trajectory.js +184 -0
- package/dist/evaluation/base.cjs +274 -0
- package/dist/evaluation/base.d.ts +232 -0
- package/dist/evaluation/base.js +263 -0
- package/dist/evaluation/comparison/index.cjs +17 -0
- package/dist/evaluation/comparison/index.d.ts +1 -0
- package/dist/evaluation/comparison/index.js +1 -0
- package/dist/evaluation/comparison/pairwise.cjs +244 -0
- package/dist/evaluation/comparison/pairwise.d.ts +50 -0
- package/dist/evaluation/comparison/pairwise.js +238 -0
- package/dist/evaluation/comparison/prompt.cjs +74 -0
- package/dist/evaluation/comparison/prompt.d.ts +21 -0
- package/dist/evaluation/comparison/prompt.js +71 -0
- package/dist/evaluation/criteria/criteria.cjs +259 -0
- package/dist/evaluation/criteria/criteria.d.ts +73 -0
- package/dist/evaluation/criteria/criteria.js +253 -0
- package/dist/evaluation/criteria/index.cjs +17 -0
- package/dist/evaluation/criteria/index.d.ts +1 -0
- package/dist/evaluation/criteria/index.js +1 -0
- package/dist/evaluation/criteria/prompt.cjs +36 -0
- package/dist/evaluation/criteria/prompt.d.ts +12 -0
- package/dist/evaluation/criteria/prompt.js +33 -0
- package/dist/evaluation/embedding_distance/base.cjs +163 -0
- package/dist/evaluation/embedding_distance/base.d.ts +78 -0
- package/dist/evaluation/embedding_distance/base.js +156 -0
- package/dist/evaluation/embedding_distance/index.cjs +17 -0
- package/dist/evaluation/embedding_distance/index.d.ts +1 -0
- package/dist/evaluation/embedding_distance/index.js +1 -0
- package/dist/evaluation/index.cjs +6 -0
- package/dist/evaluation/index.d.ts +6 -0
- package/dist/evaluation/index.js +6 -0
- package/dist/evaluation/loader.cjs +60 -0
- package/dist/evaluation/loader.d.ts +27 -0
- package/dist/evaluation/loader.js +56 -0
- package/dist/evaluation/types.cjs +2 -0
- package/dist/evaluation/types.d.ts +35 -0
- package/dist/evaluation/types.js +1 -0
- package/dist/experimental/llms/bittensor.cjs +141 -0
- package/dist/experimental/llms/bittensor.d.ts +33 -0
- package/dist/experimental/llms/bittensor.js +137 -0
- package/dist/experimental/multimodal_embeddings/googlevertexai.cjs +5 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts +2 -1
- package/dist/experimental/multimodal_embeddings/googlevertexai.js +5 -1
- package/dist/hub.d.ts +1 -1
- package/dist/llms/base.d.ts +1 -1
- package/dist/llms/{googlevertexai.cjs → googlevertexai/common.cjs} +21 -17
- package/dist/llms/{googlevertexai.d.ts → googlevertexai/common.d.ts} +13 -23
- package/dist/llms/{googlevertexai.js → googlevertexai/common.js} +17 -21
- package/dist/llms/googlevertexai/index.cjs +34 -0
- package/dist/llms/googlevertexai/index.d.ts +26 -0
- package/dist/llms/googlevertexai/index.js +30 -0
- package/dist/llms/googlevertexai/web.cjs +31 -0
- package/dist/llms/googlevertexai/web.d.ts +24 -0
- package/dist/llms/googlevertexai/web.js +27 -0
- package/dist/llms/openai-chat.cjs +1 -1
- package/dist/llms/openai-chat.js +1 -1
- package/dist/llms/openai.cjs +1 -1
- package/dist/llms/openai.js +1 -1
- package/dist/load/import_constants.cjs +3 -0
- package/dist/load/import_constants.js +3 -0
- package/dist/load/import_map.cjs +3 -2
- package/dist/load/import_map.d.ts +2 -1
- package/dist/load/import_map.js +2 -1
- package/dist/load/index.cjs +2 -1
- package/dist/load/index.js +2 -1
- package/dist/load/serializable.cjs +23 -4
- package/dist/load/serializable.js +23 -4
- package/dist/prompts/base.cjs +2 -2
- package/dist/prompts/base.d.ts +1 -1
- package/dist/prompts/base.js +1 -1
- package/dist/prompts/chat.cjs +2 -2
- package/dist/prompts/chat.d.ts +1 -1
- package/dist/prompts/chat.js +1 -1
- package/dist/retrievers/multi_query.cjs +140 -0
- package/dist/retrievers/multi_query.d.ts +33 -0
- package/dist/retrievers/multi_query.js +136 -0
- package/dist/schema/document.cjs +2 -2
- package/dist/schema/document.d.ts +1 -1
- package/dist/schema/document.js +1 -1
- package/dist/schema/output_parser.cjs +2 -2
- package/dist/schema/output_parser.d.ts +2 -1
- package/dist/schema/output_parser.js +1 -1
- package/dist/schema/retriever.cjs +2 -2
- package/dist/schema/retriever.d.ts +2 -1
- package/dist/schema/retriever.js +1 -1
- package/dist/schema/runnable/config.cjs +8 -0
- package/dist/schema/runnable/config.d.ts +3 -0
- package/dist/schema/runnable/config.js +4 -0
- package/dist/schema/{runnable.cjs → runnable/index.cjs} +290 -101
- package/dist/schema/{runnable.d.ts → runnable/index.d.ts} +127 -41
- package/dist/schema/{runnable.js → runnable/index.js} +284 -99
- package/dist/tools/base.d.ts +1 -1
- package/dist/types/googlevertexai-types.d.ts +11 -4
- package/dist/util/async_caller.cjs +35 -25
- package/dist/util/async_caller.d.ts +8 -0
- package/dist/util/async_caller.js +35 -25
- package/dist/util/googlevertexai-connection.cjs +14 -15
- package/dist/util/googlevertexai-connection.d.ts +7 -7
- package/dist/util/googlevertexai-connection.js +14 -15
- package/dist/util/googlevertexai-webauth.cjs +56 -0
- package/dist/util/googlevertexai-webauth.d.ts +25 -0
- package/dist/util/googlevertexai-webauth.js +52 -0
- package/dist/vectorstores/googlevertexai.cjs +9 -8
- package/dist/vectorstores/googlevertexai.d.ts +8 -7
- package/dist/vectorstores/googlevertexai.js +9 -8
- package/dist/vectorstores/pinecone.cjs +30 -22
- package/dist/vectorstores/pinecone.d.ts +3 -1
- package/dist/vectorstores/pinecone.js +30 -22
- package/dist/vectorstores/vectara.cjs +20 -23
- package/dist/vectorstores/vectara.d.ts +9 -2
- package/dist/vectorstores/vectara.js +20 -23
- package/experimental/llms/bittensor.cjs +1 -0
- package/experimental/llms/bittensor.d.ts +1 -0
- package/experimental/llms/bittensor.js +1 -0
- package/llms/googlevertexai/web.cjs +1 -0
- package/llms/googlevertexai/web.d.ts +1 -0
- package/llms/googlevertexai/web.js +1 -0
- package/llms/googlevertexai.cjs +1 -1
- package/llms/googlevertexai.d.ts +1 -1
- package/llms/googlevertexai.js +1 -1
- package/package.json +40 -3
- package/retrievers/multi_query.cjs +1 -0
- package/retrievers/multi_query.d.ts +1 -0
- package/retrievers/multi_query.js +1 -0
- package/schema/runnable.cjs +1 -1
- package/schema/runnable.d.ts +1 -1
- package/schema/runnable.js +1 -1
package/dist/document_loaders/web/notionapi.js

@@ -4,19 +4,11 @@ import { getBlockChildren } from "notion-to-md/build/utils/notion.js";
 import { Document } from "../../document.js";
 import { BaseDocumentLoader } from "../base.js";
 import { AsyncCaller } from "../../util/async_caller.js";
-const isPageResponse = (res) => !isNotionClientError(res) && res.object === "page";
-const isDatabaseResponse = (res) => !isNotionClientError(res) && res.object === "database";
-const isErrorResponse = (res) => isNotionClientError(res);
-const isPage = (res) => isPageResponse(res) && isFullPage(res);
-const isDatabase = (res) => isDatabaseResponse(res) && isFullDatabase(res);
-const getTitle = (obj) => {
-    if (isPage(obj) && obj.properties.title.type === "title") {
-        return obj.properties.title.title[0]?.plain_text;
-    }
-    if (isDatabase(obj))
-        return obj.title[0]?.plain_text;
-    return null;
-};
+export const isPageResponse = (res) => !isNotionClientError(res) && res.object === "page";
+export const isDatabaseResponse = (res) => !isNotionClientError(res) && res.object === "database";
+export const isErrorResponse = (res) => isNotionClientError(res);
+export const isPage = (res) => isPageResponse(res) && isFullPage(res);
+export const isDatabase = (res) => isDatabaseResponse(res) && isFullDatabase(res);
 /**
  * A class that extends the BaseDocumentLoader class. It represents a
  * document loader for loading documents from Notion using the Notion API.
@@ -104,11 +96,85 @@ export class NotionAPILoader extends BaseDocumentLoader {
         this.rootTitle = "";
         this.onDocumentLoaded = options.onDocumentLoaded ?? ((_ti, _cu) => { });
     }
+    /**
+     * Adds a selection of page ids to the pageQueue and removes duplicates.
+     * @param items An array of string ids
+     */
     addToQueue(...items) {
         const deDuped = items.filter((item) => !this.pageCompleted.concat(this.pageQueue).includes(item));
         this.pageQueue.push(...deDuped);
         this.pageQueueTotal += deDuped.length;
     }
+    /**
+     * Parses a Notion GetResponse object (page or database) and returns a string of the title.
+     * @param obj The Notion GetResponse object to parse.
+     * @returns The string of the title.
+     */
+    getTitle(obj) {
+        if (isPage(obj) && obj.properties.title.type === "title") {
+            return obj.properties.title.title[0]?.plain_text;
+        }
+        if (isDatabase(obj))
+            return obj.title[0]?.plain_text;
+        return null;
+    }
+    /**
+     * Parses the property type and returns a string
+     * @param page The Notion page property to parse.
+     * @returns A string of parsed property.
+     */
+    getPropValue(prop) {
+        switch (prop.type) {
+            case "number": {
+                const propNumber = prop[prop.type];
+                return propNumber !== null ? propNumber.toString() : "";
+            }
+            case "url":
+                return prop[prop.type] || "";
+            case "select":
+                return prop[prop.type]?.name ?? "";
+            case "multi_select":
+                return `[${prop[prop.type].map((v) => `"${v.name}"`).join(", ")}]`;
+            case "status":
+                return prop[prop.type]?.name ?? "";
+            case "date":
+                return `${prop[prop.type]?.start ?? ""}${prop[prop.type]?.end ? ` - ${prop[prop.type]?.end}` : ""}`;
+            case "email":
+                return prop[prop.type] || "";
+            case "phone_number":
+                return prop[prop.type] || "";
+            case "checkbox":
+                return prop[prop.type].toString();
+            case "files":
+                return `[${prop[prop.type].map((v) => `"${v.name}"`).join(", ")}]`;
+            case "created_by":
+                return `["${prop[prop.type].object}", "${prop[prop.type].id}"]`;
+            case "created_time":
+                return prop[prop.type];
+            case "last_edited_by":
+                return `["${prop[prop.type].object}", "${prop[prop.type].id}"]`;
+            case "last_edited_time":
+                return prop[prop.type];
+            case "title":
+                return prop[prop.type]
+                    .map((v) => this.n2mClient.annotatePlainText(v.plain_text, v.annotations))
+                    .join("");
+            case "rich_text":
+                return prop[prop.type]
+                    .map((v) => this.n2mClient.annotatePlainText(v.plain_text, v.annotations))
+                    .join("");
+            case "people":
+                return `[${prop[prop.type]
+                    .map((v) => `["${v.object}", "${v.id}"]`)
+                    .join(", ")}]`;
+            case "unique_id":
+                return `${prop[prop.type].prefix || ""}${prop[prop.type].number}`;
+            case "relation":
+                return `[${prop[prop.type].map((v) => `"${v.id}"`).join(", ")}]`;
+            default:
+                return `Unsupported type: ${prop.type}`;
+        }
+    }
     /**
      * Parses the properties of a Notion page and returns them as key-value
      * pairs.
@@ -116,60 +182,11 @@ export class NotionAPILoader extends BaseDocumentLoader {
      * @returns An object containing the parsed properties as key-value pairs.
      */
     parsePageProperties(page) {
-        return Object.
-
-
-
-
-                return [prop.type, prop[prop.type]];
-            case "select":
-                return [prop.type, prop[prop.type]?.name ?? ""];
-            case "multi_select":
-                return [
-                    prop.type,
-                    prop[prop.type].map((select) => select.name).join(", "),
-                ];
-            case "status":
-                return [prop.type, prop[prop.type]?.name ?? ""];
-            case "date":
-                return [
-                    prop.type,
-                    `${prop[prop.type]?.start ?? ""}${prop[prop.type]?.end ? `- ${prop[prop.type]?.end}` : ""}`,
-                ];
-            case "email":
-                return [prop.type, prop[prop.type]];
-            case "phone_number":
-                return [prop.type, prop[prop.type]];
-            case "checkbox":
-                return [prop.type, prop[prop.type].toString()];
-            // case "files":
-            case "created_by":
-                return [prop.type, prop[prop.type]];
-            case "created_time":
-                return [prop.type, prop[prop.type]];
-            case "last_edited_by":
-                return [prop.type, prop[prop.type]];
-            case "last_edited_time":
-                return [prop.type, prop[prop.type]];
-            // case "formula":
-            case "title":
-                return [
-                    prop.type,
-                    prop[prop.type].map((v) => v.plain_text).join(""),
-                ];
-            case "rich_text":
-                return [
-                    prop.type,
-                    prop[prop.type].map((v) => v.plain_text).join(""),
-                ];
-            case "people":
-                return [prop.type, prop[prop.type]];
-            // case "relation":
-            // case "rollup":
-            default:
-                return [prop.type, "Unsupported type"];
-        }
-    }));
+        return Object.entries(page.properties).reduce((accum, [propName, prop]) => {
+            const value = this.getPropValue(prop);
+            const props = { ...accum, [propName]: value };
+            return prop.type === "title" ? { ...props, _title: value } : props;
+        }, {});
     }
     /**
      * Parses the details of a Notion page and returns them as an object.
@@ -177,10 +194,10 @@ export class NotionAPILoader extends BaseDocumentLoader {
      * @returns An object containing the parsed details of the page.
      */
     parsePageDetails(page) {
-        const
+        const { id, ...rest } = page;
         return {
-            ...
-            notionId:
+            ...rest,
+            notionId: id,
             properties: this.parsePageProperties(page),
         };
     }
@@ -313,7 +330,8 @@ export class NotionAPILoader extends BaseDocumentLoader {
             }
             throw new AggregateError(errors);
         }
-        this.rootTitle =
+        this.rootTitle =
+            this.getTitle(resPage) || this.getTitle(resDatabase) || this.id;
        let pageId = this.pageQueue.shift();
        while (pageId) {
            await this.loadPage(pageId);
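
As a usage reference, here is a minimal sketch of the loader touched above. Only onDocumentLoaded appears in these hunks; the clientOptions/id/type constructor fields come from the loader's documented options, and the token and page id are placeholders.

import { NotionAPILoader } from "langchain/document_loaders/web/notionapi";

// Placeholders: supply a real Notion integration token and page id.
const loader = new NotionAPILoader({
  clientOptions: { auth: process.env.NOTION_API_KEY },
  id: "<notion-page-id>",
  type: "page",
});

const docs = await loader.load();
// After this change, each document's metadata carries notionId plus the
// stringified page properties produced by the new getPropValue() above.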
@@ -1,6 +1,7 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
3
|
exports.GoogleVertexAIEmbeddings = void 0;
|
|
4
|
+
const google_auth_library_1 = require("google-auth-library");
|
|
4
5
|
const base_js_1 = require("./base.cjs");
|
|
5
6
|
const googlevertexai_connection_js_1 = require("../util/googlevertexai-connection.cjs");
|
|
6
7
|
const chunk_js_1 = require("../util/chunk.cjs");
|
|
@@ -34,7 +35,10 @@ class GoogleVertexAIEmbeddings extends base_js_1.Embeddings {
|
|
|
34
35
|
value: void 0
|
|
35
36
|
});
|
|
36
37
|
this.model = fields?.model ?? this.model;
|
|
37
|
-
this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller
|
|
38
|
+
this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, new google_auth_library_1.GoogleAuth({
|
|
39
|
+
scopes: "https://www.googleapis.com/auth/cloud-platform",
|
|
40
|
+
...fields?.authOptions,
|
|
41
|
+
}));
|
|
38
42
|
}
|
|
39
43
|
/**
|
|
40
44
|
* Takes an array of documents as input and returns a promise that
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import { GoogleAuthOptions } from "google-auth-library";
|
|
1
2
|
import { Embeddings, EmbeddingsParams } from "./base.js";
|
|
2
3
|
import { GoogleVertexAIBaseLLMInput } from "../types/googlevertexai-types.js";
|
|
3
4
|
/**
|
|
@@ -5,7 +6,7 @@ import { GoogleVertexAIBaseLLMInput } from "../types/googlevertexai-types.js";
|
|
|
5
6
|
* GoogleVertexAIEmbeddings instance. It extends EmbeddingsParams and
|
|
6
7
|
* GoogleVertexAIConnectionParams.
|
|
7
8
|
*/
|
|
8
|
-
export interface GoogleVertexAIEmbeddingsParams extends EmbeddingsParams, GoogleVertexAIBaseLLMInput {
|
|
9
|
+
export interface GoogleVertexAIEmbeddingsParams extends EmbeddingsParams, GoogleVertexAIBaseLLMInput<GoogleAuthOptions> {
|
|
9
10
|
}
|
|
10
11
|
/**
|
|
11
12
|
* Enables calls to the Google Cloud's Vertex AI API to access
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import { GoogleAuth } from "google-auth-library";
|
|
1
2
|
import { Embeddings } from "./base.js";
|
|
2
3
|
import { GoogleVertexAILLMConnection } from "../util/googlevertexai-connection.js";
|
|
3
4
|
import { chunkArray } from "../util/chunk.js";
|
|
@@ -31,7 +32,10 @@ export class GoogleVertexAIEmbeddings extends Embeddings {
|
|
|
31
32
|
value: void 0
|
|
32
33
|
});
|
|
33
34
|
this.model = fields?.model ?? this.model;
|
|
34
|
-
this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller
|
|
35
|
+
this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, new GoogleAuth({
|
|
36
|
+
scopes: "https://www.googleapis.com/auth/cloud-platform",
|
|
37
|
+
...fields?.authOptions,
|
|
38
|
+
}));
|
|
35
39
|
}
|
|
36
40
|
/**
|
|
37
41
|
* Takes an array of documents as input and returns a promise that
|
|
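
Taken together, these three build outputs add an authOptions field that is spread into a GoogleAuth client, with the cloud-platform scope as the default. A minimal sketch of the new knob; the keyFilename value is a placeholder, and the accepted fields are whatever google-auth-library's GoogleAuthOptions allows:

import { GoogleVertexAIEmbeddings } from "langchain/embeddings/googlevertexai";

const embeddings = new GoogleVertexAIEmbeddings({
  // Spread after the default scope in the constructor above, so any
  // user-supplied GoogleAuthOptions fields win over the defaults.
  authOptions: { keyFilename: "/path/to/service-account.json" },
});

const vectors = await embeddings.embedDocuments(["hello", "world"]);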
package/dist/evaluation/agents/index.cjs

@@ -0,0 +1,17 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __exportStar = (this && this.__exportStar) || function(m, exports) {
+    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./trajectory.cjs"), exports);

package/dist/evaluation/agents/index.d.ts

@@ -0,0 +1 @@
+export * from "./trajectory.js";

package/dist/evaluation/agents/index.js

@@ -0,0 +1 @@
+export * from "./trajectory.js";
package/dist/evaluation/agents/prompt.cjs

@@ -0,0 +1,132 @@
+"use strict";
+/**
+ * Prompt for trajectory evaluation chain.
+ */
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.TOOL_FREE_EVAL_CHAT_PROMPT = exports.EVAL_CHAT_PROMPT = void 0;
+const index_js_1 = require("../../prompts/index.cjs");
+const EVAL_TEMPLATE = `An AI language model has been given access to the following set of tools to help answer a user's question.
+
+The tools given to the AI model are:
+[TOOL_DESCRIPTIONS]
+{toolDescriptions}
+[END_TOOL_DESCRIPTIONS]
+
+The question the human asked the AI model was:
+[QUESTION]
+{question}
+[END_QUESTION]{reference}
+
+The AI language model decided to use the following set of tools to answer the question:
+[AGENT_TRAJECTORY]
+{agentTrajectory}
+[END_AGENT_TRAJECTORY]
+
+The AI language model's final answer to the question was:
+[RESPONSE]
+{answer}
+[END_RESPONSE]
+
+Let's do a detailed evaluation of the AI language model's answer step by step.
+
+We consider the following criteria before giving a score from 1 to 5:
+
+i. Is the final answer helpful?
+ii. Does the AI language use a logical sequence of tools to answer the question?
+iii. Does the AI language model use the tools in a helpful way?
+iv. Does the AI language model use too many steps to answer the question?
+v. Are the appropriate tools used to answer the question?`;
+const EXAMPLE_INPUT = `An AI language model has been given access to the following set of tools to help answer a user's question.
+
+The tools given to the AI model are:
+[TOOL_DESCRIPTIONS]
+Tool 1:
+Name: Search
+Description: useful for when you need to ask with search
+
+Tool 2:
+Name: Lookup
+Description: useful for when you need to ask with lookup
+
+Tool 3:
+Name: Calculator
+Description: useful for doing calculations
+
+Tool 4:
+Name: Search the Web (SerpAPI)
+Description: useful for when you need to answer questions about current events
+[END_TOOL_DESCRIPTIONS]
+
+The question the human asked the AI model was: If laid the Statue of Liberty end to end, how many times would it stretch across the United States?
+
+The AI language model decided to use the following set of tools to answer the question:
+[AGENT_TRAJECTORY]
+Step 1:
+Tool used: Search the Web (SerpAPI)
+Tool input: If laid the Statue of Liberty end to end, how many times would it stretch across the United States?
+Tool output: The Statue of Liberty was given to the United States by France, as a symbol of the two countries' friendship. It was erected atop an American-designed ...
+[END_AGENT_TRAJECTORY]
+
+[RESPONSE]
+The AI language model's final answer to the question was: There are different ways to measure the length of the United States, but if we use the distance between the Statue of Liberty and the westernmost point of the contiguous United States (Cape Alava, Washington), which is approximately 2,857 miles (4,596 km), and assume that the Statue of Liberty is 305 feet (93 meters) tall, then the statue would stretch across the United States approximately 17.5 times if laid end to end.
+[END_RESPONSE]
+
+Let's do a detailed evaluation of the AI language model's answer step by step.
+
+We consider the following criteria before giving a score from 1 to 5:
+
+i. Is the final answer helpful?
+ii. Does the AI language use a logical sequence of tools to answer the question?
+iii. Does the AI language model use the tools in a helpful way?
+iv. Does the AI language model use too many steps to answer the question?
+v. Are the appropriate tools used to answer the question?`;
+const EXAMPLE_OUTPUT = `First, let's evaluate the final answer. The final uses good reasoning but is wrong. 2,857 divided by 305 is not 17.5.\
+The model should have used the calculator to figure this out. Second does the model use a logical sequence of tools to answer the question?\
+The way model uses the search is not helpful. The model should have used the search tool to figure the width of the US or the height of the statue.\
+The model didn't use the calculator tool and gave an incorrect answer. The search API should be used for current events or specific questions.\
+The tools were not used in a helpful way. The model did not use too many steps to answer the question.\
+The model did not use the appropriate tools to answer the question.\
+
+Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.
+
+Score: 2`;
+exports.EVAL_CHAT_PROMPT =
+/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
+    /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
+    /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
+    /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
+    /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EVAL_TEMPLATE),
+]);
+const TOOL_FREE_EVAL_TEMPLATE = `An AI language model has been given access to a set of tools to help answer a user's question.
+
+The question the human asked the AI model was:
+[QUESTION]
+{question}
+[END_QUESTION]{reference}
+
+The AI language model decided to use the following set of tools to answer the question:
+[AGENT_TRAJECTORY]
+{agentTrajectory}
+[END_AGENT_TRAJECTORY]
+
+The AI language model's final answer to the question was:
+[RESPONSE]
+{answer}
+[END_RESPONSE]
+
+Let's do a detailed evaluation of the AI language model's answer step by step.
+
+We consider the following criteria before giving a score from 1 to 5:
+
+i. Is the final answer helpful?
+ii. Does the AI language use a logical sequence of tools to answer the question?
+iii. Does the AI language model use the tools in a helpful way?
+iv. Does the AI language model use too many steps to answer the question?
+v. Are the appropriate tools used to answer the question?`;
+exports.TOOL_FREE_EVAL_CHAT_PROMPT =
+/* #__PURE__ */ index_js_1.ChatPromptTemplate.fromPromptMessages([
+    /* #__PURE__ */ index_js_1.SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
+    /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
+    /* #__PURE__ */ index_js_1.AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
+    /* #__PURE__ */ index_js_1.HumanMessagePromptTemplate.fromTemplate(TOOL_FREE_EVAL_TEMPLATE),
+]);
package/dist/evaluation/agents/prompt.js

@@ -0,0 +1,129 @@
+/**
+ * Prompt for trajectory evaluation chain.
+ */
+import { AIMessagePromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "../../prompts/index.js";
+const EVAL_TEMPLATE = `An AI language model has been given access to the following set of tools to help answer a user's question.
+
+The tools given to the AI model are:
+[TOOL_DESCRIPTIONS]
+{toolDescriptions}
+[END_TOOL_DESCRIPTIONS]
+
+The question the human asked the AI model was:
+[QUESTION]
+{question}
+[END_QUESTION]{reference}
+
+The AI language model decided to use the following set of tools to answer the question:
+[AGENT_TRAJECTORY]
+{agentTrajectory}
+[END_AGENT_TRAJECTORY]
+
+The AI language model's final answer to the question was:
+[RESPONSE]
+{answer}
+[END_RESPONSE]
+
+Let's do a detailed evaluation of the AI language model's answer step by step.
+
+We consider the following criteria before giving a score from 1 to 5:
+
+i. Is the final answer helpful?
+ii. Does the AI language use a logical sequence of tools to answer the question?
+iii. Does the AI language model use the tools in a helpful way?
+iv. Does the AI language model use too many steps to answer the question?
+v. Are the appropriate tools used to answer the question?`;
+const EXAMPLE_INPUT = `An AI language model has been given access to the following set of tools to help answer a user's question.
+
+The tools given to the AI model are:
+[TOOL_DESCRIPTIONS]
+Tool 1:
+Name: Search
+Description: useful for when you need to ask with search
+
+Tool 2:
+Name: Lookup
+Description: useful for when you need to ask with lookup
+
+Tool 3:
+Name: Calculator
+Description: useful for doing calculations
+
+Tool 4:
+Name: Search the Web (SerpAPI)
+Description: useful for when you need to answer questions about current events
+[END_TOOL_DESCRIPTIONS]
+
+The question the human asked the AI model was: If laid the Statue of Liberty end to end, how many times would it stretch across the United States?
+
+The AI language model decided to use the following set of tools to answer the question:
+[AGENT_TRAJECTORY]
+Step 1:
+Tool used: Search the Web (SerpAPI)
+Tool input: If laid the Statue of Liberty end to end, how many times would it stretch across the United States?
+Tool output: The Statue of Liberty was given to the United States by France, as a symbol of the two countries' friendship. It was erected atop an American-designed ...
+[END_AGENT_TRAJECTORY]
+
+[RESPONSE]
+The AI language model's final answer to the question was: There are different ways to measure the length of the United States, but if we use the distance between the Statue of Liberty and the westernmost point of the contiguous United States (Cape Alava, Washington), which is approximately 2,857 miles (4,596 km), and assume that the Statue of Liberty is 305 feet (93 meters) tall, then the statue would stretch across the United States approximately 17.5 times if laid end to end.
+[END_RESPONSE]
+
+Let's do a detailed evaluation of the AI language model's answer step by step.
+
+We consider the following criteria before giving a score from 1 to 5:
+
+i. Is the final answer helpful?
+ii. Does the AI language use a logical sequence of tools to answer the question?
+iii. Does the AI language model use the tools in a helpful way?
+iv. Does the AI language model use too many steps to answer the question?
+v. Are the appropriate tools used to answer the question?`;
+const EXAMPLE_OUTPUT = `First, let's evaluate the final answer. The final uses good reasoning but is wrong. 2,857 divided by 305 is not 17.5.\
+The model should have used the calculator to figure this out. Second does the model use a logical sequence of tools to answer the question?\
+The way model uses the search is not helpful. The model should have used the search tool to figure the width of the US or the height of the statue.\
+The model didn't use the calculator tool and gave an incorrect answer. The search API should be used for current events or specific questions.\
+The tools were not used in a helpful way. The model did not use too many steps to answer the question.\
+The model did not use the appropriate tools to answer the question.\
+
+Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.
+
+Score: 2`;
+export const EVAL_CHAT_PROMPT =
+/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+    /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
+    /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
+    /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
+    /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EVAL_TEMPLATE),
+]);
+const TOOL_FREE_EVAL_TEMPLATE = `An AI language model has been given access to a set of tools to help answer a user's question.
+
+The question the human asked the AI model was:
+[QUESTION]
+{question}
+[END_QUESTION]{reference}
+
+The AI language model decided to use the following set of tools to answer the question:
+[AGENT_TRAJECTORY]
+{agentTrajectory}
+[END_AGENT_TRAJECTORY]
+
+The AI language model's final answer to the question was:
+[RESPONSE]
+{answer}
+[END_RESPONSE]
+
+Let's do a detailed evaluation of the AI language model's answer step by step.
+
+We consider the following criteria before giving a score from 1 to 5:
+
+i. Is the final answer helpful?
+ii. Does the AI language use a logical sequence of tools to answer the question?
+iii. Does the AI language model use the tools in a helpful way?
+iv. Does the AI language model use too many steps to answer the question?
+v. Are the appropriate tools used to answer the question?`;
+export const TOOL_FREE_EVAL_CHAT_PROMPT =
+/* #__PURE__ */ ChatPromptTemplate.fromPromptMessages([
+    /* #__PURE__ */ SystemMessagePromptTemplate.fromTemplate("You are a helpful assistant that evaluates language models."),
+    /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(EXAMPLE_INPUT),
+    /* #__PURE__ */ AIMessagePromptTemplate.fromTemplate(EXAMPLE_OUTPUT),
+    /* #__PURE__ */ HumanMessagePromptTemplate.fromTemplate(TOOL_FREE_EVAL_TEMPLATE),
+]);
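
For reference, a sketch of formatting this new prompt directly. The input variables (toolDescriptions, question, reference, agentTrajectory, answer) come from the templates above; the deep dist import path is an assumption for illustration, since the file list shows no package-level evaluation entrypoint in this release.

import { EVAL_CHAT_PROMPT } from "langchain/dist/evaluation/agents/prompt.js";

const messages = await EVAL_CHAT_PROMPT.formatMessages({
  toolDescriptions: "Tool 1:\nName: Calculator\nDescription: useful for doing calculations",
  question: "What is 2,857 divided by 305?",
  reference: "", // filled only when a reference answer is supplied
  agentTrajectory: "Step 1:\nTool used: Calculator\nTool input: 2857 / 305\nTool output: 9.367",
  answer: "About 9.37.",
});
// Yields the system message, the few-shot EXAMPLE_INPUT/EXAMPLE_OUTPUT pair,
// and a final human message built from EVAL_TEMPLATE.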