langchain 0.0.147 → 0.0.149

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. package/chat_models/googlevertexai/web.cjs +1 -0
  2. package/chat_models/googlevertexai/web.d.ts +1 -0
  3. package/chat_models/googlevertexai/web.js +1 -0
  4. package/chat_models/googlevertexai.cjs +1 -1
  5. package/chat_models/googlevertexai.d.ts +1 -1
  6. package/chat_models/googlevertexai.js +1 -1
  7. package/dist/chains/constitutional_ai/constitutional_principle.cjs +272 -1
  8. package/dist/chains/constitutional_ai/constitutional_principle.js +272 -1
  9. package/dist/chains/question_answering/load.cjs +12 -4
  10. package/dist/chains/question_answering/load.d.ts +2 -0
  11. package/dist/chains/question_answering/load.js +12 -4
  12. package/dist/chains/summarization/load.cjs +8 -4
  13. package/dist/chains/summarization/load.d.ts +2 -0
  14. package/dist/chains/summarization/load.js +8 -4
  15. package/dist/chat_models/{googlevertexai.cjs → googlevertexai/common.cjs} +14 -26
  16. package/dist/chat_models/{googlevertexai.d.ts → googlevertexai/common.d.ts} +13 -22
  17. package/dist/chat_models/{googlevertexai.js → googlevertexai/common.js} +12 -24
  18. package/dist/chat_models/googlevertexai/index.cjs +36 -0
  19. package/dist/chat_models/googlevertexai/index.d.ts +21 -0
  20. package/dist/chat_models/googlevertexai/index.js +31 -0
  21. package/dist/chat_models/googlevertexai/web.cjs +33 -0
  22. package/dist/chat_models/googlevertexai/web.d.ts +19 -0
  23. package/dist/chat_models/googlevertexai/web.js +28 -0
  24. package/dist/document_loaders/web/notionapi.cjs +93 -70
  25. package/dist/document_loaders/web/notionapi.d.ts +33 -1
  26. package/dist/document_loaders/web/notionapi.js +89 -71
  27. package/dist/embeddings/googlevertexai.cjs +5 -1
  28. package/dist/embeddings/googlevertexai.d.ts +2 -1
  29. package/dist/embeddings/googlevertexai.js +5 -1
  30. package/dist/evaluation/agents/index.cjs +17 -0
  31. package/dist/evaluation/agents/index.d.ts +1 -0
  32. package/dist/evaluation/agents/index.js +1 -0
  33. package/dist/evaluation/agents/prompt.cjs +132 -0
  34. package/dist/evaluation/agents/prompt.d.ts +6 -0
  35. package/dist/evaluation/agents/prompt.js +129 -0
  36. package/dist/evaluation/agents/trajectory.cjs +189 -0
  37. package/dist/evaluation/agents/trajectory.d.ts +54 -0
  38. package/dist/evaluation/agents/trajectory.js +184 -0
  39. package/dist/evaluation/base.cjs +274 -0
  40. package/dist/evaluation/base.d.ts +232 -0
  41. package/dist/evaluation/base.js +263 -0
  42. package/dist/evaluation/comparison/index.cjs +17 -0
  43. package/dist/evaluation/comparison/index.d.ts +1 -0
  44. package/dist/evaluation/comparison/index.js +1 -0
  45. package/dist/evaluation/comparison/pairwise.cjs +244 -0
  46. package/dist/evaluation/comparison/pairwise.d.ts +50 -0
  47. package/dist/evaluation/comparison/pairwise.js +238 -0
  48. package/dist/evaluation/comparison/prompt.cjs +74 -0
  49. package/dist/evaluation/comparison/prompt.d.ts +21 -0
  50. package/dist/evaluation/comparison/prompt.js +71 -0
  51. package/dist/evaluation/criteria/criteria.cjs +259 -0
  52. package/dist/evaluation/criteria/criteria.d.ts +73 -0
  53. package/dist/evaluation/criteria/criteria.js +253 -0
  54. package/dist/evaluation/criteria/index.cjs +17 -0
  55. package/dist/evaluation/criteria/index.d.ts +1 -0
  56. package/dist/evaluation/criteria/index.js +1 -0
  57. package/dist/evaluation/criteria/prompt.cjs +36 -0
  58. package/dist/evaluation/criteria/prompt.d.ts +12 -0
  59. package/dist/evaluation/criteria/prompt.js +33 -0
  60. package/dist/evaluation/embedding_distance/base.cjs +163 -0
  61. package/dist/evaluation/embedding_distance/base.d.ts +78 -0
  62. package/dist/evaluation/embedding_distance/base.js +156 -0
  63. package/dist/evaluation/embedding_distance/index.cjs +17 -0
  64. package/dist/evaluation/embedding_distance/index.d.ts +1 -0
  65. package/dist/evaluation/embedding_distance/index.js +1 -0
  66. package/dist/evaluation/index.cjs +6 -0
  67. package/dist/evaluation/index.d.ts +6 -0
  68. package/dist/evaluation/index.js +6 -0
  69. package/dist/evaluation/loader.cjs +60 -0
  70. package/dist/evaluation/loader.d.ts +27 -0
  71. package/dist/evaluation/loader.js +56 -0
  72. package/dist/evaluation/types.cjs +2 -0
  73. package/dist/evaluation/types.d.ts +35 -0
  74. package/dist/evaluation/types.js +1 -0
  75. package/dist/experimental/multimodal_embeddings/googlevertexai.cjs +5 -1
  76. package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts +2 -1
  77. package/dist/experimental/multimodal_embeddings/googlevertexai.js +5 -1
  78. package/dist/llms/bedrock.cjs +9 -1
  79. package/dist/llms/bedrock.d.ts +3 -0
  80. package/dist/llms/bedrock.js +9 -1
  81. package/dist/llms/{googlevertexai.js → googlevertexai/common.cjs} +21 -17
  82. package/dist/llms/{googlevertexai.d.ts → googlevertexai/common.d.ts} +13 -23
  83. package/dist/llms/{googlevertexai.cjs → googlevertexai/common.js} +17 -21
  84. package/dist/llms/googlevertexai/index.cjs +34 -0
  85. package/dist/llms/googlevertexai/index.d.ts +26 -0
  86. package/dist/llms/googlevertexai/index.js +30 -0
  87. package/dist/llms/googlevertexai/web.cjs +31 -0
  88. package/dist/llms/googlevertexai/web.d.ts +24 -0
  89. package/dist/llms/googlevertexai/web.js +27 -0
  90. package/dist/load/import_constants.cjs +2 -0
  91. package/dist/load/import_constants.js +2 -0
  92. package/dist/load/import_map.cjs +2 -1
  93. package/dist/load/import_map.d.ts +1 -0
  94. package/dist/load/import_map.js +1 -0
  95. package/dist/load/serializable.cjs +23 -4
  96. package/dist/load/serializable.js +23 -4
  97. package/dist/retrievers/multi_query.cjs +140 -0
  98. package/dist/retrievers/multi_query.d.ts +33 -0
  99. package/dist/retrievers/multi_query.js +136 -0
  100. package/dist/retrievers/self_query/base.cjs +1 -1
  101. package/dist/retrievers/self_query/base.js +2 -2
  102. package/dist/retrievers/self_query/functional.cjs +1 -1
  103. package/dist/retrievers/self_query/functional.js +2 -2
  104. package/dist/retrievers/self_query/utils.cjs +46 -6
  105. package/dist/retrievers/self_query/utils.d.ts +7 -0
  106. package/dist/retrievers/self_query/utils.js +44 -5
  107. package/dist/schema/runnable/base.cjs +910 -0
  108. package/dist/schema/runnable/base.d.ts +300 -0
  109. package/dist/schema/runnable/base.js +896 -0
  110. package/dist/schema/runnable/index.cjs +19 -926
  111. package/dist/schema/runnable/index.d.ts +4 -298
  112. package/dist/schema/runnable/index.js +3 -914
  113. package/dist/schema/runnable/passthrough.cjs +31 -0
  114. package/dist/schema/runnable/passthrough.d.ts +11 -0
  115. package/dist/schema/runnable/passthrough.js +27 -0
  116. package/dist/schema/runnable/router.cjs +74 -0
  117. package/dist/schema/runnable/router.d.ts +29 -0
  118. package/dist/schema/runnable/router.js +70 -0
  119. package/dist/types/googlevertexai-types.d.ts +11 -4
  120. package/dist/util/googlevertexai-connection.cjs +14 -15
  121. package/dist/util/googlevertexai-connection.d.ts +7 -7
  122. package/dist/util/googlevertexai-connection.js +14 -15
  123. package/dist/util/googlevertexai-webauth.cjs +56 -0
  124. package/dist/util/googlevertexai-webauth.d.ts +25 -0
  125. package/dist/util/googlevertexai-webauth.js +52 -0
  126. package/dist/vectorstores/googlevertexai.cjs +9 -8
  127. package/dist/vectorstores/googlevertexai.d.ts +8 -7
  128. package/dist/vectorstores/googlevertexai.js +9 -8
  129. package/dist/vectorstores/opensearch.cjs +4 -2
  130. package/dist/vectorstores/opensearch.d.ts +4 -1
  131. package/dist/vectorstores/opensearch.js +4 -2
  132. package/llms/googlevertexai/web.cjs +1 -0
  133. package/llms/googlevertexai/web.d.ts +1 -0
  134. package/llms/googlevertexai/web.js +1 -0
  135. package/llms/googlevertexai.cjs +1 -1
  136. package/llms/googlevertexai.d.ts +1 -1
  137. package/llms/googlevertexai.js +1 -1
  138. package/package.json +32 -3
  139. package/retrievers/multi_query.cjs +1 -0
  140. package/retrievers/multi_query.d.ts +1 -0
  141. package/retrievers/multi_query.js +1 -0
package/dist/evaluation/embedding_distance/base.js
@@ -0,0 +1,156 @@
+ import { distance, similarity } from "ml-distance";
+ import { PairwiseStringEvaluator, StringEvaluator, } from "../base.js";
+ import { OpenAIEmbeddings } from "../../embeddings/openai.js";
+ /**
+ * Get the distance function for the given distance type.
+ * @param distance The distance type.
+ * @return The distance function.
+ */
+ export function getDistanceCalculationFunction(distanceType) {
+ const distanceFunctions = {
+ cosine: (X, Y) => 1.0 - similarity.cosine(X, Y),
+ euclidean: distance.euclidean,
+ manhattan: distance.manhattan,
+ chebyshev: distance.chebyshev,
+ };
+ return distanceFunctions[distanceType];
+ }
+ /**
+ * Compute the score based on the distance metric.
+ * @param vectors The input vectors.
+ * @param distanceMetric The distance metric.
+ * @return The computed score.
+ */
+ export function computeEvaluationScore(vectors, distanceMetric) {
+ const metricFunction = getDistanceCalculationFunction(distanceMetric);
+ return metricFunction(vectors[0], vectors[1]);
+ }
+ /**
+ * Use embedding distances to score semantic difference between
+ * a prediction and reference.
+ */
+ export class EmbeddingDistanceEvalChain extends StringEvaluator {
+ constructor(fields) {
+ super();
+ Object.defineProperty(this, "requiresReference", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: true
+ });
+ Object.defineProperty(this, "requiresInput", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: false
+ });
+ Object.defineProperty(this, "outputKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "score"
+ });
+ Object.defineProperty(this, "embedding", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "distanceMetric", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "cosine"
+ });
+ this.embedding = fields?.embedding || new OpenAIEmbeddings();
+ this.distanceMetric = fields?.distanceMetric || "cosine";
+ }
+ _chainType() {
+ return `embedding_${this.distanceMetric}_distance`;
+ }
+ async _evaluateStrings(args, config) {
+ const result = await this.call(args, config);
+ return { [this.outputKey]: result[this.outputKey] };
+ }
+ get inputKeys() {
+ return ["reference", "prediction"];
+ }
+ get outputKeys() {
+ return [this.outputKey];
+ }
+ async _call(values, _runManager) {
+ const { prediction, reference } = values;
+ if (!this.embedding)
+ throw new Error("Embedding is undefined");
+ const vectors = await this.embedding.embedDocuments([
+ prediction,
+ reference,
+ ]);
+ const score = computeEvaluationScore(vectors, this.distanceMetric);
+ return { [this.outputKey]: score };
+ }
+ }
+ /**
+ * Use embedding distances to score semantic difference between two predictions.
+ */
+ export class PairwiseEmbeddingDistanceEvalChain extends PairwiseStringEvaluator {
+ constructor(fields) {
+ super();
+ Object.defineProperty(this, "requiresReference", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: false
+ });
+ Object.defineProperty(this, "requiresInput", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: false
+ });
+ Object.defineProperty(this, "outputKey", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "score"
+ });
+ Object.defineProperty(this, "embedding", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "distanceMetric", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "cosine"
+ });
+ this.embedding = fields?.embedding || new OpenAIEmbeddings();
+ this.distanceMetric = fields?.distanceMetric || "cosine";
+ }
+ _chainType() {
+ return `pairwise_embedding_${this.distanceMetric}_distance`;
+ }
+ async _evaluateStringPairs(args, config) {
+ const result = await this.call(args, config);
+ return { [this.outputKey]: result[this.outputKey] };
+ }
+ get inputKeys() {
+ return ["prediction", "predictionB"];
+ }
+ get outputKeys() {
+ return [this.outputKey];
+ }
+ async _call(values, _runManager) {
+ const { prediction, predictionB } = values;
+ if (!this.embedding)
+ throw new Error("Embedding is undefined");
+ const vectors = await this.embedding.embedDocuments([
+ prediction,
+ predictionB,
+ ]);
+ const score = computeEvaluationScore(vectors, this.distanceMetric);
+ return { [this.outputKey]: score };
+ }
+ }
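The two chains above score semantic closeness with embeddings only; no chat model is involved. A minimal usage sketch in TypeScript, assuming the evaluation module is exposed through a new "langchain/evaluation" entrypoint (the entrypoint name is inferred from the import_map and package.json changes in the file list, not shown in this excerpt):

import { EmbeddingDistanceEvalChain } from "langchain/evaluation";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";

// distanceMetric accepts "cosine" (default), "euclidean", "manhattan", or "chebyshev";
// embedding falls back to new OpenAIEmbeddings() when omitted, per the constructor above.
const chain = new EmbeddingDistanceEvalChain({
  embedding: new OpenAIEmbeddings(),
  distanceMetric: "cosine",
});

// inputKeys are ["reference", "prediction"]; the result is keyed by outputKey ("score").
const res = await chain.call({
  prediction: "The cat sat on the mat.",
  reference: "A cat was sitting on a mat.",
});
console.log(res.score); // smaller distance means more semantically similar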
package/dist/evaluation/embedding_distance/index.cjs
@@ -0,0 +1,17 @@
+ "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ __exportStar(require("./base.cjs"), exports);
package/dist/evaluation/embedding_distance/index.d.ts
@@ -0,0 +1 @@
+ export * from "./base.js";
package/dist/evaluation/embedding_distance/index.js
@@ -0,0 +1 @@
+ export * from "./base.js";
package/dist/evaluation/index.cjs
@@ -15,3 +15,9 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
  };
  Object.defineProperty(exports, "__esModule", { value: true });
  __exportStar(require("./qa/index.cjs"), exports);
+ __exportStar(require("./criteria/index.cjs"), exports);
+ __exportStar(require("./agents/index.cjs"), exports);
+ __exportStar(require("./embedding_distance/index.cjs"), exports);
+ __exportStar(require("./comparison/index.cjs"), exports);
+ __exportStar(require("./types.cjs"), exports);
+ __exportStar(require("./loader.cjs"), exports);
package/dist/evaluation/index.d.ts
@@ -1 +1,7 @@
  export * from "./qa/index.js";
+ export * from "./criteria/index.js";
+ export * from "./agents/index.js";
+ export * from "./embedding_distance/index.js";
+ export * from "./comparison/index.js";
+ export * from "./types.js";
+ export * from "./loader.js";
package/dist/evaluation/index.js
@@ -1 +1,7 @@
  export * from "./qa/index.js";
+ export * from "./criteria/index.js";
+ export * from "./agents/index.js";
+ export * from "./embedding_distance/index.js";
+ export * from "./comparison/index.js";
+ export * from "./types.js";
+ export * from "./loader.js";
package/dist/evaluation/loader.cjs
@@ -0,0 +1,60 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.loadEvaluator = void 0;
+ const index_js_1 = require("./criteria/index.cjs");
+ const openai_js_1 = require("../chat_models/openai.cjs");
+ const index_js_2 = require("./comparison/index.cjs");
+ const index_js_3 = require("./embedding_distance/index.cjs");
+ const index_js_4 = require("./agents/index.cjs");
+ const base_js_1 = require("../chat_models/base.cjs");
+ /**
+ * Load the requested evaluation chain specified by a string
+ * @param type The type of evaluator to load.
+ * @param options
+ * - llm The language model to use for the evaluator.
+ * - criteria The criteria to use for the evaluator.
+ * - agentTools A list of tools available to the agent,for TrajectoryEvalChain.
+ */
+ async function loadEvaluator(type, options) {
+ const { llm, chainOptions, criteria, agentTools } = options || {};
+ const llm_ = llm ??
+ new openai_js_1.ChatOpenAI({
+ modelName: "gpt-4",
+ temperature: 0.0,
+ });
+ let evaluator;
+ switch (type) {
+ case "criteria":
+ evaluator = await index_js_1.CriteriaEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "labeled_criteria":
+ evaluator = await index_js_1.LabeledCriteriaEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "pairwise_string":
+ evaluator = await index_js_2.PairwiseStringEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "labeled_pairwise_string":
+ evaluator = await index_js_2.LabeledPairwiseStringEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "trajectory":
+ // eslint-disable-next-line no-instanceof/no-instanceof
+ if (!(llm_ instanceof base_js_1.BaseChatModel)) {
+ throw new Error("LLM must be an instance of a base chat model.");
+ }
+ evaluator = await index_js_4.TrajectoryEvalChain.fromLLM(llm_, agentTools, chainOptions);
+ break;
+ case "embedding_distance":
+ evaluator = new index_js_3.EmbeddingDistanceEvalChain({
+ embedding: options?.embedding,
+ distanceMetric: options?.distanceMetric,
+ });
+ break;
+ case "pairwise_embedding_distance":
+ evaluator = new index_js_3.PairwiseEmbeddingDistanceEvalChain({});
+ break;
+ default:
+ throw new Error(`Unknown type: ${type}`);
+ }
+ return evaluator;
+ }
+ exports.loadEvaluator = loadEvaluator;
package/dist/evaluation/loader.d.ts
@@ -0,0 +1,27 @@
+ import { BaseLanguageModel } from "../base_language/index.js";
+ import { CriteriaLike } from "./criteria/index.js";
+ import type { EvaluatorType } from "./types.js";
+ import { StructuredTool } from "../tools/index.js";
+ import { LLMEvalChainInput } from "./base.js";
+ import { EmbeddingDistanceEvalChainInput } from "./embedding_distance/index.js";
+ export type LoadEvaluatorOptions = EmbeddingDistanceEvalChainInput & {
+ llm?: BaseLanguageModel;
+ chainOptions?: Partial<Omit<LLMEvalChainInput, "llm">>;
+ /**
+ * The criteria to use for the evaluator.
+ */
+ criteria?: CriteriaLike;
+ /**
+ * A list of tools available to the agent, for TrajectoryEvalChain.
+ */
+ agentTools?: StructuredTool[];
+ };
+ /**
+ * Load the requested evaluation chain specified by a string
+ * @param type The type of evaluator to load.
+ * @param options
+ * - llm The language model to use for the evaluator.
+ * - criteria The criteria to use for the evaluator.
+ * - agentTools A list of tools available to the agent,for TrajectoryEvalChain.
+ */
+ export declare function loadEvaluator<T extends keyof EvaluatorType>(type: T, options?: LoadEvaluatorOptions): Promise<EvaluatorType[T]>;
package/dist/evaluation/loader.js
@@ -0,0 +1,56 @@
+ import { CriteriaEvalChain, LabeledCriteriaEvalChain, } from "./criteria/index.js";
+ import { ChatOpenAI } from "../chat_models/openai.js";
+ import { LabeledPairwiseStringEvalChain, PairwiseStringEvalChain, } from "./comparison/index.js";
+ import { EmbeddingDistanceEvalChain, PairwiseEmbeddingDistanceEvalChain, } from "./embedding_distance/index.js";
+ import { TrajectoryEvalChain } from "./agents/index.js";
+ import { BaseChatModel } from "../chat_models/base.js";
+ /**
+ * Load the requested evaluation chain specified by a string
+ * @param type The type of evaluator to load.
+ * @param options
+ * - llm The language model to use for the evaluator.
+ * - criteria The criteria to use for the evaluator.
+ * - agentTools A list of tools available to the agent,for TrajectoryEvalChain.
+ */
+ export async function loadEvaluator(type, options) {
+ const { llm, chainOptions, criteria, agentTools } = options || {};
+ const llm_ = llm ??
+ new ChatOpenAI({
+ modelName: "gpt-4",
+ temperature: 0.0,
+ });
+ let evaluator;
+ switch (type) {
+ case "criteria":
+ evaluator = await CriteriaEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "labeled_criteria":
+ evaluator = await LabeledCriteriaEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "pairwise_string":
+ evaluator = await PairwiseStringEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "labeled_pairwise_string":
+ evaluator = await LabeledPairwiseStringEvalChain.fromLLM(llm_, criteria, chainOptions);
+ break;
+ case "trajectory":
+ // eslint-disable-next-line no-instanceof/no-instanceof
+ if (!(llm_ instanceof BaseChatModel)) {
+ throw new Error("LLM must be an instance of a base chat model.");
+ }
+ evaluator = await TrajectoryEvalChain.fromLLM(llm_, agentTools, chainOptions);
+ break;
+ case "embedding_distance":
+ evaluator = new EmbeddingDistanceEvalChain({
+ embedding: options?.embedding,
+ distanceMetric: options?.distanceMetric,
+ });
+ break;
+ case "pairwise_embedding_distance":
+ evaluator = new PairwiseEmbeddingDistanceEvalChain({});
+ break;
+ default:
+ throw new Error(`Unknown type: ${type}`);
+ }
+ return evaluator;
+ }
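The loader above is the single entry point for the new evaluation module. A hedged usage sketch, again assuming a "langchain/evaluation" entrypoint and assuming the base evaluator classes expose a public evaluateStrings wrapper around the protected _evaluateStrings implementations shown elsewhere in this diff:

import { loadEvaluator } from "langchain/evaluation";
import { ChatOpenAI } from "langchain/chat_models/openai";

// Omitting llm falls back to a gpt-4 ChatOpenAI at temperature 0, per the code above.
const evaluator = await loadEvaluator("labeled_criteria", {
  llm: new ChatOpenAI({ temperature: 0 }),
  criteria: "correctness", // CriteriaLike; a single named criterion is used here as an example
});

// evaluateStrings is assumed to take the same prediction/reference/input keys used internally.
const result = await evaluator.evaluateStrings({
  input: "What is 2 + 2?",
  prediction: "4",
  reference: "4",
});
console.log(result);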
package/dist/evaluation/types.cjs
@@ -0,0 +1,2 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
package/dist/evaluation/types.d.ts
@@ -0,0 +1,35 @@
+ import { AgentTrajectoryEvaluator, LLMPairwiseStringEvaluator, LLMStringEvaluator, PairwiseStringEvaluator, StringEvaluator } from "./base.js";
+ export interface EvaluatorType {
+ /**
+ * The criteria evaluator, which evaluates a model based on a
+ * custom set of criteria without any reference labels.
+ * */
+ criteria: LLMStringEvaluator;
+ /**
+ * The labeled criteria evaluator, which evaluates a model based on a
+ * custom set of criteria, with a reference label.
+ * */
+ labeled_criteria: LLMStringEvaluator;
+ /**
+ * The pairwise string evaluator, which predicts the preferred prediction from
+ * between two models.
+ */
+ pairwise_string: LLMPairwiseStringEvaluator;
+ /**
+ * The labeled pairwise string evaluator, which predicts the preferred prediction
+ * from between two models based on a ground truth reference label.
+ * */
+ labeled_pairwise_string: LLMPairwiseStringEvaluator;
+ /**
+ * The agent trajectory evaluator, which grades the agent's intermediate steps.
+ */
+ trajectory: AgentTrajectoryEvaluator;
+ /**
+ * Compare a prediction to a reference label using embedding distance.
+ * */
+ embedding_distance: StringEvaluator;
+ /**
+ * Compare two predictions using embedding distance.
+ * */
+ pairwise_embedding_distance: PairwiseStringEvaluator;
+ }
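EvaluatorType is what ties the string key passed to loadEvaluator to a concrete return type. A small type-level sketch, assuming the same "langchain/evaluation" entrypoint:

import type { EvaluatorType } from "langchain/evaluation";

// loadEvaluator<T extends keyof EvaluatorType> resolves its return type through this map:
type TrajectoryEvaluator = EvaluatorType["trajectory"];       // AgentTrajectoryEvaluator
type DistanceEvaluator = EvaluatorType["embedding_distance"]; // StringEvaluator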
package/dist/evaluation/types.js
@@ -0,0 +1 @@
+ export {};
package/dist/experimental/multimodal_embeddings/googlevertexai.cjs
@@ -1,6 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.GoogleVertexAIMultimodalEmbeddings = void 0;
+ const google_auth_library_1 = require("google-auth-library");
  const base_js_1 = require("../../embeddings/base.cjs");
  const googlevertexai_connection_js_1 = require("../../util/googlevertexai-connection.cjs");
  /**
@@ -24,7 +25,10 @@ class GoogleVertexAIMultimodalEmbeddings extends base_js_1.Embeddings {
  value: void 0
  });
  this.model = fields?.model ?? this.model;
- this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller);
+ this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, new google_auth_library_1.GoogleAuth({
+ scopes: "https://www.googleapis.com/auth/cloud-platform",
+ ...fields?.authOptions,
+ }));
  }
  /**
  * Converts media (text or image) to an instance that can be used for
package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts
@@ -1,11 +1,12 @@
  /// <reference types="node" resolution-mode="require"/>
+ import { GoogleAuthOptions } from "google-auth-library";
  import { Embeddings, EmbeddingsParams } from "../../embeddings/base.js";
  import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAILLMResponse } from "../../types/googlevertexai-types.js";
  /**
  * Parameters for the GoogleVertexAIMultimodalEmbeddings class, extending
  * both EmbeddingsParams and GoogleVertexAIConnectionParams.
  */
- export interface GoogleVertexAIMultimodalEmbeddingsParams extends EmbeddingsParams, GoogleVertexAIBaseLLMInput {
+ export interface GoogleVertexAIMultimodalEmbeddingsParams extends EmbeddingsParams, GoogleVertexAIBaseLLMInput<GoogleAuthOptions> {
  }
  /**
  * An instance of media (text or image) that can be used for generating
package/dist/experimental/multimodal_embeddings/googlevertexai.js
@@ -1,3 +1,4 @@
+ import { GoogleAuth } from "google-auth-library";
  import { Embeddings } from "../../embeddings/base.js";
  import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
  /**
@@ -21,7 +22,10 @@ export class GoogleVertexAIMultimodalEmbeddings extends Embeddings {
  value: void 0
  });
  this.model = fields?.model ?? this.model;
- this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller);
+ this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, new GoogleAuth({
+ scopes: "https://www.googleapis.com/auth/cloud-platform",
+ ...fields?.authOptions,
+ }));
  }
  /**
  * Converts media (text or image) to an instance that can be used for
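All builds of the multimodal embeddings class now construct their own GoogleAuth client and forward an optional authOptions field into it, with the cloud-platform scope applied by default. A sketch of passing explicit credentials, assuming the public entrypoint mirrors the dist path shown in the file list:

import { GoogleVertexAIMultimodalEmbeddings } from "langchain/experimental/multimodal_embeddings/googlevertexai";

const embeddings = new GoogleVertexAIMultimodalEmbeddings({
  authOptions: {
    // Any GoogleAuthOptions field is accepted; keyFilename is one common choice.
    keyFilename: "/path/to/service-account.json",
  },
});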
package/dist/llms/bedrock.cjs
@@ -112,6 +112,12 @@ class Bedrock extends base_js_1.LLM {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "endpointUrl", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  Object.defineProperty(this, "codec", {
  enumerable: true,
  configurable: true,
@@ -132,6 +138,7 @@ class Bedrock extends base_js_1.LLM {
  this.temperature = fields?.temperature ?? this.temperature;
  this.maxTokens = fields?.maxTokens ?? this.maxTokens;
  this.fetchFn = fields?.fetchFn ?? fetch;
+ this.endpointUrl = fields?.endpointUrl;
  }
  /** Call out to Bedrock service model.
  Arguments:
@@ -154,7 +161,8 @@ class Bedrock extends base_js_1.LLM {
  const provider = this.model.split(".")[0];
  const service = "bedrock";
  const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature);
- const url = new URL(`https://${service}.${this.region}.amazonaws.com/model/${this.model}/invoke-with-response-stream`);
+ const endpointUrl = this.endpointUrl ?? `${service}.${this.region}.amazonaws.com`;
+ const url = new URL(`https://${endpointUrl}/model/${this.model}/invoke-with-response-stream`);
  const request = new protocol_http_1.HttpRequest({
  hostname: url.hostname,
  path: url.pathname,
package/dist/llms/bedrock.d.ts
@@ -29,6 +29,8 @@ export interface BedrockInput {
  maxTokens?: number;
  /** A custom fetch function for low-level access to AWS API. Defaults to fetch() */
  fetchFn?: typeof fetch;
+ /** Override the default endpoint url */
+ endpointUrl?: string;
  }
  /**
  * A type of Large Language Model (LLM) that interacts with the Bedrock
@@ -46,6 +48,7 @@ export declare class Bedrock extends LLM implements BedrockInput {
  temperature?: number | undefined;
  maxTokens?: number | undefined;
  fetchFn: typeof fetch;
+ endpointUrl?: string;
  codec: EventStreamCodec;
  get lc_secrets(): {
  [key: string]: string;
package/dist/llms/bedrock.js
@@ -109,6 +109,12 @@ export class Bedrock extends LLM {
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "endpointUrl", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  Object.defineProperty(this, "codec", {
  enumerable: true,
  configurable: true,
@@ -129,6 +135,7 @@ export class Bedrock extends LLM {
  this.temperature = fields?.temperature ?? this.temperature;
  this.maxTokens = fields?.maxTokens ?? this.maxTokens;
  this.fetchFn = fields?.fetchFn ?? fetch;
+ this.endpointUrl = fields?.endpointUrl;
  }
  /** Call out to Bedrock service model.
  Arguments:
@@ -151,7 +158,8 @@ export class Bedrock extends LLM {
  const provider = this.model.split(".")[0];
  const service = "bedrock";
  const inputBody = BedrockLLMInputOutputAdapter.prepareInput(provider, prompt, this.maxTokens, this.temperature);
- const url = new URL(`https://${service}.${this.region}.amazonaws.com/model/${this.model}/invoke-with-response-stream`);
+ const endpointUrl = this.endpointUrl ?? `${service}.${this.region}.amazonaws.com`;
+ const url = new URL(`https://${endpointUrl}/model/${this.model}/invoke-with-response-stream`);
  const request = new HttpRequest({
  hostname: url.hostname,
  path: url.pathname,
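Both Bedrock builds gain an optional endpointUrl that, when set, replaces the default bedrock.<region>.amazonaws.com host in the invoke URL. A sketch of the override, with the model id chosen only as an example:

import { Bedrock } from "langchain/llms/bedrock";

const model = new Bedrock({
  model: "anthropic.claude-v2", // example model id, not prescribed by this diff
  region: "us-east-1",
  endpointUrl: "bedrock.internal-proxy.example.com", // hostname only; https:// is prepended internally
});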
package/dist/llms/{googlevertexai.js → googlevertexai/common.cjs}
@@ -1,22 +1,26 @@
- import { BaseLLM } from "./base.js";
- import { GoogleVertexAILLMConnection } from "../util/googlevertexai-connection.js";
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.BaseGoogleVertexAI = void 0;
+ const base_js_1 = require("../base.cjs");
  /**
- * Enables calls to the Google Cloud's Vertex AI API to access
- * Large Language Models.
- *
- * To use, you will need to have one of the following authentication
- * methods in place:
- * - You are logged into an account permitted to the Google Cloud project
- * using Vertex AI.
- * - You are running this on a machine using a service account permitted to
- * the Google Cloud project using Vertex AI.
- * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
- * path of a credentials file for a service account permitted to the
- * Google Cloud project using Vertex AI.
+ * Base class for Google Vertex AI LLMs.
+ * Implemented subclasses must provide a GoogleVertexAILLMConnection
+ * with an appropriate auth client.
  */
- export class GoogleVertexAI extends BaseLLM {
+ class BaseGoogleVertexAI extends base_js_1.BaseLLM {
+ get lc_aliases() {
+ return {
+ model: "model_name",
+ };
+ }
  constructor(fields) {
  super(fields ?? {});
+ Object.defineProperty(this, "lc_serializable", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: true
+ });
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
@@ -65,10 +69,9 @@ export class GoogleVertexAI extends BaseLLM {
  this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens;
  this.topP = fields?.topP ?? this.topP;
  this.topK = fields?.topK ?? this.topK;
- this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller);
  }
  _llmType() {
- return "googlevertexai";
+ return "vertexai";
  }
  async _generate(prompts, options) {
  const generations = await Promise.all(prompts.map((prompt) => this._generatePrompt(prompt, options)));
@@ -129,3 +132,4 @@ export class GoogleVertexAI extends BaseLLM {
  return result?.data?.predictions[0];
  }
  }
+ exports.BaseGoogleVertexAI = BaseGoogleVertexAI;
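The Vertex AI LLM is split into a shared BaseGoogleVertexAI (above) plus separate Node and web entrypoints; the Node build keeps its original import path, while a new web build ships under llms/googlevertexai/web per the file list. A sketch, with the model name chosen only as an example and assuming the web build re-exports the same class name:

import { GoogleVertexAI } from "langchain/llms/googlevertexai";
// For browser/edge runtimes, the new web entrypoint is assumed to expose the same class:
// import { GoogleVertexAI } from "langchain/llms/googlevertexai/web";

const llm = new GoogleVertexAI({
  model: "text-bison",  // example model name, not prescribed by this diff
  temperature: 0.2,
  maxOutputTokens: 256,
});

const output = await llm.call("Write a haiku about package diffs.");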
package/dist/llms/{googlevertexai.d.ts → googlevertexai/common.d.ts}
@@ -1,11 +1,8 @@
- import { BaseLLM } from "./base.js";
- import { Generation, LLMResult } from "../schema/index.js";
- import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAILLMResponse } from "../types/googlevertexai-types.js";
- /**
- * Interface representing the input to the Google Vertex AI model.
- */
- export interface GoogleVertexAITextInput extends GoogleVertexAIBaseLLMInput {
- }
+ import { BaseLLM } from "../base.js";
+ import { Generation, LLMResult } from "../../schema/index.js";
+ import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
+ import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAILLMResponse } from "../../types/googlevertexai-types.js";
+ import { BaseLanguageModelCallOptions } from "../../base_language/index.js";
  /**
  * Interface representing the instance of text input to the Google Vertex
  * AI model.
@@ -32,27 +29,20 @@ interface TextPrediction extends GoogleVertexAIBasePrediction {
  content: string;
  }
  /**
- * Enables calls to the Google Cloud's Vertex AI API to access
- * Large Language Models.
- *
- * To use, you will need to have one of the following authentication
- * methods in place:
- * - You are logged into an account permitted to the Google Cloud project
- * using Vertex AI.
- * - You are running this on a machine using a service account permitted to
- * the Google Cloud project using Vertex AI.
- * - The `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to the
- * path of a credentials file for a service account permitted to the
- * Google Cloud project using Vertex AI.
+ * Base class for Google Vertex AI LLMs.
+ * Implemented subclasses must provide a GoogleVertexAILLMConnection
+ * with an appropriate auth client.
  */
- export declare class GoogleVertexAI extends BaseLLM implements GoogleVertexAITextInput {
+ export declare class BaseGoogleVertexAI<AuthOptions> extends BaseLLM implements GoogleVertexAIBaseLLMInput<AuthOptions> {
+ lc_serializable: boolean;
  model: string;
  temperature: number;
  maxOutputTokens: number;
  topP: number;
  topK: number;
- private connection;
- constructor(fields?: GoogleVertexAITextInput);
+ protected connection: GoogleVertexAILLMConnection<BaseLanguageModelCallOptions, GoogleVertexAILLMInstance, TextPrediction, AuthOptions>;
+ get lc_aliases(): Record<string, string>;
+ constructor(fields?: GoogleVertexAIBaseLLMInput<AuthOptions>);
  _llmType(): string;
  _generate(prompts: string[], options: this["ParsedCallOptions"]): Promise<LLMResult>;
  _generatePrompt(prompt: string, options: this["ParsedCallOptions"]): Promise<Generation[]>;