langchain 0.0.175 → 0.0.176

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/dist/chat_models/googlevertexai/common.cjs +46 -7
  2. package/dist/chat_models/googlevertexai/common.d.ts +7 -2
  3. package/dist/chat_models/googlevertexai/common.js +47 -8
  4. package/dist/chat_models/googlevertexai/index.cjs +4 -3
  5. package/dist/chat_models/googlevertexai/index.js +4 -3
  6. package/dist/chat_models/googlevertexai/web.cjs +2 -1
  7. package/dist/chat_models/googlevertexai/web.js +2 -1
  8. package/dist/embeddings/googlevertexai.cjs +1 -1
  9. package/dist/embeddings/googlevertexai.js +1 -1
  10. package/dist/experimental/hubs/makersuite/googlemakersuitehub.d.ts +2 -2
  11. package/dist/experimental/multimodal_embeddings/googlevertexai.cjs +1 -1
  12. package/dist/experimental/multimodal_embeddings/googlevertexai.d.ts +2 -1
  13. package/dist/experimental/multimodal_embeddings/googlevertexai.js +2 -2
  14. package/dist/experimental/plan_and_execute/agent_executor.cjs +7 -4
  15. package/dist/experimental/plan_and_execute/agent_executor.d.ts +4 -3
  16. package/dist/experimental/plan_and_execute/agent_executor.js +8 -5
  17. package/dist/experimental/plan_and_execute/prompt.cjs +25 -9
  18. package/dist/experimental/plan_and_execute/prompt.d.ts +9 -1
  19. package/dist/experimental/plan_and_execute/prompt.js +23 -8
  20. package/dist/llms/googlevertexai/common.cjs +46 -13
  21. package/dist/llms/googlevertexai/common.d.ts +8 -3
  22. package/dist/llms/googlevertexai/common.js +46 -13
  23. package/dist/llms/googlevertexai/index.cjs +4 -3
  24. package/dist/llms/googlevertexai/index.js +4 -3
  25. package/dist/llms/googlevertexai/web.cjs +2 -1
  26. package/dist/llms/googlevertexai/web.js +2 -1
  27. package/dist/types/googlevertexai-types.d.ts +12 -10
  28. package/dist/util/googlevertexai-connection.cjs +298 -10
  29. package/dist/util/googlevertexai-connection.d.ts +76 -7
  30. package/dist/util/googlevertexai-connection.js +294 -9
  31. package/dist/util/googlevertexai-gauth.cjs +36 -0
  32. package/dist/util/googlevertexai-gauth.d.ts +8 -0
  33. package/dist/util/googlevertexai-gauth.js +32 -0
  34. package/dist/util/googlevertexai-webauth.cjs +38 -2
  35. package/dist/util/googlevertexai-webauth.d.ts +2 -6
  36. package/dist/util/googlevertexai-webauth.js +38 -2
  37. package/dist/vectorstores/googlevertexai.d.ts +4 -4
  38. package/package.json +1 -1

package/dist/llms/googlevertexai/common.cjs

@@ -2,6 +2,7 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.BaseGoogleVertexAI = void 0;
 const base_js_1 = require("../base.cjs");
+const index_js_1 = require("../../schema/index.cjs");
 /**
  * Base class for Google Vertex AI LLMs.
  * Implemented subclasses must provide a GoogleVertexAILLMConnection
@@ -57,6 +58,12 @@ class BaseGoogleVertexAI extends base_js_1.BaseLLM {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "streamedConnection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.model = fields?.model ?? this.model;
         // Change the defaults for code models
         if (this.model.startsWith("code-gecko")) {
@@ -73,26 +80,37 @@ class BaseGoogleVertexAI extends base_js_1.BaseLLM {
     _llmType() {
         return "vertexai";
     }
+    async *_streamResponseChunks(_input, _options, _runManager) {
+        // Make the call as a streaming request
+        const instance = this.formatInstance(_input);
+        const parameters = this.formatParameters();
+        const result = await this.streamedConnection.request([instance], parameters, _options);
+        // Get the streaming parser of the response
+        const stream = result.data;
+        // Loop until the end of the stream
+        // During the loop, yield each time we get a chunk from the streaming parser
+        // that is either available or added to the queue
+        while (!stream.streamDone) {
+            const output = await stream.nextChunk();
+            const chunk = output !== null
+                ? new index_js_1.GenerationChunk(this.extractGenerationFromPrediction(output.outputs[0]))
+                : new index_js_1.GenerationChunk({
+                    text: "",
+                    generationInfo: { finishReason: "stop" },
+                });
+            yield chunk;
+        }
+    }
     async _generate(prompts, options) {
         const generations = await Promise.all(prompts.map((prompt) => this._generatePrompt(prompt, options)));
         return { generations };
     }
     async _generatePrompt(prompt, options) {
         const instance = this.formatInstance(prompt);
-        const parameters = {
-            temperature: this.temperature,
-            topK: this.topK,
-            topP: this.topP,
-            maxOutputTokens: this.maxOutputTokens,
-        };
+        const parameters = this.formatParameters();
         const result = await this.connection.request([instance], parameters, options);
         const prediction = this.extractPredictionFromResponse(result);
-        return [
-            {
-                text: prediction.content,
-                generationInfo: prediction,
-            },
-        ];
+        return [this.extractGenerationFromPrediction(prediction)];
     }
     /**
      * Formats the input instance as a text instance for the Google Vertex AI
@@ -123,13 +141,28 @@ class BaseGoogleVertexAI extends base_js_1.BaseLLM {
             ? this.formatInstanceCode(prompt)
             : this.formatInstanceText(prompt);
     }
+    formatParameters() {
+        return {
+            temperature: this.temperature,
+            topK: this.topK,
+            topP: this.topP,
+            maxOutputTokens: this.maxOutputTokens,
+        };
+    }
     /**
      * Extracts the prediction from the API response.
      * @param result The API response from which to extract the prediction.
      * @returns A TextPrediction object representing the extracted prediction.
      */
     extractPredictionFromResponse(result) {
-        return result?.data?.predictions[0];
+        return result?.data
+            ?.predictions[0];
+    }
+    extractGenerationFromPrediction(prediction) {
+        return {
+            text: prediction.content,
+            generationInfo: prediction,
+        };
     }
 }
 exports.BaseGoogleVertexAI = BaseGoogleVertexAI;
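
The _streamResponseChunks generator added above is the internal hook that backs token streaming for this LLM. A minimal usage sketch, assuming the public Runnable .stream() method on LLMs in this release delegates to that hook (the model name, parameters, and prompt are illustrative):

import { GoogleVertexAI } from "langchain/llms/googlevertexai";

async function main() {
  // Illustrative settings; project and credentials come from the ambient Google auth.
  const model = new GoogleVertexAI({
    model: "text-bison",
    temperature: 0.7,
    maxOutputTokens: 256,
  });

  // Assumption: .stream() delegates to _streamResponseChunks, which pulls chunks
  // from streamedConnection until the streaming parser reports streamDone.
  const stream = await model.stream("Write a haiku about the ocean.");
  for await (const chunk of stream) {
    process.stdout.write(chunk);
  }
}

main().catch(console.error);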

package/dist/llms/googlevertexai/common.d.ts

@@ -1,8 +1,9 @@
 import { BaseLLM } from "../base.js";
-import { Generation, LLMResult } from "../../schema/index.js";
-import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
-import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAILLMResponse } from "../../types/googlevertexai-types.js";
+import { Generation, GenerationChunk, LLMResult } from "../../schema/index.js";
+import { GoogleVertexAILLMConnection, GoogleVertexAILLMResponse } from "../../util/googlevertexai-connection.js";
+import { GoogleVertexAIBaseLLMInput, GoogleVertexAIBasePrediction, GoogleVertexAIModelParams } from "../../types/googlevertexai-types.js";
 import { BaseLanguageModelCallOptions } from "../../base_language/index.js";
+import { CallbackManagerForLLMRun } from "../../callbacks/index.js";
 /**
  * Interface representing the instance of text input to the Google Vertex
  * AI model.
@@ -41,9 +42,11 @@ export declare class BaseGoogleVertexAI<AuthOptions> extends BaseLLM implements
     topP: number;
     topK: number;
     protected connection: GoogleVertexAILLMConnection<BaseLanguageModelCallOptions, GoogleVertexAILLMInstance, TextPrediction, AuthOptions>;
+    protected streamedConnection: GoogleVertexAILLMConnection<BaseLanguageModelCallOptions, GoogleVertexAILLMInstance, TextPrediction, AuthOptions>;
     get lc_aliases(): Record<string, string>;
     constructor(fields?: GoogleVertexAIBaseLLMInput<AuthOptions>);
     _llmType(): string;
+    _streamResponseChunks(_input: string, _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;
     _generate(prompts: string[], options: this["ParsedCallOptions"]): Promise<LLMResult>;
     _generatePrompt(prompt: string, options: this["ParsedCallOptions"]): Promise<Generation[]>;
     /**
@@ -67,11 +70,13 @@ export declare class BaseGoogleVertexAI<AuthOptions> extends BaseLLM implements
      * @returns A GoogleVertexAILLMInstance object representing the formatted instance.
      */
     formatInstance(prompt: string): GoogleVertexAILLMInstance;
+    formatParameters(): GoogleVertexAIModelParams;
     /**
      * Extracts the prediction from the API response.
      * @param result The API response from which to extract the prediction.
      * @returns A TextPrediction object representing the extracted prediction.
      */
     extractPredictionFromResponse(result: GoogleVertexAILLMResponse<TextPrediction>): TextPrediction;
+    extractGenerationFromPrediction(prediction: TextPrediction): Generation;
 }
 export {};
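
The declaration changes above expose formatParameters() and extractGenerationFromPrediction() on the class surface, so request parameters can be adjusted without reimplementing _generatePrompt. A sketch of that extension point; the subclass name and the clamping rule are purely illustrative:

import { GoogleVertexAI } from "langchain/llms/googlevertexai";

// Hypothetical subclass: reuses the base parameters but caps maxOutputTokens,
// overriding the newly exposed formatParameters() hook.
class CappedVertexAI extends GoogleVertexAI {
  formatParameters() {
    const params = super.formatParameters();
    return {
      ...params,
      maxOutputTokens: Math.min(params.maxOutputTokens ?? 256, 512),
    };
  }
}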

package/dist/llms/googlevertexai/common.js

@@ -1,4 +1,5 @@
 import { BaseLLM } from "../base.js";
+import { GenerationChunk } from "../../schema/index.js";
 /**
  * Base class for Google Vertex AI LLMs.
  * Implemented subclasses must provide a GoogleVertexAILLMConnection
@@ -54,6 +55,12 @@ export class BaseGoogleVertexAI extends BaseLLM {
             writable: true,
             value: void 0
         });
+        Object.defineProperty(this, "streamedConnection", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
         this.model = fields?.model ?? this.model;
         // Change the defaults for code models
         if (this.model.startsWith("code-gecko")) {
@@ -70,26 +77,37 @@ export class BaseGoogleVertexAI extends BaseLLM {
     _llmType() {
         return "vertexai";
     }
+    async *_streamResponseChunks(_input, _options, _runManager) {
+        // Make the call as a streaming request
+        const instance = this.formatInstance(_input);
+        const parameters = this.formatParameters();
+        const result = await this.streamedConnection.request([instance], parameters, _options);
+        // Get the streaming parser of the response
+        const stream = result.data;
+        // Loop until the end of the stream
+        // During the loop, yield each time we get a chunk from the streaming parser
+        // that is either available or added to the queue
+        while (!stream.streamDone) {
+            const output = await stream.nextChunk();
+            const chunk = output !== null
+                ? new GenerationChunk(this.extractGenerationFromPrediction(output.outputs[0]))
+                : new GenerationChunk({
+                    text: "",
+                    generationInfo: { finishReason: "stop" },
+                });
+            yield chunk;
+        }
+    }
     async _generate(prompts, options) {
         const generations = await Promise.all(prompts.map((prompt) => this._generatePrompt(prompt, options)));
         return { generations };
     }
     async _generatePrompt(prompt, options) {
         const instance = this.formatInstance(prompt);
-        const parameters = {
-            temperature: this.temperature,
-            topK: this.topK,
-            topP: this.topP,
-            maxOutputTokens: this.maxOutputTokens,
-        };
+        const parameters = this.formatParameters();
         const result = await this.connection.request([instance], parameters, options);
         const prediction = this.extractPredictionFromResponse(result);
-        return [
-            {
-                text: prediction.content,
-                generationInfo: prediction,
-            },
-        ];
+        return [this.extractGenerationFromPrediction(prediction)];
     }
     /**
      * Formats the input instance as a text instance for the Google Vertex AI
@@ -120,12 +138,27 @@ export class BaseGoogleVertexAI extends BaseLLM {
             ? this.formatInstanceCode(prompt)
             : this.formatInstanceText(prompt);
     }
+    formatParameters() {
+        return {
+            temperature: this.temperature,
+            topK: this.topK,
+            topP: this.topP,
+            maxOutputTokens: this.maxOutputTokens,
+        };
+    }
     /**
      * Extracts the prediction from the API response.
      * @param result The API response from which to extract the prediction.
      * @returns A TextPrediction object representing the extracted prediction.
      */
     extractPredictionFromResponse(result) {
-        return result?.data?.predictions[0];
+        return result?.data
+            ?.predictions[0];
+    }
+    extractGenerationFromPrediction(prediction) {
+        return {
+            text: prediction.content,
+            generationInfo: prediction,
+        };
     }
 }

package/dist/llms/googlevertexai/index.cjs

@@ -1,9 +1,9 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.GoogleVertexAI = void 0;
-const google_auth_library_1 = require("google-auth-library");
 const googlevertexai_connection_js_1 = require("../../util/googlevertexai-connection.cjs");
 const common_js_1 = require("./common.cjs");
+const googlevertexai_gauth_js_1 = require("../../util/googlevertexai-gauth.cjs");
 /**
  * Enables calls to the Google Cloud's Vertex AI API to access
  * Large Language Models.
@@ -24,11 +24,12 @@ class GoogleVertexAI extends common_js_1.BaseGoogleVertexAI {
     }
     constructor(fields) {
         super(fields);
-        const client = new google_auth_library_1.GoogleAuth({
+        const client = new googlevertexai_gauth_js_1.GAuthClient({
             scopes: "https://www.googleapis.com/auth/cloud-platform",
             ...fields?.authOptions,
         });
-        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }
 exports.GoogleVertexAI = GoogleVertexAI;

package/dist/llms/googlevertexai/index.js

@@ -1,6 +1,6 @@
-import { GoogleAuth } from "google-auth-library";
 import { GoogleVertexAILLMConnection } from "../../util/googlevertexai-connection.js";
 import { BaseGoogleVertexAI } from "./common.js";
+import { GAuthClient } from "../../util/googlevertexai-gauth.js";
 /**
  * Enables calls to the Google Cloud's Vertex AI API to access
  * Large Language Models.
@@ -21,10 +21,11 @@ export class GoogleVertexAI extends BaseGoogleVertexAI {
     }
     constructor(fields) {
         super(fields);
-        const client = new GoogleAuth({
+        const client = new GAuthClient({
             scopes: "https://www.googleapis.com/auth/cloud-platform",
             ...fields?.authOptions,
         });
-        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }
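
In both Node entry points, GAuthClient (from the new googlevertexai-gauth util listed in the file summary) replaces the direct GoogleAuth dependency while still spreading fields?.authOptions into the auth options, so the usual google-auth-library settings should continue to pass through. An illustrative construction with a hypothetical key file path:

import { GoogleVertexAI } from "langchain/llms/googlevertexai";

async function run() {
  // Assumption: authOptions is forwarded to the wrapped GoogleAuth client, so
  // standard options such as keyFilename or credentials still work.
  const model = new GoogleVertexAI({
    model: "text-bison",
    authOptions: { keyFilename: "/path/to/service-account.json" },
  });
  const text = await model.call("Summarize the benefits of streaming responses.");
  console.log(text);
}

run().catch(console.error);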

package/dist/llms/googlevertexai/web.cjs

@@ -25,7 +25,8 @@ class GoogleVertexAI extends common_js_1.BaseGoogleVertexAI {
     constructor(fields) {
         super(fields);
         const client = new googlevertexai_webauth_js_1.WebGoogleAuth(fields?.authOptions);
-        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new googlevertexai_connection_js_1.GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }
 exports.GoogleVertexAI = GoogleVertexAI;

package/dist/llms/googlevertexai/web.js

@@ -22,6 +22,7 @@ export class GoogleVertexAI extends BaseGoogleVertexAI {
     constructor(fields) {
         super(fields);
         const client = new WebGoogleAuth(fields?.authOptions);
-        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client);
+        this.connection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, false);
+        this.streamedConnection = new GoogleVertexAILLMConnection({ ...fields, ...this }, this.caller, client, true);
     }
 }

package/dist/types/googlevertexai-types.d.ts

@@ -45,19 +45,21 @@ export interface GoogleVertexAIBaseLLMInput<AuthOptions> extends BaseLLMParams,
 export interface GoogleResponse {
     data: any;
 }
-export interface GoogleVertexAIBasePrediction extends GoogleResponse {
+export interface GoogleVertexAIBasePrediction {
     safetyAttributes?: any;
 }
-export interface GoogleVertexAILLMResponse<PredictionType extends GoogleVertexAIBasePrediction> {
-    data: {
-        predictions: PredictionType[];
-    };
+export interface GoogleVertexAILLMPredictions<PredictionType extends GoogleVertexAIBasePrediction> {
+    predictions: PredictionType[];
 }
+export type GoogleAbstractedClientOpsMethod = "GET" | "POST";
+export type GoogleAbstractedClientOpsResponseType = "json" | "stream";
+export type GoogleAbstractedClientOps = {
+    url?: string;
+    method?: GoogleAbstractedClientOpsMethod;
+    data?: unknown;
+    responseType?: GoogleAbstractedClientOpsResponseType;
+};
 export interface GoogleAbstractedClient {
-    request: (opts: {
-        url?: string;
-        method?: "GET" | "POST";
-        data?: unknown;
-    }) => unknown;
+    request: (opts: GoogleAbstractedClientOps) => unknown;
     getProjectId: () => Promise<string>;
 }
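
The request options are now factored out into GoogleAbstractedClientOps, with responseType deciding whether the client returns a parsed JSON body or a raw stream. A minimal sketch of a custom client against that contract; the fetch-based transport and the gaxios-style { data } return shape are assumptions for illustration, and the package's own GAuthClient and WebGoogleAuth remain the real implementations:

// Types inlined from the declarations above; the in-package import path may differ.
type GoogleAbstractedClientOps = {
  url?: string;
  method?: "GET" | "POST";
  data?: unknown;
  responseType?: "json" | "stream";
};
interface GoogleAbstractedClient {
  request: (opts: GoogleAbstractedClientOps) => unknown;
  getProjectId: () => Promise<string>;
}

// Sketch: a fetch-based client that already holds a bearer token and project id.
class FetchGoogleClient implements GoogleAbstractedClient {
  constructor(private token: string, private projectId: string) {}

  async getProjectId(): Promise<string> {
    return this.projectId;
  }

  async request(opts: GoogleAbstractedClientOps): Promise<unknown> {
    const res = await fetch(opts.url ?? "", {
      method: opts.method ?? "POST",
      headers: {
        Authorization: `Bearer ${this.token}`,
        "Content-Type": "application/json",
      },
      body: opts.data !== undefined ? JSON.stringify(opts.data) : undefined,
    });
    // "stream" leaves the body unparsed so the caller can consume it incrementally;
    // "json" (the default here) returns the parsed predict response.
    return opts.responseType === "stream"
      ? { data: res.body }
      : { data: await res.json() };
  }
}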