@langchain/google-common 0.0.0 → 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -27,14 +27,14 @@ file storage.
  ## Google services supported
 
  * Gemini model through LLM and Chat classes (both through Google AI Studio and
- Google Cloud Vertex AI)
+ Google Cloud Vertex AI). Including:
+ * Function/Tool support
 
 
  ## TODO
 
  Tasks and services still to be implemented:
 
- * Functions for Gemini
  * PaLM Vertex AI support and backwards compatibility
  * PaLM MakerSuite support and backwards compatibility
  * Semantic Retrieval / AQA model
@@ -43,5 +43,10 @@ Tasks and services still to be implemented:
  * Multimodal embeddings
  * Vertex AI Search
  * Vertex AI Model Garden
+ * Online prediction endpoints
+ * Gemma
+ * Google managed models
+ * Claude
+ * AI Studio Tuned Models
  * MakerSuite / Google Drive Hub
  * Google Cloud Vector Store
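
The new "Function/Tool support" bullet corresponds to the tool-binding and `withStructuredOutput` changes in the code diffs below. As a rough, hypothetical sketch (the concrete `ChatGoogle` class and the `@langchain/google-gauth` package are assumptions, not part of this diff), a Gemini-format tool can be bound through the new call options:

```typescript
// Hypothetical usage sketch: binding a Gemini-format tool to a concrete
// subclass of ChatGoogleBase. The tool shape (functionDeclarations with
// name/description/parameters) matches the GeminiTool structure that the new
// formatTools() passes through unchanged in this release.
import { ChatGoogle } from "@langchain/google-gauth"; // assumed concrete class

const model = new ChatGoogle({ modelName: "gemini-pro" });

const weatherTool = {
  functionDeclarations: [
    {
      name: "get_weather",
      description: "Look up the current weather for a city.",
      parameters: {
        type: "object",
        properties: {
          city: { type: "string", description: "City name" },
        },
        required: ["city"],
      },
    },
  ],
};

// `tools` is part of the new call options, so it can be bound ahead of time;
// the bound model will include the declarations in each Gemini request.
const modelWithTools = model.bind({ tools: [weatherTool] });
const result = await modelWithTools.invoke("What is the weather in Paris?");
```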
@@ -5,10 +5,14 @@ const env_1 = require("@langchain/core/utils/env");
  const chat_models_1 = require("@langchain/core/language_models/chat_models");
  const outputs_1 = require("@langchain/core/outputs");
  const messages_1 = require("@langchain/core/messages");
+ const runnables_1 = require("@langchain/core/runnables");
+ const openai_tools_1 = require("@langchain/core/output_parsers/openai_tools");
  const common_js_1 = require("./utils/common.cjs");
  const connection_js_1 = require("./connection.cjs");
  const gemini_js_1 = require("./utils/gemini.cjs");
  const auth_js_1 = require("./auth.cjs");
+ const failed_handler_js_1 = require("./utils/failed_handler.cjs");
+ const zod_to_gemini_parameters_js_1 = require("./utils/zod_to_gemini_parameters.cjs");
  class ChatConnection extends connection_js_1.AbstractGoogleLLMConnection {
  formatContents(input, _parameters) {
  return input
@@ -25,19 +29,26 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  return "ChatGoogle";
  }
  constructor(fields) {
- super(fields ?? {});
+ super((0, failed_handler_js_1.ensureParams)(fields));
  Object.defineProperty(this, "lc_serializable", {
  enumerable: true,
  configurable: true,
  writable: true,
  value: true
  });
+ /** @deprecated Prefer `modelName` */
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
  writable: true,
  value: "gemini-pro"
  });
+ Object.defineProperty(this, "modelName", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "gemini-pro"
+ });
  Object.defineProperty(this, "temperature", {
  enumerable: true,
  configurable: true,
@@ -74,6 +85,12 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  writable: true,
  value: []
  });
+ Object.defineProperty(this, "safetyHandler", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  Object.defineProperty(this, "connection", {
  enumerable: true,
  configurable: true,
@@ -87,6 +104,8 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  value: void 0
  });
  (0, common_js_1.copyAndValidateModelParamsInto)(fields, this);
+ this.safetyHandler =
+ fields?.safetyHandler ?? new gemini_js_1.DefaultGeminiSafetyHandler();
  const client = this.buildClient(fields);
  this.buildConnection(fields ?? {}, client);
  }
@@ -117,15 +136,15 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  return "chat_integration";
  }
  async _generate(messages, options, _runManager) {
- const parameters = (0, common_js_1.copyAIModelParams)(this);
+ const parameters = (0, common_js_1.copyAIModelParams)(this, options);
  const response = await this.connection.request(messages, parameters, options);
- const ret = (0, gemini_js_1.responseToChatResult)(response);
+ const ret = (0, gemini_js_1.safeResponseToChatResult)(response, this.safetyHandler);
  return ret;
  }
- async *_streamResponseChunks(_messages, _options, _runManager) {
+ async *_streamResponseChunks(_messages, options, _runManager) {
  // Make the call as a streaming request
- const parameters = (0, common_js_1.copyAIModelParams)(this);
- const response = await this.streamedConnection.request(_messages, parameters, _options);
+ const parameters = (0, common_js_1.copyAIModelParams)(this, options);
+ const response = await this.streamedConnection.request(_messages, parameters, options);
  // Get the streaming parser of the response
  const stream = response.data;
  // Loop until the end of the stream
@@ -134,7 +153,7 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  while (!stream.streamDone) {
  const output = await stream.nextChunk();
  const chunk = output !== null
- ? (0, gemini_js_1.responseToChatGeneration)({ data: output })
+ ? (0, gemini_js_1.safeResponseToChatGeneration)({ data: output }, this.safetyHandler)
  : new outputs_1.ChatGenerationChunk({
  text: "",
  generationInfo: { finishReason: "stop" },
@@ -149,5 +168,94 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  _combineLLMOutput() {
  return [];
  }
+ withStructuredOutput(outputSchema, config) {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const schema = outputSchema;
+ const name = config?.name;
+ const method = config?.method;
+ const includeRaw = config?.includeRaw;
+ if (method === "jsonMode") {
+ throw new Error(`Google only supports "functionCalling" as a method.`);
+ }
+ let functionName = name ?? "extract";
+ let outputParser;
+ let tools;
+ if (isZodSchema(schema)) {
+ const jsonSchema = (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(schema);
+ tools = [
+ {
+ functionDeclarations: [
+ {
+ name: functionName,
+ description: jsonSchema.description ?? "A function available to call.",
+ parameters: jsonSchema,
+ },
+ ],
+ },
+ ];
+ outputParser = new openai_tools_1.JsonOutputKeyToolsParser({
+ returnSingle: true,
+ keyName: functionName,
+ zodSchema: schema,
+ });
+ }
+ else {
+ let geminiFunctionDefinition;
+ if (typeof schema.name === "string" &&
+ typeof schema.parameters === "object" &&
+ schema.parameters != null) {
+ geminiFunctionDefinition = schema;
+ functionName = schema.name;
+ }
+ else {
+ geminiFunctionDefinition = {
+ name: functionName,
+ description: schema.description ?? "",
+ parameters: schema,
+ };
+ }
+ tools = [
+ {
+ functionDeclarations: [geminiFunctionDefinition],
+ },
+ ];
+ outputParser = new openai_tools_1.JsonOutputKeyToolsParser({
+ returnSingle: true,
+ keyName: functionName,
+ });
+ }
+ const llm = this.bind({
+ tools,
+ });
+ if (!includeRaw) {
+ return llm.pipe(outputParser).withConfig({
+ runName: "ChatGoogleStructuredOutput",
+ });
+ }
+ const parserAssign = runnables_1.RunnablePassthrough.assign({
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ parsed: (input, config) => outputParser.invoke(input.raw, config),
+ });
+ const parserNone = runnables_1.RunnablePassthrough.assign({
+ parsed: () => null,
+ });
+ const parsedWithFallback = parserAssign.withFallbacks({
+ fallbacks: [parserNone],
+ });
+ return runnables_1.RunnableSequence.from([
+ {
+ raw: llm,
+ },
+ parsedWithFallback,
+ ]).withConfig({
+ runName: "StructuredOutputRunnable",
+ });
+ }
  }
  exports.ChatGoogleBase = ChatGoogleBase;
+ function isZodSchema(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ input) {
+ // Check for a characteristic method of Zod schemas
+ return typeof input?.parse === "function";
+ }
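
A hedged usage sketch of the `withStructuredOutput()` method added above, following its Zod branch (`zodToGeminiParameters` plus `JsonOutputKeyToolsParser`). The concrete `ChatGoogle` class and its package name are assumptions for illustration only:

```typescript
// Hypothetical sketch: the Zod schema is converted to Gemini function
// parameters, exposed as a single function declaration, and the model's tool
// call is parsed (and validated) back into a plain object.
import { z } from "zod";
import { ChatGoogle } from "@langchain/google-gauth"; // assumed concrete class

const person = z.object({
  name: z.string().describe("The person's name"),
  age: z.number().describe("The person's age in years"),
});

const model = new ChatGoogle({ modelName: "gemini-pro" });
// `name` overrides the default "extract" function name.
const extractor = model.withStructuredOutput(person, { name: "person" });

const result = await extractor.invoke("Anna is a 28 year old engineer.");
// e.g. { name: "Anna", age: 28 }
```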
@@ -1,33 +1,38 @@
  import { type BaseMessage } from "@langchain/core/messages";
- import { type BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
  import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
  import { BaseChatModel, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
  import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
- import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent } from "./types.js";
+ import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
+ import type { z } from "zod";
+ import { Runnable } from "@langchain/core/runnables";
+ import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent, GoogleAIBaseLanguageModelCallOptions } from "./types.js";
  import { AbstractGoogleLLMConnection } from "./connection.js";
  import { GoogleAbstractedClient } from "./auth.js";
- import { GoogleBaseLLMInput } from "./llms.js";
+ import type { GoogleBaseLLMInput, GoogleAISafetyHandler, GoogleAISafetyParams } from "./types.js";
  declare class ChatConnection<AuthOptions> extends AbstractGoogleLLMConnection<BaseMessage[], AuthOptions> {
  formatContents(input: BaseMessage[], _parameters: GoogleAIModelParams): GeminiContent[];
  }
  /**
  * Input to chat model class.
  */
- export interface ChatGoogleBaseInput<AuthOptions> extends BaseChatModelParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams {
+ export interface ChatGoogleBaseInput<AuthOptions> extends BaseChatModelParams, GoogleConnectionParams<AuthOptions>, GoogleAIModelParams, GoogleAISafetyParams {
  }
  /**
  * Integration with a chat model.
  */
- export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<BaseLanguageModelCallOptions> implements ChatGoogleBaseInput<AuthOptions> {
+ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<GoogleAIBaseLanguageModelCallOptions> implements ChatGoogleBaseInput<AuthOptions> {
  static lc_name(): string;
  lc_serializable: boolean;
+ /** @deprecated Prefer `modelName` */
  model: string;
+ modelName: string;
  temperature: number;
  maxOutputTokens: number;
  topP: number;
  topK: number;
  stopSequences: string[];
  safetySettings: GoogleAISafetySetting[];
+ safetyHandler: GoogleAISafetyHandler;
  protected connection: ChatConnection<AuthOptions>;
  protected streamedConnection: ChatConnection<AuthOptions>;
  constructor(fields?: ChatGoogleBaseInput<AuthOptions>);
@@ -39,8 +44,13 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
  get platform(): GooglePlatformType;
  _llmType(): string;
  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
- _streamResponseChunks(_messages: BaseMessage[], _options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+ _streamResponseChunks(_messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  /** @ignore */
  _combineLLMOutput(): never[];
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
+ withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: z.ZodType<RunOutput> | Record<string, any>, config?: StructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
+ raw: BaseMessage;
+ parsed: RunOutput;
+ }>;
  }
  export {};
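
The declarations above add two constructor-level options: a `modelName` field that supersedes the deprecated `model`, and a `safetyHandler` (defaulting to `DefaultGeminiSafetyHandler`). A minimal, hypothetical sketch; the concrete `ChatGoogle` class and the root re-export of `DefaultGeminiSafetyHandler` are assumptions, since the diff itself imports the handler from "./utils/gemini.js":

```typescript
// Hypothetical sketch of the new ChatGoogleBaseInput fields.
import { DefaultGeminiSafetyHandler } from "@langchain/google-common"; // assumed export path
import { ChatGoogle } from "@langchain/google-gauth"; // assumed concrete class

const model = new ChatGoogle({
  modelName: "gemini-pro", // preferred over the deprecated `model` field
  safetyHandler: new DefaultGeminiSafetyHandler(), // also the default when omitted
});
```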
@@ -2,10 +2,14 @@ import { getEnvironmentVariable } from "@langchain/core/utils/env";
  import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
  import { ChatGenerationChunk } from "@langchain/core/outputs";
  import { AIMessageChunk } from "@langchain/core/messages";
+ import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
+ import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
  import { copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";
  import { AbstractGoogleLLMConnection } from "./connection.js";
- import { baseMessageToContent, responseToChatGeneration, responseToChatResult, } from "./utils/gemini.js";
+ import { baseMessageToContent, safeResponseToChatGeneration, safeResponseToChatResult, DefaultGeminiSafetyHandler, } from "./utils/gemini.js";
  import { ApiKeyGoogleAuth } from "./auth.js";
+ import { ensureParams } from "./utils/failed_handler.js";
+ import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
  class ChatConnection extends AbstractGoogleLLMConnection {
  formatContents(input, _parameters) {
  return input
@@ -22,19 +26,26 @@ export class ChatGoogleBase extends BaseChatModel {
  return "ChatGoogle";
  }
  constructor(fields) {
- super(fields ?? {});
+ super(ensureParams(fields));
  Object.defineProperty(this, "lc_serializable", {
  enumerable: true,
  configurable: true,
  writable: true,
  value: true
  });
+ /** @deprecated Prefer `modelName` */
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
  writable: true,
  value: "gemini-pro"
  });
+ Object.defineProperty(this, "modelName", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "gemini-pro"
+ });
  Object.defineProperty(this, "temperature", {
  enumerable: true,
  configurable: true,
@@ -71,6 +82,12 @@ export class ChatGoogleBase extends BaseChatModel {
  writable: true,
  value: []
  });
+ Object.defineProperty(this, "safetyHandler", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  Object.defineProperty(this, "connection", {
  enumerable: true,
  configurable: true,
@@ -84,6 +101,8 @@ export class ChatGoogleBase extends BaseChatModel {
  value: void 0
  });
  copyAndValidateModelParamsInto(fields, this);
+ this.safetyHandler =
+ fields?.safetyHandler ?? new DefaultGeminiSafetyHandler();
  const client = this.buildClient(fields);
  this.buildConnection(fields ?? {}, client);
  }
@@ -114,15 +133,15 @@ export class ChatGoogleBase extends BaseChatModel {
  return "chat_integration";
  }
  async _generate(messages, options, _runManager) {
- const parameters = copyAIModelParams(this);
+ const parameters = copyAIModelParams(this, options);
  const response = await this.connection.request(messages, parameters, options);
- const ret = responseToChatResult(response);
+ const ret = safeResponseToChatResult(response, this.safetyHandler);
  return ret;
  }
- async *_streamResponseChunks(_messages, _options, _runManager) {
+ async *_streamResponseChunks(_messages, options, _runManager) {
  // Make the call as a streaming request
- const parameters = copyAIModelParams(this);
- const response = await this.streamedConnection.request(_messages, parameters, _options);
+ const parameters = copyAIModelParams(this, options);
+ const response = await this.streamedConnection.request(_messages, parameters, options);
  // Get the streaming parser of the response
  const stream = response.data;
  // Loop until the end of the stream
@@ -131,7 +150,7 @@ export class ChatGoogleBase extends BaseChatModel {
  while (!stream.streamDone) {
  const output = await stream.nextChunk();
  const chunk = output !== null
- ? responseToChatGeneration({ data: output })
+ ? safeResponseToChatGeneration({ data: output }, this.safetyHandler)
  : new ChatGenerationChunk({
  text: "",
  generationInfo: { finishReason: "stop" },
@@ -146,4 +165,93 @@ export class ChatGoogleBase extends BaseChatModel {
  _combineLLMOutput() {
  return [];
  }
+ withStructuredOutput(outputSchema, config) {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const schema = outputSchema;
+ const name = config?.name;
+ const method = config?.method;
+ const includeRaw = config?.includeRaw;
+ if (method === "jsonMode") {
+ throw new Error(`Google only supports "functionCalling" as a method.`);
+ }
+ let functionName = name ?? "extract";
+ let outputParser;
+ let tools;
+ if (isZodSchema(schema)) {
+ const jsonSchema = zodToGeminiParameters(schema);
+ tools = [
+ {
+ functionDeclarations: [
+ {
+ name: functionName,
+ description: jsonSchema.description ?? "A function available to call.",
+ parameters: jsonSchema,
+ },
+ ],
+ },
+ ];
+ outputParser = new JsonOutputKeyToolsParser({
+ returnSingle: true,
+ keyName: functionName,
+ zodSchema: schema,
+ });
+ }
+ else {
+ let geminiFunctionDefinition;
+ if (typeof schema.name === "string" &&
+ typeof schema.parameters === "object" &&
+ schema.parameters != null) {
+ geminiFunctionDefinition = schema;
+ functionName = schema.name;
+ }
+ else {
+ geminiFunctionDefinition = {
+ name: functionName,
+ description: schema.description ?? "",
+ parameters: schema,
+ };
+ }
+ tools = [
+ {
+ functionDeclarations: [geminiFunctionDefinition],
+ },
+ ];
+ outputParser = new JsonOutputKeyToolsParser({
+ returnSingle: true,
+ keyName: functionName,
+ });
+ }
+ const llm = this.bind({
+ tools,
+ });
+ if (!includeRaw) {
+ return llm.pipe(outputParser).withConfig({
+ runName: "ChatGoogleStructuredOutput",
+ });
+ }
+ const parserAssign = RunnablePassthrough.assign({
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ parsed: (input, config) => outputParser.invoke(input.raw, config),
+ });
+ const parserNone = RunnablePassthrough.assign({
+ parsed: () => null,
+ });
+ const parsedWithFallback = parserAssign.withFallbacks({
+ fallbacks: [parserNone],
+ });
+ return RunnableSequence.from([
+ {
+ raw: llm,
+ },
+ parsedWithFallback,
+ ]).withConfig({
+ runName: "StructuredOutputRunnable",
+ });
+ }
+ }
+ function isZodSchema(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ input) {
+ // Check for a characteristic method of Zod schemas
+ return typeof input?.parse === "function";
  }
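
The tail of `withStructuredOutput()` above handles `includeRaw`: a `RunnableSequence` returns both the raw message and the parsed value, with a fallback that yields `parsed: null` when parsing fails. A hypothetical usage sketch (the `ChatGoogle` class and its package are assumptions):

```typescript
// Hypothetical sketch of the includeRaw: true output shape declared in the
// .d.ts above: { raw: BaseMessage; parsed: RunOutput }.
import { z } from "zod";
import { ChatGoogle } from "@langchain/google-gauth"; // assumed concrete class

const answer = z.object({ answer: z.string(), confidence: z.number() });

const model = new ChatGoogle({ modelName: "gemini-pro" });
const chain = model.withStructuredOutput(answer, { includeRaw: true });

const { raw, parsed } = await chain.invoke("How many planets orbit the sun?");
// `raw` is the unparsed model message; `parsed` matches the schema, or is
// null if the fallback branch fired because parsing failed.
```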
@@ -2,6 +2,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.AbstractGoogleLLMConnection = exports.GoogleAIConnection = exports.GoogleHostConnection = exports.GoogleConnection = void 0;
  const env_1 = require("@langchain/core/utils/env");
+ const zod_to_gemini_parameters_js_1 = require("./utils/zod_to_gemini_parameters.cjs");
  class GoogleConnection {
  constructor(caller, client, streaming) {
  Object.defineProperty(this, "caller", {
@@ -123,12 +124,19 @@ exports.GoogleHostConnection = GoogleHostConnection;
  class GoogleAIConnection extends GoogleHostConnection {
  constructor(fields, caller, client, streaming) {
  super(fields, caller, client, streaming);
+ /** @deprecated Prefer `modelName` */
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
  writable: true,
  value: void 0
  });
+ Object.defineProperty(this, "modelName", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
  Object.defineProperty(this, "client", {
  enumerable: true,
  configurable: true,
@@ -136,10 +144,10 @@ class GoogleAIConnection extends GoogleHostConnection {
  value: void 0
  });
  this.client = client;
- this.model = fields?.model ?? this.model;
+ this.modelName = fields?.modelName ?? fields?.model ?? this.modelName;
  }
  get modelFamily() {
- if (this.model.startsWith("gemini")) {
+ if (this.modelName.startsWith("gemini")) {
  return "gemini";
  }
  else {
@@ -156,13 +164,13 @@ class GoogleAIConnection extends GoogleHostConnection {
  }
  async buildUrlGenerativeLanguage() {
  const method = await this.buildUrlMethod();
- const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.model}:${method}`;
+ const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.modelName}:${method}`;
  return url;
  }
  async buildUrlVertex() {
  const projectId = await this.client.getProjectId();
  const method = await this.buildUrlMethod();
- const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.model}:${method}`;
+ const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.modelName}:${method}`;
  return url;
  }
  async buildUrl() {
@@ -205,6 +213,38 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
  formatSafetySettings(_input, parameters) {
  return parameters.safetySettings ?? [];
  }
+ // Borrowed from the OpenAI invocation params test
+ isStructuredToolArray(tools) {
+ return (tools !== undefined &&
+ tools.every((tool) => Array.isArray(tool.lc_namespace)));
+ }
+ structuredToolToFunctionDeclaration(tool) {
+ const jsonSchema = (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(tool.schema);
+ return {
+ name: tool.name,
+ description: tool.description,
+ parameters: jsonSchema,
+ };
+ }
+ structuredToolsToGeminiTools(tools) {
+ return [
+ {
+ functionDeclarations: tools.map(this.structuredToolToFunctionDeclaration),
+ },
+ ];
+ }
+ formatTools(_input, parameters) {
+ const tools = parameters?.tools;
+ if (!tools || tools.length === 0) {
+ return [];
+ }
+ if (this.isStructuredToolArray(tools)) {
+ return this.structuredToolsToGeminiTools(tools);
+ }
+ else {
+ return tools;
+ }
+ }
  formatData(input, parameters) {
  /*
  const parts = messageContentToParts(input);
@@ -217,11 +257,15 @@ class AbstractGoogleLLMConnection extends GoogleAIConnection {
  */
  const contents = this.formatContents(input, parameters);
  const generationConfig = this.formatGenerationConfig(input, parameters);
+ const tools = this.formatTools(input, parameters);
  const safetySettings = this.formatSafetySettings(input, parameters);
  const ret = {
  contents,
  generationConfig,
  };
+ if (tools && tools.length) {
+ ret.tools = tools;
+ }
  if (safetySettings && safetySettings.length) {
  ret.safetySettings = safetySettings;
  }
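
`formatTools()` above accepts either plain Gemini tool objects (passed through unchanged) or LangChain structured tools, which `structuredToolsToGeminiTools()` converts to `functionDeclarations` via `zodToGeminiParameters()`. A hedged sketch of the structured-tool path; the `ChatGoogle` class and its package are assumptions:

```typescript
// Hypothetical sketch: a DynamicStructuredTool with a Zod schema is bound as a
// call option, detected by isStructuredToolArray() (via lc_namespace), and
// converted into a Gemini function declaration before being sent.
import { z } from "zod";
import { DynamicStructuredTool } from "@langchain/core/tools";
import { ChatGoogle } from "@langchain/google-gauth"; // assumed concrete class

const calculatorTool = new DynamicStructuredTool({
  name: "calculator",
  description: "Adds two numbers together.",
  schema: z.object({ a: z.number(), b: z.number() }),
  func: async ({ a, b }) => String(a + b),
});

const model = new ChatGoogle({ modelName: "gemini-pro" });
// formatTools() turns this StructuredTool into a functionDeclaration that is
// placed on the request body's `tools` field.
const result = await model
  .bind({ tools: [calculatorTool] })
  .invoke("What is 1173 plus 4256?");
```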
@@ -1,6 +1,7 @@
  import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
  import { AsyncCaller, AsyncCallerCallOptions } from "@langchain/core/utils/async_caller";
- import type { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleConnectionParams, GoogleLLMModelFamily, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GeminiContent, GeminiGenerationConfig, GeminiRequest, GeminiSafetySetting } from "./types.js";
+ import { StructuredToolInterface } from "@langchain/core/tools";
+ import type { GoogleAIBaseLLMInput, GoogleConnectionParams, GoogleLLMModelFamily, GooglePlatformType, GoogleResponse, GoogleLLMResponse, GeminiContent, GeminiGenerationConfig, GeminiRequest, GeminiSafetySetting, GeminiTool, GeminiFunctionDeclaration, GoogleAIModelRequestParams } from "./types.js";
  import { GoogleAbstractedClient, GoogleAbstractedClientOpsMethod } from "./auth.js";
  export declare abstract class GoogleConnection<CallOptions extends AsyncCallerCallOptions, ResponseType extends GoogleResponse> {
  caller: AsyncCaller;
@@ -25,7 +26,9 @@ export declare abstract class GoogleHostConnection<CallOptions extends AsyncCall
  buildMethod(): GoogleAbstractedClientOpsMethod;
  }
  export declare abstract class GoogleAIConnection<CallOptions extends BaseLanguageModelCallOptions, MessageType, AuthOptions> extends GoogleHostConnection<CallOptions, GoogleLLMResponse, AuthOptions> implements GoogleAIBaseLLMInput<AuthOptions> {
+ /** @deprecated Prefer `modelName` */
  model: string;
+ modelName: string;
  client: GoogleAbstractedClient;
  constructor(fields: GoogleAIBaseLLMInput<AuthOptions> | undefined, caller: AsyncCaller, client: GoogleAbstractedClient, streaming?: boolean);
  get modelFamily(): GoogleLLMModelFamily;
@@ -34,14 +37,18 @@ export declare abstract class GoogleAIConnection<CallOptions extends BaseLanguag
  buildUrlGenerativeLanguage(): Promise<string>;
  buildUrlVertex(): Promise<string>;
  buildUrl(): Promise<string>;
- abstract formatData(input: MessageType, parameters: GoogleAIModelParams): unknown;
- request(input: MessageType, parameters: GoogleAIModelParams, options: CallOptions): Promise<GoogleLLMResponse>;
+ abstract formatData(input: MessageType, parameters: GoogleAIModelRequestParams): unknown;
+ request(input: MessageType, parameters: GoogleAIModelRequestParams, options: CallOptions): Promise<GoogleLLMResponse>;
  }
  export declare abstract class AbstractGoogleLLMConnection<MessageType, AuthOptions> extends GoogleAIConnection<BaseLanguageModelCallOptions, MessageType, AuthOptions> {
  buildUrlMethodGemini(): Promise<string>;
  buildUrlMethod(): Promise<string>;
- abstract formatContents(input: MessageType, parameters: GoogleAIModelParams): GeminiContent[];
- formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelParams): GeminiGenerationConfig;
- formatSafetySettings(_input: MessageType, parameters: GoogleAIModelParams): GeminiSafetySetting[];
- formatData(input: MessageType, parameters: GoogleAIModelParams): GeminiRequest;
+ abstract formatContents(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiContent[];
+ formatGenerationConfig(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiGenerationConfig;
+ formatSafetySettings(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiSafetySetting[];
+ isStructuredToolArray(tools?: unknown[]): tools is StructuredToolInterface[];
+ structuredToolToFunctionDeclaration(tool: StructuredToolInterface): GeminiFunctionDeclaration;
+ structuredToolsToGeminiTools(tools: StructuredToolInterface[]): GeminiTool[];
+ formatTools(_input: MessageType, parameters: GoogleAIModelRequestParams): GeminiTool[];
+ formatData(input: MessageType, parameters: GoogleAIModelRequestParams): GeminiRequest;
  }
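
For orientation, a sketch of the request body that `formatData()` now assembles under these signatures: `contents` and `generationConfig` are always present, while `tools` and `safetySettings` are attached only when non-empty. The literal field values below are illustrative assumptions, not taken from the diff; field names follow the public Gemini REST API, with the exact TypeScript types living in `types.js`.

```typescript
// Illustrative, assumed shape of a GeminiRequest built by formatData().
const request = {
  contents: [{ role: "user", parts: [{ text: "What is 1173 plus 4256?" }] }],
  generationConfig: { temperature: 0.7, maxOutputTokens: 1024 },
  // Added only when formatTools() returns a non-empty array.
  tools: [
    {
      functionDeclarations: [
        {
          name: "calculator",
          description: "Adds two numbers together.",
          parameters: {
            type: "object",
            properties: { a: { type: "number" }, b: { type: "number" } },
            required: ["a", "b"],
          },
        },
      ],
    },
  ],
};
```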