@langchain/google-common 0.0.3 → 0.0.5

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
package/dist/auth.cjs CHANGED
@@ -19,7 +19,8 @@ class GoogleAbstractedFetchClient {
  }
  const res = await fetch(url, fetchOptions);
  if (!res.ok) {
- const error = new Error(`Could not get access token for Google with status code: ${res.status}`);
+ const resText = await res.text();
+ const error = new Error(`Google request failed with status code ${res.status}: ${resText}`);
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  error.response = res;
  throw error;
package/dist/auth.js CHANGED
@@ -16,7 +16,8 @@ export class GoogleAbstractedFetchClient {
  }
  const res = await fetch(url, fetchOptions);
  if (!res.ok) {
- const error = new Error(`Could not get access token for Google with status code: ${res.status}`);
+ const resText = await res.text();
+ const error = new Error(`Google request failed with status code ${res.status}: ${resText}`);
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  error.response = res;
  throw error;
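
Both builds of the auth client now read the response body before throwing, so a failed request made through the abstracted fetch client reports the status code and the body text instead of a generic access-token message. A minimal sketch of what a caller might see, assuming `model` is any chat model or LLM built on this package (the variable is illustrative):

// `model` stands in for any ChatGoogleBase- or GoogleBaseLLM-derived instance (assumed).
try {
  await model.invoke("Hello");
} catch (e) {
  // The message now reads, for example:
  //   "Google request failed with status code 403: <response body text>"
  // and the raw fetch Response is still attached as e.response.
  console.error((e as Error).message);
}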
package/dist/chat_models.cjs CHANGED
@@ -7,6 +7,7 @@ const outputs_1 = require("@langchain/core/outputs");
  const messages_1 = require("@langchain/core/messages");
  const runnables_1 = require("@langchain/core/runnables");
  const openai_tools_1 = require("@langchain/core/output_parsers/openai_tools");
+ const function_calling_1 = require("@langchain/core/utils/function_calling");
  const common_js_1 = require("./utils/common.cjs");
  const connection_js_1 = require("./connection.cjs");
  const gemini_js_1 = require("./utils/gemini.cjs");
@@ -16,10 +17,27 @@ const zod_to_gemini_parameters_js_1 = require("./utils/zod_to_gemini_parameters.
  class ChatConnection extends connection_js_1.AbstractGoogleLLMConnection {
  formatContents(input, _parameters) {
  return input
- .map((msg) => (0, gemini_js_1.baseMessageToContent)(msg))
+ .map((msg, i) => (0, gemini_js_1.baseMessageToContent)(msg, input[i - 1]))
  .reduce((acc, cur) => [...acc, ...cur]);
  }
  }
+ function convertToGeminiTools(structuredTools) {
+ return [
+ {
+ functionDeclarations: structuredTools.map((structuredTool) => {
+ if ((0, function_calling_1.isStructuredTool)(structuredTool)) {
+ const jsonSchema = (0, zod_to_gemini_parameters_js_1.zodToGeminiParameters)(structuredTool.schema);
+ return {
+ name: structuredTool.name,
+ description: structuredTool.description,
+ parameters: jsonSchema,
+ };
+ }
+ return structuredTool;
+ }),
+ },
+ ];
+ }
  /**
  * Integration with a chat model.
  */
@@ -36,7 +54,6 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  writable: true,
  value: true
  });
- /** @deprecated Prefer `modelName` */
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
@@ -131,19 +148,28 @@ class ChatGoogleBase extends chat_models_1.BaseChatModel {
  get platform() {
  return this.connection.platform;
  }
+ bindTools(tools, kwargs) {
+ return this.bind({ tools: convertToGeminiTools(tools), ...kwargs });
+ }
  // Replace
  _llmType() {
  return "chat_integration";
  }
+ /**
+ * Get the parameters used to invoke the model
+ */
+ invocationParams(options) {
+ return (0, common_js_1.copyAIModelParams)(this, options);
+ }
  async _generate(messages, options, _runManager) {
- const parameters = (0, common_js_1.copyAIModelParams)(this, options);
+ const parameters = this.invocationParams(options);
  const response = await this.connection.request(messages, parameters, options);
  const ret = (0, gemini_js_1.safeResponseToChatResult)(response, this.safetyHandler);
  return ret;
  }
  async *_streamResponseChunks(_messages, options, _runManager) {
  // Make the call as a streaming request
- const parameters = (0, common_js_1.copyAIModelParams)(this, options);
+ const parameters = this.invocationParams(options);
  const response = await this.streamedConnection.request(_messages, parameters, options);
  // Get the streaming parser of the response
  const stream = response.data;
package/dist/chat_models.d.ts CHANGED
@@ -2,9 +2,11 @@ import { type BaseMessage } from "@langchain/core/messages";
  import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
  import { BaseChatModel, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
  import { ChatGenerationChunk, ChatResult } from "@langchain/core/outputs";
+ import { AIMessageChunk } from "@langchain/core/messages";
  import { BaseLanguageModelInput, StructuredOutputMethodOptions } from "@langchain/core/language_models/base";
  import type { z } from "zod";
- import { Runnable } from "@langchain/core/runnables";
+ import { Runnable, RunnableInterface } from "@langchain/core/runnables";
+ import { StructuredToolInterface } from "@langchain/core/tools";
  import { GoogleAIBaseLLMInput, GoogleAIModelParams, GoogleAISafetySetting, GoogleConnectionParams, GooglePlatformType, GeminiContent, GoogleAIBaseLanguageModelCallOptions } from "./types.js";
  import { AbstractGoogleLLMConnection } from "./connection.js";
  import { GoogleAbstractedClient } from "./auth.js";
@@ -20,10 +22,9 @@ export interface ChatGoogleBaseInput<AuthOptions> extends BaseChatModelParams, G
  /**
  * Integration with a chat model.
  */
- export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<GoogleAIBaseLanguageModelCallOptions> implements ChatGoogleBaseInput<AuthOptions> {
+ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<GoogleAIBaseLanguageModelCallOptions, AIMessageChunk> implements ChatGoogleBaseInput<AuthOptions> {
  static lc_name(): string;
  lc_serializable: boolean;
- /** @deprecated Prefer `modelName` */
  model: string;
  modelName: string;
  temperature: number;
@@ -42,7 +43,12 @@ export declare abstract class ChatGoogleBase<AuthOptions> extends BaseChatModel<
  buildClient(fields?: GoogleAIBaseLLMInput<AuthOptions>): GoogleAbstractedClient;
  buildConnection(fields: GoogleBaseLLMInput<AuthOptions>, client: GoogleAbstractedClient): void;
  get platform(): GooglePlatformType;
+ bindTools(tools: (StructuredToolInterface | Record<string, unknown>)[], kwargs?: Partial<GoogleAIBaseLanguageModelCallOptions>): RunnableInterface<BaseLanguageModelInput, AIMessageChunk, GoogleAIBaseLanguageModelCallOptions>;
  _llmType(): string;
+ /**
+ * Get the parameters used to invoke the model
+ */
+ invocationParams(options?: this["ParsedCallOptions"]): import("./types.js").GoogleAIModelRequestParams;
  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
  _streamResponseChunks(_messages: BaseMessage[], options: this["ParsedCallOptions"], _runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  /** @ignore */
package/dist/chat_models.js CHANGED
@@ -4,6 +4,7 @@ import { ChatGenerationChunk } from "@langchain/core/outputs";
  import { AIMessageChunk } from "@langchain/core/messages";
  import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
  import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
+ import { isStructuredTool } from "@langchain/core/utils/function_calling";
  import { copyAIModelParams, copyAndValidateModelParamsInto, } from "./utils/common.js";
  import { AbstractGoogleLLMConnection } from "./connection.js";
  import { baseMessageToContent, safeResponseToChatGeneration, safeResponseToChatResult, DefaultGeminiSafetyHandler, } from "./utils/gemini.js";
@@ -13,10 +14,27 @@ import { zodToGeminiParameters } from "./utils/zod_to_gemini_parameters.js";
  class ChatConnection extends AbstractGoogleLLMConnection {
  formatContents(input, _parameters) {
  return input
- .map((msg) => baseMessageToContent(msg))
+ .map((msg, i) => baseMessageToContent(msg, input[i - 1]))
  .reduce((acc, cur) => [...acc, ...cur]);
  }
  }
+ function convertToGeminiTools(structuredTools) {
+ return [
+ {
+ functionDeclarations: structuredTools.map((structuredTool) => {
+ if (isStructuredTool(structuredTool)) {
+ const jsonSchema = zodToGeminiParameters(structuredTool.schema);
+ return {
+ name: structuredTool.name,
+ description: structuredTool.description,
+ parameters: jsonSchema,
+ };
+ }
+ return structuredTool;
+ }),
+ },
+ ];
+ }
  /**
  * Integration with a chat model.
  */
@@ -33,7 +51,6 @@ export class ChatGoogleBase extends BaseChatModel {
  writable: true,
  value: true
  });
- /** @deprecated Prefer `modelName` */
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
@@ -128,19 +145,28 @@ export class ChatGoogleBase extends BaseChatModel {
  get platform() {
  return this.connection.platform;
  }
+ bindTools(tools, kwargs) {
+ return this.bind({ tools: convertToGeminiTools(tools), ...kwargs });
+ }
  // Replace
  _llmType() {
  return "chat_integration";
  }
+ /**
+ * Get the parameters used to invoke the model
+ */
+ invocationParams(options) {
+ return copyAIModelParams(this, options);
+ }
  async _generate(messages, options, _runManager) {
- const parameters = copyAIModelParams(this, options);
+ const parameters = this.invocationParams(options);
  const response = await this.connection.request(messages, parameters, options);
  const ret = safeResponseToChatResult(response, this.safetyHandler);
  return ret;
  }
  async *_streamResponseChunks(_messages, options, _runManager) {
  // Make the call as a streaming request
- const parameters = copyAIModelParams(this, options);
+ const parameters = this.invocationParams(options);
  const response = await this.streamedConnection.request(_messages, parameters, options);
  // Get the streaming parser of the response
  const stream = response.data;
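
The `bindTools` method added here wraps LangChain structured tools into a single Gemini `functionDeclarations` entry via `convertToGeminiTools` and binds the result as call options, while the Gemini utilities now surface parsed `tool_calls` on the returned message. A minimal usage sketch, assuming a concrete downstream subclass of `ChatGoogleBase` (the `ChatVertexAI` import is illustrative and not part of this package):

import { z } from "zod";
import { DynamicStructuredTool } from "@langchain/core/tools";
// ChatVertexAI stands in for any concrete ChatGoogleBase subclass (assumed import).
import { ChatVertexAI } from "@langchain/google-vertexai";

const weatherTool = new DynamicStructuredTool({
  name: "get_weather",
  description: "Look up the current weather for a city",
  schema: z.object({ city: z.string().describe("The city name") }),
  func: async ({ city }) => JSON.stringify({ city, tempC: 21 }),
});

const model = new ChatVertexAI({ model: "gemini-pro" });
// bindTools runs convertToGeminiTools, producing
// [{ functionDeclarations: [{ name, description, parameters }] }] in the call options.
const modelWithTools = model.bindTools([weatherTool]);

const res = await modelWithTools.invoke("What is the weather in Paris?");
// With @langchain/core ~0.1.56, parsed calls surface on res.tool_calls,
// e.g. [{ name: "get_weather", args: { city: "Paris" }, id: "..." }].
console.log(res.tool_calls);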
package/dist/connection.cjs CHANGED
@@ -124,7 +124,6 @@ exports.GoogleHostConnection = GoogleHostConnection;
  class GoogleAIConnection extends GoogleHostConnection {
  constructor(fields, caller, client, streaming) {
  super(fields, caller, client, streaming);
- /** @deprecated Prefer `modelName` */
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
@@ -144,10 +143,11 @@ class GoogleAIConnection extends GoogleHostConnection {
  value: void 0
  });
  this.client = client;
- this.modelName = fields?.modelName ?? fields?.model ?? this.modelName;
+ this.modelName = fields?.model ?? fields?.modelName ?? this.model;
+ this.model = this.modelName;
  }
  get modelFamily() {
- if (this.modelName.startsWith("gemini")) {
+ if (this.model.startsWith("gemini")) {
  return "gemini";
  }
  else {
@@ -164,13 +164,13 @@ class GoogleAIConnection extends GoogleHostConnection {
  }
  async buildUrlGenerativeLanguage() {
  const method = await this.buildUrlMethod();
- const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.modelName}:${method}`;
+ const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.model}:${method}`;
  return url;
  }
  async buildUrlVertex() {
  const projectId = await this.client.getProjectId();
  const method = await this.buildUrlMethod();
- const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.modelName}:${method}`;
+ const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.model}:${method}`;
  return url;
  }
  async buildUrl() {
package/dist/connection.d.ts CHANGED
@@ -26,7 +26,6 @@ export declare abstract class GoogleHostConnection<CallOptions extends AsyncCall
  buildMethod(): GoogleAbstractedClientOpsMethod;
  }
  export declare abstract class GoogleAIConnection<CallOptions extends BaseLanguageModelCallOptions, MessageType, AuthOptions> extends GoogleHostConnection<CallOptions, GoogleLLMResponse, AuthOptions> implements GoogleAIBaseLLMInput<AuthOptions> {
- /** @deprecated Prefer `modelName` */
  model: string;
  modelName: string;
  client: GoogleAbstractedClient;
package/dist/connection.js CHANGED
@@ -119,7 +119,6 @@ export class GoogleHostConnection extends GoogleConnection {
  export class GoogleAIConnection extends GoogleHostConnection {
  constructor(fields, caller, client, streaming) {
  super(fields, caller, client, streaming);
- /** @deprecated Prefer `modelName` */
  Object.defineProperty(this, "model", {
  enumerable: true,
  configurable: true,
@@ -139,10 +138,11 @@ export class GoogleAIConnection extends GoogleHostConnection {
  value: void 0
  });
  this.client = client;
- this.modelName = fields?.modelName ?? fields?.model ?? this.modelName;
+ this.modelName = fields?.model ?? fields?.modelName ?? this.model;
+ this.model = this.modelName;
  }
  get modelFamily() {
- if (this.modelName.startsWith("gemini")) {
+ if (this.model.startsWith("gemini")) {
  return "gemini";
  }
  else {
@@ -159,13 +159,13 @@ export class GoogleAIConnection extends GoogleHostConnection {
  }
  async buildUrlGenerativeLanguage() {
  const method = await this.buildUrlMethod();
- const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.modelName}:${method}`;
+ const url = `https://generativelanguage.googleapis.com/${this.apiVersion}/models/${this.model}:${method}`;
  return url;
  }
  async buildUrlVertex() {
  const projectId = await this.client.getProjectId();
  const method = await this.buildUrlMethod();
- const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.modelName}:${method}`;
+ const url = `https://${this.endpoint}/${this.apiVersion}/projects/${projectId}/locations/${this.location}/publishers/google/models/${this.model}:${method}`;
  return url;
  }
  async buildUrl() {
package/dist/llms.cjs CHANGED
@@ -59,6 +59,12 @@ class GoogleBaseLLM extends llms_1.LLM {
  writable: true,
  value: "gemini-pro"
  });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "gemini-pro"
+ });
  Object.defineProperty(this, "temperature", {
  enumerable: true,
  configurable: true,
package/dist/llms.d.ts CHANGED
@@ -19,6 +19,7 @@ export declare abstract class GoogleBaseLLM<AuthOptions> extends LLM<BaseLanguag
  originalFields?: GoogleBaseLLMInput<AuthOptions>;
  lc_serializable: boolean;
  modelName: string;
+ model: string;
  temperature: number;
  maxOutputTokens: number;
  topP: number;
package/dist/llms.js CHANGED
@@ -56,6 +56,12 @@ export class GoogleBaseLLM extends LLM {
  writable: true,
  value: "gemini-pro"
  });
+ Object.defineProperty(this, "model", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: "gemini-pro"
+ });
  Object.defineProperty(this, "temperature", {
  enumerable: true,
  configurable: true,
package/dist/types.d.ts CHANGED
@@ -38,9 +38,12 @@ export interface GoogleAISafetySetting {
  threshold: string;
  }
  export interface GoogleAIModelParams {
- /** @deprecated Prefer `modelName` */
- model?: string;
  /** Model to use */
+ model?: string;
+ /**
+ * Model to use
+ * Alias for `model`
+ */
  modelName?: string;
  /** Sampling temperature to use */
  temperature?: number;
package/dist/utils/common.cjs CHANGED
@@ -8,7 +8,9 @@ function copyAIModelParams(params, options) {
  exports.copyAIModelParams = copyAIModelParams;
  function copyAIModelParamsInto(params, options, target) {
  const ret = target || {};
- ret.modelName = options?.modelName ?? params?.modelName ?? target.modelName;
+ const model = options?.model ?? params?.model ?? target.model;
+ ret.modelName =
+ model ?? options?.modelName ?? params?.modelName ?? target.modelName;
  ret.temperature =
  options?.temperature ?? params?.temperature ?? target.temperature;
  ret.maxOutputTokens =
@@ -74,6 +76,7 @@ function copyAIModelParamsInto(params, options, target) {
  throw new Error(`Cannot mix structured tools with Gemini tools.\nReceived ${structuredOutputTools.length} structured tools and ${geminiTools.length} Gemini tools.`);
  }
  ret.tools = geminiTools ?? structuredOutputTools;
+ console.log(ret);
  return ret;
  }
  exports.copyAIModelParamsInto = copyAIModelParamsInto;
@@ -91,7 +94,8 @@ function modelToFamily(modelName) {
  exports.modelToFamily = modelToFamily;
  function validateModelParams(params) {
  const testParams = params ?? {};
- switch (modelToFamily(testParams.modelName)) {
+ const model = testParams.model ?? testParams.modelName;
+ switch (modelToFamily(model)) {
  case "gemini":
  return (0, gemini_js_1.validateGeminiParams)(testParams);
  default:
package/dist/utils/common.js CHANGED
@@ -4,7 +4,9 @@ export function copyAIModelParams(params, options) {
  }
  export function copyAIModelParamsInto(params, options, target) {
  const ret = target || {};
- ret.modelName = options?.modelName ?? params?.modelName ?? target.modelName;
+ const model = options?.model ?? params?.model ?? target.model;
+ ret.modelName =
+ model ?? options?.modelName ?? params?.modelName ?? target.modelName;
  ret.temperature =
  options?.temperature ?? params?.temperature ?? target.temperature;
  ret.maxOutputTokens =
@@ -70,6 +72,7 @@ export function copyAIModelParamsInto(params, options, target) {
  throw new Error(`Cannot mix structured tools with Gemini tools.\nReceived ${structuredOutputTools.length} structured tools and ${geminiTools.length} Gemini tools.`);
  }
  ret.tools = geminiTools ?? structuredOutputTools;
+ console.log(ret);
  return ret;
  }
  export function modelToFamily(modelName) {
@@ -85,7 +88,8 @@ export function modelToFamily(modelName) {
  }
  export function validateModelParams(params) {
  const testParams = params ?? {};
- switch (modelToFamily(testParams.modelName)) {
+ const model = testParams.model ?? testParams.modelName;
+ switch (modelToFamily(model)) {
  case "gemini":
  return validateGeminiParams(testParams);
  default:
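
With the updated `copyAIModelParamsInto` and `validateModelParams`, `model` now takes precedence and `modelName` remains a working alias, so either spelling reaches the connection and URL-building code. A rough equivalence sketch (again assuming a concrete downstream subclass; the class name is illustrative):

import { ChatVertexAI } from "@langchain/google-vertexai"; // assumed downstream package

// Both constructions are expected to resolve to the same request model after 0.0.5:
// copyAIModelParamsInto prefers `model` and mirrors it into `modelName`, and
// validateModelParams now checks `model ?? modelName` when picking the model family.
const byModel = new ChatVertexAI({ model: "gemini-pro" });
const byModelName = new ChatVertexAI({ modelName: "gemini-pro" });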
package/dist/utils/gemini.cjs CHANGED
@@ -1,6 +1,7 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.MessageGeminiSafetyHandler = exports.DefaultGeminiSafetyHandler = exports.isModelGemini = exports.validateGeminiParams = exports.safeResponseToChatResult = exports.responseToChatResult = exports.safeResponseToBaseMessage = exports.responseToBaseMessage = exports.partsToBaseMessageFields = exports.responseToBaseMessageFields = exports.responseToChatGenerations = exports.partToChatGeneration = exports.partToMessage = exports.chunkToString = exports.safeResponseToChatGeneration = exports.responseToChatGeneration = exports.safeResponseToGeneration = exports.responseToGeneration = exports.safeResponseToString = exports.responseToString = exports.partToText = exports.responseToParts = exports.responseToGenerateContentResponseData = exports.toolsRawToTools = exports.partsToToolsRaw = exports.partsToMessageContent = exports.baseMessageToContent = exports.messageContentToParts = void 0;
+ exports.MessageGeminiSafetyHandler = exports.DefaultGeminiSafetyHandler = exports.isModelGemini = exports.validateGeminiParams = exports.safeResponseToChatResult = exports.responseToChatResult = exports.safeResponseToBaseMessage = exports.responseToBaseMessage = exports.partsToBaseMessageFields = exports.responseToBaseMessageFields = exports.responseToChatGenerations = exports.partToChatGeneration = exports.partToMessageChunk = exports.chunkToString = exports.safeResponseToChatGeneration = exports.responseToChatGeneration = exports.safeResponseToGeneration = exports.responseToGeneration = exports.safeResponseToString = exports.responseToString = exports.partToText = exports.responseToParts = exports.responseToGenerateContentResponseData = exports.toolsRawToTools = exports.partsToToolsRaw = exports.partsToMessageContent = exports.baseMessageToContent = exports.messageContentToParts = void 0;
+ const uuid_1 = require("uuid");
  const messages_1 = require("@langchain/core/messages");
  const outputs_1 = require("@langchain/core/outputs");
  const safety_js_1 = require("./safety.cjs");
@@ -54,12 +55,20 @@ function messageContentToParts(content) {
  .map((content) => {
  switch (content.type) {
  case "text":
- return messageContentText(content);
+ if ("text" in content) {
+ return messageContentText(content);
+ }
+ break;
  case "image_url":
- return messageContentImageUrl(content);
+ if ("image_url" in content) {
+ // Type guard for MessageContentImageUrl
+ return messageContentImageUrl(content);
+ }
+ break;
  default:
  throw new Error(`Unsupported type received while converting message to message parts`);
  }
+ throw new Error(`Cannot coerce "${content.type}" message part into a string.`);
  })
  .reduce((acc, val) => {
  if (val) {
@@ -99,7 +108,18 @@ function messageKwargsToParts(kwargs) {
  }
  function roleMessageToContent(role, message) {
  const contentParts = messageContentToParts(message.content);
- const toolParts = messageKwargsToParts(message.additional_kwargs);
+ let toolParts;
+ if ((0, messages_1.isAIMessage)(message) && !!message.tool_calls?.length) {
+ toolParts = message.tool_calls.map((toolCall) => ({
+ functionCall: {
+ name: toolCall.name,
+ args: toolCall.args,
+ },
+ }));
+ }
+ else {
+ toolParts = messageKwargsToParts(message.additional_kwargs);
+ }
  const parts = [...contentParts, ...toolParts];
  return [
  {
@@ -114,7 +134,7 @@ function systemMessageToContent(message) {
  ...roleMessageToContent("model", new messages_1.AIMessage("Ok")),
  ];
  }
- function toolMessageToContent(message) {
+ function toolMessageToContent(message, prevMessage) {
  const contentStr = typeof message.content === "string"
  ? message.content
  : message.content.reduce((acc, content) => {
@@ -125,6 +145,11 @@ function toolMessageToContent(message) {
  return acc;
  }
  }, "");
+ // Hacky :(
+ const responseName = ((0, messages_1.isAIMessage)(prevMessage) && !!prevMessage.tool_calls?.length
+ ? prevMessage.tool_calls[0].name
+ : prevMessage.name) ?? message.tool_call_id;
+ console.log(contentStr);
  try {
  const content = JSON.parse(contentStr);
  return [
@@ -133,8 +158,8 @@ function toolMessageToContent(message) {
  parts: [
  {
  functionResponse: {
- name: message.tool_call_id,
- response: content,
+ name: responseName,
+ response: { content },
  },
  },
  ],
@@ -148,10 +173,8 @@ function toolMessageToContent(message) {
  parts: [
  {
  functionResponse: {
- name: message.tool_call_id,
- response: {
- response: contentStr,
- },
+ name: responseName,
+ response: { content: contentStr },
  },
  },
  ],
@@ -159,7 +182,7 @@ function toolMessageToContent(message) {
  ];
  }
  }
- function baseMessageToContent(message) {
+ function baseMessageToContent(message, prevMessage) {
  const type = message._getType();
  switch (type) {
  case "system":
@@ -169,7 +192,10 @@ function baseMessageToContent(message) {
  case "ai":
  return roleMessageToContent("model", message);
  case "tool":
- return toolMessageToContent(message);
+ if (!prevMessage) {
+ throw new Error("Tool messages cannot be the first message passed to the model.");
+ }
+ return toolMessageToContent(message, prevMessage);
  default:
  console.log(`Unsupported message type: ${type}`);
  return [];
@@ -233,7 +259,7 @@ function toolRawToTool(raw) {
  }
  function functionCallPartToToolRaw(part) {
  return {
- id: part?.functionCall?.name ?? "",
+ id: (0, uuid_1.v4)().replace(/-/g, ""),
  type: "function",
  function: {
  name: part.functionCall.name,
@@ -338,7 +364,7 @@ exports.safeResponseToGeneration = safeResponseToGeneration;
  function responseToChatGeneration(response) {
  return new outputs_1.ChatGenerationChunk({
  text: responseToString(response),
- message: partToMessage(responseToParts(response)[0]),
+ message: partToMessageChunk(responseToParts(response)[0]),
  generationInfo: response,
  });
  }
@@ -365,7 +391,7 @@ function chunkToString(chunk) {
  }
  }
  exports.chunkToString = chunkToString;
- function partToMessage(part) {
+ function partToMessageChunk(part) {
  const fields = partsToBaseMessageFields([part]);
  if (typeof fields.content === "string") {
  return new messages_1.AIMessageChunk(fields);
@@ -381,9 +407,9 @@ function partToMessage(part) {
  }
  return new messages_1.AIMessageChunk(fields);
  }
- exports.partToMessage = partToMessage;
+ exports.partToMessageChunk = partToMessageChunk;
  function partToChatGeneration(part) {
- const message = partToMessage(part);
+ const message = partToMessageChunk(part);
  const text = partToText(part);
  return new outputs_1.ChatGenerationChunk({
  text,
@@ -393,7 +419,28 @@ function partToChatGeneration(part) {
  exports.partToChatGeneration = partToChatGeneration;
  function responseToChatGenerations(response) {
  const parts = responseToParts(response);
- const ret = parts.map((part) => partToChatGeneration(part));
+ let ret = parts.map((part) => partToChatGeneration(part));
+ if (ret.every((item) => typeof item.message.content === "string")) {
+ const combinedContent = ret.map((item) => item.message.content).join("");
+ const combinedText = ret.map((item) => item.text).join("");
+ const toolCallChunks = ret[ret.length - 1].message.additional_kwargs?.tool_calls?.map((toolCall, i) => ({
+ name: toolCall.function.name,
+ args: toolCall.function.arguments,
+ id: toolCall.id,
+ index: i,
+ }));
+ ret = [
+ new outputs_1.ChatGenerationChunk({
+ message: new messages_1.AIMessageChunk({
+ content: combinedContent,
+ additional_kwargs: ret[ret.length - 1].message.additional_kwargs,
+ tool_call_chunks: toolCallChunks,
+ }),
+ text: combinedText,
+ generationInfo: ret[ret.length - 1].generationInfo,
+ }),
+ ];
+ }
  return ret;
  }
  exports.responseToChatGenerations = responseToChatGenerations;
@@ -405,10 +452,30 @@ exports.responseToBaseMessageFields = responseToBaseMessageFields;
  function partsToBaseMessageFields(parts) {
  const fields = {
  content: partsToMessageContent(parts),
+ tool_calls: [],
+ invalid_tool_calls: [],
  };
  const rawTools = partsToToolsRaw(parts);
  if (rawTools.length > 0) {
  const tools = toolsRawToTools(rawTools);
+ for (const tool of tools) {
+ try {
+ fields.tool_calls?.push({
+ name: tool.function.name,
+ args: JSON.parse(tool.function.arguments),
+ id: tool.id,
+ });
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ }
+ catch (e) {
+ fields.invalid_tool_calls?.push({
+ name: tool.function.name,
+ args: JSON.parse(tool.function.arguments),
+ id: tool.id,
+ error: e.message,
+ });
+ }
+ }
  fields.additional_kwargs = {
  tool_calls: tools,
  };
package/dist/utils/gemini.d.ts CHANGED
@@ -1,8 +1,8 @@
- import { BaseMessage, BaseMessageChunk, BaseMessageFields, MessageContent } from "@langchain/core/messages";
+ import { AIMessageFields, BaseMessage, BaseMessageChunk, BaseMessageFields, MessageContent } from "@langchain/core/messages";
  import { ChatGeneration, ChatGenerationChunk, ChatResult, Generation } from "@langchain/core/outputs";
  import type { GoogleLLMResponse, GoogleAIModelParams, GeminiPart, GeminiContent, GenerateContentResponseData, GoogleAISafetyHandler } from "../types.js";
  export declare function messageContentToParts(content: MessageContent): GeminiPart[];
- export declare function baseMessageToContent(message: BaseMessage): GeminiContent[];
+ export declare function baseMessageToContent(message: BaseMessage, prevMessage?: BaseMessage): GeminiContent[];
  export declare function partsToMessageContent(parts: GeminiPart[]): MessageContent;
  interface FunctionCall {
  name: string;
@@ -34,11 +34,11 @@ export declare function safeResponseToGeneration(response: GoogleLLMResponse, sa
  export declare function responseToChatGeneration(response: GoogleLLMResponse): ChatGenerationChunk;
  export declare function safeResponseToChatGeneration(response: GoogleLLMResponse, safetyHandler: GoogleAISafetyHandler): ChatGenerationChunk;
  export declare function chunkToString(chunk: BaseMessageChunk): string;
- export declare function partToMessage(part: GeminiPart): BaseMessageChunk;
+ export declare function partToMessageChunk(part: GeminiPart): BaseMessageChunk;
  export declare function partToChatGeneration(part: GeminiPart): ChatGeneration;
  export declare function responseToChatGenerations(response: GoogleLLMResponse): ChatGeneration[];
  export declare function responseToBaseMessageFields(response: GoogleLLMResponse): BaseMessageFields;
- export declare function partsToBaseMessageFields(parts: GeminiPart[]): BaseMessageFields;
+ export declare function partsToBaseMessageFields(parts: GeminiPart[]): AIMessageFields;
  export declare function responseToBaseMessage(response: GoogleLLMResponse): BaseMessage;
  export declare function safeResponseToBaseMessage(response: GoogleLLMResponse, safetyHandler: GoogleAISafetyHandler): BaseMessage;
  export declare function responseToChatResult(response: GoogleLLMResponse): ChatResult;
package/dist/utils/gemini.js CHANGED
@@ -1,4 +1,5 @@
- import { AIMessage, AIMessageChunk, } from "@langchain/core/messages";
+ import { v4 as uuidv4 } from "uuid";
+ import { AIMessage, AIMessageChunk, isAIMessage, } from "@langchain/core/messages";
  import { ChatGenerationChunk, } from "@langchain/core/outputs";
  import { GoogleAISafetyError } from "./safety.js";
  function messageContentText(content) {
@@ -51,12 +52,20 @@ export function messageContentToParts(content) {
  .map((content) => {
  switch (content.type) {
  case "text":
- return messageContentText(content);
+ if ("text" in content) {
+ return messageContentText(content);
+ }
+ break;
  case "image_url":
- return messageContentImageUrl(content);
+ if ("image_url" in content) {
+ // Type guard for MessageContentImageUrl
+ return messageContentImageUrl(content);
+ }
+ break;
  default:
  throw new Error(`Unsupported type received while converting message to message parts`);
  }
+ throw new Error(`Cannot coerce "${content.type}" message part into a string.`);
  })
  .reduce((acc, val) => {
  if (val) {
@@ -95,7 +104,18 @@ function messageKwargsToParts(kwargs) {
  }
  function roleMessageToContent(role, message) {
  const contentParts = messageContentToParts(message.content);
- const toolParts = messageKwargsToParts(message.additional_kwargs);
+ let toolParts;
+ if (isAIMessage(message) && !!message.tool_calls?.length) {
+ toolParts = message.tool_calls.map((toolCall) => ({
+ functionCall: {
+ name: toolCall.name,
+ args: toolCall.args,
+ },
+ }));
+ }
+ else {
+ toolParts = messageKwargsToParts(message.additional_kwargs);
+ }
  const parts = [...contentParts, ...toolParts];
  return [
  {
@@ -110,7 +130,7 @@ function systemMessageToContent(message) {
  ...roleMessageToContent("model", new AIMessage("Ok")),
  ];
  }
- function toolMessageToContent(message) {
+ function toolMessageToContent(message, prevMessage) {
  const contentStr = typeof message.content === "string"
  ? message.content
  : message.content.reduce((acc, content) => {
@@ -121,6 +141,11 @@ function toolMessageToContent(message) {
  return acc;
  }
  }, "");
+ // Hacky :(
+ const responseName = (isAIMessage(prevMessage) && !!prevMessage.tool_calls?.length
+ ? prevMessage.tool_calls[0].name
+ : prevMessage.name) ?? message.tool_call_id;
+ console.log(contentStr);
  try {
  const content = JSON.parse(contentStr);
  return [
@@ -129,8 +154,8 @@ function toolMessageToContent(message) {
  parts: [
  {
  functionResponse: {
- name: message.tool_call_id,
- response: content,
+ name: responseName,
+ response: { content },
  },
  },
  ],
@@ -144,10 +169,8 @@ function toolMessageToContent(message) {
  parts: [
  {
  functionResponse: {
- name: message.tool_call_id,
- response: {
- response: contentStr,
- },
+ name: responseName,
+ response: { content: contentStr },
  },
  },
  ],
@@ -155,7 +178,7 @@ function toolMessageToContent(message) {
  ];
  }
  }
- export function baseMessageToContent(message) {
+ export function baseMessageToContent(message, prevMessage) {
  const type = message._getType();
  switch (type) {
  case "system":
@@ -165,7 +188,10 @@ export function baseMessageToContent(message) {
  case "ai":
  return roleMessageToContent("model", message);
  case "tool":
- return toolMessageToContent(message);
+ if (!prevMessage) {
+ throw new Error("Tool messages cannot be the first message passed to the model.");
+ }
+ return toolMessageToContent(message, prevMessage);
  default:
  console.log(`Unsupported message type: ${type}`);
  return [];
@@ -227,7 +253,7 @@ function toolRawToTool(raw) {
  }
  function functionCallPartToToolRaw(part) {
  return {
- id: part?.functionCall?.name ?? "",
+ id: uuidv4().replace(/-/g, ""),
  type: "function",
  function: {
  name: part.functionCall.name,
@@ -323,7 +349,7 @@ export function safeResponseToGeneration(response, safetyHandler) {
  export function responseToChatGeneration(response) {
  return new ChatGenerationChunk({
  text: responseToString(response),
- message: partToMessage(responseToParts(response)[0]),
+ message: partToMessageChunk(responseToParts(response)[0]),
  generationInfo: response,
  });
  }
@@ -347,7 +373,7 @@ export function chunkToString(chunk) {
  throw new Error(`Unexpected chunk: ${chunk}`);
  }
  }
- export function partToMessage(part) {
+ export function partToMessageChunk(part) {
  const fields = partsToBaseMessageFields([part]);
  if (typeof fields.content === "string") {
  return new AIMessageChunk(fields);
@@ -364,7 +390,7 @@ export function partToMessage(part) {
  return new AIMessageChunk(fields);
  }
  export function partToChatGeneration(part) {
- const message = partToMessage(part);
+ const message = partToMessageChunk(part);
  const text = partToText(part);
  return new ChatGenerationChunk({
  text,
@@ -373,7 +399,28 @@ export function partToChatGeneration(part) {
  }
  export function responseToChatGenerations(response) {
  const parts = responseToParts(response);
- const ret = parts.map((part) => partToChatGeneration(part));
+ let ret = parts.map((part) => partToChatGeneration(part));
+ if (ret.every((item) => typeof item.message.content === "string")) {
+ const combinedContent = ret.map((item) => item.message.content).join("");
+ const combinedText = ret.map((item) => item.text).join("");
+ const toolCallChunks = ret[ret.length - 1].message.additional_kwargs?.tool_calls?.map((toolCall, i) => ({
+ name: toolCall.function.name,
+ args: toolCall.function.arguments,
+ id: toolCall.id,
+ index: i,
+ }));
+ ret = [
+ new ChatGenerationChunk({
+ message: new AIMessageChunk({
+ content: combinedContent,
+ additional_kwargs: ret[ret.length - 1].message.additional_kwargs,
+ tool_call_chunks: toolCallChunks,
+ }),
+ text: combinedText,
+ generationInfo: ret[ret.length - 1].generationInfo,
+ }),
+ ];
+ }
  return ret;
  }
  export function responseToBaseMessageFields(response) {
@@ -383,10 +430,30 @@ export function responseToBaseMessageFields(response) {
  export function partsToBaseMessageFields(parts) {
  const fields = {
  content: partsToMessageContent(parts),
+ tool_calls: [],
+ invalid_tool_calls: [],
  };
  const rawTools = partsToToolsRaw(parts);
  if (rawTools.length > 0) {
  const tools = toolsRawToTools(rawTools);
+ for (const tool of tools) {
+ try {
+ fields.tool_calls?.push({
+ name: tool.function.name,
+ args: JSON.parse(tool.function.arguments),
+ id: tool.id,
+ });
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ }
+ catch (e) {
+ fields.invalid_tool_calls?.push({
+ name: tool.function.name,
+ args: JSON.parse(tool.function.arguments),
+ id: tool.id,
+ error: e.message,
+ });
+ }
+ }
  fields.additional_kwargs = {
  tool_calls: tools,
  };
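
The tool-message handling above now names each Gemini `functionResponse` after the matching tool call on the preceding AI message (falling back to the `tool_call_id`), which is why `baseMessageToContent` receives the previous message and why a tool message can no longer come first. A hedged sketch of the message ordering this expects, with illustrative values:

import { AIMessage, HumanMessage, ToolMessage } from "@langchain/core/messages";

// The ToolMessage must directly follow the AIMessage that issued the call;
// toolMessageToContent reads prevMessage.tool_calls[0].name for the functionResponse name.
const history = [
  new HumanMessage("What is the weather in Paris?"),
  new AIMessage({
    content: "",
    tool_calls: [{ name: "get_weather", args: { city: "Paris" }, id: "abc123" }],
  }),
  new ToolMessage({ content: JSON.stringify({ tempC: 21 }), tool_call_id: "abc123" }),
];
// Formatting `history` for Gemini turns the ToolMessage into
// { functionResponse: { name: "get_weather", response: { content: ... } } }.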
package/dist/utils/zod_to_gemini_parameters.cjs CHANGED
@@ -1,4 +1,5 @@
  "use strict";
+ /* eslint-disable @typescript-eslint/no-unused-vars */
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.zodToGeminiParameters = void 0;
  const zod_to_json_schema_1 = require("zod-to-json-schema");
package/dist/utils/zod_to_gemini_parameters.js CHANGED
@@ -1,3 +1,4 @@
+ /* eslint-disable @typescript-eslint/no-unused-vars */
  import { zodToJsonSchema } from "zod-to-json-schema";
  export function zodToGeminiParameters(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@langchain/google-common",
- "version": "0.0.3",
+ "version": "0.0.5",
  "description": "Core types and classes for Google services.",
  "type": "module",
  "engines": {
@@ -39,7 +39,8 @@
  "author": "LangChain",
  "license": "MIT",
  "dependencies": {
- "@langchain/core": "~0.1.1",
+ "@langchain/core": "~0.1.56",
+ "uuid": "^9.0.0",
  "zod-to-json-schema": "^3.22.4"
  },
  "devDependencies": {