@mcp-use/inspector 0.9.0-canary.1 → 0.9.0-canary.2

This diff shows the changes between publicly available package versions as published to their respective public registries. It is provided for informational purposes only.
@@ -1,7 +1,7 @@
1
- import { d as ChatMessage, g as isLangChainTool, q as convertToOpenAITool, r as isZodSchemaV3, s as isZodSchemaV4, l as interopSafeParseAsync, O as OutputParserException, u as BaseCumulativeTransformOutputParser, f as isAIMessage, v as parsePartialJson, _ as __export, m as BaseChatModel, n as getEnvironmentVariable, i as isInteropZodSchema, h as isOpenAITool, S as StructuredOutputParser, J as JsonOutputParser, t as toJsonSchema, w as getSchemaDescription, x as RunnableLambda, R as RunnablePassthrough, o as RunnableSequence, j as isDataContentBlock, k as convertToProviderContentBlock, b as AIMessage, T as ToolMessage, y as HumanMessageChunk, c as AIMessageChunk, z as SystemMessageChunk, F as FunctionMessageChunk, D as ToolMessageChunk, E as ChatMessageChunk, p as parseBase64DataUrl, G as parseMimeType, I as iife, C as ChatGenerationChunk, K as BaseLanguageModel, L as CallbackManager, M as GenerationChunk, N as callbackHandlerPrefersStreaming, P as concat, Q as RUN_KEY, U as isStructuredToolParams, V as isStructuredTool, W as isRunnableToolLike, X as ToolInputParsingException, Y as BaseLangChain, Z as ensureConfig, $ as mergeConfigs, a0 as _isToolCall, a1 as interopParseAsync, a2 as parseCallbackConfigArg, a3 as _configHasToolCallId, a4 as isSimpleStringZodSchema, a5 as validatesOnlyStrings, a6 as patchConfig, a7 as AsyncLocalStorageProviderSingleton, a8 as pickRunnableConfigKeys, a9 as isDirectToolOutput, aa as getAbortSignalError } from "./index-C74kq9On.js";
2
- import { Z as ZodFirstPartyTypeKind, o as objectType, s as stringType } from "./index-B6TgCktd.js";
3
- import { t as toJSONSchema, i as parse, q as prettifyError, r as validate } from "./chunk-VL2OQCWN-CX6x4kxU.js";
4
- import "./embeddings-D88gGGgZ.js";
1
+ import { M as ChatMessage, t as isLangChainTool, a1 as convertToOpenAITool, a2 as ZodFirstPartyTypeKind, a3 as toJSONSchema, a4 as isZodSchemaV3, a5 as isZodSchemaV4, W as BaseChatModel, X as getEnvironmentVariable, j as isInteropZodSchema, P as isOpenAITool, a6 as StructuredOutputParser, Y as JsonOutputParser, H as toJsonSchema, a7 as getSchemaDescription, a8 as RunnableLambda, Z as RunnablePassthrough, $ as RunnableSequence, Q as isDataContentBlock, S as convertToProviderContentBlock, J as AIMessage, o as ToolMessage, a9 as HumanMessageChunk, L as AIMessageChunk, aa as SystemMessageChunk, ab as FunctionMessageChunk, ac as ToolMessageChunk, ad as ChatMessageChunk, U as parseBase64DataUrl, ae as parseMimeType, af as iife, K as ChatGenerationChunk, a as isAIMessage } from "./index-B8yt0GKw.js";
2
+ import { bg as parse } from "./index-BaR5HKmC.js";
3
+ import { J as JsonOutputKeyToolsParser, c as convertLangChainToolCallToOpenAI, p as parseToolCall$2, m as makeInvalidToolCall } from "./index-BLXuIjh0.js";
4
+ import "./embeddings-CF86nH4i.js";
5
5
  import "./index-DX0TIfSM.js";
6
6
  const iife$1 = (fn) => fn();
7
7
  function isReasoningModel(model) {
@@ -45,10 +45,10 @@ function getEndpoint(config) {
45
45
  }
46
46
  return baseURL;
47
47
  }
48
- function _convertToOpenAITool(tool2, fields) {
48
+ function _convertToOpenAITool(tool, fields) {
49
49
  let toolDef;
50
- if (isLangChainTool(tool2)) toolDef = convertToOpenAITool(tool2);
51
- else toolDef = tool2;
50
+ if (isLangChainTool(tool)) toolDef = convertToOpenAITool(tool);
51
+ else toolDef = tool;
52
52
  if (fields?.strict !== void 0) toolDef.function.strict = fields.strict;
53
53
  return toolDef;
54
54
  }
@@ -118,17 +118,17 @@ function formatToOpenAIToolChoice(toolChoice) {
118
118
  };
119
119
  else return toolChoice;
120
120
  }
121
- function isBuiltInTool(tool2) {
122
- return "type" in tool2 && tool2.type !== "function";
121
+ function isBuiltInTool(tool) {
122
+ return "type" in tool && tool.type !== "function";
123
123
  }
124
124
  function isBuiltInToolChoice(tool_choice) {
125
125
  return tool_choice != null && typeof tool_choice === "object" && "type" in tool_choice && tool_choice.type !== "function";
126
126
  }
127
- function isCustomTool(tool2) {
128
- return typeof tool2 === "object" && tool2 !== null && "metadata" in tool2 && typeof tool2.metadata === "object" && tool2.metadata !== null && "customTool" in tool2.metadata && typeof tool2.metadata.customTool === "object" && tool2.metadata.customTool !== null;
127
+ function isCustomTool(tool) {
128
+ return typeof tool === "object" && tool !== null && "metadata" in tool && typeof tool.metadata === "object" && tool.metadata !== null && "customTool" in tool.metadata && typeof tool.metadata.customTool === "object" && tool.metadata.customTool !== null;
129
129
  }
130
- function isOpenAICustomTool(tool2) {
131
- return "type" in tool2 && tool2.type === "custom" && "custom" in tool2 && typeof tool2.custom === "object" && tool2.custom !== null;
130
+ function isOpenAICustomTool(tool) {
131
+ return "type" in tool && tool.type === "custom" && "custom" in tool && typeof tool.custom === "object" && tool.custom !== null;
132
132
  }
133
133
  function parseCustomToolCall(rawToolCall) {
134
134
  if (rawToolCall.type !== "custom_tool_call") return void 0;
@@ -145,42 +145,42 @@ function parseCustomToolCall(rawToolCall) {
145
145
  function isCustomToolCall(toolCall) {
146
146
  return toolCall.type === "tool_call" && "isCustomTool" in toolCall && toolCall.isCustomTool === true;
147
147
  }
148
- function convertCompletionsCustomTool(tool2) {
148
+ function convertCompletionsCustomTool(tool) {
149
149
  const getFormat = () => {
150
- if (!tool2.custom.format) return void 0;
151
- if (tool2.custom.format.type === "grammar") return {
150
+ if (!tool.custom.format) return void 0;
151
+ if (tool.custom.format.type === "grammar") return {
152
152
  type: "grammar",
153
- definition: tool2.custom.format.grammar.definition,
154
- syntax: tool2.custom.format.grammar.syntax
153
+ definition: tool.custom.format.grammar.definition,
154
+ syntax: tool.custom.format.grammar.syntax
155
155
  };
156
- if (tool2.custom.format.type === "text") return { type: "text" };
156
+ if (tool.custom.format.type === "text") return { type: "text" };
157
157
  return void 0;
158
158
  };
159
159
  return {
160
160
  type: "custom",
161
- name: tool2.custom.name,
162
- description: tool2.custom.description,
161
+ name: tool.custom.name,
162
+ description: tool.custom.description,
163
163
  format: getFormat()
164
164
  };
165
165
  }
166
- function convertResponsesCustomTool(tool2) {
166
+ function convertResponsesCustomTool(tool) {
167
167
  const getFormat = () => {
168
- if (!tool2.format) return void 0;
169
- if (tool2.format.type === "grammar") return {
168
+ if (!tool.format) return void 0;
169
+ if (tool.format.type === "grammar") return {
170
170
  type: "grammar",
171
171
  grammar: {
172
- definition: tool2.format.definition,
173
- syntax: tool2.format.syntax
172
+ definition: tool.format.definition,
173
+ syntax: tool.format.syntax
174
174
  }
175
175
  };
176
- if (tool2.format.type === "text") return { type: "text" };
176
+ if (tool.format.type === "text") return { type: "text" };
177
177
  return void 0;
178
178
  };
179
179
  return {
180
180
  type: "custom",
181
181
  custom: {
182
- name: tool2.name,
183
- description: tool2.description,
182
+ name: tool.name,
183
+ description: tool.description,
184
184
  format: getFormat()
185
185
  }
186
186
  };
@@ -321,8 +321,8 @@ class InvalidWebhookSignatureError extends Error {
321
321
  super(message);
322
322
  }
323
323
  }
324
- function isChatCompletionFunctionTool(tool2) {
325
- return tool2 !== void 0 && "function" in tool2 && tool2.function !== void 0;
324
+ function isChatCompletionFunctionTool(tool) {
325
+ return tool !== void 0 && "function" in tool && tool.function !== void 0;
326
326
  }
327
327
  function makeParseableResponseFormat$1(response_format, parser) {
328
328
  const obj = { ...response_format };
@@ -341,8 +341,8 @@ function makeParseableResponseFormat$1(response_format, parser) {
341
341
  function isAutoParsableResponseFormat(response_format) {
342
342
  return response_format?.["$brand"] === "auto-parseable-response-format";
343
343
  }
344
- function isAutoParsableTool$1(tool2) {
345
- return tool2?.["$brand"] === "auto-parseable-tool";
344
+ function isAutoParsableTool$1(tool) {
345
+ return tool?.["$brand"] === "auto-parseable-tool";
346
346
  }
347
347
  function maybeParseChatCompletion(completion, params) {
348
348
  if (!params || !hasAutoParseableInput$1(params)) {
@@ -379,7 +379,7 @@ function parseChatCompletion(completion, params) {
379
379
  message: {
380
380
  ...choice.message,
381
381
  ...choice.message.tool_calls ? {
382
- tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$2(params, toolCall)) ?? void 0
382
+ tool_calls: choice.message.tool_calls?.map((toolCall) => parseToolCall$1(params, toolCall)) ?? void 0
383
383
  } : void 0,
384
384
  parsed: choice.message.content && !choice.message.refusal ? parseResponseFormat(params, choice.message.content) : null
385
385
  }
@@ -400,7 +400,7 @@ function parseResponseFormat(params, content) {
400
400
  }
401
401
  return null;
402
402
  }
403
- function parseToolCall$2(params, toolCall) {
403
+ function parseToolCall$1(params, toolCall) {
404
404
  const inputTool = params.tools?.find((inputTool2) => isChatCompletionFunctionTool(inputTool2) && inputTool2.function?.name === toolCall.function.name);
405
405
  return {
406
406
  ...toolCall,
@@ -431,12 +431,12 @@ function assertToolCallsAreChatCompletionFunctionToolCalls(toolCalls) {
431
431
  }
432
432
  }
433
433
  function validateInputTools(tools) {
434
- for (const tool2 of tools ?? []) {
435
- if (tool2.type !== "function") {
436
- throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool2.type}\``);
434
+ for (const tool of tools ?? []) {
435
+ if (tool.type !== "function") {
436
+ throw new OpenAIError(`Currently only \`function\` tool types support auto-parsing; Received \`${tool.type}\``);
437
437
  }
438
- if (tool2.function.strict !== true) {
439
- throw new OpenAIError(`The \`${tool2.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
438
+ if (tool.function.strict !== true) {
439
+ throw new OpenAIError(`The \`${tool.function.name}\` tool is not marked with \`strict: true\`. Only strict function tools can be auto-parsed`);
440
440
  }
441
441
  }
442
442
  }
@@ -1602,7 +1602,7 @@ function parseResponse(response, params) {
1602
1602
  if (item.type === "function_call") {
1603
1603
  return {
1604
1604
  ...item,
1605
- parsed_arguments: parseToolCall$1(params, item)
1605
+ parsed_arguments: parseToolCall(params, item)
1606
1606
  };
1607
1607
  }
1608
1608
  if (item.type === "message") {
@@ -1660,13 +1660,13 @@ function hasAutoParseableInput(params) {
1660
1660
  }
1661
1661
  return false;
1662
1662
  }
1663
- function isAutoParsableTool(tool2) {
1664
- return tool2?.["$brand"] === "auto-parseable-tool";
1663
+ function isAutoParsableTool(tool) {
1664
+ return tool?.["$brand"] === "auto-parseable-tool";
1665
1665
  }
1666
1666
  function getInputToolByName(input_tools, name) {
1667
- return input_tools.find((tool2) => tool2.type === "function" && tool2.name === name);
1667
+ return input_tools.find((tool) => tool.type === "function" && tool.name === name);
1668
1668
  }
1669
- function parseToolCall$1(params, toolCall) {
1669
+ function parseToolCall(params, toolCall) {
1670
1670
  const inputTool = getInputToolByName(params.tools ?? [], toolCall.name);
1671
1671
  return {
1672
1672
  ...toolCall,
@@ -3723,24 +3723,24 @@ class AbstractChatCompletionRunner extends EventStream {
3723
3723
  const { tool_choice = "auto", stream, ...restParams } = params;
3724
3724
  const singleFunctionToCall = typeof tool_choice !== "string" && tool_choice.type === "function" && tool_choice?.function?.name;
3725
3725
  const { maxChatCompletions = DEFAULT_MAX_CHAT_COMPLETIONS } = options || {};
3726
- const inputTools = params.tools.map((tool2) => {
3727
- if (isAutoParsableTool$1(tool2)) {
3728
- if (!tool2.$callback) {
3726
+ const inputTools = params.tools.map((tool) => {
3727
+ if (isAutoParsableTool$1(tool)) {
3728
+ if (!tool.$callback) {
3729
3729
  throw new OpenAIError("Tool given to `.runTools()` that does not have an associated function");
3730
3730
  }
3731
3731
  return {
3732
3732
  type: "function",
3733
3733
  function: {
3734
- function: tool2.$callback,
3735
- name: tool2.function.name,
3736
- description: tool2.function.description || "",
3737
- parameters: tool2.function.parameters,
3738
- parse: tool2.$parseRaw,
3734
+ function: tool.$callback,
3735
+ name: tool.function.name,
3736
+ description: tool.function.description || "",
3737
+ parameters: tool.function.parameters,
3738
+ parse: tool.$parseRaw,
3739
3739
  strict: true
3740
3740
  }
3741
3741
  };
3742
3742
  }
3743
- return tool2;
3743
+ return tool;
3744
3744
  });
3745
3745
  const functionsByName = {};
3746
3746
  for (const f of inputTools) {
@@ -4260,7 +4260,7 @@ class ChatCompletionStream extends AbstractChatCompletionRunner {
4260
4260
  throw new Error("tool call snapshot missing `type`");
4261
4261
  }
4262
4262
  if (toolCallSnapshot.type === "function") {
4263
- const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool2) => isChatCompletionFunctionTool(tool2) && tool2.function.name === toolCallSnapshot.function.name);
4263
+ const inputTool = __classPrivateFieldGet(this, _ChatCompletionStream_params, "f")?.tools?.find((tool) => isChatCompletionFunctionTool(tool) && tool.function.name === toolCallSnapshot.function.name);
4264
4264
  this._emit("tool_calls.function.arguments.done", {
4265
4265
  name: toolCallSnapshot.function.name,
4266
4266
  index: toolCallIndex,
@@ -8069,174 +8069,6 @@ OpenAI.Conversations = Conversations;
8069
8069
  OpenAI.Evals = Evals;
8070
8070
  OpenAI.Containers = Containers;
8071
8071
  OpenAI.Videos = Videos;
8072
- function parseToolCall(rawToolCall, options) {
8073
- if (rawToolCall.function === void 0) return void 0;
8074
- let functionArgs;
8075
- if (options?.partial) try {
8076
- functionArgs = parsePartialJson(rawToolCall.function.arguments ?? "{}");
8077
- } catch {
8078
- return void 0;
8079
- }
8080
- else try {
8081
- functionArgs = JSON.parse(rawToolCall.function.arguments);
8082
- } catch (e) {
8083
- throw new OutputParserException([
8084
- `Function "${rawToolCall.function.name}" arguments:`,
8085
- ``,
8086
- rawToolCall.function.arguments,
8087
- ``,
8088
- `are not valid JSON.`,
8089
- `Error: ${e.message}`
8090
- ].join("\n"));
8091
- }
8092
- const parsedToolCall = {
8093
- name: rawToolCall.function.name,
8094
- args: functionArgs,
8095
- type: "tool_call"
8096
- };
8097
- if (options?.returnId) parsedToolCall.id = rawToolCall.id;
8098
- return parsedToolCall;
8099
- }
8100
- function convertLangChainToolCallToOpenAI(toolCall) {
8101
- if (toolCall.id === void 0) throw new Error(`All OpenAI tool calls must have an "id" field.`);
8102
- return {
8103
- id: toolCall.id,
8104
- type: "function",
8105
- function: {
8106
- name: toolCall.name,
8107
- arguments: JSON.stringify(toolCall.args)
8108
- }
8109
- };
8110
- }
8111
- function makeInvalidToolCall(rawToolCall, errorMsg) {
8112
- return {
8113
- name: rawToolCall.function?.name,
8114
- args: rawToolCall.function?.arguments,
8115
- id: rawToolCall.id,
8116
- error: errorMsg,
8117
- type: "invalid_tool_call"
8118
- };
8119
- }
8120
- var JsonOutputToolsParser = class extends BaseCumulativeTransformOutputParser {
8121
- static lc_name() {
8122
- return "JsonOutputToolsParser";
8123
- }
8124
- returnId = false;
8125
- lc_namespace = [
8126
- "langchain",
8127
- "output_parsers",
8128
- "openai_tools"
8129
- ];
8130
- lc_serializable = true;
8131
- constructor(fields) {
8132
- super(fields);
8133
- this.returnId = fields?.returnId ?? this.returnId;
8134
- }
8135
- _diff() {
8136
- throw new Error("Not supported.");
8137
- }
8138
- async parse() {
8139
- throw new Error("Not implemented.");
8140
- }
8141
- async parseResult(generations) {
8142
- const result = await this.parsePartialResult(generations, false);
8143
- return result;
8144
- }
8145
- /**
8146
- * Parses the output and returns a JSON object. If `argsOnly` is true,
8147
- * only the arguments of the function call are returned.
8148
- * @param generations The output of the LLM to parse.
8149
- * @returns A JSON object representation of the function call or its arguments.
8150
- */
8151
- async parsePartialResult(generations, partial = true) {
8152
- const message = generations[0].message;
8153
- let toolCalls;
8154
- if (isAIMessage(message) && message.tool_calls?.length) toolCalls = message.tool_calls.map((toolCall) => {
8155
- const { id, ...rest } = toolCall;
8156
- if (!this.returnId) return rest;
8157
- return {
8158
- id,
8159
- ...rest
8160
- };
8161
- });
8162
- else if (message.additional_kwargs.tool_calls !== void 0) {
8163
- const rawToolCalls = JSON.parse(JSON.stringify(message.additional_kwargs.tool_calls));
8164
- toolCalls = rawToolCalls.map((rawToolCall) => {
8165
- return parseToolCall(rawToolCall, {
8166
- returnId: this.returnId,
8167
- partial
8168
- });
8169
- });
8170
- }
8171
- if (!toolCalls) return [];
8172
- const parsedToolCalls = [];
8173
- for (const toolCall of toolCalls) if (toolCall !== void 0) {
8174
- const backwardsCompatibleToolCall = {
8175
- type: toolCall.name,
8176
- args: toolCall.args,
8177
- id: toolCall.id
8178
- };
8179
- parsedToolCalls.push(backwardsCompatibleToolCall);
8180
- }
8181
- return parsedToolCalls;
8182
- }
8183
- };
8184
- var JsonOutputKeyToolsParser = class extends JsonOutputToolsParser {
8185
- static lc_name() {
8186
- return "JsonOutputKeyToolsParser";
8187
- }
8188
- lc_namespace = [
8189
- "langchain",
8190
- "output_parsers",
8191
- "openai_tools"
8192
- ];
8193
- lc_serializable = true;
8194
- returnId = false;
8195
- /** The type of tool calls to return. */
8196
- keyName;
8197
- /** Whether to return only the first tool call. */
8198
- returnSingle = false;
8199
- zodSchema;
8200
- constructor(params) {
8201
- super(params);
8202
- this.keyName = params.keyName;
8203
- this.returnSingle = params.returnSingle ?? this.returnSingle;
8204
- this.zodSchema = params.zodSchema;
8205
- }
8206
- async _validateResult(result) {
8207
- if (this.zodSchema === void 0) return result;
8208
- const zodParsedResult = await interopSafeParseAsync(this.zodSchema, result);
8209
- if (zodParsedResult.success) return zodParsedResult.data;
8210
- else throw new OutputParserException(`Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(zodParsedResult.error?.issues)}`, JSON.stringify(result, null, 2));
8211
- }
8212
- async parsePartialResult(generations) {
8213
- const results = await super.parsePartialResult(generations);
8214
- const matchingResults = results.filter((result) => result.type === this.keyName);
8215
- let returnedValues = matchingResults;
8216
- if (!matchingResults.length) return void 0;
8217
- if (!this.returnId) returnedValues = matchingResults.map((result) => result.args);
8218
- if (this.returnSingle) return returnedValues[0];
8219
- return returnedValues;
8220
- }
8221
- async parseResult(generations) {
8222
- const results = await super.parsePartialResult(generations, false);
8223
- const matchingResults = results.filter((result) => result.type === this.keyName);
8224
- let returnedValues = matchingResults;
8225
- if (!matchingResults.length) return void 0;
8226
- if (!this.returnId) returnedValues = matchingResults.map((result) => result.args);
8227
- if (this.returnSingle) return this._validateResult(returnedValues[0]);
8228
- const toolCallResults = await Promise.all(returnedValues.map((value) => this._validateResult(value)));
8229
- return toolCallResults;
8230
- }
8231
- };
8232
- var openai_tools_exports = {};
8233
- __export(openai_tools_exports, {
8234
- JsonOutputKeyToolsParser: () => JsonOutputKeyToolsParser,
8235
- JsonOutputToolsParser: () => JsonOutputToolsParser,
8236
- convertLangChainToolCallToOpenAI: () => convertLangChainToolCallToOpenAI,
8237
- makeInvalidToolCall: () => makeInvalidToolCall,
8238
- parseToolCall: () => parseToolCall
8239
- });
8240
8072
  var BaseChatOpenAI = class extends BaseChatModel {
8241
8073
  temperature;
8242
8074
  topP;
@@ -8490,26 +8322,26 @@ var BaseChatOpenAI = class extends BaseChatModel {
8490
8322
  };
8491
8323
  return requestOptions;
8492
8324
  }
8493
- _convertChatOpenAIToolToCompletionsTool(tool2, fields) {
8494
- if (isCustomTool(tool2)) return convertResponsesCustomTool(tool2.metadata.customTool);
8495
- if (isOpenAITool(tool2)) {
8325
+ _convertChatOpenAIToolToCompletionsTool(tool, fields) {
8326
+ if (isCustomTool(tool)) return convertResponsesCustomTool(tool.metadata.customTool);
8327
+ if (isOpenAITool(tool)) {
8496
8328
  if (fields?.strict !== void 0) return {
8497
- ...tool2,
8329
+ ...tool,
8498
8330
  function: {
8499
- ...tool2.function,
8331
+ ...tool.function,
8500
8332
  strict: fields.strict
8501
8333
  }
8502
8334
  };
8503
- return tool2;
8335
+ return tool;
8504
8336
  }
8505
- return _convertToOpenAITool(tool2, fields);
8337
+ return _convertToOpenAITool(tool, fields);
8506
8338
  }
8507
8339
  bindTools(tools, kwargs) {
8508
8340
  let strict;
8509
8341
  if (kwargs?.strict !== void 0) strict = kwargs.strict;
8510
8342
  else if (this.supportsStrictToolCalling !== void 0) strict = this.supportsStrictToolCalling;
8511
8343
  return this.withConfig({
8512
- tools: tools.map((tool2) => isBuiltInTool(tool2) || isCustomTool(tool2) ? tool2 : this._convertChatOpenAIToolToCompletionsTool(tool2, { strict })),
8344
+ tools: tools.map((tool) => isBuiltInTool(tool) || isCustomTool(tool) ? tool : this._convertChatOpenAIToolToCompletionsTool(tool, { strict })),
8513
8345
  ...kwargs
8514
8346
  });
8515
8347
  }
@@ -8872,7 +8704,7 @@ const convertCompletionsMessageToBaseMessage = ({ message, rawResponse, includeR
8872
8704
  const toolCalls = [];
8873
8705
  const invalidToolCalls = [];
8874
8706
  for (const rawToolCall of rawToolCalls ?? []) try {
8875
- toolCalls.push(parseToolCall(rawToolCall, { returnId: true }));
8707
+ toolCalls.push(parseToolCall$2(rawToolCall, { returnId: true }));
8876
8708
  } catch (e) {
8877
8709
  invalidToolCalls.push(makeInvalidToolCall(rawToolCall, e.message));
8878
8710
  }
@@ -9143,7 +8975,7 @@ const convertResponsesMessageToAIMessage = (response) => {
9143
8975
  id: item.call_id
9144
8976
  };
9145
8977
  try {
9146
- tool_calls.push(parseToolCall(fnAdapter, { returnId: true }));
8978
+ tool_calls.push(parseToolCall$2(fnAdapter, { returnId: true }));
9147
8979
  } catch (e) {
9148
8980
  let errMessage;
9149
8981
  if (typeof e === "object" && e != null && "message" in e && typeof e.message === "string") errMessage = e.message;
@@ -9826,25 +9658,25 @@ var ChatOpenAIResponses = class extends BaseChatOpenAI {
9826
9658
  /** @internal */
9827
9659
  _reduceChatOpenAITools(tools, fields) {
9828
9660
  const reducedTools = [];
9829
- for (const tool2 of tools) if (isBuiltInTool(tool2)) {
9830
- if (tool2.type === "image_generation" && fields?.stream) tool2.partial_images = 1;
9831
- reducedTools.push(tool2);
9832
- } else if (isCustomTool(tool2)) {
9833
- const customToolData = tool2.metadata.customTool;
9661
+ for (const tool of tools) if (isBuiltInTool(tool)) {
9662
+ if (tool.type === "image_generation" && fields?.stream) tool.partial_images = 1;
9663
+ reducedTools.push(tool);
9664
+ } else if (isCustomTool(tool)) {
9665
+ const customToolData = tool.metadata.customTool;
9834
9666
  reducedTools.push({
9835
9667
  type: "custom",
9836
9668
  name: customToolData.name,
9837
9669
  description: customToolData.description,
9838
9670
  format: customToolData.format
9839
9671
  });
9840
- } else if (isOpenAITool(tool2)) reducedTools.push({
9672
+ } else if (isOpenAITool(tool)) reducedTools.push({
9841
9673
  type: "function",
9842
- name: tool2.function.name,
9843
- parameters: tool2.function.parameters,
9844
- description: tool2.function.description,
9674
+ name: tool.function.name,
9675
+ parameters: tool.function.parameters,
9676
+ description: tool.function.description,
9845
9677
  strict: fields?.strict ?? null
9846
9678
  });
9847
- else if (isOpenAICustomTool(tool2)) reducedTools.push(convertCompletionsCustomTool(tool2));
9679
+ else if (isOpenAICustomTool(tool)) reducedTools.push(convertCompletionsCustomTool(tool));
9848
9680
  return reducedTools;
9849
9681
  }
9850
9682
  };
@@ -9872,7 +9704,7 @@ var ChatOpenAICompletions = class extends BaseChatOpenAI {
9872
9704
  stream: this.streaming,
9873
9705
  functions: options?.functions,
9874
9706
  function_call: options?.function_call,
9875
- tools: options?.tools?.length ? options.tools.map((tool2) => this._convertChatOpenAIToolToCompletionsTool(tool2, { strict })) : void 0,
9707
+ tools: options?.tools?.length ? options.tools.map((tool) => this._convertChatOpenAIToolToCompletionsTool(tool, { strict })) : void 0,
9876
9708
  tool_choice: formatToOpenAIToolChoice(options?.tool_choice),
9877
9709
  response_format: this._getResponseFormat(options?.response_format),
9878
9710
  seed: options?.seed,
@@ -10146,546 +9978,6 @@ var ChatOpenAI = class ChatOpenAI2 extends BaseChatOpenAI {
10146
9978
  return newModel;
10147
9979
  }
10148
9980
  };
10149
- var llms_exports = {};
10150
- __export(llms_exports, {
10151
- BaseLLM: () => BaseLLM,
10152
- LLM: () => LLM
10153
- });
10154
- var BaseLLM = class BaseLLM2 extends BaseLanguageModel {
10155
- lc_namespace = [
10156
- "langchain",
10157
- "llms",
10158
- this._llmType()
10159
- ];
10160
- /**
10161
- * This method takes an input and options, and returns a string. It
10162
- * converts the input to a prompt value and generates a result based on
10163
- * the prompt.
10164
- * @param input Input for the LLM.
10165
- * @param options Options for the LLM call.
10166
- * @returns A string result based on the prompt.
10167
- */
10168
- async invoke(input, options) {
10169
- const promptValue = BaseLLM2._convertInputToPromptValue(input);
10170
- const result = await this.generatePrompt([promptValue], options, options?.callbacks);
10171
- return result.generations[0][0].text;
10172
- }
10173
- async *_streamResponseChunks(_input, _options, _runManager) {
10174
- throw new Error("Not implemented.");
10175
- }
10176
- _separateRunnableConfigFromCallOptionsCompat(options) {
10177
- const [runnableConfig, callOptions] = super._separateRunnableConfigFromCallOptions(options);
10178
- callOptions.signal = runnableConfig.signal;
10179
- return [runnableConfig, callOptions];
10180
- }
10181
- async *_streamIterator(input, options) {
10182
- if (this._streamResponseChunks === BaseLLM2.prototype._streamResponseChunks) yield this.invoke(input, options);
10183
- else {
10184
- const prompt = BaseLLM2._convertInputToPromptValue(input);
10185
- const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptionsCompat(options);
10186
- const callbackManager_ = await CallbackManager.configure(runnableConfig.callbacks, this.callbacks, runnableConfig.tags, this.tags, runnableConfig.metadata, this.metadata, { verbose: this.verbose });
10187
- const extra = {
10188
- options: callOptions,
10189
- invocation_params: this?.invocationParams(callOptions),
10190
- batch_size: 1
10191
- };
10192
- const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), [prompt.toString()], runnableConfig.runId, void 0, extra, void 0, void 0, runnableConfig.runName);
10193
- let generation = new GenerationChunk({ text: "" });
10194
- try {
10195
- for await (const chunk of this._streamResponseChunks(prompt.toString(), callOptions, runManagers?.[0])) {
10196
- if (!generation) generation = chunk;
10197
- else generation = generation.concat(chunk);
10198
- if (typeof chunk.text === "string") yield chunk.text;
10199
- }
10200
- } catch (err) {
10201
- await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
10202
- throw err;
10203
- }
10204
- await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMEnd({ generations: [[generation]] })));
10205
- }
10206
- }
10207
- /**
10208
- * This method takes prompt values, options, and callbacks, and generates
10209
- * a result based on the prompts.
10210
- * @param promptValues Prompt values for the LLM.
10211
- * @param options Options for the LLM call.
10212
- * @param callbacks Callbacks for the LLM call.
10213
- * @returns An LLMResult based on the prompts.
10214
- */
10215
- async generatePrompt(promptValues, options, callbacks) {
10216
- const prompts = promptValues.map((promptValue) => promptValue.toString());
10217
- return this.generate(prompts, options, callbacks);
10218
- }
10219
- /**
10220
- * Get the parameters used to invoke the model
10221
- */
10222
- invocationParams(_options) {
10223
- return {};
10224
- }
10225
- _flattenLLMResult(llmResult) {
10226
- const llmResults = [];
10227
- for (let i = 0; i < llmResult.generations.length; i += 1) {
10228
- const genList = llmResult.generations[i];
10229
- if (i === 0) llmResults.push({
10230
- generations: [genList],
10231
- llmOutput: llmResult.llmOutput
10232
- });
10233
- else {
10234
- const llmOutput = llmResult.llmOutput ? {
10235
- ...llmResult.llmOutput,
10236
- tokenUsage: {}
10237
- } : void 0;
10238
- llmResults.push({
10239
- generations: [genList],
10240
- llmOutput
10241
- });
10242
- }
10243
- }
10244
- return llmResults;
10245
- }
10246
- /** @ignore */
10247
- async _generateUncached(prompts, parsedOptions, handledOptions, startedRunManagers) {
10248
- let runManagers;
10249
- if (startedRunManagers !== void 0 && startedRunManagers.length === prompts.length) runManagers = startedRunManagers;
10250
- else {
10251
- const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
10252
- const extra = {
10253
- options: parsedOptions,
10254
- invocation_params: this?.invocationParams(parsedOptions),
10255
- batch_size: prompts.length
10256
- };
10257
- runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, handledOptions.runId, void 0, extra, void 0, void 0, handledOptions?.runName);
10258
- }
10259
- const hasStreamingHandler = !!runManagers?.[0].handlers.find(callbackHandlerPrefersStreaming);
10260
- let output;
10261
- if (hasStreamingHandler && prompts.length === 1 && this._streamResponseChunks !== BaseLLM2.prototype._streamResponseChunks) try {
10262
- const stream = await this._streamResponseChunks(prompts[0], parsedOptions, runManagers?.[0]);
10263
- let aggregated;
10264
- for await (const chunk of stream) if (aggregated === void 0) aggregated = chunk;
10265
- else aggregated = concat(aggregated, chunk);
10266
- if (aggregated === void 0) throw new Error("Received empty response from chat model call.");
10267
- output = {
10268
- generations: [[aggregated]],
10269
- llmOutput: {}
10270
- };
10271
- await runManagers?.[0].handleLLMEnd(output);
10272
- } catch (e) {
10273
- await runManagers?.[0].handleLLMError(e);
10274
- throw e;
10275
- }
10276
- else {
10277
- try {
10278
- output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
10279
- } catch (err) {
10280
- await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
10281
- throw err;
10282
- }
10283
- const flattenedOutputs = this._flattenLLMResult(output);
10284
- await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
10285
- }
10286
- const runIds = runManagers?.map((manager) => manager.runId) || void 0;
10287
- Object.defineProperty(output, RUN_KEY, {
10288
- value: runIds ? { runIds } : void 0,
10289
- configurable: true
10290
- });
10291
- return output;
10292
- }
10293
- async _generateCached({ prompts, cache, llmStringKey, parsedOptions, handledOptions, runId }) {
10294
- const callbackManager_ = await CallbackManager.configure(handledOptions.callbacks, this.callbacks, handledOptions.tags, this.tags, handledOptions.metadata, this.metadata, { verbose: this.verbose });
10295
- const extra = {
10296
- options: parsedOptions,
10297
- invocation_params: this?.invocationParams(parsedOptions),
10298
- batch_size: prompts.length
10299
- };
10300
- const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, runId, void 0, extra, void 0, void 0, handledOptions?.runName);
10301
- const missingPromptIndices = [];
10302
- const results = await Promise.allSettled(prompts.map(async (prompt, index) => {
10303
- const result = await cache.lookup(prompt, llmStringKey);
10304
- if (result == null) missingPromptIndices.push(index);
10305
- return result;
10306
- }));
10307
- const cachedResults = results.map((result, index) => ({
10308
- result,
10309
- runManager: runManagers?.[index]
10310
- })).filter(({ result }) => result.status === "fulfilled" && result.value != null || result.status === "rejected");
10311
- const generations = [];
10312
- await Promise.all(cachedResults.map(async ({ result: promiseResult, runManager }, i) => {
10313
- if (promiseResult.status === "fulfilled") {
10314
- const result = promiseResult.value;
10315
- generations[i] = result.map((result$1) => {
10316
- result$1.generationInfo = {
10317
- ...result$1.generationInfo,
10318
- tokenUsage: {}
10319
- };
10320
- return result$1;
10321
- });
10322
- if (result.length) await runManager?.handleLLMNewToken(result[0].text);
10323
- return runManager?.handleLLMEnd({ generations: [result] }, void 0, void 0, void 0, { cached: true });
10324
- } else {
10325
- await runManager?.handleLLMError(promiseResult.reason, void 0, void 0, void 0, { cached: true });
10326
- return Promise.reject(promiseResult.reason);
10327
- }
10328
- }));
10329
- const output = {
10330
- generations,
10331
- missingPromptIndices,
10332
- startedRunManagers: runManagers
10333
- };
10334
- Object.defineProperty(output, RUN_KEY, {
10335
- value: runManagers ? { runIds: runManagers?.map((manager) => manager.runId) } : void 0,
10336
- configurable: true
10337
- });
10338
- return output;
10339
- }
10340
- /**
10341
- * Run the LLM on the given prompts and input, handling caching.
10342
- */
10343
- async generate(prompts, options, callbacks) {
10344
- if (!Array.isArray(prompts)) throw new Error("Argument 'prompts' is expected to be a string[]");
10345
- let parsedOptions;
10346
- if (Array.isArray(options)) parsedOptions = { stop: options };
10347
- else parsedOptions = options;
10348
- const [runnableConfig, callOptions] = this._separateRunnableConfigFromCallOptionsCompat(parsedOptions);
10349
- runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
10350
- if (!this.cache) return this._generateUncached(prompts, callOptions, runnableConfig);
10351
- const { cache } = this;
10352
- const llmStringKey = this._getSerializedCacheKeyParametersForCall(callOptions);
10353
- const { generations, missingPromptIndices, startedRunManagers } = await this._generateCached({
10354
- prompts,
10355
- cache,
10356
- llmStringKey,
10357
- parsedOptions: callOptions,
10358
- handledOptions: runnableConfig,
10359
- runId: runnableConfig.runId
10360
- });
10361
- let llmOutput = {};
10362
- if (missingPromptIndices.length > 0) {
10363
- const results = await this._generateUncached(missingPromptIndices.map((i) => prompts[i]), callOptions, runnableConfig, startedRunManagers !== void 0 ? missingPromptIndices.map((i) => startedRunManagers?.[i]) : void 0);
10364
- await Promise.all(results.generations.map(async (generation, index) => {
10365
- const promptIndex = missingPromptIndices[index];
10366
- generations[promptIndex] = generation;
10367
- return cache.update(prompts[promptIndex], llmStringKey, generation);
10368
- }));
10369
- llmOutput = results.llmOutput ?? {};
10370
- }
10371
- return {
10372
- generations,
10373
- llmOutput
10374
- };
10375
- }
10376
- /**
10377
- * Get the identifying parameters of the LLM.
10378
- */
10379
- _identifyingParams() {
10380
- return {};
10381
- }
10382
- _modelType() {
10383
- return "base_llm";
10384
- }
10385
- };
10386
- var LLM = class extends BaseLLM {
10387
- async _generate(prompts, options, runManager) {
10388
- const generations = await Promise.all(prompts.map((prompt, promptIndex) => this._call(prompt, {
10389
- ...options,
10390
- promptIndex
10391
- }, runManager).then((text) => [{ text }])));
10392
- return { generations };
10393
- }
10394
- };
10395
- var tools_exports = {};
10396
- __export(tools_exports, {
10397
- BaseToolkit: () => BaseToolkit,
10398
- DynamicStructuredTool: () => DynamicStructuredTool,
10399
- DynamicTool: () => DynamicTool,
10400
- StructuredTool: () => StructuredTool,
10401
- Tool: () => Tool,
10402
- ToolInputParsingException: () => ToolInputParsingException,
10403
- isLangChainTool: () => isLangChainTool,
10404
- isRunnableToolLike: () => isRunnableToolLike,
10405
- isStructuredTool: () => isStructuredTool,
10406
- isStructuredToolParams: () => isStructuredToolParams,
10407
- tool: () => tool
10408
- });
10409
- var StructuredTool = class extends BaseLangChain {
10410
- /**
10411
- * Whether to return the tool's output directly.
10412
- *
10413
- * Setting this to true means that after the tool is called,
10414
- * an agent should stop looping.
10415
- */
10416
- returnDirect = false;
10417
- verboseParsingErrors = false;
10418
- get lc_namespace() {
10419
- return ["langchain", "tools"];
10420
- }
10421
- /**
10422
- * The tool response format.
10423
- *
10424
- * If "content" then the output of the tool is interpreted as the contents of a
10425
- * ToolMessage. If "content_and_artifact" then the output is expected to be a
10426
- * two-tuple corresponding to the (content, artifact) of a ToolMessage.
10427
- *
10428
- * @default "content"
10429
- */
10430
- responseFormat = "content";
10431
- /**
10432
- * Default config object for the tool runnable.
10433
- */
10434
- defaultConfig;
10435
- constructor(fields) {
10436
- super(fields ?? {});
10437
- this.verboseParsingErrors = fields?.verboseParsingErrors ?? this.verboseParsingErrors;
10438
- this.responseFormat = fields?.responseFormat ?? this.responseFormat;
10439
- this.defaultConfig = fields?.defaultConfig ?? this.defaultConfig;
10440
- this.metadata = fields?.metadata ?? this.metadata;
10441
- }
10442
- /**
10443
- * Invokes the tool with the provided input and configuration.
10444
- * @param input The input for the tool.
10445
- * @param config Optional configuration for the tool.
10446
- * @returns A Promise that resolves with the tool's output.
10447
- */
10448
- async invoke(input, config) {
10449
- let toolInput;
10450
- let enrichedConfig = ensureConfig(mergeConfigs(this.defaultConfig, config));
10451
- if (_isToolCall(input)) {
10452
- toolInput = input.args;
10453
- enrichedConfig = {
10454
- ...enrichedConfig,
10455
- toolCall: input
10456
- };
10457
- } else toolInput = input;
10458
- return this.call(toolInput, enrichedConfig);
10459
- }
10460
- /**
10461
- * @deprecated Use .invoke() instead. Will be removed in 0.3.0.
10462
- *
10463
- * Calls the tool with the provided argument, configuration, and tags. It
10464
- * parses the input according to the schema, handles any errors, and
10465
- * manages callbacks.
10466
- * @param arg The input argument for the tool.
10467
- * @param configArg Optional configuration or callbacks for the tool.
10468
- * @param tags Optional tags for the tool.
10469
- * @returns A Promise that resolves with a string.
10470
- */
10471
- async call(arg, configArg, tags) {
10472
- const inputForValidation = _isToolCall(arg) ? arg.args : arg;
10473
- let parsed;
10474
- if (isInteropZodSchema(this.schema)) try {
10475
- parsed = await interopParseAsync(this.schema, inputForValidation);
10476
- } catch (e) {
10477
- let message = `Received tool input did not match expected schema`;
10478
- if (this.verboseParsingErrors) message = `${message}
10479
- Details: ${e.message}`;
10480
- if (e instanceof Error && e.constructor.name === "ZodError") message = `${message}
10481
-
10482
- ${prettifyError(e)}`;
10483
- throw new ToolInputParsingException(message, JSON.stringify(arg));
10484
- }
10485
- else {
10486
- const result$1 = validate(inputForValidation, this.schema);
10487
- if (!result$1.valid) {
10488
- let message = `Received tool input did not match expected schema`;
10489
- if (this.verboseParsingErrors) message = `${message}
10490
- Details: ${result$1.errors.map((e) => `${e.keywordLocation}: ${e.error}`).join("\n")}`;
10491
- throw new ToolInputParsingException(message, JSON.stringify(arg));
10492
- }
10493
- parsed = inputForValidation;
10494
- }
10495
- const config = parseCallbackConfigArg(configArg);
10496
- const callbackManager_ = CallbackManager.configure(config.callbacks, this.callbacks, config.tags || tags, this.tags, config.metadata, this.metadata, { verbose: this.verbose });
10497
- const runManager = await callbackManager_?.handleToolStart(this.toJSON(), typeof arg === "string" ? arg : JSON.stringify(arg), config.runId, void 0, void 0, void 0, config.runName);
10498
- delete config.runId;
10499
- let result;
10500
- try {
10501
- result = await this._call(parsed, runManager, config);
10502
- } catch (e) {
10503
- await runManager?.handleToolError(e);
10504
- throw e;
10505
- }
10506
- let content;
10507
- let artifact;
10508
- if (this.responseFormat === "content_and_artifact") if (Array.isArray(result) && result.length === 2) [content, artifact] = result;
10509
- else throw new Error(`Tool response format is "content_and_artifact" but the output was not a two-tuple.
10510
- Result: ${JSON.stringify(result)}`);
10511
- else content = result;
10512
- let toolCallId;
10513
- if (_isToolCall(arg)) toolCallId = arg.id;
10514
- if (!toolCallId && _configHasToolCallId(config)) toolCallId = config.toolCall.id;
10515
- const formattedOutput = _formatToolOutput({
10516
- content,
10517
- artifact,
10518
- toolCallId,
10519
- name: this.name,
10520
- metadata: this.metadata
10521
- });
10522
- await runManager?.handleToolEnd(formattedOutput);
10523
- return formattedOutput;
10524
- }
10525
- };
10526
- var Tool = class extends StructuredTool {
10527
- schema = objectType({ input: stringType().optional() }).transform((obj) => obj.input);
10528
- constructor(fields) {
10529
- super(fields);
10530
- }
10531
- /**
10532
- * @deprecated Use .invoke() instead. Will be removed in 0.3.0.
10533
- *
10534
- * Calls the tool with the provided argument and callbacks. It handles
10535
- * string inputs specifically.
10536
- * @param arg The input argument for the tool, which can be a string, undefined, or an input of the tool's schema.
10537
- * @param callbacks Optional callbacks for the tool.
10538
- * @returns A Promise that resolves with a string.
10539
- */
10540
- call(arg, callbacks) {
10541
- const structuredArg = typeof arg === "string" || arg == null ? { input: arg } : arg;
10542
- return super.call(structuredArg, callbacks);
10543
- }
10544
- };
10545
- var DynamicTool = class extends Tool {
10546
- static lc_name() {
10547
- return "DynamicTool";
10548
- }
10549
- name;
10550
- description;
10551
- func;
10552
- constructor(fields) {
10553
- super(fields);
10554
- this.name = fields.name;
10555
- this.description = fields.description;
10556
- this.func = fields.func;
10557
- this.returnDirect = fields.returnDirect ?? this.returnDirect;
10558
- }
10559
- /**
10560
- * @deprecated Use .invoke() instead. Will be removed in 0.3.0.
10561
- */
10562
- async call(arg, configArg) {
10563
- const config = parseCallbackConfigArg(configArg);
10564
- if (config.runName === void 0) config.runName = this.name;
10565
- return super.call(arg, config);
10566
- }
10567
- /** @ignore */
10568
- async _call(input, runManager, parentConfig) {
10569
- return this.func(input, runManager, parentConfig);
10570
- }
10571
- };
10572
- var DynamicStructuredTool = class extends StructuredTool {
10573
- static lc_name() {
10574
- return "DynamicStructuredTool";
10575
- }
10576
- name;
10577
- description;
10578
- func;
10579
- schema;
10580
- constructor(fields) {
10581
- super(fields);
10582
- this.name = fields.name;
10583
- this.description = fields.description;
10584
- this.func = fields.func;
10585
- this.returnDirect = fields.returnDirect ?? this.returnDirect;
10586
- this.schema = fields.schema;
10587
- }
10588
- /**
10589
- * @deprecated Use .invoke() instead. Will be removed in 0.3.0.
10590
- */
10591
- async call(arg, configArg, tags) {
10592
- const config = parseCallbackConfigArg(configArg);
10593
- if (config.runName === void 0) config.runName = this.name;
10594
- return super.call(arg, config, tags);
10595
- }
10596
- _call(arg, runManager, parentConfig) {
10597
- return this.func(arg, runManager, parentConfig);
10598
- }
10599
- };
10600
- var BaseToolkit = class {
10601
- getTools() {
10602
- return this.tools;
10603
- }
10604
- };
10605
- function tool(func, fields) {
10606
- const isSimpleStringSchema = isSimpleStringZodSchema(fields.schema);
10607
- const isStringJSONSchema = validatesOnlyStrings(fields.schema);
10608
- if (!fields.schema || isSimpleStringSchema || isStringJSONSchema) return new DynamicTool({
10609
- ...fields,
10610
- description: fields.description ?? fields.schema?.description ?? `${fields.name} tool`,
10611
- func: async (input, runManager, config) => {
10612
- return new Promise((resolve, reject) => {
10613
- const childConfig = patchConfig(config, { callbacks: runManager?.getChild() });
10614
- AsyncLocalStorageProviderSingleton.runWithConfig(pickRunnableConfigKeys(childConfig), async () => {
10615
- try {
10616
- resolve(func(input, childConfig));
10617
- } catch (e) {
10618
- reject(e);
10619
- }
10620
- });
10621
- });
10622
- }
10623
- });
10624
- const schema = fields.schema;
10625
- const description = fields.description ?? fields.schema.description ?? `${fields.name} tool`;
10626
- return new DynamicStructuredTool({
10627
- ...fields,
10628
- description,
10629
- schema,
10630
- func: async (input, runManager, config) => {
10631
- return new Promise((resolve, reject) => {
10632
- let listener;
10633
- const cleanup = () => {
10634
- if (config?.signal && listener) config.signal.removeEventListener("abort", listener);
10635
- };
10636
- if (config?.signal) {
10637
- listener = () => {
10638
- cleanup();
10639
- reject(getAbortSignalError(config.signal));
10640
- };
10641
- config.signal.addEventListener("abort", listener);
10642
- }
10643
- const childConfig = patchConfig(config, { callbacks: runManager?.getChild() });
10644
- AsyncLocalStorageProviderSingleton.runWithConfig(pickRunnableConfigKeys(childConfig), async () => {
10645
- try {
10646
- const result = await func(input, childConfig);
10647
- if (config?.signal?.aborted) {
10648
- cleanup();
10649
- return;
10650
- }
10651
- cleanup();
10652
- resolve(result);
10653
- } catch (e) {
10654
- cleanup();
10655
- reject(e);
10656
- }
10657
- });
10658
- });
10659
- }
10660
- });
10661
- }
10662
- function _formatToolOutput(params) {
10663
- const { content, artifact, toolCallId, metadata } = params;
10664
- if (toolCallId && !isDirectToolOutput(content)) if (typeof content === "string" || Array.isArray(content) && content.every((item) => typeof item === "object")) return new ToolMessage({
10665
- status: "success",
10666
- content,
10667
- artifact,
10668
- tool_call_id: toolCallId,
10669
- name: params.name,
10670
- metadata
10671
- });
10672
- else return new ToolMessage({
10673
- status: "success",
10674
- content: _stringify(content),
10675
- artifact,
10676
- tool_call_id: toolCallId,
10677
- name: params.name,
10678
- metadata
10679
- });
10680
- else return content;
10681
- }
10682
- function _stringify(content) {
10683
- try {
10684
- return JSON.stringify(content, null, 2) ?? "";
10685
- } catch (_noOp) {
10686
- return `${content}`;
10687
- }
10688
- }
10689
9981
  export {
10690
9982
  BaseChatOpenAI,
10691
9983
  ChatOpenAI,