@use-lattice/litmus 0.121.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (199) hide show
  1. package/LICENSE +19 -0
  2. package/dist/src/accounts-Bt1oJb1Z.cjs +219 -0
  3. package/dist/src/accounts-DjOU8Rm3.js +178 -0
  4. package/dist/src/agentic-utils-D03IiXQc.js +153 -0
  5. package/dist/src/agentic-utils-Dh7xaMQM.cjs +180 -0
  6. package/dist/src/agents-C6BIMlZa.js +231 -0
  7. package/dist/src/agents-DvIpNX1L.cjs +666 -0
  8. package/dist/src/agents-ZP0RP9vV.cjs +231 -0
  9. package/dist/src/agents-maJXdjbR.js +665 -0
  10. package/dist/src/aimlapi-BTbQjG2E.cjs +30 -0
  11. package/dist/src/aimlapi-CwMxqfXP.js +30 -0
  12. package/dist/src/audio-BBUdvsde.cjs +97 -0
  13. package/dist/src/audio-D5DPZ7I-.js +97 -0
  14. package/dist/src/base-BEysXrkq.cjs +222 -0
  15. package/dist/src/base-C451JQfq.js +193 -0
  16. package/dist/src/blobs-BY8MDmpo.js +230 -0
  17. package/dist/src/blobs-BgcNn97m.cjs +256 -0
  18. package/dist/src/cache-BBE_lsTA.cjs +4 -0
  19. package/dist/src/cache-BkrqU5Ba.js +237 -0
  20. package/dist/src/cache-DsCxFlsZ.cjs +297 -0
  21. package/dist/src/chat-CPJWDP6a.cjs +289 -0
  22. package/dist/src/chat-CXX3xzkk.cjs +811 -0
  23. package/dist/src/chat-CcDgZFJ4.js +787 -0
  24. package/dist/src/chat-Dz5ZeGO2.js +289 -0
  25. package/dist/src/chatkit-Dw0mKkML.cjs +1158 -0
  26. package/dist/src/chatkit-swAIVuea.js +1157 -0
  27. package/dist/src/chunk-DEq-mXcV.js +15 -0
  28. package/dist/src/claude-agent-sdk-BXZJtOg6.js +379 -0
  29. package/dist/src/claude-agent-sdk-CkfyjDoG.cjs +383 -0
  30. package/dist/src/cloudflare-ai-BzpJcqUH.js +161 -0
  31. package/dist/src/cloudflare-ai-Cmy_R1y2.cjs +161 -0
  32. package/dist/src/cloudflare-gateway-B9tVQKok.cjs +272 -0
  33. package/dist/src/cloudflare-gateway-DrD3ew3H.js +272 -0
  34. package/dist/src/codex-sdk-Dezj9Nwm.js +1056 -0
  35. package/dist/src/codex-sdk-Dl9D4k5B.cjs +1060 -0
  36. package/dist/src/cometapi-C-9YvCHC.js +54 -0
  37. package/dist/src/cometapi-DHgDKoO2.cjs +54 -0
  38. package/dist/src/completion-B8Ctyxpr.js +120 -0
  39. package/dist/src/completion-Cxrt08sj.cjs +131 -0
  40. package/dist/src/createHash-BwgE13yv.cjs +27 -0
  41. package/dist/src/createHash-DmPQkvBh.js +15 -0
  42. package/dist/src/docker-BiqcTwLv.js +80 -0
  43. package/dist/src/docker-C7tEJnP-.cjs +80 -0
  44. package/dist/src/esm-C62Zofr1.cjs +409 -0
  45. package/dist/src/esm-DMVc93eh.js +379 -0
  46. package/dist/src/evalResult-C3NJPQOo.cjs +301 -0
  47. package/dist/src/evalResult-C7JJAPBb.js +295 -0
  48. package/dist/src/evalResult-DoVTZZWI.cjs +2 -0
  49. package/dist/src/extractor-DnMD3fwt.cjs +391 -0
  50. package/dist/src/extractor-DtlL28vL.js +374 -0
  51. package/dist/src/fetch-BTxakTSg.cjs +1133 -0
  52. package/dist/src/fetch-DQckpUFz.js +928 -0
  53. package/dist/src/fileExtensions-DnqA1y9x.js +85 -0
  54. package/dist/src/fileExtensions-bYh77CN8.cjs +114 -0
  55. package/dist/src/genaiTracer-CyZrmaK0.cjs +268 -0
  56. package/dist/src/genaiTracer-D3fD9dNV.js +256 -0
  57. package/dist/src/graders-BNscxFrU.js +13644 -0
  58. package/dist/src/graders-D2oE9Msq.js +2 -0
  59. package/dist/src/graders-c0Ez_w-9.cjs +2 -0
  60. package/dist/src/graders-d0F2M3e9.cjs +14056 -0
  61. package/dist/src/image-0ZhE0VlR.cjs +280 -0
  62. package/dist/src/image-CWE1pdNv.js +257 -0
  63. package/dist/src/image-D9ZK6hwL.js +163 -0
  64. package/dist/src/image-DKZgZITg.cjs +163 -0
  65. package/dist/src/index.cjs +11366 -0
  66. package/dist/src/index.d.cts +19640 -0
  67. package/dist/src/index.d.ts +19641 -0
  68. package/dist/src/index.js +11306 -0
  69. package/dist/src/invariant-Ddh24eXh.js +25 -0
  70. package/dist/src/invariant-kfQ8Bu82.cjs +30 -0
  71. package/dist/src/knowledgeBase-BgPyGFUd.cjs +122 -0
  72. package/dist/src/knowledgeBase-DyHilYaP.js +122 -0
  73. package/dist/src/litellm-CyMeneHS.js +135 -0
  74. package/dist/src/litellm-DWDF73yF.cjs +135 -0
  75. package/dist/src/logger-C40ZGil9.js +717 -0
  76. package/dist/src/logger-DyfK9PBt.cjs +917 -0
  77. package/dist/src/luma-ray-BAU9X_ep.cjs +315 -0
  78. package/dist/src/luma-ray-nwVseBbv.js +313 -0
  79. package/dist/src/messages-B5ADWTTv.js +245 -0
  80. package/dist/src/messages-BCnZfqrS.cjs +257 -0
  81. package/dist/src/meteor-DLZZ3osF.cjs +134 -0
  82. package/dist/src/meteor-DUiCJRC-.js +134 -0
  83. package/dist/src/modelslab-00cveB8L.cjs +163 -0
  84. package/dist/src/modelslab-D9sCU_L7.js +163 -0
  85. package/dist/src/nova-reel-CTapvqYH.js +276 -0
  86. package/dist/src/nova-reel-DlWuuroF.cjs +278 -0
  87. package/dist/src/nova-sonic-5UPWfeMv.cjs +363 -0
  88. package/dist/src/nova-sonic-BhSwQNym.js +363 -0
  89. package/dist/src/openai-BWrJK9d8.cjs +52 -0
  90. package/dist/src/openai-DumO8WQn.js +47 -0
  91. package/dist/src/openclaw-B8brrjC_.cjs +577 -0
  92. package/dist/src/openclaw-Bkayww9q.js +571 -0
  93. package/dist/src/opencode-sdk-7xjoDNiM.cjs +562 -0
  94. package/dist/src/opencode-sdk-SGwAPxht.js +558 -0
  95. package/dist/src/otlpReceiver-CoAHfAN9.cjs +15 -0
  96. package/dist/src/otlpReceiver-oO3EQwI9.js +14 -0
  97. package/dist/src/providerRegistry-4yjhaEM8.js +45 -0
  98. package/dist/src/providerRegistry-DhV4rJIc.cjs +50 -0
  99. package/dist/src/providers-B5RJVG-7.cjs +33609 -0
  100. package/dist/src/providers-BdmZCLzV.js +33262 -0
  101. package/dist/src/providers-CxtRxn8e.js +2 -0
  102. package/dist/src/providers-DnQLNbx1.cjs +3 -0
  103. package/dist/src/pythonUtils-BD0druiM.cjs +275 -0
  104. package/dist/src/pythonUtils-IBhn5YGR.js +249 -0
  105. package/dist/src/quiverai-BDOwZBsM.cjs +213 -0
  106. package/dist/src/quiverai-D3JTF5lD.js +213 -0
  107. package/dist/src/responses-B2LCDCXZ.js +667 -0
  108. package/dist/src/responses-BvNm4Xv9.cjs +685 -0
  109. package/dist/src/rubyUtils-B0NwnfpY.cjs +245 -0
  110. package/dist/src/rubyUtils-BroxzZ7c.cjs +2 -0
  111. package/dist/src/rubyUtils-hqVw5UvJ.js +222 -0
  112. package/dist/src/sagemaker-Cno2V-Sx.js +689 -0
  113. package/dist/src/sagemaker-fV_KUgs5.cjs +691 -0
  114. package/dist/src/server-BOuAXb06.cjs +238 -0
  115. package/dist/src/server-CtI-EWzm.cjs +2 -0
  116. package/dist/src/server-Cy3DZymt.js +189 -0
  117. package/dist/src/slack-CP8xBePa.js +135 -0
  118. package/dist/src/slack-DSQ1yXVb.cjs +135 -0
  119. package/dist/src/store-BwDDaBjb.cjs +246 -0
  120. package/dist/src/store-DcbLC593.cjs +2 -0
  121. package/dist/src/store-IGpqMIkv.js +240 -0
  122. package/dist/src/tables-3Q2cL7So.cjs +373 -0
  123. package/dist/src/tables-Bi2fjr4W.js +288 -0
  124. package/dist/src/telemetry-Bg2WqF79.js +161 -0
  125. package/dist/src/telemetry-D0x6u5kX.cjs +166 -0
  126. package/dist/src/telemetry-DXNimrI0.cjs +2 -0
  127. package/dist/src/text-B_UCRPp2.js +22 -0
  128. package/dist/src/text-CW1cyrwj.cjs +33 -0
  129. package/dist/src/tokenUsageUtils-NYT-WKS6.js +138 -0
  130. package/dist/src/tokenUsageUtils-bVa1ga6f.cjs +173 -0
  131. package/dist/src/transcription-Cl_W16Pr.js +122 -0
  132. package/dist/src/transcription-yt1EecY8.cjs +124 -0
  133. package/dist/src/transform-BCtGrl_W.cjs +228 -0
  134. package/dist/src/transform-Bv6gG2MJ.cjs +1688 -0
  135. package/dist/src/transform-CY1wbpRy.js +1507 -0
  136. package/dist/src/transform-DU8rUL9P.cjs +2 -0
  137. package/dist/src/transform-yWaShiKr.js +216 -0
  138. package/dist/src/transformersAvailability-BGkzavwb.js +35 -0
  139. package/dist/src/transformersAvailability-DKoRtQLy.cjs +35 -0
  140. package/dist/src/types-5aqHpBwE.cjs +3769 -0
  141. package/dist/src/types-Bn6D9c4U.js +3300 -0
  142. package/dist/src/util-BkKlTkI2.js +293 -0
  143. package/dist/src/util-CTh0bfOm.cjs +1119 -0
  144. package/dist/src/util-D17oBwo7.cjs +328 -0
  145. package/dist/src/util-DsS_-v4p.js +613 -0
  146. package/dist/src/util-DuntT1Ga.js +951 -0
  147. package/dist/src/util-aWjdCYMI.cjs +667 -0
  148. package/dist/src/utils-CisQwpjA.js +94 -0
  149. package/dist/src/utils-yWamDvmz.cjs +123 -0
  150. package/dist/tsconfig.tsbuildinfo +1 -0
  151. package/drizzle/0000_lush_hellion.sql +36 -0
  152. package/drizzle/0001_wide_calypso.sql +3 -0
  153. package/drizzle/0002_tidy_juggernaut.sql +1 -0
  154. package/drizzle/0003_lively_naoko.sql +8 -0
  155. package/drizzle/0004_minor_peter_quill.sql +19 -0
  156. package/drizzle/0005_silky_millenium_guard.sql +2 -0
  157. package/drizzle/0006_harsh_caretaker.sql +42 -0
  158. package/drizzle/0007_cloudy_wong.sql +1 -0
  159. package/drizzle/0008_broad_boomer.sql +2 -0
  160. package/drizzle/0009_strong_marten_broadcloak.sql +19 -0
  161. package/drizzle/0010_needy_bishop.sql +11 -0
  162. package/drizzle/0011_moaning_millenium_guard.sql +1 -0
  163. package/drizzle/0012_late_marten_broadcloak.sql +2 -0
  164. package/drizzle/0013_previous_dormammu.sql +9 -0
  165. package/drizzle/0014_lazy_captain_universe.sql +2 -0
  166. package/drizzle/0015_zippy_wallop.sql +29 -0
  167. package/drizzle/0016_jazzy_zemo.sql +2 -0
  168. package/drizzle/0017_reflective_praxagora.sql +4 -0
  169. package/drizzle/0018_fat_vanisher.sql +22 -0
  170. package/drizzle/0019_new_clint_barton.sql +8 -0
  171. package/drizzle/0020_skinny_maverick.sql +1 -0
  172. package/drizzle/0021_mysterious_madelyne_pryor.sql +13 -0
  173. package/drizzle/0022_sleepy_ultimo.sql +25 -0
  174. package/drizzle/0023_wooden_mandrill.sql +2 -0
  175. package/drizzle/AGENTS.md +68 -0
  176. package/drizzle/CLAUDE.md +1 -0
  177. package/drizzle/meta/0000_snapshot.json +221 -0
  178. package/drizzle/meta/0001_snapshot.json +214 -0
  179. package/drizzle/meta/0002_snapshot.json +221 -0
  180. package/drizzle/meta/0005_snapshot.json +369 -0
  181. package/drizzle/meta/0006_snapshot.json +638 -0
  182. package/drizzle/meta/0007_snapshot.json +640 -0
  183. package/drizzle/meta/0008_snapshot.json +649 -0
  184. package/drizzle/meta/0009_snapshot.json +554 -0
  185. package/drizzle/meta/0010_snapshot.json +619 -0
  186. package/drizzle/meta/0011_snapshot.json +627 -0
  187. package/drizzle/meta/0012_snapshot.json +639 -0
  188. package/drizzle/meta/0013_snapshot.json +717 -0
  189. package/drizzle/meta/0014_snapshot.json +717 -0
  190. package/drizzle/meta/0015_snapshot.json +897 -0
  191. package/drizzle/meta/0016_snapshot.json +1031 -0
  192. package/drizzle/meta/0018_snapshot.json +1210 -0
  193. package/drizzle/meta/0019_snapshot.json +1165 -0
  194. package/drizzle/meta/0020_snapshot.json +1232 -0
  195. package/drizzle/meta/0021_snapshot.json +1311 -0
  196. package/drizzle/meta/0022_snapshot.json +1481 -0
  197. package/drizzle/meta/0023_snapshot.json +1496 -0
  198. package/drizzle/meta/_journal.json +174 -0
  199. package/package.json +240 -0
@@ -0,0 +1,787 @@
1
+ import { T as state, b as getEnvInt, r as logger, v as getEnvBool, x as getEnvString, y as getEnvFloat } from "./logger-C40ZGil9.js";
2
+ import { r as importModule } from "./esm-DMVc93eh.js";
3
+ import { i as isJavascriptFile } from "./fileExtensions-DnqA1y9x.js";
4
+ import { d as maybeLoadToolsFromExternalFile, h as renderVarsInObject, l as maybeLoadFromExternalFileWithVars, u as maybeLoadResponseFormatFromExternalFile } from "./util-DuntT1Ga.js";
5
+ import { S as parseChatPrompt, T as transformTools, h as REQUEST_TIMEOUT_MS, w as transformToolChoice } from "./fetch-DQckpUFz.js";
6
+ import { r as fetchWithCache } from "./cache-BkrqU5Ba.js";
7
+ import { n as withGenAISpan } from "./genaiTracer-D3fD9dNV.js";
8
+ import { D as getAuthQueryParams, E as getAuthHeaders, O as getOAuthTokenWithExpiry, T as applyQueryParams, i as transformMCPToolsToOpenAi, k as renderAuthVars } from "./transform-CY1wbpRy.js";
9
+ import { t as OpenAiGenericProvider } from "./openai-DumO8WQn.js";
10
+ import { a as calculateOpenAICost, c as getTokenUsage, t as OPENAI_CHAT_MODELS } from "./util-DsS_-v4p.js";
11
+ import path from "path";
12
+ import { Client } from "@modelcontextprotocol/sdk/client/index.js";
13
//#region src/util/finishReason.ts
/**
 * Mapping of provider-specific finish/stop reasons to standardized OpenAI-compatible values.
 *
 * This normalization allows consistent finish reason handling across different LLM providers:
 *
 * **OpenAI Standard Values:**
 * - `stop`: Natural completion (reached end_of_turn, stop sequence, etc.)
 * - `length`: Token limit reached (max_tokens, context length, etc.)
 * - `content_filter`: Content filtering triggered
 * - `tool_calls`: Model made function/tool calls
 *
 * **Provider Mappings:**
 * - OpenAI: `function_call` (legacy) → `tool_calls` (current)
 * - Anthropic: `end_turn` → `stop`, `stop_sequence` → `stop`, `max_tokens` → `length`, `tool_use` → `tool_calls`
 *
 * @example
 * ```typescript
 * normalizeFinishReason('end_turn') // Returns: 'stop'
 * normalizeFinishReason('max_tokens') // Returns: 'length'
 * normalizeFinishReason('tool_use') // Returns: 'tool_calls'
 * normalizeFinishReason('function_call') // Returns: 'tool_calls'
 * normalizeFinishReason('unknown') // Returns: 'unknown' (passthrough)
 * ```
 */
const FINISH_REASON_MAP = {
	stop: "stop",
	length: "length",
	content_filter: "content_filter",
	tool_calls: "tool_calls",
	function_call: "tool_calls",
	end_turn: "stop",
	stop_sequence: "stop",
	max_tokens: "length",
	tool_use: "tool_calls"
};
/**
 * Normalize a provider-specific finish or stop reason to a standard OpenAI-compatible value.
 *
 * This function standardizes finish reasons across different LLM providers to enable
 * consistent handling in assertions and application logic. Unknown values are passed
 * through unchanged (lowercased and trimmed) to preserve provider-specific reasons.
 *
 * @param raw - The raw finish_reason/stop_reason from the provider response
 * @returns A normalized finish reason string, or undefined if input is invalid
 */
function normalizeFinishReason(raw) {
	// Reject nullish and non-string input (e.g. numeric codes) outright.
	if (raw == null || typeof raw !== "string") return;
	const trimmed = raw.trim();
	if (trimmed === "") return;
	const key = trimmed.toLowerCase();
	// Use Object.hasOwn so properties inherited from Object.prototype (e.g. a raw
	// value of "constructor" or "Constructor") fall through to the passthrough
	// branch instead of resolving to a prototype member via a bare `MAP[key]` read.
	return Object.hasOwn(FINISH_REASON_MAP, key) ? FINISH_REASON_MAP[key] : key;
}
81
//#endregion
//#region src/providers/mcp/client.ts
/**
 * Get the effective request options for MCP requests.
 * Priority: config values > MCP_REQUEST_TIMEOUT_MS env var > undefined (SDK default of 60s)
 *
 * @param config - MCP config; reads `timeout`, `resetTimeoutOnProgress`, `maxTotalTimeout`
 * @returns An options object with only the configured keys set, or undefined when
 *          nothing is configured so the SDK's own defaults apply.
 */
function getEffectiveRequestOptions(config) {
	const { resetTimeoutOnProgress, maxTotalTimeout } = config;
	// Config takes priority; env var is the fallback.
	const timeout = config.timeout ?? getEnvInt("MCP_REQUEST_TIMEOUT_MS");
	// Nothing configured (all falsy): return undefined so the SDK default (60s) is used.
	if (!timeout && !resetTimeoutOnProgress && !maxTotalTimeout) {
		return undefined;
	}
	// Only include keys that were actually set (truthy), matching the SDK's expectations.
	return {
		...(timeout ? { timeout } : {}),
		...(resetTimeoutOnProgress ? { resetTimeoutOnProgress } : {}),
		...(maxTotalTimeout ? { maxTotalTimeout } : {}),
	};
}
96
/**
 * Manages connections to one or more MCP (Model Context Protocol) servers and
 * exposes their tools for use during LLM evaluations.
 *
 * Supported transports (chosen from the server config): stdio via explicit
 * command+args, stdio via a local .js/.py script path, and HTTP (Streamable
 * HTTP with fallback to SSE). URL-based servers may authenticate with static
 * headers/query params or OAuth, with proactive and reactive token refresh.
 */
var MCPClient = class {
	// serverKey -> connected MCP SDK Client instance
	clients = /* @__PURE__ */ new Map();
	// serverKey -> tools advertised by that server (after include/exclude filtering)
	tools = /* @__PURE__ */ new Map();
	// MCP section of the provider config (servers, tool filters, debug flags, ...)
	config;
	// serverKey -> transport backing the client (closed on cleanup/refresh)
	transports = /* @__PURE__ */ new Map();
	// serverKey -> OAuth context captured at connect time, used for later refresh
	oauthConfigs = /* @__PURE__ */ new Map();
	// serverKey -> epoch ms when the current OAuth token expires
	tokenExpiresAt = /* @__PURE__ */ new Map();
	// serverKey -> { promise } lock guarding against concurrent token refreshes
	tokenRefreshLocks = /* @__PURE__ */ new Map();
	/** True once at least one server connection has been established. */
	get hasInitialized() {
		return this.clients.size > 0;
	}
	/** Keys of all currently connected servers. */
	get connectedServers() {
		return Array.from(this.clients.keys());
	}
	/**
	 * Check if debug mode is enabled (config takes priority over env var)
	 */
	get isDebugEnabled() {
		return this.config.debug ?? getEnvBool("MCP_DEBUG") ?? false;
	}
	/**
	 * Check if verbose mode is enabled (config takes priority over env var)
	 */
	get isVerboseEnabled() {
		return this.config.verbose ?? getEnvBool("MCP_VERBOSE") ?? false;
	}
	constructor(config) {
		this.config = config;
	}
	/**
	 * Connect to every configured server, sequentially. No-op unless
	 * `config.enabled`. Accepts either `servers` (array) or a single `server`.
	 */
	async initialize() {
		if (!this.config.enabled) return;
		const servers = this.config.servers || (this.config.server ? [this.config.server] : []);
		for (const server of servers) {
			logger.info(`connecting to server ${server.name || server.url || server.path || "default"}`);
			await this.connectToServer(server);
		}
	}
	/**
	 * Connect to a single server, discover its tools, and register the
	 * client/transport/tools under the server's key.
	 *
	 * @param server - One server entry from the MCP config
	 * @throws Error if no transport can be configured, the optional ping fails,
	 *         or any connection step errors (original message is preserved).
	 */
	async connectToServer(server) {
		// Key used across all bookkeeping maps; falls back to "default".
		const serverKey = server.name || server.url || server.path || "default";
		const client = new Client({
			name: "promptfoo-MCP",
			version: "1.0.0",
			description: "Promptfoo MCP client for connecting to MCP servers during LLM evaluations"
		});
		let transport;
		try {
			const requestOptions = getEffectiveRequestOptions(this.config);
			if (server.command && server.args) {
				// Explicit stdio server: spawn the given command.
				const { StdioClientTransport } = await import("@modelcontextprotocol/sdk/client/stdio.js");
				transport = new StdioClientTransport({
					command: server.command,
					args: server.args,
					env: process.env
				});
				await client.connect(transport, requestOptions);
			} else if (server.path) {
				// Local script server: run with node (current executable) or python.
				const isJs = server.path.endsWith(".js");
				const isPy = server.path.endsWith(".py");
				if (!isJs && !isPy) throw new Error("Local server must be a .js or .py file");
				const command = isPy ? process.platform === "win32" ? "python" : "python3" : process.execPath;
				const { StdioClientTransport } = await import("@modelcontextprotocol/sdk/client/stdio.js");
				transport = new StdioClientTransport({
					command,
					args: [server.path],
					env: process.env
				});
				await client.connect(transport, requestOptions);
			} else if (server.url) {
				// Remote HTTP server: resolve auth (OAuth or static) before connecting.
				const renderedServer = renderAuthVars(server);
				let authHeaders = {};
				if (renderedServer.auth?.type === "oauth") {
					const oauthAuth = renderedServer.auth;
					logger.debug("[MCP] Fetching OAuth token");
					const { accessToken, expiresAt } = await getOAuthTokenWithExpiry(oauthAuth, server.url);
					authHeaders = { Authorization: `Bearer ${accessToken}` };
					// Remember enough context to re-run this connect on token refresh.
					this.oauthConfigs.set(serverKey, {
						serverKey,
						serverConfig: server,
						auth: oauthAuth
					});
					this.tokenExpiresAt.set(serverKey, expiresAt);
				} else authHeaders = getAuthHeaders(renderedServer);
				// Auth headers win over user-specified headers on key collision.
				const headers = {
					...server.headers || {},
					...authHeaders
				};
				const queryParams = getAuthQueryParams(renderedServer);
				const serverUrl = applyQueryParams(server.url, queryParams);
				const transportOptions = {};
				if (Object.keys(headers).length > 0) transportOptions.requestInit = { headers };
				const hasOptions = Object.keys(transportOptions).length > 0;
				// Prefer Streamable HTTP; fall back to legacy SSE if it fails.
				try {
					const { StreamableHTTPClientTransport } = await import("@modelcontextprotocol/sdk/client/streamableHttp.js");
					transport = new StreamableHTTPClientTransport(new URL(serverUrl), hasOptions ? transportOptions : void 0);
					await client.connect(transport, requestOptions);
					logger.debug("Connected using Streamable HTTP transport");
				} catch (error) {
					logger.debug(`Failed to connect to MCP server with Streamable HTTP transport ${serverKey}: ${error}`);
					const { SSEClientTransport } = await import("@modelcontextprotocol/sdk/client/sse.js");
					transport = new SSEClientTransport(new URL(serverUrl), hasOptions ? transportOptions : void 0);
					await client.connect(transport, requestOptions);
					logger.debug("Connected using SSE transport");
				}
			} else throw new Error("Either command+args or path or url must be specified for MCP server");
			// Optional liveness check immediately after connecting.
			if (this.config.pingOnConnect) try {
				await client.ping(requestOptions);
				logger.debug(`MCP server ${serverKey} ping successful`);
			} catch (pingError) {
				const pingErrorMessage = pingError instanceof Error ? pingError.message : String(pingError);
				throw new Error(`MCP server ${serverKey} ping failed: ${pingErrorMessage}`);
			}
			// Discover tools, then apply allow-list (config.tools) and
			// deny-list (config.exclude_tools) filters in that order.
			const serverTools = (await client.listTools(void 0, requestOptions))?.tools?.map((tool) => ({
				name: tool.name,
				description: tool.description || "",
				inputSchema: tool.inputSchema
			})) || [];
			let filteredTools = serverTools;
			if (this.config.tools) filteredTools = serverTools.filter((tool) => this.config.tools?.includes(tool.name));
			if (this.config.exclude_tools) filteredTools = filteredTools.filter((tool) => !this.config.exclude_tools?.includes(tool.name));
			this.transports.set(serverKey, transport);
			this.clients.set(serverKey, client);
			this.tools.set(serverKey, filteredTools);
			if (this.isVerboseEnabled) console.log(`Connected to MCP server ${serverKey} with tools:`, filteredTools.map((tool) => tool.name));
		} catch (error) {
			const errorMessage = error instanceof Error ? error.message : String(error);
			if (this.isDebugEnabled) logger.error(`Failed to connect to MCP server ${serverKey}: ${errorMessage}`);
			throw new Error(`Failed to connect to MCP server ${serverKey}: ${errorMessage}`);
		}
	}
	/** Flattened list of tools across all connected servers. */
	getAllTools() {
		return Array.from(this.tools.values()).flat();
	}
	/**
	 * Proactively refresh OAuth token for a server if it's close to expiration.
	 * Uses a locking mechanism to prevent concurrent refresh attempts.
	 * No-op for servers that don't use OAuth.
	 */
	async refreshOAuthTokenIfNeeded(serverKey) {
		const oauthConfig = this.oauthConfigs.get(serverKey);
		if (!oauthConfig) return;
		await this.refreshOAuthToken(serverKey, oauthConfig, false);
	}
	/**
	 * Token is considered valid only while the server is still connected and
	 * more than 60 seconds (6e4 ms) remain before expiry.
	 */
	hasValidToken(serverKey) {
		const expiresAt = this.tokenExpiresAt.get(serverKey);
		return expiresAt != null && this.clients.has(serverKey) && Date.now() + 6e4 < expiresAt;
	}
	/**
	 * Refresh the OAuth token for a server, serializing concurrent callers.
	 *
	 * @param serverKey - Key of the server to refresh
	 * @param oauthConfig - Stored OAuth context from connect time
	 * @param forceRefresh - When true, refresh even if the token looks valid
	 */
	async refreshOAuthToken(serverKey, oauthConfig, forceRefresh) {
		// Wait out any in-flight refresh; loop because another caller may start
		// a new refresh between our await and the re-check.
		while (true) {
			const existingRefreshPromise = this.tokenRefreshLocks.get(serverKey)?.promise;
			if (!existingRefreshPromise) break;
			logger.debug(`[MCP] Token refresh already in progress for ${serverKey}, waiting...`);
			try {
				await existingRefreshPromise;
				if (this.hasValidToken(serverKey)) return;
				logger.debug(`[MCP] Token still needs refresh for ${serverKey}, refreshing again...`);
			} catch {
				logger.debug(`[MCP] Previous token refresh failed for ${serverKey}, retrying...`);
			}
		}
		if (!forceRefresh && this.hasValidToken(serverKey)) {
			logger.debug(`[MCP] Token for ${serverKey} still valid, no refresh needed`);
			return;
		}
		logger.debug(`[MCP] Refreshing OAuth token for server ${serverKey}`);
		// Publish the lock before awaiting so concurrent callers can join it.
		const refreshLock = { promise: this.performTokenRefresh(serverKey, oauthConfig) };
		this.tokenRefreshLocks.set(serverKey, refreshLock);
		try {
			await refreshLock.promise;
		} finally {
			// Only clear the lock if it is still ours (a newer refresh may exist).
			if (this.tokenRefreshLocks.get(serverKey) === refreshLock) this.tokenRefreshLocks.delete(serverKey);
		}
	}
	/**
	 * Perform the actual token refresh and reconnection.
	 * Tears down the old client/transport (ignoring close errors) and
	 * reconnects, which fetches a fresh token in connectToServer.
	 */
	async performTokenRefresh(serverKey, oauthConfig) {
		const existingTransport = this.transports.get(serverKey);
		const existingClient = this.clients.get(serverKey);
		if (existingTransport) await existingTransport.close().catch(() => {});
		if (existingClient) await existingClient.close().catch(() => {});
		this.clients.delete(serverKey);
		this.transports.delete(serverKey);
		await this.connectToServer(oauthConfig.serverConfig);
		logger.debug(`[MCP] Successfully refreshed OAuth token for server ${serverKey}`);
	}
	/**
	 * Invoke a named tool on the first connected server that advertises it.
	 *
	 * @param name - Tool name to invoke
	 * @param args - Arguments object passed through to the tool
	 * @returns `{ content }` on success, `{ content: "", error }` on call failure
	 * @throws Error if the tool is unknown, or known only on disconnected servers
	 */
	async callTool(name, args) {
		const requestOptions = getEffectiveRequestOptions(this.config);
		const disconnectedServers = [];
		for (const [serverKey, serverTools] of this.tools.entries()) if (serverTools.some((tool) => tool.name === name)) {
			// Best-effort proactive refresh; failures fall through to the next server.
			try {
				await this.refreshOAuthTokenIfNeeded(serverKey);
			} catch (error) {
				const errorMessage = error instanceof Error ? error.message : String(error);
				logger.debug(`[MCP] Failed to refresh OAuth token for ${serverKey}, trying the next matching server: ${errorMessage}`);
			}
			const client = this.clients.get(serverKey);
			if (!client) {
				logger.debug(`[MCP] Server ${serverKey} is not connected, trying the next matching server`);
				disconnectedServers.push(serverKey);
				continue;
			}
			let currentClient = client;
			let retried = false;
			// Loops at most twice: the second pass only after a reactive token refresh.
			while (true) try {
				const result = await currentClient.callTool({
					name,
					arguments: args
				}, void 0, requestOptions);
				// Normalize content to a string: parsed-JSON strings are unwrapped,
				// Buffers stringified, everything else JSON-serialized.
				let content = "";
				if (result?.content) if (typeof result.content === "string") try {
					const parsed = JSON.parse(result.content);
					content = typeof parsed === "string" ? parsed : JSON.stringify(parsed);
				} catch {
					content = result.content;
				}
				else if (Buffer.isBuffer(result.content)) content = result.content.toString();
				else content = JSON.stringify(result.content);
				return { content };
			} catch (error) {
				const errorMessage = error instanceof Error ? error.message : String(error);
				// Heuristic auth-error detection by message substring — note "token"
				// is broad and may match non-auth errors; presumably acceptable since
				// it only gates a single retry. TODO confirm.
				const isAuthError = errorMessage.includes("401") || errorMessage.includes("Unauthorized") || errorMessage.includes("authorization_endpoint") || errorMessage.includes("token");
				const oauthConfig = this.oauthConfigs.get(serverKey);
				if (!retried && isAuthError && oauthConfig) {
					logger.debug(`[MCP] Auth error for ${serverKey}, attempting reactive token refresh`);
					retried = true;
					try {
						await this.refreshOAuthToken(serverKey, oauthConfig, true);
						const newClient = this.clients.get(serverKey);
						if (newClient) {
							currentClient = newClient;
							continue;
						}
					} catch (refreshError) {
						const refreshErrorMsg = refreshError instanceof Error ? refreshError.message : String(refreshError);
						logger.error(`[MCP] Token refresh failed for ${serverKey}: ${refreshErrorMsg}`);
					}
				}
				if (this.isDebugEnabled) logger.error(`Error calling tool ${name}: ${errorMessage}`);
				return {
					content: "",
					error: errorMessage
				};
			}
		}
		if (disconnectedServers.length > 0) {
			const plural = disconnectedServers.length > 1 ? "s are" : " is";
			throw new Error(`Tool ${name} is known but MCP server${plural} disconnected: ${disconnectedServers.join(", ")}`);
		}
		throw new Error(`Tool ${name} not found in any connected MCP server`);
	}
	/**
	 * Close every transport and client (logging errors only in debug mode),
	 * then reset all bookkeeping maps.
	 */
	async cleanup() {
		for (const [serverKey, client] of this.clients.entries()) try {
			const transport = this.transports.get(serverKey);
			if (transport) await transport.close();
			await client.close();
		} catch (error) {
			if (this.isDebugEnabled) logger.error(`Error during cleanup: ${error instanceof Error ? error.message : String(error)}`);
		}
		this.clients.clear();
		this.transports.clear();
		this.tools.clear();
		this.oauthConfigs.clear();
		this.tokenExpiresAt.clear();
		this.tokenRefreshLocks.clear();
	}
};
361
+ //#endregion
362
+ //#region src/providers/openai/chat.ts
363
+ var OpenAiChatCompletionProvider = class OpenAiChatCompletionProvider extends OpenAiGenericProvider {
364
+ static OPENAI_CHAT_MODELS = OPENAI_CHAT_MODELS;
365
+ static OPENAI_CHAT_MODEL_NAMES = OPENAI_CHAT_MODELS.map((model) => model.id);
366
+ config;
367
+ mcpClient = null;
368
+ initializationPromise = null;
369
+ loadedFunctionCallbacks = {};
370
/**
 * @param modelName - OpenAI chat model id; unknown ids are tolerated (logged at debug)
 * @param options - Provider options; `options.config` becomes this.config
 */
constructor(modelName, options = {}) {
	// Unknown model ids are allowed (proxies, newly released models) — debug log only.
	if (!OpenAiChatCompletionProvider.OPENAI_CHAT_MODEL_NAMES.includes(modelName)) logger.debug(`Using unknown chat model: ${modelName}`);
	super(modelName, options);
	this.config = options.config || {};
	// Kick off MCP connection eagerly; the promise is awaited later (e.g. in cleanup()).
	if (this.config.mcp?.enabled) this.initializationPromise = this.initializeMCP();
}
376
/** Create the MCP client from this.config.mcp and connect to its servers. */
async initializeMCP() {
	this.mcpClient = new MCPClient(this.config.mcp);
	await this.mcpClient.initialize();
}
380
/**
 * Tear down the MCP client, first waiting for any in-flight initialization
 * so we never close a client that is still connecting.
 */
async cleanup() {
	if (this.mcpClient) {
		await this.initializationPromise;
		await this.mcpClient.cleanup();
		this.mcpClient = null;
	}
}
387
/**
 * Loads a function from an external file.
 *
 * @param fileRef The file reference in the format 'file://path/to/file' or
 *   'file://path/to/file:functionName' (named export only honored for JS files)
 * @returns The loaded function
 * @throws Error if the module cannot be loaded or does not export a usable function
 */
async loadExternalFunction(fileRef) {
	// Strip the 7-character "file://" scheme prefix.
	let filePath = fileRef.slice(7);
	let functionName;
	if (filePath.includes(":")) {
		const splits = filePath.split(":");
		// Only treat the colon as a function-name separator when the prefix is a
		// JS file; otherwise leave the path untouched (e.g. Windows drive letters).
		if (splits[0] && isJavascriptFile(splits[0])) [filePath, functionName] = splits;
	}
	try {
		// Relative paths resolve against the global base path when set.
		const resolvedPath = path.resolve(state.basePath || "", filePath);
		logger.debug(`Loading function from ${resolvedPath}${functionName ? `:${functionName}` : ""}`);
		const requiredModule = await importModule(resolvedPath, functionName);
		// Accept either a direct function export or a named property on the module.
		if (typeof requiredModule === "function") return requiredModule;
		else if (requiredModule && typeof requiredModule === "object" && functionName && functionName in requiredModule) {
			const fn = requiredModule[functionName];
			if (typeof fn === "function") return fn;
		}
		throw new Error(`Function callback malformed: ${filePath} must export ${functionName ? `a named function '${functionName}'` : "a function or have a default export as a function"}`);
	} catch (error) {
		// Wrap with the file path for easier debugging; original message preserved.
		throw new Error(`Error loading function from ${filePath}: ${error.message || String(error)}`);
	}
}
413
/**
 * Executes a function callback with proper error handling.
 *
 * Callbacks are resolved (and cached in this.loadedFunctionCallbacks) from
 * config.functionToolCallbacks: a 'file://...' string loads an external module,
 * any other string is evaluated as a JS expression, and a function value is
 * used directly.
 *
 * @param functionName Name of the tool/function being invoked
 * @param args Raw argument string passed to the callback
 * @param config Provider config holding functionToolCallbacks
 * @returns The callback result coerced to a string ("" for null/undefined,
 *   JSON for objects)
 * @throws Error if no callback is found or the callback itself throws
 */
async executeFunctionCallback(functionName, args, config) {
	try {
		// Cache hit: reuse a previously resolved callback.
		let callback = this.loadedFunctionCallbacks[functionName];
		if (!callback) {
			const callbackRef = config.functionToolCallbacks?.[functionName];
			if (callbackRef && typeof callbackRef === "string") {
				const callbackStr = callbackRef;
				if (callbackStr.startsWith("file://")) callback = await this.loadExternalFunction(callbackStr);
				// SECURITY: evaluates arbitrary code from the config string via
				// `new Function` — only safe because configs are author-controlled;
				// never feed untrusted input here.
				else callback = new Function("return " + callbackStr)();
				this.loadedFunctionCallbacks[functionName] = callback;
			} else if (typeof callbackRef === "function") {
				callback = callbackRef;
				this.loadedFunctionCallbacks[functionName] = callback;
			}
		}
		if (!callback) throw new Error(`No callback found for function '${functionName}'`);
		logger.debug(`Executing function '${functionName}' with args: ${args}`);
		const result = await callback(args);
		// Coerce the result to a string for the chat transcript.
		if (result === void 0 || result === null) return "";
		else if (typeof result === "object") try {
			return JSON.stringify(result);
		} catch (error) {
			// e.g. circular structures — fall back to default stringification.
			logger.warn(`Error stringifying result from function '${functionName}': ${error}`);
			return String(result);
		}
		else return String(result);
	} catch (error) {
		logger.error(`Error executing function '${functionName}': ${error.message || String(error)}`);
		throw error;
	}
}
447
/**
 * True when the model name is a GPT-5 family model, either bare ("gpt-5...")
 * or namespaced ("vendor/gpt-5...").
 */
isGPT5Model() {
	return this.modelName.startsWith("gpt-5") || this.modelName.includes("/gpt-5");
}
450
/**
 * True for reasoning-model families (o1/o3/o4, bare or namespaced, and all
 * GPT-5 models), which use different request parameters than standard chat models.
 */
isReasoningModel() {
	return this.modelName.startsWith("o1") || this.modelName.startsWith("o3") || this.modelName.startsWith("o4") || this.modelName.includes("/o1") || this.modelName.includes("/o3") || this.modelName.includes("/o4") || this.isGPT5Model();
}
453
+ supportsTemperature() {
454
+ return !this.isReasoningModel();
455
+ }
456
/**
 * Builds the JSON request body for the OpenAI chat-completions endpoint.
 *
 * Precedence for sampling parameters is: per-prompt config over provider
 * config over OPENAI_* environment variables over hard-coded defaults
 * (1024 max_tokens, temperature 0) — unless `config.omitDefaults` is set,
 * in which case unset values are omitted from the body entirely.
 * Reasoning models get `max_completion_tokens`/`reasoning_effort` instead
 * of `max_tokens`/`temperature`.
 *
 * @param {string} prompt - Raw prompt; parsed as a chat message list, falling
 *   back to a single user message containing the prompt verbatim.
 * @param {object} [context] - Eval context (per-prompt config, vars).
 * @param {object} [callApiOptions] - Per-call options (e.g. includeLogProbs).
 * @returns {Promise<{body: object, config: object}>} The request body plus
 *   the merged config it was derived from.
 */
async getOpenAiBody(prompt, context, callApiOptions) {
  // Per-prompt config overrides provider-level config.
  const config = {
    ...this.config,
    ...context?.prompt?.config
  };
  const messages = parseChatPrompt(prompt, [{
    role: "user",
    content: prompt
  }]);
  const isReasoningModel = this.isReasoningModel();
  const isGPT5Model = this.isGPT5Model();
  // Reasoning models use max_completion_tokens; others use max_tokens.
  const maxCompletionTokens = isReasoningModel ? config.max_completion_tokens ?? getEnvInt("OPENAI_MAX_COMPLETION_TOKENS") : void 0;
  // With omitDefaults, only honor the env var when it is actually set;
  // otherwise fall back to 1024.
  const maxTokensDefault = config.omitDefaults ? getEnvString("OPENAI_MAX_TOKENS") === void 0 ? void 0 : getEnvInt("OPENAI_MAX_TOKENS") : getEnvInt("OPENAI_MAX_TOKENS", 1024);
  const maxTokens = isReasoningModel || isGPT5Model ? void 0 : config.max_tokens ?? maxTokensDefault;
  // Same omitDefaults pattern for temperature (default 0).
  const temperatureDefault = config.omitDefaults ? getEnvString("OPENAI_TEMPERATURE") === void 0 ? void 0 : getEnvFloat("OPENAI_TEMPERATURE") : getEnvFloat("OPENAI_TEMPERATURE", 0);
  const temperature = this.supportsTemperature() ? config.temperature ?? temperatureDefault : void 0;
  // reasoning_effort may contain template vars; render them here.
  const reasoningEffort = isReasoningModel ? renderVarsInObject(config.reasoning_effort, context?.vars) : void 0;
  // Tools come from two sources: the attached MCP client and config.tools
  // (which may be an external-file reference).
  const mcpTools = this.mcpClient ? transformMCPToolsToOpenAi(this.mcpClient.getAllTools()) : [];
  const fileTools = transformTools(config.tools ? await maybeLoadToolsFromExternalFile(config.tools, context?.vars) || [] : [], "openai");
  const allTools = [...mcpTools, ...fileTools];
  // Conditional spreads keep unset parameters out of the body so the API
  // never sees explicit undefined/null values.
  const body = {
    model: this.modelName,
    messages,
    seed: config.seed,
    ...maxTokens === void 0 ? {} : { max_tokens: maxTokens },
    ...maxCompletionTokens === void 0 ? {} : { max_completion_tokens: maxCompletionTokens },
    ...reasoningEffort ? { reasoning_effort: reasoningEffort } : {},
    ...temperature === void 0 ? {} : { temperature },
    ...config.top_p !== void 0 || getEnvString("OPENAI_TOP_P") ? { top_p: config.top_p ?? getEnvFloat("OPENAI_TOP_P", 1) } : {},
    ...config.presence_penalty !== void 0 || getEnvString("OPENAI_PRESENCE_PENALTY") ? { presence_penalty: config.presence_penalty ?? getEnvFloat("OPENAI_PRESENCE_PENALTY", 0) } : {},
    ...config.frequency_penalty !== void 0 || getEnvString("OPENAI_FREQUENCY_PENALTY") ? { frequency_penalty: config.frequency_penalty ?? getEnvFloat("OPENAI_FREQUENCY_PENALTY", 0) } : {},
    ...config.functions ? { functions: maybeLoadFromExternalFileWithVars(config.functions, context?.vars) } : {},
    ...config.function_call ? { function_call: config.function_call } : {},
    ...allTools.length > 0 ? { tools: allTools } : {},
    ...config.tool_choice ? { tool_choice: transformToolChoice(config.tool_choice, "openai") } : {},
    ...config.tool_resources ? { tool_resources: config.tool_resources } : {},
    ...config.response_format ? { response_format: maybeLoadResponseFormatFromExternalFile(config.response_format, context?.vars) } : {},
    ...callApiOptions?.includeLogProbs ? { logprobs: callApiOptions.includeLogProbs } : {},
    ...config.stop ? { stop: config.stop } : {},
    // passthrough lets callers inject arbitrary extra body fields.
    ...config.passthrough || {},
    // Audio-capable models default to text+audio output with a wav "alloy" voice.
    ...this.modelName.includes("audio") ? {
      modalities: config.modalities || ["text", "audio"],
      audio: config.audio || {
        voice: "alloy",
        format: "wav"
      }
    } : {},
    ...isGPT5Model && config.verbosity ? { verbosity: config.verbosity } : {}
  };
  // gpt-oss models also accept reasoning_effort even though they are not
  // classified as reasoning models above.
  if (config.reasoning_effort && (isReasoningModel || this.modelName.includes("gpt-oss"))) body.reasoning_effort = config.reasoning_effort;
  // The `reasoning` object is only forwarded for o1/o3/o4 models (not GPT-5).
  if (config.reasoning && (this.modelName.startsWith("o1") || this.modelName.startsWith("o3") || this.modelName.startsWith("o4") || this.modelName.includes("/o1") || this.modelName.includes("/o3") || this.modelName.includes("/o4"))) body.reasoning = config.reasoning;
  if (config.service_tier) body.service_tier = config.service_tier;
  if (config.user) body.user = config.user;
  if (config.metadata) body.metadata = config.metadata;
  // store may legitimately be false, so check for undefined explicitly.
  if (config.store !== void 0) body.store = config.store;
  return {
    body,
    config
  };
}
516
/**
 * Public entry point for a chat-completion call.
 *
 * Waits for any in-flight provider initialization, validates the API key,
 * then delegates to `callApiInternal` wrapped in a GenAI tracing span.
 * The span records model/sampling parameters up front and, via
 * `resultExtractor`, token usage, finish reasons, cache hits, and the
 * response body once the call completes.
 *
 * @param {string} prompt - Raw prompt text.
 * @param {object} [context] - Eval context (test metadata, trace parent, ...).
 * @param {object} [callApiOptions] - Per-call options.
 * @returns {Promise<object>} The provider response from `callApiInternal`.
 * @throws If an API key is required but not configured.
 */
async callApi(prompt, context, callApiOptions) {
  // Ensure async setup (e.g. MCP connection) has finished before calling out.
  if (this.initializationPromise != null) await this.initializationPromise;
  if (this.requiresApiKey() && !this.getApiKey()) throw new Error(this.getMissingApiKeyErrorMessage());
  // Static span attributes, captured before the request is made.
  const spanContext = {
    system: "openai",
    operationName: "chat",
    model: this.modelName,
    providerId: this.id(),
    maxTokens: this.config.max_tokens,
    temperature: this.config.temperature,
    topP: this.config.top_p,
    stopSequences: this.config.stop,
    evalId: context?.evaluationId || context?.test?.metadata?.evaluationId,
    testIndex: context?.test?.vars?.__testIdx,
    promptLabel: context?.prompt?.label,
    traceparent: context?.traceparent,
    requestBody: prompt
  };
  // Maps the provider response onto span result attributes.
  const resultExtractor = (response) => {
    const result = {};
    if (response.tokenUsage) result.tokenUsage = {
      prompt: response.tokenUsage.prompt,
      completion: response.tokenUsage.completion,
      total: response.tokenUsage.total,
      cached: response.tokenUsage.cached,
      completionDetails: {
        reasoning: response.tokenUsage.completionDetails?.reasoning,
        acceptedPrediction: response.tokenUsage.completionDetails?.acceptedPrediction,
        rejectedPrediction: response.tokenUsage.completionDetails?.rejectedPrediction
      }
    };
    if (response.finishReason) result.finishReasons = [response.finishReason];
    if (response.cached !== void 0) result.cacheHit = response.cached;
    // Non-string outputs (tool calls, parsed JSON) are serialized for the span.
    if (response.output !== void 0) result.responseBody = typeof response.output === "string" ? response.output : JSON.stringify(response.output);
    return result;
  };
  return withGenAISpan(spanContext, () => this.callApiInternal(prompt, context, callApiOptions), resultExtractor);
}
554
/**
 * Internal implementation of callApi without tracing wrapper.
 * This is called by callApi after setting up the tracing span.
 *
 * Flow: build the request body, POST to `{apiUrl}/chat/completions` through
 * the shared response cache, then map the first choice onto the provider
 * result shape, handling (in order): HTTP/network errors, refusals and
 * content filtering, reasoning output, function/tool calls (MCP tools and
 * configured callbacks), audio responses, and plain text.
 *
 * On any error after a cached response was used, the cache entry is evicted
 * via `deleteFromCache` so a bad payload is not replayed.
 *
 * @param {string} prompt - Raw prompt text.
 * @param {object} [context] - Eval context (vars, cache-busting flags, ...).
 * @param {object} [callApiOptions] - Per-call options.
 * @returns {Promise<object>} Provider result: `{ output, tokenUsage, cached,
 *   latencyMs, ... }` on success, or `{ error, metadata }` on failure.
 */
async callApiInternal(prompt, context, callApiOptions) {
  const { body, config } = await this.getOpenAiBody(prompt, context, callApiOptions);
  let data;
  let status;
  let statusText;
  let cached = false;
  let latencyMs;
  let deleteFromCache;
  let responseHeaders;
  try {
    // fetchWithCache returns the parsed JSON plus cache/latency metadata;
    // context.bustCache / context.debug force a fresh request.
    ({data, cached, status, statusText, latencyMs, deleteFromCache, headers: responseHeaders} = await fetchWithCache(`${this.getApiUrl()}/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        ...this.getApiKey() ? { Authorization: `Bearer ${this.getApiKey()}` } : {},
        ...this.getOrganization() ? { "OpenAI-Organization": this.getOrganization() } : {},
        ...config.headers
      },
      body: JSON.stringify(body)
    }, REQUEST_TIMEOUT_MS, "json", context?.bustCache ?? context?.debug, this.config.maxRetries));
    if (status < 200 || status >= 300) {
      const errorMessage = `API error: ${status} ${statusText}\n${typeof data === "string" ? data : JSON.stringify(data)}`;
      // "invalid_prompt" means the input itself was rejected by the provider's
      // safety system: report it as a refusal rather than a hard error.
      if (typeof data === "object" && data?.error?.code === "invalid_prompt") return {
        output: errorMessage,
        tokenUsage: data?.usage ? getTokenUsage(data, cached) : void 0,
        latencyMs,
        isRefusal: true,
        guardrails: {
          flagged: true,
          flaggedInput: true
        },
        metadata: { http: {
          status,
          statusText,
          headers: responseHeaders ?? {}
        } }
      };
      return {
        error: errorMessage,
        metadata: { http: {
          status,
          statusText,
          headers: responseHeaders ?? {}
        } }
      };
    }
  } catch (err) {
    // Network/transport failure: evict any cache entry and surface the error.
    logger.error(`API call error: ${String(err)}`);
    await deleteFromCache?.();
    return {
      error: `API call error: ${String(err)}`,
      metadata: { http: {
        status: 0,
        statusText: "Error",
        headers: responseHeaders ?? {}
      } }
    };
  }
  try {
    // Only the first choice is consumed; extra choices are attached to
    // metadata at the end.
    const message = data.choices[0].message;
    const finishReason = normalizeFinishReason(data.choices[0].finish_reason);
    const contentFiltered = finishReason === FINISH_REASON_MAP.content_filter;
    // Explicit model refusal.
    if (message.refusal) return {
      output: message.refusal,
      tokenUsage: getTokenUsage(data, cached),
      cached,
      latencyMs,
      isRefusal: true,
      ...finishReason && { finishReason },
      guardrails: { flagged: true },
      metadata: { http: {
        status,
        statusText,
        headers: responseHeaders ?? {}
      } }
    };
    // Output truncated/suppressed by the provider's content filter.
    if (contentFiltered) return {
      output: message.content || "Content filtered by provider",
      tokenUsage: getTokenUsage(data, cached),
      cached,
      latencyMs,
      isRefusal: true,
      finishReason: FINISH_REASON_MAP.content_filter,
      guardrails: { flagged: true },
      metadata: { http: {
        status,
        statusText,
        headers: responseHeaders ?? {}
      } }
    };
    let reasoning = "";
    let output = "";
    // Braceless if/else chain below — the bindings are:
    //   1. message.reasoning present            -> keep reasoning + content;
    //   2. content AND function/tool calls      -> inner if: empty tool_calls
    //      array means plain content, otherwise return the whole message;
    //   3. no content (null/undefined/"" with tool_calls) -> the calls only;
    //   4. otherwise                            -> plain content.
    if (message.reasoning) {
      reasoning = message.reasoning;
      output = message.content;
    } else if (message.content && (message.function_call || message.tool_calls)) if (Array.isArray(message.tool_calls) && message.tool_calls.length === 0) output = message.content;
    else output = message;
    else if (message.content === null || message.content === void 0 || message.content === "" && message.tool_calls) output = message.function_call || message.tool_calls;
    else output = message.content;
    const logProbs = data.choices[0].logprobs?.content?.map((logProbObj) => logProbObj.logprob);
    // json_schema responses arrive as a JSON string; parse best-effort.
    if (config.response_format?.type === "json_schema" && typeof output === "string") try {
      output = JSON.parse(output);
    } catch (error) {
      logger.error(`Failed to parse JSON output: ${error}`);
    }
    // Prepend chain-of-thought unless showThinking is explicitly disabled.
    if (reasoning && (this.config.showThinking ?? true)) output = `Thinking: ${reasoning}\n\n${output}`;
    const functionCalls = message.function_call ? [message.function_call] : message.tool_calls;
    if (functionCalls && (config.functionToolCallbacks || this.mcpClient)) {
      const results = [];
      let hasSuccessfulCallback = false;
      for (const functionCall of functionCalls) {
        // Legacy function_call has .name; tool_calls nest it under .function.
        const functionName = functionCall.name || functionCall.function?.name;
        if (this.mcpClient) {
          // MCP tools take priority over configured callbacks of the same name.
          if (this.mcpClient.getAllTools().find((tool) => tool.name === functionName)) try {
            const args = functionCall.arguments || functionCall.function?.arguments || "{}";
            const parsedArgs = typeof args === "string" ? JSON.parse(args) : args;
            const mcpResult = await this.mcpClient.callTool(functionName, parsedArgs);
            if (mcpResult?.error) results.push(`MCP Tool Error (${functionName}): ${mcpResult.error}`);
            else {
              // Flatten MCP content (string | array of parts | object) to text.
              const normalizeContent = (content) => {
                if (content == null) return "";
                if (typeof content === "string") return content;
                if (Array.isArray(content)) return content.map((part) => {
                  if (typeof part === "string") return part;
                  if (part && typeof part === "object") {
                    if ("text" in part && part.text != null) return String(part.text);
                    if ("json" in part) return JSON.stringify(part.json);
                    if ("data" in part) return JSON.stringify(part.data);
                    return JSON.stringify(part);
                  }
                  return String(part);
                }).join("\n");
                return JSON.stringify(content);
              };
              const content = normalizeContent(mcpResult?.content);
              results.push(`MCP Tool Result (${functionName}): ${content}`);
            }
            // MCP errors still count as "handled" — the result text carries them.
            hasSuccessfulCallback = true;
            continue;
          } catch (error) {
            logger.debug(`MCP tool execution failed for ${functionName}: ${error}`);
            results.push(`MCP Tool Error (${functionName}): ${error}`);
            hasSuccessfulCallback = true;
            continue;
          }
        }
        if (config.functionToolCallbacks && config.functionToolCallbacks[functionName]) try {
          const functionResult = await this.executeFunctionCallback(functionName, functionCall.arguments || functionCall.function?.arguments, config);
          results.push(functionResult);
          hasSuccessfulCallback = true;
        } catch (error) {
          // Unlike MCP failures, a callback failure abandons all callback
          // results and falls through to the raw model output.
          logger.debug(`Function callback failed for ${functionName} with error ${error}, falling back to original output`);
          hasSuccessfulCallback = false;
          break;
        }
      }
      if (hasSuccessfulCallback && results.length > 0) return {
        output: results.join("\n"),
        tokenUsage: getTokenUsage(data, cached),
        cached,
        latencyMs,
        logProbs,
        ...finishReason && { finishReason },
        cost: calculateOpenAICost(this.modelName, config, data.usage?.prompt_tokens, data.usage?.completion_tokens, data.usage?.audio_prompt_tokens, data.usage?.audio_completion_tokens),
        guardrails: { flagged: contentFiltered },
        metadata: { http: {
          status,
          statusText,
          headers: responseHeaders ?? {}
        } }
      };
    }
    // Some providers expose thinking as reasoning_content instead of reasoning.
    if (message.reasoning_content && typeof message.reasoning_content === "string" && typeof output === "string" && (this.config.showThinking ?? true)) output = `Thinking: ${message.reasoning_content}\n\n${output}`;
    // Audio responses: transcript becomes the text output, raw audio attached.
    if (message.audio) return {
      output: message.audio.transcript || "",
      audio: {
        id: message.audio.id,
        expiresAt: message.audio.expires_at,
        data: message.audio.data,
        transcript: message.audio.transcript,
        format: message.audio.format || "wav"
      },
      tokenUsage: getTokenUsage(data, cached),
      cached,
      latencyMs,
      logProbs,
      ...finishReason && { finishReason },
      cost: calculateOpenAICost(this.modelName, config, data.usage?.prompt_tokens, data.usage?.completion_tokens, data.usage?.audio_prompt_tokens, data.usage?.audio_completion_tokens),
      guardrails: { flagged: contentFiltered },
      metadata: { http: {
        status,
        statusText,
        headers: responseHeaders ?? {}
      } }
    };
    // Default path: plain text (or tool-call) output.
    return {
      output,
      tokenUsage: getTokenUsage(data, cached),
      cached,
      latencyMs,
      logProbs,
      ...finishReason && { finishReason },
      cost: calculateOpenAICost(this.modelName, config, data.usage?.prompt_tokens, data.usage?.completion_tokens, data.usage?.audio_prompt_tokens, data.usage?.audio_completion_tokens),
      guardrails: { flagged: contentFiltered },
      metadata: {
        http: {
          status,
          statusText,
          headers: responseHeaders ?? {}
        },
        ...data.choices.length > 1 && { choices: data.choices }
      }
    };
  } catch (err) {
    // Malformed/unexpected response shape: evict cache and report raw data.
    await deleteFromCache?.();
    return {
      error: `API error: ${String(err)}: ${JSON.stringify(data)}`,
      metadata: { http: {
        status,
        statusText,
        headers: responseHeaders ?? {}
      } }
    };
  }
}
783
+ };
784
+ //#endregion
785
+ export { normalizeFinishReason as i, MCPClient as n, FINISH_REASON_MAP as r, OpenAiChatCompletionProvider as t };
786
+
787
+ //# sourceMappingURL=chat-CcDgZFJ4.js.map