@use-lattice/litmus 0.121.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (199) hide show
  1. package/LICENSE +19 -0
  2. package/dist/src/accounts-Bt1oJb1Z.cjs +219 -0
  3. package/dist/src/accounts-DjOU8Rm3.js +178 -0
  4. package/dist/src/agentic-utils-D03IiXQc.js +153 -0
  5. package/dist/src/agentic-utils-Dh7xaMQM.cjs +180 -0
  6. package/dist/src/agents-C6BIMlZa.js +231 -0
  7. package/dist/src/agents-DvIpNX1L.cjs +666 -0
  8. package/dist/src/agents-ZP0RP9vV.cjs +231 -0
  9. package/dist/src/agents-maJXdjbR.js +665 -0
  10. package/dist/src/aimlapi-BTbQjG2E.cjs +30 -0
  11. package/dist/src/aimlapi-CwMxqfXP.js +30 -0
  12. package/dist/src/audio-BBUdvsde.cjs +97 -0
  13. package/dist/src/audio-D5DPZ7I-.js +97 -0
  14. package/dist/src/base-BEysXrkq.cjs +222 -0
  15. package/dist/src/base-C451JQfq.js +193 -0
  16. package/dist/src/blobs-BY8MDmpo.js +230 -0
  17. package/dist/src/blobs-BgcNn97m.cjs +256 -0
  18. package/dist/src/cache-BBE_lsTA.cjs +4 -0
  19. package/dist/src/cache-BkrqU5Ba.js +237 -0
  20. package/dist/src/cache-DsCxFlsZ.cjs +297 -0
  21. package/dist/src/chat-CPJWDP6a.cjs +289 -0
  22. package/dist/src/chat-CXX3xzkk.cjs +811 -0
  23. package/dist/src/chat-CcDgZFJ4.js +787 -0
  24. package/dist/src/chat-Dz5ZeGO2.js +289 -0
  25. package/dist/src/chatkit-Dw0mKkML.cjs +1158 -0
  26. package/dist/src/chatkit-swAIVuea.js +1157 -0
  27. package/dist/src/chunk-DEq-mXcV.js +15 -0
  28. package/dist/src/claude-agent-sdk-BXZJtOg6.js +379 -0
  29. package/dist/src/claude-agent-sdk-CkfyjDoG.cjs +383 -0
  30. package/dist/src/cloudflare-ai-BzpJcqUH.js +161 -0
  31. package/dist/src/cloudflare-ai-Cmy_R1y2.cjs +161 -0
  32. package/dist/src/cloudflare-gateway-B9tVQKok.cjs +272 -0
  33. package/dist/src/cloudflare-gateway-DrD3ew3H.js +272 -0
  34. package/dist/src/codex-sdk-Dezj9Nwm.js +1056 -0
  35. package/dist/src/codex-sdk-Dl9D4k5B.cjs +1060 -0
  36. package/dist/src/cometapi-C-9YvCHC.js +54 -0
  37. package/dist/src/cometapi-DHgDKoO2.cjs +54 -0
  38. package/dist/src/completion-B8Ctyxpr.js +120 -0
  39. package/dist/src/completion-Cxrt08sj.cjs +131 -0
  40. package/dist/src/createHash-BwgE13yv.cjs +27 -0
  41. package/dist/src/createHash-DmPQkvBh.js +15 -0
  42. package/dist/src/docker-BiqcTwLv.js +80 -0
  43. package/dist/src/docker-C7tEJnP-.cjs +80 -0
  44. package/dist/src/esm-C62Zofr1.cjs +409 -0
  45. package/dist/src/esm-DMVc93eh.js +379 -0
  46. package/dist/src/evalResult-C3NJPQOo.cjs +301 -0
  47. package/dist/src/evalResult-C7JJAPBb.js +295 -0
  48. package/dist/src/evalResult-DoVTZZWI.cjs +2 -0
  49. package/dist/src/extractor-DnMD3fwt.cjs +391 -0
  50. package/dist/src/extractor-DtlL28vL.js +374 -0
  51. package/dist/src/fetch-BTxakTSg.cjs +1133 -0
  52. package/dist/src/fetch-DQckpUFz.js +928 -0
  53. package/dist/src/fileExtensions-DnqA1y9x.js +85 -0
  54. package/dist/src/fileExtensions-bYh77CN8.cjs +114 -0
  55. package/dist/src/genaiTracer-CyZrmaK0.cjs +268 -0
  56. package/dist/src/genaiTracer-D3fD9dNV.js +256 -0
  57. package/dist/src/graders-BNscxFrU.js +13644 -0
  58. package/dist/src/graders-D2oE9Msq.js +2 -0
  59. package/dist/src/graders-c0Ez_w-9.cjs +2 -0
  60. package/dist/src/graders-d0F2M3e9.cjs +14056 -0
  61. package/dist/src/image-0ZhE0VlR.cjs +280 -0
  62. package/dist/src/image-CWE1pdNv.js +257 -0
  63. package/dist/src/image-D9ZK6hwL.js +163 -0
  64. package/dist/src/image-DKZgZITg.cjs +163 -0
  65. package/dist/src/index.cjs +11366 -0
  66. package/dist/src/index.d.cts +19640 -0
  67. package/dist/src/index.d.ts +19641 -0
  68. package/dist/src/index.js +11306 -0
  69. package/dist/src/invariant-Ddh24eXh.js +25 -0
  70. package/dist/src/invariant-kfQ8Bu82.cjs +30 -0
  71. package/dist/src/knowledgeBase-BgPyGFUd.cjs +122 -0
  72. package/dist/src/knowledgeBase-DyHilYaP.js +122 -0
  73. package/dist/src/litellm-CyMeneHS.js +135 -0
  74. package/dist/src/litellm-DWDF73yF.cjs +135 -0
  75. package/dist/src/logger-C40ZGil9.js +717 -0
  76. package/dist/src/logger-DyfK9PBt.cjs +917 -0
  77. package/dist/src/luma-ray-BAU9X_ep.cjs +315 -0
  78. package/dist/src/luma-ray-nwVseBbv.js +313 -0
  79. package/dist/src/messages-B5ADWTTv.js +245 -0
  80. package/dist/src/messages-BCnZfqrS.cjs +257 -0
  81. package/dist/src/meteor-DLZZ3osF.cjs +134 -0
  82. package/dist/src/meteor-DUiCJRC-.js +134 -0
  83. package/dist/src/modelslab-00cveB8L.cjs +163 -0
  84. package/dist/src/modelslab-D9sCU_L7.js +163 -0
  85. package/dist/src/nova-reel-CTapvqYH.js +276 -0
  86. package/dist/src/nova-reel-DlWuuroF.cjs +278 -0
  87. package/dist/src/nova-sonic-5UPWfeMv.cjs +363 -0
  88. package/dist/src/nova-sonic-BhSwQNym.js +363 -0
  89. package/dist/src/openai-BWrJK9d8.cjs +52 -0
  90. package/dist/src/openai-DumO8WQn.js +47 -0
  91. package/dist/src/openclaw-B8brrjC_.cjs +577 -0
  92. package/dist/src/openclaw-Bkayww9q.js +571 -0
  93. package/dist/src/opencode-sdk-7xjoDNiM.cjs +562 -0
  94. package/dist/src/opencode-sdk-SGwAPxht.js +558 -0
  95. package/dist/src/otlpReceiver-CoAHfAN9.cjs +15 -0
  96. package/dist/src/otlpReceiver-oO3EQwI9.js +14 -0
  97. package/dist/src/providerRegistry-4yjhaEM8.js +45 -0
  98. package/dist/src/providerRegistry-DhV4rJIc.cjs +50 -0
  99. package/dist/src/providers-B5RJVG-7.cjs +33609 -0
  100. package/dist/src/providers-BdmZCLzV.js +33262 -0
  101. package/dist/src/providers-CxtRxn8e.js +2 -0
  102. package/dist/src/providers-DnQLNbx1.cjs +3 -0
  103. package/dist/src/pythonUtils-BD0druiM.cjs +275 -0
  104. package/dist/src/pythonUtils-IBhn5YGR.js +249 -0
  105. package/dist/src/quiverai-BDOwZBsM.cjs +213 -0
  106. package/dist/src/quiverai-D3JTF5lD.js +213 -0
  107. package/dist/src/responses-B2LCDCXZ.js +667 -0
  108. package/dist/src/responses-BvNm4Xv9.cjs +685 -0
  109. package/dist/src/rubyUtils-B0NwnfpY.cjs +245 -0
  110. package/dist/src/rubyUtils-BroxzZ7c.cjs +2 -0
  111. package/dist/src/rubyUtils-hqVw5UvJ.js +222 -0
  112. package/dist/src/sagemaker-Cno2V-Sx.js +689 -0
  113. package/dist/src/sagemaker-fV_KUgs5.cjs +691 -0
  114. package/dist/src/server-BOuAXb06.cjs +238 -0
  115. package/dist/src/server-CtI-EWzm.cjs +2 -0
  116. package/dist/src/server-Cy3DZymt.js +189 -0
  117. package/dist/src/slack-CP8xBePa.js +135 -0
  118. package/dist/src/slack-DSQ1yXVb.cjs +135 -0
  119. package/dist/src/store-BwDDaBjb.cjs +246 -0
  120. package/dist/src/store-DcbLC593.cjs +2 -0
  121. package/dist/src/store-IGpqMIkv.js +240 -0
  122. package/dist/src/tables-3Q2cL7So.cjs +373 -0
  123. package/dist/src/tables-Bi2fjr4W.js +288 -0
  124. package/dist/src/telemetry-Bg2WqF79.js +161 -0
  125. package/dist/src/telemetry-D0x6u5kX.cjs +166 -0
  126. package/dist/src/telemetry-DXNimrI0.cjs +2 -0
  127. package/dist/src/text-B_UCRPp2.js +22 -0
  128. package/dist/src/text-CW1cyrwj.cjs +33 -0
  129. package/dist/src/tokenUsageUtils-NYT-WKS6.js +138 -0
  130. package/dist/src/tokenUsageUtils-bVa1ga6f.cjs +173 -0
  131. package/dist/src/transcription-Cl_W16Pr.js +122 -0
  132. package/dist/src/transcription-yt1EecY8.cjs +124 -0
  133. package/dist/src/transform-BCtGrl_W.cjs +228 -0
  134. package/dist/src/transform-Bv6gG2MJ.cjs +1688 -0
  135. package/dist/src/transform-CY1wbpRy.js +1507 -0
  136. package/dist/src/transform-DU8rUL9P.cjs +2 -0
  137. package/dist/src/transform-yWaShiKr.js +216 -0
  138. package/dist/src/transformersAvailability-BGkzavwb.js +35 -0
  139. package/dist/src/transformersAvailability-DKoRtQLy.cjs +35 -0
  140. package/dist/src/types-5aqHpBwE.cjs +3769 -0
  141. package/dist/src/types-Bn6D9c4U.js +3300 -0
  142. package/dist/src/util-BkKlTkI2.js +293 -0
  143. package/dist/src/util-CTh0bfOm.cjs +1119 -0
  144. package/dist/src/util-D17oBwo7.cjs +328 -0
  145. package/dist/src/util-DsS_-v4p.js +613 -0
  146. package/dist/src/util-DuntT1Ga.js +951 -0
  147. package/dist/src/util-aWjdCYMI.cjs +667 -0
  148. package/dist/src/utils-CisQwpjA.js +94 -0
  149. package/dist/src/utils-yWamDvmz.cjs +123 -0
  150. package/dist/tsconfig.tsbuildinfo +1 -0
  151. package/drizzle/0000_lush_hellion.sql +36 -0
  152. package/drizzle/0001_wide_calypso.sql +3 -0
  153. package/drizzle/0002_tidy_juggernaut.sql +1 -0
  154. package/drizzle/0003_lively_naoko.sql +8 -0
  155. package/drizzle/0004_minor_peter_quill.sql +19 -0
  156. package/drizzle/0005_silky_millenium_guard.sql +2 -0
  157. package/drizzle/0006_harsh_caretaker.sql +42 -0
  158. package/drizzle/0007_cloudy_wong.sql +1 -0
  159. package/drizzle/0008_broad_boomer.sql +2 -0
  160. package/drizzle/0009_strong_marten_broadcloak.sql +19 -0
  161. package/drizzle/0010_needy_bishop.sql +11 -0
  162. package/drizzle/0011_moaning_millenium_guard.sql +1 -0
  163. package/drizzle/0012_late_marten_broadcloak.sql +2 -0
  164. package/drizzle/0013_previous_dormammu.sql +9 -0
  165. package/drizzle/0014_lazy_captain_universe.sql +2 -0
  166. package/drizzle/0015_zippy_wallop.sql +29 -0
  167. package/drizzle/0016_jazzy_zemo.sql +2 -0
  168. package/drizzle/0017_reflective_praxagora.sql +4 -0
  169. package/drizzle/0018_fat_vanisher.sql +22 -0
  170. package/drizzle/0019_new_clint_barton.sql +8 -0
  171. package/drizzle/0020_skinny_maverick.sql +1 -0
  172. package/drizzle/0021_mysterious_madelyne_pryor.sql +13 -0
  173. package/drizzle/0022_sleepy_ultimo.sql +25 -0
  174. package/drizzle/0023_wooden_mandrill.sql +2 -0
  175. package/drizzle/AGENTS.md +68 -0
  176. package/drizzle/CLAUDE.md +1 -0
  177. package/drizzle/meta/0000_snapshot.json +221 -0
  178. package/drizzle/meta/0001_snapshot.json +214 -0
  179. package/drizzle/meta/0002_snapshot.json +221 -0
  180. package/drizzle/meta/0005_snapshot.json +369 -0
  181. package/drizzle/meta/0006_snapshot.json +638 -0
  182. package/drizzle/meta/0007_snapshot.json +640 -0
  183. package/drizzle/meta/0008_snapshot.json +649 -0
  184. package/drizzle/meta/0009_snapshot.json +554 -0
  185. package/drizzle/meta/0010_snapshot.json +619 -0
  186. package/drizzle/meta/0011_snapshot.json +627 -0
  187. package/drizzle/meta/0012_snapshot.json +639 -0
  188. package/drizzle/meta/0013_snapshot.json +717 -0
  189. package/drizzle/meta/0014_snapshot.json +717 -0
  190. package/drizzle/meta/0015_snapshot.json +897 -0
  191. package/drizzle/meta/0016_snapshot.json +1031 -0
  192. package/drizzle/meta/0018_snapshot.json +1210 -0
  193. package/drizzle/meta/0019_snapshot.json +1165 -0
  194. package/drizzle/meta/0020_snapshot.json +1232 -0
  195. package/drizzle/meta/0021_snapshot.json +1311 -0
  196. package/drizzle/meta/0022_snapshot.json +1481 -0
  197. package/drizzle/meta/0023_snapshot.json +1496 -0
  198. package/drizzle/meta/_journal.json +174 -0
  199. package/package.json +240 -0
@@ -0,0 +1,685 @@
1
+ const require_logger = require("./logger-DyfK9PBt.cjs");
2
+ const require_esm = require("./esm-C62Zofr1.cjs");
3
+ const require_fileExtensions = require("./fileExtensions-bYh77CN8.cjs");
4
+ const require_util = require("./util-CTh0bfOm.cjs");
5
+ const require_fetch = require("./fetch-BTxakTSg.cjs");
6
+ const require_cache = require("./cache-DsCxFlsZ.cjs");
7
+ const require_openai = require("./openai-BWrJK9d8.cjs");
8
+ const require_util$1 = require("./util-aWjdCYMI.cjs");
9
+ let path = require("path");
10
+ path = require_logger.__toESM(path);
11
//#region src/providers/functionCallbackUtils.ts
/**
 * Handles function callback execution for AI providers.
 * Provides a unified way to execute function callbacks across different provider formats.
 *
 * Responsibilities visible here:
 * - resolving a function call (several wire formats) to a registered callback,
 * - routing calls whose names match MCP tools to the MCP client,
 * - loading callbacks from inline source strings or `file://` references,
 * - caching loaded callbacks per function name.
 */
var FunctionCallbackHandler = class {
  // Cache of resolved callbacks, keyed by function name.
  loadedCallbacks = {};
  // Lazily-built Set of MCP tool names; null until first lookup.
  mcpToolNames = null;
  constructor(mcpClient) {
    this.mcpClient = mcpClient;
  }
  /**
   * Processes a function call by executing its callback or returning the original call
   * @param call The function call to process (can be various formats)
   * @param callbacks Configuration mapping function names to callbacks
   * @param context Optional context to pass to the callback
   * @returns The result of processing ({ output, isError })
   */
  async processCall(call, callbacks, context) {
    const info = this.extractFunctionInfo(call);
    // MCP tools take precedence over configured callbacks.
    if (info && this.mcpClient) {
      if (this.mcpToolNames === null) {
        const tools = this.mcpClient.getAllTools();
        this.mcpToolNames = new Set(tools.map((tool) => tool.name));
      }
      if (this.mcpToolNames.has(info.name)) {
        return await this.executeMcpTool(info.name, info.arguments);
      }
    }
    // Fallback output: echo the call (stringified when it is an object).
    const passthrough = typeof call === "string" ? call : JSON.stringify(call);
    if (!info || !callbacks || !callbacks[info.name]) {
      return { output: passthrough, isError: false };
    }
    try {
      const output = await this.executeCallback(info.name, info.arguments || "{}", callbacks, context);
      return { output, isError: false };
    } catch (error) {
      require_logger.logger.debug(`Function callback failed for ${info.name}: ${error}`);
      return { output: passthrough, isError: true };
    }
  }
  /**
   * Processes multiple function calls
   * @param calls Array of calls or a single call
   * @param callbacks Configuration mapping function names to callbacks
   * @param context Optional context to pass to callbacks
   * @param options Processing options (currently unused)
   * @returns Processed output in appropriate format
   */
  async processCalls(calls, callbacks, context, _options) {
    if (!calls) {
      return calls;
    }
    const wasArray = Array.isArray(calls);
    const callList = wasArray ? calls : [calls];
    const results = await Promise.all(callList.map((call) => this.processCall(call, callbacks, context)));
    // A call counts as "handled" when it succeeded and its output differs
    // from the JSON-encoded original call.
    const anyHandled = results.some((r, i) => !r.isError && r.output !== JSON.stringify(callList[i]));
    if (anyHandled) {
      const outputs = results.map((r) => r.output);
      if (!wasArray && outputs.length === 1) {
        return outputs[0];
      }
      // Join string outputs for readability; otherwise return the raw array.
      const allStrings = outputs.every((o) => typeof o === "string");
      return allStrings ? outputs.join("\n") : outputs;
    }
    if (!wasArray && results.length === 1) {
      return results[0].output;
    }
    return calls;
  }
  /**
   * Extracts function name and arguments from various call formats.
   * Supports both `{ name, arguments }` and OpenAI-style
   * `{ type: "function", function: { name, arguments } }` shapes.
   */
  extractFunctionInfo(call) {
    if (!call || typeof call !== "object") {
      return null;
    }
    if (call.name && typeof call.name === "string") {
      return { name: call.name, arguments: call.arguments };
    }
    if (call.type === "function" && call.function?.name) {
      return { name: call.function.name, arguments: call.function.arguments };
    }
    return null;
  }
  /**
   * Executes a function callback, resolving and caching it on first use.
   * String configs are either `file://` references or inline source.
   */
  async executeCallback(functionName, args, callbacks, context) {
    let callback = this.loadedCallbacks[functionName];
    if (!callback) {
      const callbackConfig = callbacks[functionName];
      if (typeof callbackConfig === "string") {
        // SECURITY NOTE: inline string configs are evaluated via `new Function`,
        // i.e. eval-equivalent. Callback configs must come from trusted config only.
        callback = callbackConfig.startsWith("file://")
          ? await this.loadExternalFunction(callbackConfig)
          : new Function("return " + callbackConfig)();
      } else if (typeof callbackConfig === "function") {
        callback = callbackConfig;
      } else {
        throw new Error(`Invalid callback configuration for ${functionName}`);
      }
      this.loadedCallbacks[functionName] = callback;
    }
    const result = await callback(args, context);
    return typeof result === "string" ? result : JSON.stringify(result);
  }
  /**
   * Loads a function from an external file.
   * Supports `file://path.js` and `file://path.js:exportName` forms.
   */
  async loadExternalFunction(fileRef) {
    let filePath = fileRef.slice(7); // strip the "file://" prefix
    let functionName;
    if (filePath.includes(":")) {
      const parts = filePath.split(":");
      // Only treat the suffix as an export name when the prefix is a JS file
      // (avoids mangling paths that merely contain a colon).
      if (parts[0] && require_fileExtensions.isJavascriptFile(parts[0])) {
        [filePath, functionName] = parts;
      }
    }
    try {
      const resolvedPath = path.default.resolve(require_logger.state.basePath || "", filePath);
      require_logger.logger.debug(`Loading function from ${resolvedPath}${functionName ? `:${functionName}` : ""}`);
      const mod = await require_esm.importModule(resolvedPath);
      const func = functionName && mod[functionName] ? mod[functionName] : mod.default || mod;
      if (typeof func !== "function") {
        throw new Error(`Expected ${resolvedPath}${functionName ? `:${functionName}` : ""} to export a function, got ${typeof func}`);
      }
      return func;
    } catch (error) {
      throw new Error(`Failed to load function from ${fileRef}: ${error}`);
    }
  }
  /**
   * Executes an MCP tool and normalizes its content into a string result.
   * Never throws: failures are reported as `{ output, isError: true }`.
   */
  async executeMcpTool(toolName, args) {
    try {
      if (!this.mcpClient) {
        throw new Error("MCP client not available");
      }
      const parsedArgs = args == null || args === "" ? {} : typeof args === "string" ? JSON.parse(args) : args;
      const result = await this.mcpClient.callTool(toolName, parsedArgs);
      if (result?.error) {
        return { output: `MCP Tool Error (${toolName}): ${result.error}`, isError: true };
      }
      // Flattens MCP content (string | array of parts | other) into plain text.
      const normalizeContent = (content) => {
        if (content == null) {
          return "";
        }
        if (typeof content === "string") {
          return content;
        }
        if (!Array.isArray(content)) {
          return JSON.stringify(content);
        }
        const renderPart = (part) => {
          if (typeof part === "string") {
            return part;
          }
          if (part && typeof part === "object") {
            if ("text" in part && part.text != null) {
              return String(part.text);
            }
            if ("json" in part) {
              return JSON.stringify(part.json);
            }
            if ("data" in part) {
              return JSON.stringify(part.data);
            }
            return JSON.stringify(part);
          }
          return String(part);
        };
        return content.map(renderPart).join("\n");
      };
      return { output: `MCP Tool Result (${toolName}): ${normalizeContent(result?.content)}`, isError: false };
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : String(error);
      require_logger.logger.debug(`MCP tool execution failed for ${toolName}: ${errorMessage}`);
      return { output: `MCP Tool Error (${toolName}): ${errorMessage}`, isError: true };
    }
  }
  /**
   * Sets the MCP client, preserving any loaded callbacks.
   * Resets the cached tool-name Set so it is rebuilt on next use.
   */
  setMcpClient(client) {
    this.mcpClient = client;
    this.mcpToolNames = null;
  }
  /**
   * Clears the cached callbacks.
   */
  clearCache() {
    this.loadedCallbacks = {};
  }
};
182
+ //#endregion
183
+ //#region src/providers/responses/processor.ts
184
/**
 * Extract user-facing metadata from response data.
 * Only includes fields that are useful for users viewing eval results.
 *
 * @param data Raw response payload (reads `id` and `model`).
 * @param processedOutput Processed output (reads `annotations`).
 * @returns Object with `responseId`, `model`, and `annotations` keys,
 *   each included only when present and non-empty.
 */
function extractMetadata(data, processedOutput) {
  const metadata = {};
  const { id, model } = data;
  if (typeof id === "string" && id) {
    metadata.responseId = id;
  }
  if (typeof model === "string" && model) {
    metadata.model = model;
  }
  const { annotations } = processedOutput;
  if (Array.isArray(annotations) && annotations.length > 0) {
    metadata.annotations = annotations;
  }
  return metadata;
}
195
/**
 * Extract token usage from response data, handling both OpenAI Chat Completions format
 * (prompt_tokens, completion_tokens) and Azure Responses format (input_tokens, output_tokens).
 *
 * @param data Response payload; only `data.usage` is consulted.
 * @param cached When true, the whole total is reported as cached usage.
 * @returns Token-usage record, or an empty object when no usage is present.
 */
function getTokenUsage(data, cached) {
  const usage = data.usage;
  if (!usage) {
    return {};
  }
  if (cached) {
    // For cached responses every token counts as cached.
    const totalTokens = usage.total_tokens || (usage.input_tokens || 0) + (usage.output_tokens || 0);
    return {
      cached: totalTokens,
      total: totalTokens,
      numRequests: 1
    };
  }
  // `||` (not `??`) is intentional here: a 0 count falls through to the alternate field.
  const promptTokens = usage.prompt_tokens || usage.input_tokens || 0;
  const completionTokens = usage.completion_tokens || usage.output_tokens || 0;
  const tokenUsage = {
    total: usage.total_tokens || promptTokens + completionTokens,
    prompt: promptTokens,
    completion: completionTokens,
    numRequests: 1
  };
  const details = usage.completion_tokens_details;
  if (details) {
    tokenUsage.completionDetails = {
      reasoning: details.reasoning_tokens,
      acceptedPrediction: details.accepted_prediction_tokens,
      rejectedPrediction: details.rejected_prediction_tokens
    };
  }
  return tokenUsage;
}
224
/**
 * Shared response processor for OpenAI and Azure Responses APIs.
 * Handles all response types with identical logic to ensure feature parity.
 *
 * `config` supplies: modelName, providerType, functionCallbackHandler, costCalculator.
 */
var ResponsesProcessor = class {
  constructor(config) {
    this.config = config;
  }
  /**
   * Turns a raw Responses API payload into a provider result
   * ({ output, tokenUsage, cost, raw, metadata, ... } or { error }).
   */
  async processResponseOutput(data, requestConfig, cached) {
    require_logger.logger.debug(`Processing ${this.config.providerType} responses output`, {
      responseId: data.id,
      model: data.model
    });
    if (data.error) {
      return { error: require_util$1.formatOpenAiError(data) };
    }
    try {
      const context = { config: requestConfig, cached, data };
      const processedOutput = await this.processOutput(data.output, context);
      const tokenUsage = getTokenUsage(data, cached);
      const cost = this.config.costCalculator(this.config.modelName, data.usage, requestConfig);
      if (processedOutput.isRefusal) {
        return {
          output: processedOutput.refusal,
          tokenUsage,
          isRefusal: true,
          cached,
          cost,
          raw: data,
          metadata: extractMetadata(data, processedOutput)
        };
      }
      let finalOutput = processedOutput.result;
      // When a JSON schema was requested, try to hand back parsed JSON;
      // on parse failure keep the raw string and log.
      if (requestConfig.response_format?.type === "json_schema" && typeof finalOutput === "string") {
        try {
          finalOutput = JSON.parse(finalOutput);
        } catch (error) {
          require_logger.logger.error(`Failed to parse JSON output: ${error}`);
        }
      }
      const providerResponse = {
        output: finalOutput,
        tokenUsage,
        cached,
        cost,
        raw: data,
        metadata: extractMetadata(data, processedOutput)
      };
      if (processedOutput.annotations && processedOutput.annotations.length > 0) {
        providerResponse.raw = { ...data, annotations: processedOutput.annotations };
      }
      return providerResponse;
    } catch (err) {
      return { error: `Error parsing response: ${String(err)}\nResponse: ${JSON.stringify(data)}` };
    }
  }
  /**
   * Folds the response's output array into a single
   * { result, refusal, isRefusal, annotations } summary.
   * @throws when `output` is missing, not an array, or empty.
   */
  async processOutput(output, context) {
    if (this.config.modelName.includes("deep-research")) {
      require_logger.logger.debug(`Deep research response structure: ${JSON.stringify(context.data, null, 2)}`);
    }
    if (!output || !Array.isArray(output) || output.length === 0) {
      throw new Error("Invalid response format: Missing output array");
    }
    let result = "";
    let refusal = "";
    let isRefusal = false;
    const annotations = [];
    for (const item of output) {
      if (!item || typeof item !== "object") {
        require_logger.logger.warn(`Skipping invalid output item: ${JSON.stringify(item)}`);
        continue;
      }
      const processed = await this.processOutputItem(item, context);
      if (processed.isRefusal) {
        refusal = processed.content || "";
        isRefusal = true;
      } else if (processed.content) {
        // Join non-refusal contents with newlines.
        result = result ? result + "\n" + processed.content : processed.content;
      }
      if (processed.annotations) {
        annotations.push(...processed.annotations);
      }
    }
    return {
      result,
      refusal,
      isRefusal,
      annotations: annotations.length > 0 ? annotations : void 0
    };
  }
  /** Dispatches a single output item to its type-specific handler. */
  async processOutputItem(item, context) {
    switch (item.type) {
      case "function_call":
        return await this.processFunctionCall(item, context);
      case "message":
        return await this.processMessage(item, context);
      case "tool_result":
        return this.processToolResult(item);
      case "reasoning":
        return this.processReasoning(item);
      case "web_search_call":
        return this.processWebSearch(item);
      case "code_interpreter_call":
        return this.processCodeInterpreter(item);
      case "mcp_list_tools":
        return this.processMcpListTools(item);
      case "mcp_call":
        return this.processMcpCall(item);
      case "mcp_approval_request":
        return this.processMcpApprovalRequest(item);
      default:
        require_logger.logger.debug(`Unknown output item type: ${item.type}`);
        return {};
    }
  }
  /** Runs a function_call item through the callback handler. */
  async processFunctionCall(item, context) {
    // A completed call with empty arguments is surfaced as a diagnostic payload
    // instead of being dispatched.
    if (item.arguments === "{}" && item.status === "completed") {
      const note = JSON.stringify({
        type: "function_call",
        name: item.name,
        status: "no_arguments_provided",
        note: "Function called but no arguments were extracted. Consider using the correct Responses API tool format."
      });
      return { content: note };
    }
    const content = await this.config.functionCallbackHandler.processCalls(item, context.config.functionToolCallbacks);
    return { content };
  }
  /** Extracts text, refusals, and annotations from an assistant message item. */
  async processMessage(item, context) {
    if (item.role !== "assistant") {
      return {};
    }
    let content = "";
    let refusal = "";
    let isRefusal = false;
    const annotations = [];
    if (item.content) {
      for (const contentItem of item.content) {
        if (!contentItem || typeof contentItem !== "object") {
          require_logger.logger.warn(`Skipping invalid content item: ${JSON.stringify(contentItem)}`);
          continue;
        }
        if (contentItem.type === "output_text") {
          content += contentItem.text;
          if (Array.isArray(contentItem.annotations) && contentItem.annotations.length > 0) {
            annotations.push(...contentItem.annotations);
          }
        } else if (contentItem.type === "tool_use" || contentItem.type === "function_call") {
          content = await this.config.functionCallbackHandler.processCalls(contentItem, context.config.functionToolCallbacks);
        } else if (contentItem.type === "refusal") {
          refusal = contentItem.refusal;
          isRefusal = true;
        }
      }
    } else if (item.refusal) {
      refusal = item.refusal;
      isRefusal = true;
    }
    return {
      content: isRefusal ? refusal : content,
      isRefusal,
      annotations: annotations.length > 0 ? annotations : void 0
    };
  }
  /** Serializes a tool_result item verbatim. */
  processToolResult(item) {
    return Promise.resolve({ content: JSON.stringify(item) });
  }
  /** Renders reasoning summaries, or nothing when no summary exists. */
  processReasoning(item) {
    if (!item.summary || !item.summary.length) {
      return Promise.resolve({});
    }
    const summaryText = item.summary.map((s) => s.text).join("\n");
    return Promise.resolve({ content: `Reasoning: ${summaryText}` });
  }
  /** Describes a web_search_call action, appending errors on failure. */
  processWebSearch(item) {
    let content;
    const action = item.action;
    if (!action) {
      content = `Web Search Call (status: ${item.status || "unknown"})`;
    } else if (action.type === "search") {
      content = `Web Search: "${action.query}"`;
    } else if (action.type === "open_page") {
      content = `Opening page: ${action.url}`;
    } else if (action.type === "find_in_page") {
      content = `Finding in page: "${action.query}"`;
    } else {
      content = `Web action: ${action.type}`;
    }
    if (item.status === "failed" && item.error) {
      content += ` (Error: ${item.error})`;
    }
    return Promise.resolve({ content });
  }
  /** Describes a code_interpreter_call, appending errors on failure. */
  processCodeInterpreter(item) {
    let content = `Code Interpreter: ${item.code || "Running code..."}`;
    if (item.status === "failed" && item.error) {
      content += ` (Error: ${item.error})`;
    }
    return Promise.resolve({ content });
  }
  /** Lists the tools advertised by an MCP server. */
  processMcpListTools(item) {
    const content = `MCP Tools from ${item.server_label}: ${JSON.stringify(item.tools, null, 2)}`;
    return Promise.resolve({ content });
  }
  /** Renders an MCP tool call result or error. */
  processMcpCall(item) {
    const content = item.error
      ? `MCP Tool Error (${item.name}): ${item.error}`
      : `MCP Tool Result (${item.name}): ${item.output}`;
    return Promise.resolve({ content });
  }
  /** Renders an MCP approval request notice. */
  processMcpApprovalRequest(item) {
    const content = `MCP Approval Required for ${item.server_label}.${item.name}: ${item.arguments}`;
    return Promise.resolve({ content });
  }
};
400
+ //#endregion
401
+ //#region src/providers/openai/responses.ts
402
+ var OpenAiResponsesProvider = class extends require_openai.OpenAiGenericProvider {
403
+ functionCallbackHandler = new FunctionCallbackHandler();
404
+ processor;
405
+ static OPENAI_RESPONSES_MODEL_NAMES = [
406
+ "gpt-4o",
407
+ "gpt-4o-2024-08-06",
408
+ "gpt-4o-2024-11-20",
409
+ "gpt-4o-2024-05-13",
410
+ "gpt-4o-2024-07-18",
411
+ "gpt-4o-mini",
412
+ "gpt-4o-mini-2024-07-18",
413
+ "gpt-4.1",
414
+ "gpt-4.1-2025-04-14",
415
+ "gpt-4.1-mini",
416
+ "gpt-4.1-mini-2025-04-14",
417
+ "gpt-4.1-nano",
418
+ "gpt-4.1-nano-2025-04-14",
419
+ "gpt-5",
420
+ "gpt-5-2025-08-07",
421
+ "gpt-5-chat",
422
+ "gpt-5-chat-latest",
423
+ "gpt-5-nano",
424
+ "gpt-5-nano-2025-08-07",
425
+ "gpt-5-mini",
426
+ "gpt-5-mini-2025-08-07",
427
+ "gpt-5-pro",
428
+ "gpt-5-pro-2025-10-06",
429
+ "gpt-5.1",
430
+ "gpt-5.1-2025-11-13",
431
+ "gpt-5.1-mini",
432
+ "gpt-5.1-nano",
433
+ "gpt-5.1-codex",
434
+ "gpt-5.1-codex-max",
435
+ "gpt-5.1-chat-latest",
436
+ "gpt-5.2",
437
+ "gpt-5.2-2025-12-11",
438
+ "gpt-5.2-chat-latest",
439
+ "gpt-5.2-codex",
440
+ "gpt-5.2-pro",
441
+ "gpt-5.2-pro-2025-12-11",
442
+ "gpt-5.3-chat-latest",
443
+ "gpt-5.3-codex",
444
+ "gpt-5.3-codex-spark",
445
+ "gpt-5.4",
446
+ "gpt-5.4-2026-03-05",
447
+ "gpt-5.4-mini",
448
+ "gpt-5.4-mini-2026-03-17",
449
+ "gpt-5.4-nano",
450
+ "gpt-5.4-nano-2026-03-17",
451
+ "gpt-5.4-pro",
452
+ "gpt-5.4-pro-2026-03-05",
453
+ "gpt-audio",
454
+ "gpt-audio-2025-08-28",
455
+ "gpt-audio-mini",
456
+ "gpt-audio-mini-2025-10-06",
457
+ "computer-use-preview",
458
+ "computer-use-preview-2025-03-11",
459
+ "o1",
460
+ "o1-2024-12-17",
461
+ "o1-preview",
462
+ "o1-preview-2024-09-12",
463
+ "o1-mini",
464
+ "o1-mini-2024-09-12",
465
+ "o1-pro",
466
+ "o1-pro-2025-03-19",
467
+ "o3-pro",
468
+ "o3-pro-2025-06-10",
469
+ "o3",
470
+ "o3-2025-04-16",
471
+ "o4-mini",
472
+ "o4-mini-2025-04-16",
473
+ "o3-mini",
474
+ "o3-mini-2025-01-31",
475
+ "codex-mini-latest",
476
+ "gpt-5-codex",
477
+ "o3-deep-research",
478
+ "o3-deep-research-2025-06-26",
479
+ "o4-mini-deep-research",
480
+ "o4-mini-deep-research-2025-06-26"
481
+ ];
482
+ config;
483
+ constructor(modelName, options = {}) {
484
+ super(modelName, options);
485
+ this.config = options.config || {};
486
+ this.processor = new ResponsesProcessor({
487
+ modelName: this.modelName,
488
+ providerType: "openai",
489
+ functionCallbackHandler: this.functionCallbackHandler,
490
+ costCalculator: (modelName, usage, config) => require_util$1.calculateOpenAICost(modelName, config, usage?.input_tokens, usage?.output_tokens, 0, 0) ?? 0
491
+ });
492
+ }
493
+ isGPT5Model() {
494
+ return this.modelName.startsWith("gpt-5") || this.modelName.includes("/gpt-5");
495
+ }
496
+ isReasoningModel() {
497
+ return this.modelName.startsWith("o1") || this.modelName.startsWith("o3") || this.modelName.startsWith("o4") || this.modelName.includes("/o1") || this.modelName.includes("/o3") || this.modelName.includes("/o4") || this.modelName === "codex-mini-latest" || this.isGPT5Model();
498
+ }
499
+ supportsTemperature() {
500
+ return !this.isReasoningModel();
501
+ }
502
/**
 * Builds the JSON request body for the OpenAI /responses endpoint.
 *
 * @param prompt Raw prompt string. If it parses as a JSON *array* it is sent
 *   as structured `input`; any other content (including non-array JSON) is
 *   sent as a plain string.
 * @param context Optional eval context; `context.prompt.config` overrides
 *   `this.config`, and `context.vars` feeds variable rendering for
 *   reasoning_effort / response_format / tools.
 * @param _callApiOptions Unused here; kept for signature parity with callApi.
 * @returns `{ body, config }` — the request body plus the effective config
 *   (with tools loaded and response_format resolved) for downstream use.
 */
async getOpenAiBody(prompt, context, _callApiOptions) {
	// Per-prompt config wins over the provider-level config.
	const config = {
		...this.config,
		...context?.prompt?.config
	};
	// JSON arrays become structured input; everything else stays a string.
	let input;
	try {
		const parsedJson = JSON.parse(prompt);
		if (Array.isArray(parsedJson)) input = parsedJson;
		else input = prompt;
	} catch {
		input = prompt;
	}
	const isReasoningModel = this.isReasoningModel();
	// With omitDefaults, OPENAI_MAX_TOKENS is only used when the env var is
	// actually set; otherwise a default of 1024 applies.
	const maxOutputTokensDefault = config.omitDefaults ? require_logger.getEnvString("OPENAI_MAX_TOKENS") === void 0 ? void 0 : require_logger.getEnvInt("OPENAI_MAX_TOKENS") : require_logger.getEnvInt("OPENAI_MAX_TOKENS", 1024);
	// Reasoning models prefer OPENAI_MAX_COMPLETION_TOKENS, then OPENAI_MAX_TOKENS.
	const reasoningMaxOutputTokensDefault = require_logger.getEnvInt("OPENAI_MAX_COMPLETION_TOKENS") ?? require_logger.getEnvInt("OPENAI_MAX_TOKENS");
	const maxOutputTokens = config.max_output_tokens ?? (isReasoningModel ? reasoningMaxOutputTokensDefault : maxOutputTokensDefault);
	// Temperature default mirrors the max-tokens logic; it is dropped entirely
	// for reasoning models (see supportsTemperature).
	const temperatureDefault = config.omitDefaults ? require_logger.getEnvString("OPENAI_TEMPERATURE") === void 0 ? void 0 : require_logger.getEnvFloat("OPENAI_TEMPERATURE") : require_logger.getEnvFloat("OPENAI_TEMPERATURE", 0);
	const temperature = this.supportsTemperature() ? config.temperature ?? temperatureDefault : void 0;
	// reasoning_effort may contain template variables; render them here.
	const reasoningEffort = isReasoningModel ? require_util.renderVarsInObject(config.reasoning_effort, context?.vars) : void 0;
	const instructions = config.instructions;
	// response_format may be an external file reference; resolve before use.
	const responseFormat = require_util.maybeLoadResponseFormatFromExternalFile(config.response_format, context?.vars);
	// Translate chat-style response_format into the /responses `text.format` shape.
	let textFormat;
	if (responseFormat) if (responseFormat.type === "json_object") textFormat = { format: { type: "json_object" } };
	else if (responseFormat.type === "json_schema") {
		// Accept both flattened (`schema`) and nested (`json_schema.schema`) layouts.
		const schema = responseFormat.schema || responseFormat.json_schema?.schema;
		textFormat = { format: {
			type: "json_schema",
			name: responseFormat.json_schema?.name || responseFormat.name || "response_schema",
			schema,
			strict: true
		} };
	} else textFormat = { format: { type: "text" } };
	else textFormat = { format: { type: "text" } };
	// GPT-5 models additionally support a text verbosity knob.
	if (this.isGPT5Model() && config.verbosity) textFormat = {
		...textFormat,
		verbosity: config.verbosity
	};
	// Tools may also be external file references; load once and reuse below.
	const loadedTools = config.tools ? await require_util.maybeLoadToolsFromExternalFile(config.tools, context?.vars) : void 0;
	const body = {
		model: this.modelName,
		input,
		...maxOutputTokens === void 0 ? {} : { max_output_tokens: maxOutputTokens },
		...reasoningEffort ? { reasoning: { effort: reasoningEffort } } : {},
		...temperature === void 0 ? {} : { temperature },
		...instructions ? { instructions } : {},
		// top_p is only forwarded when reasoning effort is absent or "none".
		...(!reasoningEffort || reasoningEffort === "none") && (config.top_p !== void 0 || require_logger.getEnvString("OPENAI_TOP_P")) ? { top_p: config.top_p ?? require_logger.getEnvFloat("OPENAI_TOP_P", 1) } : {},
		...loadedTools ? { tools: loadedTools } : {},
		...config.tool_choice ? { tool_choice: config.tool_choice } : {},
		...config.max_tool_calls ? { max_tool_calls: config.max_tool_calls } : {},
		...config.previous_response_id ? { previous_response_id: config.previous_response_id } : {},
		text: textFormat,
		...config.truncation ? { truncation: config.truncation } : {},
		...config.metadata ? { metadata: config.metadata } : {},
		// `in` checks let explicit `false` values be forwarded for booleans.
		..."parallel_tool_calls" in config ? { parallel_tool_calls: Boolean(config.parallel_tool_calls) } : {},
		...config.stream ? { stream: config.stream } : {},
		..."store" in config ? { store: Boolean(config.store) } : {},
		...config.background ? { background: config.background } : {},
		...config.webhook_url ? { webhook_url: config.webhook_url } : {},
		// passthrough is spread last so user-supplied keys override anything above.
		...config.passthrough || {}
	};
	// A full `reasoning` object overrides the effort-only shorthand built above.
	if (config.reasoning && this.isReasoningModel()) body.reasoning = config.reasoning;
	return {
		body,
		config: {
			...config,
			tools: loadedTools,
			response_format: responseFormat
		}
	};
}
574
/**
 * Calls the OpenAI /responses API for this model and normalizes the result.
 *
 * Flow: validate API key → build body → enforce deep-research tool rules →
 * pick a timeout (extended for deep-research / gpt-5-pro) → POST via the
 * shared cache → map HTTP / API errors → post-process output.
 *
 * @param prompt Raw prompt (see getOpenAiBody for parsing rules).
 * @param context Optional eval context (vars, per-prompt config, cache flags).
 * @param callApiOptions Forwarded to getOpenAiBody.
 * @returns A provider response: `{ output | error, tokenUsage?, isRefusal?,
 *   metadata.http: { status, statusText, headers } }`.
 * @throws If an API key is required but not configured.
 */
async callApi(prompt, context, callApiOptions) {
	if (this.requiresApiKey() && !this.getApiKey()) throw new Error(this.getMissingApiKeyErrorMessage());
	const { body, config } = await this.getOpenAiBody(prompt, context, callApiOptions);
	// Deep-research models have hard tool requirements; fail fast with a
	// config hint instead of letting the API reject the request.
	const isDeepResearchModel = this.modelName.includes("deep-research");
	if (isDeepResearchModel) {
		if (!config.tools?.some((tool) => tool.type === "web_search_preview")) return { error: `Deep research model ${this.modelName} requires the web_search_preview tool to be configured. Add it to your provider config:\ntools:\n - type: web_search_preview` };
		const mcpTools = config.tools?.filter((tool) => tool.type === "mcp") || [];
		for (const mcpTool of mcpTools) if (mcpTool.require_approval !== "never") return { error: `Deep research model ${this.modelName} requires MCP tools to have require_approval: 'never'. Update your MCP tool configuration:\ntools:\n - type: mcp\n require_approval: never` };
	}
	// Long-running models get an extended timeout; an explicit eval timeout
	// (PROMPTFOO_EVAL_TIMEOUT_MS > 0) takes precedence over the built-in one.
	let timeout = require_fetch.REQUEST_TIMEOUT_MS;
	const isGpt5ProModel = /(^|\/)gpt-5(?:\.\d+)?-pro(?:-|$)/.test(this.modelName);
	if (isDeepResearchModel || isGpt5ProModel) {
		const evalTimeout = require_logger.getEnvInt("PROMPTFOO_EVAL_TIMEOUT_MS", 0);
		timeout = evalTimeout > 0 ? evalTimeout : require_fetch.LONG_RUNNING_MODEL_TIMEOUT_MS;
		require_logger.logger.debug(`Using timeout of ${timeout}ms for long-running model ${this.modelName}`);
	}
	let data;
	let status;
	let statusText;
	let cached = false;
	// Set by fetchWithCache; lets error paths evict a bad cache entry.
	let deleteFromCache;
	let responseHeaders;
	try {
		({data, cached, status, statusText, deleteFromCache, headers: responseHeaders} = await require_cache.fetchWithCache(`${this.getApiUrl()}/responses`, {
			method: "POST",
			headers: {
				"Content-Type": "application/json",
				...this.getApiKey() ? { Authorization: `Bearer ${this.getApiKey()}` } : {},
				...this.getOrganization() ? { "OpenAI-Organization": this.getOrganization() } : {},
				...config.headers
			},
			body: JSON.stringify(body)
		}, timeout, "json", context?.bustCache ?? context?.debug, this.config.maxRetries));
		if (status < 200 || status >= 300) {
			const errorMessage = `API error: ${status} ${statusText}\n${typeof data === "string" ? data : JSON.stringify(data)}`;
			// "invalid_prompt" means the model refused the input; surface it as
			// a refusal output (with token usage) rather than a hard error.
			if (typeof data === "object" && data?.error?.code === "invalid_prompt") return {
				output: errorMessage,
				tokenUsage: data?.usage ? require_util$1.getTokenUsage(data, cached) : void 0,
				isRefusal: true,
				metadata: { http: {
					status,
					statusText,
					headers: responseHeaders ?? {}
				} }
			};
			return {
				error: errorMessage,
				metadata: { http: {
					status,
					statusText,
					headers: responseHeaders ?? {}
				} }
			};
		}
	} catch (err) {
		// Network / timeout / parse failures: log, drop any cached entry, and
		// report with a synthetic status of 0.
		require_logger.logger.error(`API call error: ${String(err)}`);
		await deleteFromCache?.();
		return {
			error: `API call error: ${String(err)}`,
			metadata: { http: {
				status: 0,
				statusText: "Error",
				headers: responseHeaders ?? {}
			} }
		};
	}
	// A 2xx body can still carry an API-level error object; don't cache those.
	if (data.error?.message) {
		await deleteFromCache?.();
		return {
			error: require_util$1.formatOpenAiError(data),
			metadata: { http: {
				status,
				statusText,
				headers: responseHeaders ?? {}
			} }
		};
	}
	// Success: delegate output/tool-call/cost extraction to the processor and
	// attach the HTTP metadata to its result.
	const result = await this.processor.processResponseOutput(data, config, cached);
	return {
		...result,
		metadata: {
			...result.metadata,
			http: {
				status,
				statusText,
				headers: responseHeaders ?? {}
			}
		}
	};
}
664
+ };
665
+ //#endregion
666
// Expose this chunk's public names as live, enumerable getter bindings on
// `exports` (mirrors ESM live bindings in the CJS build).
const defineExport = (exportName, getter) => {
	Object.defineProperty(exports, exportName, {
		enumerable: true,
		get: getter
	});
};
defineExport("FunctionCallbackHandler", function() {
	return FunctionCallbackHandler;
});
defineExport("OpenAiResponsesProvider", function() {
	return OpenAiResponsesProvider;
});
defineExport("ResponsesProcessor", function() {
	return ResponsesProcessor;
});
684
+
685
+ //# sourceMappingURL=responses-BvNm4Xv9.cjs.map