@effect/ai-openai 0.37.2 → 4.0.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117) hide show
  1. package/dist/Generated.d.ts +70887 -0
  2. package/dist/Generated.d.ts.map +1 -0
  3. package/dist/Generated.js +4 -0
  4. package/dist/Generated.js.map +1 -0
  5. package/dist/OpenAiClient.d.ts +124 -0
  6. package/dist/OpenAiClient.d.ts.map +1 -0
  7. package/dist/OpenAiClient.js +128 -0
  8. package/dist/OpenAiClient.js.map +1 -0
  9. package/dist/{dts/OpenAiConfig.d.ts → OpenAiConfig.d.ts} +9 -9
  10. package/dist/OpenAiConfig.d.ts.map +1 -0
  11. package/dist/{esm/OpenAiConfig.js → OpenAiConfig.js} +8 -5
  12. package/dist/OpenAiConfig.js.map +1 -0
  13. package/dist/OpenAiError.d.ts +98 -0
  14. package/dist/OpenAiError.d.ts.map +1 -0
  15. package/dist/OpenAiError.js +10 -0
  16. package/dist/OpenAiError.js.map +1 -0
  17. package/dist/OpenAiLanguageModel.d.ts +318 -0
  18. package/dist/OpenAiLanguageModel.d.ts.map +1 -0
  19. package/dist/OpenAiLanguageModel.js +2207 -0
  20. package/dist/OpenAiLanguageModel.js.map +1 -0
  21. package/dist/{dts/OpenAiTelemetry.d.ts → OpenAiTelemetry.d.ts} +31 -13
  22. package/dist/OpenAiTelemetry.d.ts.map +1 -0
  23. package/dist/{esm/OpenAiTelemetry.js → OpenAiTelemetry.js} +11 -6
  24. package/dist/OpenAiTelemetry.js.map +1 -0
  25. package/dist/OpenAiTool.d.ts +479 -0
  26. package/dist/OpenAiTool.d.ts.map +1 -0
  27. package/dist/OpenAiTool.js +231 -0
  28. package/dist/OpenAiTool.js.map +1 -0
  29. package/dist/index.d.ts +58 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +59 -0
  32. package/dist/index.js.map +1 -0
  33. package/dist/internal/errors.d.ts +2 -0
  34. package/dist/internal/errors.d.ts.map +1 -0
  35. package/dist/internal/errors.js +316 -0
  36. package/dist/internal/errors.js.map +1 -0
  37. package/dist/{dts/internal → internal}/utilities.d.ts.map +1 -1
  38. package/dist/{esm/internal → internal}/utilities.js +4 -3
  39. package/dist/internal/utilities.js.map +1 -0
  40. package/package.json +45 -97
  41. package/src/Generated.ts +28521 -20036
  42. package/src/OpenAiClient.ts +220 -1816
  43. package/src/OpenAiConfig.ts +20 -34
  44. package/src/OpenAiError.ts +107 -0
  45. package/src/OpenAiLanguageModel.ts +1807 -638
  46. package/src/OpenAiTelemetry.ts +24 -19
  47. package/src/OpenAiTool.ts +216 -70
  48. package/src/index.ts +35 -8
  49. package/src/internal/errors.ts +347 -0
  50. package/src/internal/utilities.ts +7 -5
  51. package/Generated/package.json +0 -6
  52. package/OpenAiClient/package.json +0 -6
  53. package/OpenAiConfig/package.json +0 -6
  54. package/OpenAiEmbeddingModel/package.json +0 -6
  55. package/OpenAiLanguageModel/package.json +0 -6
  56. package/OpenAiTelemetry/package.json +0 -6
  57. package/OpenAiTokenizer/package.json +0 -6
  58. package/OpenAiTool/package.json +0 -6
  59. package/README.md +0 -5
  60. package/dist/cjs/Generated.js +0 -7150
  61. package/dist/cjs/Generated.js.map +0 -1
  62. package/dist/cjs/OpenAiClient.js +0 -1567
  63. package/dist/cjs/OpenAiClient.js.map +0 -1
  64. package/dist/cjs/OpenAiConfig.js +0 -30
  65. package/dist/cjs/OpenAiConfig.js.map +0 -1
  66. package/dist/cjs/OpenAiEmbeddingModel.js +0 -155
  67. package/dist/cjs/OpenAiEmbeddingModel.js.map +0 -1
  68. package/dist/cjs/OpenAiLanguageModel.js +0 -1147
  69. package/dist/cjs/OpenAiLanguageModel.js.map +0 -1
  70. package/dist/cjs/OpenAiTelemetry.js +0 -38
  71. package/dist/cjs/OpenAiTelemetry.js.map +0 -1
  72. package/dist/cjs/OpenAiTokenizer.js +0 -83
  73. package/dist/cjs/OpenAiTokenizer.js.map +0 -1
  74. package/dist/cjs/OpenAiTool.js +0 -93
  75. package/dist/cjs/OpenAiTool.js.map +0 -1
  76. package/dist/cjs/index.js +0 -24
  77. package/dist/cjs/index.js.map +0 -1
  78. package/dist/cjs/internal/utilities.js +0 -32
  79. package/dist/cjs/internal/utilities.js.map +0 -1
  80. package/dist/dts/Generated.d.ts +0 -40661
  81. package/dist/dts/Generated.d.ts.map +0 -1
  82. package/dist/dts/OpenAiClient.d.ts +0 -3120
  83. package/dist/dts/OpenAiClient.d.ts.map +0 -1
  84. package/dist/dts/OpenAiConfig.d.ts.map +0 -1
  85. package/dist/dts/OpenAiEmbeddingModel.d.ts +0 -109
  86. package/dist/dts/OpenAiEmbeddingModel.d.ts.map +0 -1
  87. package/dist/dts/OpenAiLanguageModel.d.ts +0 -235
  88. package/dist/dts/OpenAiLanguageModel.d.ts.map +0 -1
  89. package/dist/dts/OpenAiTelemetry.d.ts.map +0 -1
  90. package/dist/dts/OpenAiTokenizer.d.ts +0 -17
  91. package/dist/dts/OpenAiTokenizer.d.ts.map +0 -1
  92. package/dist/dts/OpenAiTool.d.ts +0 -200
  93. package/dist/dts/OpenAiTool.d.ts.map +0 -1
  94. package/dist/dts/index.d.ts +0 -33
  95. package/dist/dts/index.d.ts.map +0 -1
  96. package/dist/esm/Generated.js +0 -7150
  97. package/dist/esm/Generated.js.map +0 -1
  98. package/dist/esm/OpenAiClient.js +0 -1504
  99. package/dist/esm/OpenAiClient.js.map +0 -1
  100. package/dist/esm/OpenAiConfig.js.map +0 -1
  101. package/dist/esm/OpenAiEmbeddingModel.js +0 -143
  102. package/dist/esm/OpenAiEmbeddingModel.js.map +0 -1
  103. package/dist/esm/OpenAiLanguageModel.js +0 -1134
  104. package/dist/esm/OpenAiLanguageModel.js.map +0 -1
  105. package/dist/esm/OpenAiTelemetry.js.map +0 -1
  106. package/dist/esm/OpenAiTokenizer.js +0 -73
  107. package/dist/esm/OpenAiTokenizer.js.map +0 -1
  108. package/dist/esm/OpenAiTool.js +0 -84
  109. package/dist/esm/OpenAiTool.js.map +0 -1
  110. package/dist/esm/index.js +0 -33
  111. package/dist/esm/index.js.map +0 -1
  112. package/dist/esm/internal/utilities.js.map +0 -1
  113. package/dist/esm/package.json +0 -4
  114. package/index/package.json +0 -6
  115. package/src/OpenAiEmbeddingModel.ts +0 -243
  116. package/src/OpenAiTokenizer.ts +0 -70
  117. /package/dist/{dts/internal → internal}/utilities.d.ts +0 -0
@@ -1,1134 +0,0 @@
1
- /**
2
- * @since 1.0.0
3
- */
4
- import * as AiError from "@effect/ai/AiError";
5
- import * as IdGenerator from "@effect/ai/IdGenerator";
6
- import * as LanguageModel from "@effect/ai/LanguageModel";
7
- import * as AiModel from "@effect/ai/Model";
8
- import * as Tool from "@effect/ai/Tool";
9
- import * as Context from "effect/Context";
10
- import * as DateTime from "effect/DateTime";
11
- import * as Effect from "effect/Effect";
12
- import * as Encoding from "effect/Encoding";
13
- import { dual } from "effect/Function";
14
- import * as Layer from "effect/Layer";
15
- import * as Predicate from "effect/Predicate";
16
- import * as Stream from "effect/Stream";
17
- import * as InternalUtilities from "./internal/utilities.js";
18
- import { OpenAiClient } from "./OpenAiClient.js";
19
- import { addGenAIAnnotations } from "./OpenAiTelemetry.js";
20
- import * as OpenAiTokenizer from "./OpenAiTokenizer.js";
21
- import * as OpenAiTool from "./OpenAiTool.js";
22
- // =============================================================================
23
- // Configuration
24
- // =============================================================================
25
- /**
26
- * @since 1.0.0
27
- * @category Context
28
- */
29
- export class Config extends /*#__PURE__*/Context.Tag("@effect/ai-openai/OpenAiLanguageModel/Config")() {
30
- /**
31
- * @since 1.0.0
32
- */
33
- static getOrUndefined = /*#__PURE__*/Effect.map(/*#__PURE__*/Effect.context(), context => context.unsafeMap.get(Config.key));
34
- }
35
- // =============================================================================
36
- // OpenAI Language Model
37
- // =============================================================================
38
- /**
39
- * @since 1.0.0
40
- * @category Ai Models
41
- */
42
- export const model = (model, config) => AiModel.make("openai", layer({
43
- model,
44
- config
45
- }));
46
- /**
47
- * @since 1.0.0
48
- * @category Ai Models
49
- */
50
- export const modelWithTokenizer = (model, config) => AiModel.make("openai", layerWithTokenizer({
51
- model,
52
- config
53
- }));
54
- /**
55
- * @since 1.0.0
56
- * @category Constructors
57
- */
58
- export const make = /*#__PURE__*/Effect.fnUntraced(function* (options) {
59
- const client = yield* OpenAiClient;
60
- const makeRequest = Effect.fnUntraced(function* (providerOptions) {
61
- const context = yield* Effect.context();
62
- const config = {
63
- model: options.model,
64
- ...options.config,
65
- ...context.unsafeMap.get(Config.key)
66
- };
67
- const messages = yield* prepareMessages(providerOptions, config);
68
- const {
69
- toolChoice,
70
- tools
71
- } = yield* prepareTools(providerOptions);
72
- const include = prepareInclude(providerOptions, config);
73
- const responseFormat = prepareResponseFormat(providerOptions);
74
- const verbosity = config.text?.verbosity;
75
- const request = {
76
- ...config,
77
- input: messages,
78
- include,
79
- text: {
80
- format: responseFormat,
81
- verbosity
82
- },
83
- tools,
84
- tool_choice: toolChoice
85
- };
86
- return request;
87
- });
88
- return yield* LanguageModel.make({
89
- generateText: Effect.fnUntraced(function* (options) {
90
- const request = yield* makeRequest(options);
91
- annotateRequest(options.span, request);
92
- const rawResponse = yield* client.createResponse(request);
93
- annotateResponse(options.span, rawResponse);
94
- return yield* makeResponse(rawResponse, options);
95
- }),
96
- streamText: Effect.fnUntraced(function* (options) {
97
- const request = yield* makeRequest(options);
98
- annotateRequest(options.span, request);
99
- return client.createResponseStream(request);
100
- }, (effect, options) => effect.pipe(Effect.flatMap(stream => makeStreamResponse(stream, options)), Stream.unwrap, Stream.map(response => {
101
- annotateStreamResponse(options.span, response);
102
- return response;
103
- })))
104
- });
105
- });
106
- /**
107
- * @since 1.0.0
108
- * @category Layers
109
- */
110
- export const layer = options => Layer.effect(LanguageModel.LanguageModel, make({
111
- model: options.model,
112
- config: options.config
113
- }));
114
- /**
115
- * @since 1.0.0
116
- * @category Layers
117
- */
118
- export const layerWithTokenizer = options => Layer.merge(layer(options), OpenAiTokenizer.layer(options));
119
- /**
120
- * @since 1.0.0
121
- * @category Configuration
122
- */
123
- export const withConfigOverride = /*#__PURE__*/dual(2, (self, overrides) => Effect.flatMap(Config.getOrUndefined, config => Effect.provideService(self, Config, {
124
- ...config,
125
- ...overrides
126
- })));
127
- // =============================================================================
128
- // Prompt Conversion
129
- // =============================================================================
130
- const getSystemMessageMode = model => model.startsWith("o") || model.startsWith("gpt-5") || model.startsWith("codex-") || model.startsWith("computer-use") ? "developer" : "system";
131
- const prepareMessages = /*#__PURE__*/Effect.fnUntraced(function* (options, config) {
132
- const messages = [];
133
- for (const message of options.prompt.content) {
134
- switch (message.role) {
135
- case "system":
136
- {
137
- messages.push({
138
- role: getSystemMessageMode(config.model),
139
- content: message.content
140
- });
141
- break;
142
- }
143
- case "user":
144
- {
145
- const content = [];
146
- for (let index = 0; index < message.content.length; index++) {
147
- const part = message.content[index];
148
- switch (part.type) {
149
- case "text":
150
- {
151
- content.push({
152
- type: "input_text",
153
- text: part.text
154
- });
155
- break;
156
- }
157
- case "file":
158
- {
159
- if (part.mediaType.startsWith("image/")) {
160
- const detail = getImageDetail(part);
161
- const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
162
- if (typeof part.data === "string" && isFileId(part.data, config)) {
163
- content.push({
164
- type: "input_image",
165
- file_id: part.data,
166
- detail
167
- });
168
- }
169
- if (part.data instanceof URL) {
170
- content.push({
171
- type: "input_image",
172
- image_url: part.data.toString(),
173
- detail
174
- });
175
- }
176
- if (part.data instanceof Uint8Array) {
177
- const base64 = Encoding.encodeBase64(part.data);
178
- const imageUrl = `data:${mediaType};base64,${base64}`;
179
- content.push({
180
- type: "input_image",
181
- image_url: imageUrl,
182
- detail
183
- });
184
- }
185
- } else if (part.mediaType === "application/pdf") {
186
- if (typeof part.data === "string" && isFileId(part.data, config)) {
187
- content.push({
188
- type: "input_file",
189
- file_id: part.data
190
- });
191
- }
192
- if (part.data instanceof URL) {
193
- content.push({
194
- type: "input_file",
195
- file_url: part.data.toString()
196
- });
197
- }
198
- if (part.data instanceof Uint8Array) {
199
- const base64 = Encoding.encodeBase64(part.data);
200
- const fileName = part.fileName ?? `part-${index}.pdf`;
201
- const fileData = `data:application/pdf;base64,${base64}`;
202
- content.push({
203
- type: "input_file",
204
- filename: fileName,
205
- file_data: fileData
206
- });
207
- }
208
- } else {
209
- return yield* new AiError.MalformedInput({
210
- module: "OpenAiLanguageModel",
211
- method: "prepareMessages",
212
- description: `Detected unsupported media type for file: '${part.mediaType}'`
213
- });
214
- }
215
- }
216
- }
217
- }
218
- messages.push({
219
- role: "user",
220
- content
221
- });
222
- break;
223
- }
224
- case "assistant":
225
- {
226
- const reasoningMessages = {};
227
- for (const part of message.content) {
228
- switch (part.type) {
229
- case "text":
230
- {
231
- messages.push({
232
- role: "assistant",
233
- content: [{
234
- type: "output_text",
235
- text: part.text
236
- }],
237
- id: getItemId(part)
238
- });
239
- break;
240
- }
241
- case "reasoning":
242
- {
243
- const options = part.options.openai;
244
- if (Predicate.isNotUndefined(options?.itemId)) {
245
- const reasoningMessage = reasoningMessages[options.itemId];
246
- const summaryParts = [];
247
- if (part.text.length > 0) {
248
- summaryParts.push({
249
- type: "summary_text",
250
- text: part.text
251
- });
252
- }
253
- if (Predicate.isUndefined(reasoningMessage)) {
254
- reasoningMessages[options.itemId] = {
255
- id: options.itemId,
256
- type: "reasoning",
257
- summary: summaryParts,
258
- encrypted_content: options.encryptedContent
259
- };
260
- messages.push(reasoningMessages[options.itemId]);
261
- } else {
262
- for (const summaryPart of summaryParts) {
263
- reasoningMessage.summary.push(summaryPart);
264
- }
265
- }
266
- }
267
- break;
268
- }
269
- case "tool-call":
270
- {
271
- if (!part.providerExecuted) {
272
- messages.push({
273
- id: getItemId(part),
274
- type: "function_call",
275
- call_id: part.id,
276
- name: part.name,
277
- arguments: JSON.stringify(part.params)
278
- });
279
- }
280
- break;
281
- }
282
- }
283
- }
284
- break;
285
- }
286
- case "tool":
287
- {
288
- for (const part of message.content) {
289
- messages.push({
290
- type: "function_call_output",
291
- call_id: part.id,
292
- output: JSON.stringify(part.result)
293
- });
294
- }
295
- break;
296
- }
297
- }
298
- }
299
- return messages;
300
- });
301
- // =============================================================================
302
- // Response Conversion
303
- // =============================================================================
304
- const makeResponse = /*#__PURE__*/Effect.fnUntraced(function* (response, options) {
305
- const idGenerator = yield* IdGenerator.IdGenerator;
306
- const webSearchTool = options.tools.find(tool => Tool.isProviderDefined(tool) && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"));
307
- let hasToolCalls = false;
308
- const parts = [];
309
- const createdAt = new Date(response.created_at * 1000);
310
- parts.push({
311
- type: "response-metadata",
312
- id: response.id,
313
- modelId: response.model,
314
- timestamp: DateTime.formatIso(DateTime.unsafeFromDate(createdAt))
315
- });
316
- for (const part of response.output) {
317
- switch (part.type) {
318
- case "message":
319
- {
320
- for (const contentPart of part.content) {
321
- switch (contentPart.type) {
322
- case "output_text":
323
- {
324
- parts.push({
325
- type: "text",
326
- text: contentPart.text,
327
- metadata: {
328
- openai: {
329
- itemId: part.id
330
- }
331
- }
332
- });
333
- for (const annotation of contentPart.annotations) {
334
- if (annotation.type === "file_citation") {
335
- const metadata = {
336
- type: annotation.type,
337
- index: annotation.index
338
- };
339
- parts.push({
340
- type: "source",
341
- sourceType: "document",
342
- id: yield* idGenerator.generateId(),
343
- mediaType: "text/plain",
344
- title: annotation.filename ?? "Untitled Document",
345
- metadata: {
346
- openai: metadata
347
- }
348
- });
349
- }
350
- if (annotation.type === "url_citation") {
351
- const metadata = {
352
- type: annotation.type,
353
- startIndex: annotation.start_index,
354
- endIndex: annotation.end_index
355
- };
356
- parts.push({
357
- type: "source",
358
- sourceType: "url",
359
- id: yield* idGenerator.generateId(),
360
- url: annotation.url,
361
- title: annotation.title,
362
- metadata: {
363
- openai: metadata
364
- }
365
- });
366
- }
367
- }
368
- break;
369
- }
370
- case "refusal":
371
- {
372
- parts.push({
373
- type: "text",
374
- text: "",
375
- metadata: {
376
- openai: {
377
- refusal: contentPart.refusal
378
- }
379
- }
380
- });
381
- break;
382
- }
383
- }
384
- }
385
- break;
386
- }
387
- case "function_call":
388
- {
389
- hasToolCalls = true;
390
- const toolName = part.name;
391
- const toolParams = part.arguments;
392
- const params = yield* Effect.try({
393
- try: () => Tool.unsafeSecureJsonParse(toolParams),
394
- catch: cause => new AiError.MalformedOutput({
395
- module: "OpenAiLanguageModel",
396
- method: "makeResponse",
397
- description: "Failed to securely parse tool call parameters " + `for tool '${toolName}':\nParameters: ${toolParams}`,
398
- cause
399
- })
400
- });
401
- parts.push({
402
- type: "tool-call",
403
- id: part.call_id,
404
- name: toolName,
405
- params,
406
- metadata: {
407
- openai: {
408
- itemId: part.id
409
- }
410
- }
411
- });
412
- break;
413
- }
414
- case "code_interpreter_call":
415
- {
416
- parts.push({
417
- type: "tool-call",
418
- id: part.id,
419
- name: "OpenAiCodeInterpreter",
420
- params: {
421
- code: part.code,
422
- container_id: part.container_id
423
- },
424
- providerName: "code_interpreter",
425
- providerExecuted: true
426
- });
427
- parts.push({
428
- type: "tool-result",
429
- id: part.id,
430
- name: "OpenAiCodeInterpreter",
431
- isFailure: false,
432
- result: part.outputs,
433
- providerName: "code_interpreter",
434
- providerExecuted: true
435
- });
436
- break;
437
- }
438
- case "file_search_call":
439
- {
440
- parts.push({
441
- type: "tool-call",
442
- id: part.id,
443
- name: "OpenAiFileSearch",
444
- params: {},
445
- providerName: "file_search",
446
- providerExecuted: true
447
- });
448
- parts.push({
449
- type: "tool-result",
450
- id: part.id,
451
- name: "OpenAiFileSearch",
452
- isFailure: false,
453
- result: {
454
- status: part.status,
455
- queries: part.queries,
456
- ...(part.results && {
457
- results: part.results
458
- })
459
- },
460
- providerName: "file_search",
461
- providerExecuted: true
462
- });
463
- break;
464
- }
465
- case "web_search_call":
466
- {
467
- parts.push({
468
- type: "tool-call",
469
- id: part.id,
470
- name: webSearchTool?.name ?? "OpenAiWebSearch",
471
- params: {
472
- action: part.action
473
- },
474
- providerName: webSearchTool?.providerName ?? "web_search",
475
- providerExecuted: true
476
- });
477
- parts.push({
478
- type: "tool-result",
479
- id: part.id,
480
- name: webSearchTool?.name ?? "OpenAiWebSearch",
481
- isFailure: false,
482
- result: {
483
- status: part.status
484
- },
485
- providerName: webSearchTool?.providerName ?? "web_search",
486
- providerExecuted: true
487
- });
488
- break;
489
- }
490
- // TODO(Max): support computer use
491
- // case "computer_call": {
492
- // parts.push({
493
- // type: "tool-call",
494
- // id: part.id,
495
- // name: "OpenAiComputerUse",
496
- // params: { action: part.action },
497
- // providerName: webSearchTool?.providerName ?? "web_search",
498
- // providerExecuted: true
499
- // })
500
- //
501
- // parts.push({
502
- // type: "tool-result",
503
- // id: part.id,
504
- // name: webSearchTool?.name ?? "OpenAiWebSearch",
505
- // result: { status: part.status },
506
- // providerName: webSearchTool?.providerName ?? "web_search",
507
- // providerExecuted: true
508
- // })
509
- // break
510
- // }
511
- case "reasoning":
512
- {
513
- // If there are no summary parts, we have to add an empty one to
514
- // propagate the part identifier
515
- if (part.summary.length === 0) {
516
- parts.push({
517
- type: "reasoning",
518
- text: "",
519
- metadata: {
520
- openai: {
521
- itemId: part.id
522
- }
523
- }
524
- });
525
- } else {
526
- for (const summary of part.summary) {
527
- const metadata = {
528
- itemId: part.id,
529
- encryptedContent: part.encrypted_content ?? undefined
530
- };
531
- parts.push({
532
- type: "reasoning",
533
- text: summary.text,
534
- metadata: {
535
- openai: metadata
536
- }
537
- });
538
- }
539
- }
540
- break;
541
- }
542
- }
543
- }
544
- const finishReason = InternalUtilities.resolveFinishReason(response.incomplete_details?.reason, hasToolCalls);
545
- const metadata = {
546
- serviceTier: response.service_tier
547
- };
548
- parts.push({
549
- type: "finish",
550
- reason: finishReason,
551
- usage: {
552
- inputTokens: response.usage?.input_tokens,
553
- outputTokens: response.usage?.output_tokens,
554
- totalTokens: (response.usage?.input_tokens ?? 0) + (response.usage?.output_tokens ?? 0),
555
- reasoningTokens: response.usage?.output_tokens_details?.reasoning_tokens,
556
- cachedInputTokens: response.usage?.input_tokens_details?.cached_tokens
557
- },
558
- metadata: {
559
- openai: metadata
560
- }
561
- });
562
- return parts;
563
- });
564
- const makeStreamResponse = /*#__PURE__*/Effect.fnUntraced(function* (stream, options) {
565
- const idGenerator = yield* IdGenerator.IdGenerator;
566
- let hasToolCalls = false;
567
- const activeReasoning = {};
568
- const activeToolCalls = {};
569
- const webSearchTool = options.tools.find(tool => Tool.isProviderDefined(tool) && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"));
570
- return stream.pipe(Stream.mapEffect(Effect.fnUntraced(function* (event) {
571
- const parts = [];
572
- switch (event.type) {
573
- case "response.created":
574
- {
575
- const createdAt = new Date(event.response.created_at * 1000);
576
- parts.push({
577
- type: "response-metadata",
578
- id: event.response.id,
579
- modelId: event.response.model,
580
- timestamp: DateTime.formatIso(DateTime.unsafeFromDate(createdAt))
581
- });
582
- break;
583
- }
584
- case "error":
585
- {
586
- parts.push({
587
- type: "error",
588
- error: event
589
- });
590
- break;
591
- }
592
- case "response.completed":
593
- case "response.incomplete":
594
- case "response.failed":
595
- {
596
- parts.push({
597
- type: "finish",
598
- reason: InternalUtilities.resolveFinishReason(event.response.incomplete_details?.reason, hasToolCalls),
599
- usage: {
600
- inputTokens: event.response.usage?.input_tokens,
601
- outputTokens: event.response.usage?.output_tokens,
602
- totalTokens: (event.response.usage?.input_tokens ?? 0) + (event.response.usage?.output_tokens ?? 0),
603
- reasoningTokens: event.response.usage?.output_tokens_details?.reasoning_tokens,
604
- cachedInputTokens: event.response.usage?.input_tokens_details?.cached_tokens
605
- },
606
- metadata: {
607
- openai: {
608
- serviceTier: event.response.service_tier
609
- }
610
- }
611
- });
612
- break;
613
- }
614
- case "response.output_item.added":
615
- {
616
- switch (event.item.type) {
617
- case "computer_call":
618
- {
619
- // TODO(Max): support computer use
620
- break;
621
- }
622
- case "file_search_call":
623
- {
624
- activeToolCalls[event.output_index] = {
625
- id: event.item.id,
626
- name: "OpenAiFileSearch"
627
- };
628
- parts.push({
629
- type: "tool-params-start",
630
- id: event.item.id,
631
- name: "OpenAiFileSearch",
632
- providerName: "file_search",
633
- providerExecuted: true
634
- });
635
- break;
636
- }
637
- case "function_call":
638
- {
639
- activeToolCalls[event.output_index] = {
640
- id: event.item.call_id,
641
- name: event.item.name
642
- };
643
- parts.push({
644
- type: "tool-params-start",
645
- id: event.item.call_id,
646
- name: event.item.name
647
- });
648
- break;
649
- }
650
- case "message":
651
- {
652
- parts.push({
653
- type: "text-start",
654
- id: event.item.id,
655
- metadata: {
656
- openai: {
657
- itemId: event.item.id
658
- }
659
- }
660
- });
661
- break;
662
- }
663
- case "reasoning":
664
- {
665
- activeReasoning[event.item.id] = {
666
- summaryParts: [0],
667
- encryptedContent: event.item.encrypted_content
668
- };
669
- parts.push({
670
- type: "reasoning-start",
671
- id: `${event.item.id}:0`,
672
- metadata: {
673
- openai: {
674
- itemId: event.item.id,
675
- encryptedContent: event.item.encrypted_content
676
- }
677
- }
678
- });
679
- break;
680
- }
681
- case "web_search_call":
682
- {
683
- activeToolCalls[event.output_index] = {
684
- id: event.item.id,
685
- name: webSearchTool?.name ?? "OpenAiWebSearch"
686
- };
687
- parts.push({
688
- type: "tool-params-start",
689
- id: event.item.id,
690
- name: webSearchTool?.name ?? "OpenAiWebSearch",
691
- providerName: webSearchTool?.providerName ?? "web_search",
692
- providerExecuted: true
693
- });
694
- break;
695
- }
696
- }
697
- break;
698
- }
699
- case "response.output_item.done":
700
- {
701
- switch (event.item.type) {
702
- case "code_interpreter_call":
703
- {
704
- parts.push({
705
- type: "tool-call",
706
- id: event.item.id,
707
- name: "OpenAiCodeInterpreter",
708
- params: {
709
- code: event.item.code,
710
- container_id: event.item.container_id
711
- },
712
- providerName: "code_interpreter",
713
- providerExecuted: true
714
- });
715
- parts.push({
716
- type: "tool-result",
717
- id: event.item.id,
718
- name: "OpenAiCodeInterpreter",
719
- isFailure: false,
720
- result: {
721
- outputs: event.item.outputs
722
- },
723
- providerName: "code_interpreter",
724
- providerExecuted: true
725
- });
726
- break;
727
- }
728
- // TODO(Max): support computer use
729
- case "computer_call":
730
- {
731
- break;
732
- }
733
- case "file_search_call":
734
- {
735
- delete activeToolCalls[event.output_index];
736
- parts.push({
737
- type: "tool-params-end",
738
- id: event.item.id
739
- });
740
- parts.push({
741
- type: "tool-call",
742
- id: event.item.id,
743
- name: "OpenAiFileSearch",
744
- params: {},
745
- providerName: "file_search",
746
- providerExecuted: true
747
- });
748
- parts.push({
749
- type: "tool-result",
750
- id: event.item.id,
751
- name: "OpenAiFileSearch",
752
- isFailure: false,
753
- result: {
754
- status: event.item.status,
755
- queries: event.item.queries,
756
- ...(event.item.results && {
757
- results: event.item.results
758
- })
759
- },
760
- providerName: "file_search",
761
- providerExecuted: true
762
- });
763
- break;
764
- }
765
- case "function_call":
766
- {
767
- hasToolCalls = true;
768
- const toolName = event.item.name;
769
- const toolParams = event.item.arguments;
770
- const params = yield* Effect.try({
771
- try: () => Tool.unsafeSecureJsonParse(toolParams),
772
- catch: cause => new AiError.MalformedOutput({
773
- module: "OpenAiLanguageModel",
774
- method: "makeStreamResponse",
775
- description: "Failed to securely parse tool call parameters " + `for tool '${toolName}':\nParameters: ${toolParams}`,
776
- cause
777
- })
778
- });
779
- parts.push({
780
- type: "tool-params-end",
781
- id: event.item.call_id
782
- });
783
- parts.push({
784
- type: "tool-call",
785
- id: event.item.call_id,
786
- name: toolName,
787
- params,
788
- metadata: {
789
- openai: {
790
- itemId: event.item.id
791
- }
792
- }
793
- });
794
- delete activeToolCalls[event.output_index];
795
- break;
796
- }
797
- case "message":
798
- {
799
- parts.push({
800
- type: "text-end",
801
- id: event.item.id
802
- });
803
- break;
804
- }
805
- case "reasoning":
806
- {
807
- const reasoningPart = activeReasoning[event.item.id];
808
- for (const summaryIndex of reasoningPart.summaryParts) {
809
- parts.push({
810
- type: "reasoning-end",
811
- id: `${event.item.id}:${summaryIndex}`,
812
- metadata: {
813
- openai: {
814
- itemId: event.item.id,
815
- encryptedContent: event.item.encrypted_content
816
- }
817
- }
818
- });
819
- }
820
- delete activeReasoning[event.item.id];
821
- break;
822
- }
823
- case "web_search_call":
824
- {
825
- delete activeToolCalls[event.output_index];
826
- parts.push({
827
- type: "tool-params-end",
828
- id: event.item.id
829
- });
830
- parts.push({
831
- type: "tool-call",
832
- id: event.item.id,
833
- name: "OpenAiWebSearch",
834
- params: {
835
- action: event.item.action
836
- },
837
- providerName: "web_search",
838
- providerExecuted: true
839
- });
840
- parts.push({
841
- type: "tool-result",
842
- id: event.item.id,
843
- name: "OpenAiWebSearch",
844
- isFailure: false,
845
- result: {
846
- status: event.item.status
847
- },
848
- providerName: "web_search",
849
- providerExecuted: true
850
- });
851
- break;
852
- }
853
- }
854
- break;
855
- }
856
- case "response.output_text.delta":
857
- {
858
- parts.push({
859
- type: "text-delta",
860
- id: event.item_id,
861
- delta: event.delta
862
- });
863
- break;
864
- }
865
- case "response.output_text.annotation.added":
866
- {
867
- if (event.annotation.type === "file_citation") {
868
- parts.push({
869
- type: "source",
870
- sourceType: "document",
871
- id: yield* idGenerator.generateId(),
872
- mediaType: "text/plain",
873
- title: event.annotation.filename ?? "Untitled Document",
874
- fileName: event.annotation.filename ?? event.annotation.file_id
875
- });
876
- }
877
- if (event.annotation.type === "url_citation") {
878
- parts.push({
879
- type: "source",
880
- sourceType: "url",
881
- id: yield* idGenerator.generateId(),
882
- url: event.annotation.url,
883
- title: event.annotation.title
884
- });
885
- }
886
- break;
887
- }
888
- case "response.function_call_arguments.delta":
889
- {
890
- const toolCallPart = activeToolCalls[event.output_index];
891
- if (Predicate.isNotUndefined(toolCallPart)) {
892
- parts.push({
893
- type: "tool-params-delta",
894
- id: toolCallPart.id,
895
- delta: event.delta
896
- });
897
- }
898
- break;
899
- }
900
- case "response.reasoning_summary_part.added":
901
- {
902
- // The first reasoning start is pushed in the `response.output_item.added` block
903
- if (event.summary_index > 0) {
904
- const reasoningPart = activeReasoning[event.item_id];
905
- if (Predicate.isNotUndefined(reasoningPart)) {
906
- reasoningPart.summaryParts.push(event.summary_index);
907
- }
908
- parts.push({
909
- type: "reasoning-start",
910
- id: `${event.item_id}:${event.summary_index}`,
911
- metadata: {
912
- openai: {
913
- itemId: event.item_id,
914
- encryptedContent: reasoningPart?.encryptedContent
915
- }
916
- }
917
- });
918
- }
919
- break;
920
- }
921
- case "response.reasoning_summary_text.delta":
922
- {
923
- parts.push({
924
- type: "reasoning-delta",
925
- id: `${event.item_id}:${event.summary_index}`,
926
- delta: event.delta,
927
- metadata: {
928
- openai: {
929
- itemId: event.item_id
930
- }
931
- }
932
- });
933
- break;
934
- }
935
- }
936
- return parts;
937
- })), Stream.flattenIterables);
938
- });
939
- // =============================================================================
940
- // Telemetry
941
- // =============================================================================
942
/**
 * Annotates a tracing span with GenAI telemetry attributes derived from the
 * outgoing OpenAI request: model, sampling parameters, token limit, response
 * format, and service tier.
 */
const annotateRequest = (span, request) => {
  const requestAttributes = {
    model: request.model,
    temperature: request.temperature,
    topP: request.top_p,
    maxTokens: request.max_output_tokens
  };
  const openaiAttributes = {
    request: {
      responseFormat: request.text?.format?.type,
      serviceTier: request.service_tier
    }
  };
  addGenAIAnnotations(span, {
    system: "openai",
    operation: { name: "chat" },
    request: requestAttributes,
    openai: openaiAttributes
  });
};
962
/**
 * Annotates a tracing span with GenAI telemetry attributes derived from a
 * completed (non-streaming) OpenAI response: response id/model, the finish
 * reason reported via `incomplete_details`, token usage, and service tier.
 */
const annotateResponse = (span, response) => {
  const reason = response.incomplete_details?.reason;
  // The finish-reason attribute is a list; omit it entirely when absent.
  const finishReasons = Predicate.isNotUndefined(reason) ? [reason] : undefined;
  addGenAIAnnotations(span, {
    response: {
      id: response.id,
      model: response.model,
      finishReasons
    },
    usage: {
      inputTokens: response.usage?.input_tokens,
      outputTokens: response.usage?.output_tokens
    },
    openai: {
      response: {
        serviceTier: response.service_tier
      }
    }
  });
};
981
/**
 * Annotates a tracing span with GenAI telemetry attributes from streaming
 * response parts. "response-metadata" parts carry the response id and model;
 * "finish" parts carry the finish reason, token usage, and service tier.
 * All other part types are ignored.
 */
const annotateStreamResponse = (span, part) => {
  switch (part.type) {
    case "response-metadata": {
      addGenAIAnnotations(span, {
        response: {
          id: part.id,
          model: part.modelId
        }
      });
      break;
    }
    case "finish": {
      addGenAIAnnotations(span, {
        response: {
          finishReasons: [part.reason]
        },
        usage: {
          inputTokens: part.usage.inputTokens,
          outputTokens: part.usage.outputTokens
        },
        openai: {
          response: {
            serviceTier: part.metadata?.openai?.serviceTier
          }
        }
      });
      break;
    }
  }
};
1008
/**
 * Converts the tools and tool choice from an incoming request into the shape
 * expected by the OpenAI Responses API.
 *
 * User-defined tools become strict `function` tools; provider-defined tools
 * (code interpreter, file search, web search) are mapped onto their OpenAI
 * tool types by spreading their args. Fails with `AiError.MalformedInput`
 * when an unknown provider-defined tool is requested.
 *
 * @param options - Request options containing `tools` and `toolChoice`.
 * @returns `{ tools, toolChoice }` ready to place on the request body; both
 *   are `undefined` when the toolkit is empty.
 */
const prepareTools = /*#__PURE__*/Effect.fnUntraced(function* (options) {
  // Return immediately if no tools are in the toolkit
  if (options.tools.length === 0) {
    return {
      tools: undefined,
      toolChoice: undefined
    };
  }
  const tools = [];
  let toolChoice = undefined;
  // Filter the incoming tools down to the set of allowed tools as indicated by
  // the tool choice. This must be done here given that there is no tool name
  // in OpenAI's provider-defined tools, so there would be no way to perform
  // this filter otherwise
  let allowedTools = options.tools;
  if (typeof options.toolChoice === "object" && "oneOf" in options.toolChoice) {
    const allowedToolNames = new Set(options.toolChoice.oneOf);
    allowedTools = options.tools.filter(tool => allowedToolNames.has(tool.name));
    toolChoice = options.toolChoice.mode === "required" ? "required" : "auto";
  }
  // Convert the tools in the toolkit to the provider-defined format
  for (const tool of allowedTools) {
    if (Tool.isUserDefined(tool)) {
      tools.push({
        type: "function",
        name: tool.name,
        description: Tool.getDescription(tool),
        parameters: Tool.getJsonSchema(tool),
        strict: true
      });
    }
    if (Tool.isProviderDefined(tool)) {
      switch (tool.id) {
        case "openai.code_interpreter": {
          tools.push({
            ...tool.args,
            type: "code_interpreter"
          });
          break;
        }
        case "openai.file_search": {
          tools.push({
            ...tool.args,
            type: "file_search"
          });
          break;
        }
        case "openai.web_search": {
          tools.push({
            ...tool.args,
            type: "web_search"
          });
          break;
        }
        case "openai.web_search_preview": {
          tools.push({
            ...tool.args,
            type: "web_search_preview"
          });
          break;
        }
        default: {
          // Bug fix: this previously reported `module: "AnthropicLanguageModel"`,
          // a copy-paste from the Anthropic provider; this is the OpenAI module.
          return yield* new AiError.MalformedInput({
            module: "OpenAiLanguageModel",
            method: "prepareTools",
            description: `Received request to call unknown provider-defined tool '${tool.name}'`
          });
        }
      }
    }
  }
  if (options.toolChoice === "auto" || options.toolChoice === "none" || options.toolChoice === "required") {
    toolChoice = options.toolChoice;
  }
  if (typeof options.toolChoice === "object" && "tool" in options.toolChoice) {
    // A specific tool was requested: user-defined tools are referenced by
    // function name, provider-defined tools directly by their type.
    toolChoice = Predicate.isUndefined(OpenAiTool.getProviderDefinedToolName(options.toolChoice.tool)) ? {
      type: "function",
      name: options.toolChoice.tool
    } : {
      type: options.toolChoice.tool
    };
  }
  return {
    tools,
    toolChoice
  };
});
1100
- // =============================================================================
1101
- // Utilities
1102
- // =============================================================================
1103
// Returns true when `data` starts with one of the configured file-id prefixes
// (e.g. "file-"), i.e. it references an uploaded file rather than inline data.
const isFileId = (data, config) => {
  const prefixes = config.fileIdPrefixes;
  if (Predicate.isUndefined(prefixes)) {
    return false;
  }
  return prefixes.some(prefix => data.startsWith(prefix));
};
1104
// Extracts the OpenAI item id from a part's provider options, if present.
const getItemId = part => {
  const openaiOptions = part.options.openai;
  return openaiOptions?.itemId;
};
1105
// Resolves the requested image detail level for a part, defaulting to "auto"
// when the part's provider options do not specify one.
const getImageDetail = part => {
  const detail = part.options.openai?.imageDetail;
  return detail ?? "auto";
};
1106
/**
 * Builds the `include` array for the request: starts from the configured
 * includes and adds the extra output fields required when code interpreter or
 * web search tools are present in the toolkit.
 */
const prepareInclude = (options, config) => {
  const include = new Set(config.include ?? []);
  const hasProviderTool = (...ids) =>
    options.tools.some(tool => Tool.isProviderDefined(tool) && ids.includes(tool.id));
  // Code interpreter results are only returned when explicitly included.
  if (hasProviderTool("openai.code_interpreter")) {
    include.add("code_interpreter_call.outputs");
  }
  // Same for the sources consulted by either web search tool variant.
  if (hasProviderTool("openai.web_search", "openai.web_search_preview")) {
    include.add("web_search_call.action.sources");
  }
  return [...include];
};
1118
/**
 * Maps the request's response format onto OpenAI's `text.format` field.
 * A "json" response format becomes a strict `json_schema` format derived from
 * the provided schema; anything else falls back to plain `text`.
 */
const prepareResponseFormat = options => {
  if (options.responseFormat.type !== "json") {
    return { type: "text" };
  }
  const { objectName, schema } = options.responseFormat;
  return {
    type: "json_schema",
    name: objectName,
    // Fall back to a generic description when the schema AST carries none.
    description: Tool.getDescriptionFromSchemaAst(schema.ast) ?? "Response with a JSON object",
    schema: Tool.getJsonSchemaFromSchemaAst(schema.ast),
    strict: true
  };
};
1134
- //# sourceMappingURL=OpenAiLanguageModel.js.map