@effect/ai-openai-compat 4.0.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/LICENSE +21 -0
  2. package/dist/OpenAiClient.d.ts +739 -0
  3. package/dist/OpenAiClient.d.ts.map +1 -0
  4. package/dist/OpenAiClient.js +170 -0
  5. package/dist/OpenAiClient.js.map +1 -0
  6. package/dist/OpenAiConfig.d.ts +47 -0
  7. package/dist/OpenAiConfig.d.ts.map +1 -0
  8. package/dist/OpenAiConfig.js +25 -0
  9. package/dist/OpenAiConfig.js.map +1 -0
  10. package/dist/OpenAiError.d.ts +93 -0
  11. package/dist/OpenAiError.d.ts.map +1 -0
  12. package/dist/OpenAiError.js +5 -0
  13. package/dist/OpenAiError.js.map +1 -0
  14. package/dist/OpenAiLanguageModel.d.ts +285 -0
  15. package/dist/OpenAiLanguageModel.d.ts.map +1 -0
  16. package/dist/OpenAiLanguageModel.js +1223 -0
  17. package/dist/OpenAiLanguageModel.js.map +1 -0
  18. package/dist/OpenAiTelemetry.d.ts +120 -0
  19. package/dist/OpenAiTelemetry.d.ts.map +1 -0
  20. package/dist/OpenAiTelemetry.js +35 -0
  21. package/dist/OpenAiTelemetry.js.map +1 -0
  22. package/dist/index.d.ts +35 -0
  23. package/dist/index.d.ts.map +1 -0
  24. package/dist/index.js +36 -0
  25. package/dist/index.js.map +1 -0
  26. package/dist/internal/errors.d.ts +2 -0
  27. package/dist/internal/errors.d.ts.map +1 -0
  28. package/dist/internal/errors.js +286 -0
  29. package/dist/internal/errors.js.map +1 -0
  30. package/dist/internal/utilities.d.ts +2 -0
  31. package/dist/internal/utilities.d.ts.map +1 -0
  32. package/dist/internal/utilities.js +25 -0
  33. package/dist/internal/utilities.js.map +1 -0
  34. package/package.json +62 -0
  35. package/src/OpenAiClient.ts +998 -0
  36. package/src/OpenAiConfig.ts +64 -0
  37. package/src/OpenAiError.ts +102 -0
  38. package/src/OpenAiLanguageModel.ts +1638 -0
  39. package/src/OpenAiTelemetry.ts +159 -0
  40. package/src/index.ts +41 -0
  41. package/src/internal/errors.ts +327 -0
  42. package/src/internal/utilities.ts +33 -0
@@ -0,0 +1,1223 @@
1
+ /**
2
+ * OpenAI Language Model implementation.
3
+ *
4
+ * Provides a LanguageModel implementation for OpenAI's chat completions API,
5
+ * supporting text generation, structured output, tool calling, and streaming.
6
+ *
7
+ * @since 1.0.0
8
+ */
9
+ import * as DateTime from "effect/DateTime";
10
+ import * as Effect from "effect/Effect";
11
+ import * as Base64 from "effect/encoding/Base64";
12
+ import { dual } from "effect/Function";
13
+ import * as Layer from "effect/Layer";
14
+ import * as Predicate from "effect/Predicate";
15
+ import * as Redactable from "effect/Redactable";
16
+ import * as AST from "effect/SchemaAST";
17
+ import * as ServiceMap from "effect/ServiceMap";
18
+ import * as Stream from "effect/Stream";
19
+ import * as AiError from "effect/unstable/ai/AiError";
20
+ import * as LanguageModel from "effect/unstable/ai/LanguageModel";
21
+ import * as AiModel from "effect/unstable/ai/Model";
22
+ import { toCodecOpenAI } from "effect/unstable/ai/OpenAiStructuredOutput";
23
+ import * as Tool from "effect/unstable/ai/Tool";
24
+ import * as InternalUtilities from "./internal/utilities.js";
25
+ import { OpenAiClient } from "./OpenAiClient.js";
26
+ import { addGenAIAnnotations } from "./OpenAiTelemetry.js";
27
// =============================================================================
// Configuration
// =============================================================================
/**
 * Service definition for OpenAI language model configuration.
 *
 * Registered under the key "@effect/ai-openai-compat/OpenAiLanguageModel/Config"
 * in the ServiceMap; values stored here are merged over the provider-level
 * config when building a request (see the service lookup in `make`).
 *
 * @since 1.0.0
 * @category context
 */
export class Config extends /*#__PURE__*/ServiceMap.Service()("@effect/ai-openai-compat/OpenAiLanguageModel/Config") {}
37
// =============================================================================
// Language Model
// =============================================================================
/**
 * Builds an `AiModel` for the "openai" provider backed by the language-model
 * layer produced by `layer`.
 *
 * @since 1.0.0
 * @category constructors
 */
export const model = (model, config) => {
  const modelLayer = layer({ model, config });
  return AiModel.make("openai", modelLayer);
};
48
+ // TODO
49
+ // /**
50
+ // * @since 1.0.0
51
+ // * @category constructors
52
+ // */
53
+ // export const modelWithTokenizer = (
54
+ // model: string,
55
+ // config?: Omit<typeof Config.Service, "model">
56
+ // ): AiModel.Model<"openai", LanguageModel.LanguageModel | Tokenizer.Tokenizer, OpenAiClient> =>
57
+ // AiModel.make("openai", layerWithTokenizer({ model, config }))
58
/**
 * Creates an OpenAI language model service.
 *
 * Resolves the `OpenAiClient`, merges configuration (base `model` +
 * provider-level config + any per-operation `Config` service overrides),
 * converts prompts/tools into a Chat Completions request, and wires up both
 * one-shot (`generateText`) and streaming (`streamText`) execution paths.
 *
 * @since 1.0.0
 * @category constructors
 */
export const make = /*#__PURE__*/Effect.fnUntraced(function* ({
  model,
  config: providerConfig
}) {
  const client = yield* OpenAiClient;
  // Config resolution order (later wins): model name, provider config,
  // then any override registered under the `Config` service key.
  const makeConfig = Effect.gen(function* () {
    const services = yield* Effect.services();
    return {
      model,
      ...providerConfig,
      ...services.mapUnsafe.get(Config.key)
    };
  });
  // Builds the provider request from prompt, tools, and response format,
  // then lowers it to the Chat Completions wire shape.
  // NOTE(review): `getModelCapabilities` and `prepareResponseFormat` are
  // defined elsewhere in this file (not visible in this chunk).
  const makeRequest = Effect.fnUntraced(function* ({
    config,
    options,
    toolNameMapper
  }) {
    const include = new Set();
    const capabilities = getModelCapabilities(config.model);
    const messages = yield* prepareMessages({
      config,
      options,
      capabilities,
      include,
      toolNameMapper
    });
    const {
      toolChoice,
      tools
    } = yield* prepareTools({
      config,
      options,
      toolNameMapper
    });
    const responseFormat = yield* prepareResponseFormat({
      config,
      options
    });
    const request = {
      ...config,
      input: messages,
      // `include` entries are accumulated by prepareMessages; null when empty.
      include: include.size > 0 ? Array.from(include) : null,
      text: {
        verbosity: config.text?.verbosity ?? null,
        format: responseFormat
      },
      ...(tools !== undefined ? {
        tools
      } : undefined),
      ...(toolChoice !== undefined ? {
        tool_choice: toolChoice
      } : undefined)
    };
    return toChatCompletionsRequest(request);
  });
  return yield* LanguageModel.make({
    generateText: Effect.fnUntraced(function* (options) {
      const config = yield* makeConfig;
      const toolNameMapper = new Tool.NameMapper(options.tools);
      const request = yield* makeRequest({
        config,
        options,
        toolNameMapper
      });
      // Telemetry: annotate the span before dispatch and after receipt.
      annotateRequest(options.span, request);
      const [rawResponse, response] = yield* client.createResponse(request);
      annotateResponse(options.span, rawResponse);
      return yield* makeResponse({
        rawResponse,
        response,
        toolNameMapper
      });
    }),
    streamText: Effect.fnUntraced(function* (options) {
      const config = yield* makeConfig;
      const toolNameMapper = new Tool.NameMapper(options.tools);
      const request = yield* makeRequest({
        config,
        options,
        toolNameMapper
      });
      annotateRequest(options.span, request);
      const [response, stream] = yield* client.createResponseStream(request);
      return yield* makeStreamResponse({
        stream,
        response,
        toolNameMapper
      });
    // Each streamed part is annotated onto the span as it flows through.
    }, (effect, options) => effect.pipe(Stream.unwrap, Stream.map(response => {
      annotateStreamResponse(options.span, response);
      return response;
    })))
  }).pipe(Effect.provideService(LanguageModel.CurrentCodecTransformer, toCodecOpenAI));
});
159
/**
 * Creates a layer for the OpenAI language model.
 *
 * @since 1.0.0
 * @category layers
 */
export const layer = options => {
  const service = make(options);
  return Layer.effect(LanguageModel.LanguageModel, service);
};
166
/**
 * Provides config overrides for OpenAI language model operations.
 *
 * Reads any `Config` already in context and layers `overrides` on top,
 * so nested overrides merge rather than replace.
 *
 * @since 1.0.0
 * @category configuration
 */
export const withConfigOverride = /*#__PURE__*/dual(2, (self, overrides) =>
  Effect.flatMap(Effect.serviceOption(Config), currentConfig => {
    const base = currentConfig._tag === "Some" ? currentConfig.value : {};
    const merged = { ...base, ...overrides };
    return Effect.provideService(self, Config, merged);
  }));
176
// =============================================================================
// Prompt Conversion
// =============================================================================
// Newer reasoning/agentic model families take system-level instructions via
// the "developer" role; everything else uses the classic "system" role.
const getSystemMessageMode = model => {
  const developerRolePrefixes = ["o", "gpt-5", "codex-", "computer-use"];
  for (const prefix of developerRolePrefixes) {
    if (model.startsWith(prefix)) {
      return "developer";
    }
  }
  return "system";
};
180
/**
 * Converts the prompt content into OpenAI Responses-API input items.
 *
 * Also accumulates `include` entries as a side effect on the passed-in Set
 * (logprobs when `top_logprobs` is set, encrypted reasoning content when
 * `store === false` on a reasoning model).
 *
 * Fails with an InvalidRequestError for unsupported file media types.
 * NOTE(review): a string file `data` that is not a file id (per `isFileId`)
 * is silently dropped — confirm this is intentional.
 */
const prepareMessages = /*#__PURE__*/Effect.fnUntraced(function* ({
  config,
  options,
  capabilities,
  include,
  toolNameMapper
}) {
  const hasConversation = Predicate.isNotNullish(config.conversation);
  // Handle Included Features
  if (config.top_logprobs !== undefined) {
    include.add("message.output_text.logprobs");
  }
  if (config.store === false && capabilities.isReasoningModel) {
    include.add("reasoning.encrypted_content");
  }
  const messages = [];
  for (const message of options.prompt.content) {
    switch (message.role) {
      case "system":
        {
          // Role depends on model family (developer vs system).
          messages.push({
            role: getSystemMessageMode(config.model),
            content: message.content
          });
          break;
        }
      case "user":
        {
          const content = [];
          for (let index = 0; index < message.content.length; index++) {
            const part = message.content[index];
            switch (part.type) {
              case "text":
                {
                  content.push({
                    type: "input_text",
                    text: part.text
                  });
                  break;
                }
              case "file":
                {
                  if (part.mediaType.startsWith("image/")) {
                    const detail = getImageDetail(part);
                    // "image/*" is a wildcard; default to JPEG for data URLs.
                    const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
                    // Images may be supplied as a file id, a URL, or raw bytes.
                    if (typeof part.data === "string" && isFileId(part.data, config)) {
                      content.push({
                        type: "input_image",
                        file_id: part.data,
                        detail
                      });
                    }
                    if (part.data instanceof URL) {
                      content.push({
                        type: "input_image",
                        image_url: part.data.toString(),
                        detail
                      });
                    }
                    if (part.data instanceof Uint8Array) {
                      const base64 = Base64.encode(part.data);
                      const imageUrl = `data:${mediaType};base64,${base64}`;
                      content.push({
                        type: "input_image",
                        image_url: imageUrl,
                        detail
                      });
                    }
                  } else if (part.mediaType === "application/pdf") {
                    if (typeof part.data === "string" && isFileId(part.data, config)) {
                      content.push({
                        type: "input_file",
                        file_id: part.data
                      });
                    }
                    if (part.data instanceof URL) {
                      content.push({
                        type: "input_file",
                        file_url: part.data.toString()
                      });
                    }
                    if (part.data instanceof Uint8Array) {
                      const base64 = Base64.encode(part.data);
                      // Synthesize a filename when the part does not carry one.
                      const fileName = part.fileName ?? `part-${index}.pdf`;
                      const fileData = `data:application/pdf;base64,${base64}`;
                      content.push({
                        type: "input_file",
                        filename: fileName,
                        file_data: fileData
                      });
                    }
                  } else {
                    // Only images and PDFs are supported as file inputs.
                    return yield* AiError.make({
                      module: "OpenAiLanguageModel",
                      method: "prepareMessages",
                      reason: new AiError.InvalidRequestError({
                        description: `Detected unsupported media type for file: '${part.mediaType}'`
                      })
                    });
                  }
                }
            }
          }
          messages.push({
            role: "user",
            content
          });
          break;
        }
      case "assistant":
        {
          // Groups reasoning parts that share an item id into one message.
          const reasoningMessages = {};
          for (const part of message.content) {
            switch (part.type) {
              case "text":
                {
                  const id = getItemId(part);
                  // When in conversation mode, skip items that already exist in the
                  // conversation context to avoid "Duplicate item found" errors
                  if (hasConversation && Predicate.isNotNull(id)) {
                    break;
                  }
                  // With server-side storage, a bare reference suffices.
                  if (config.store === true && Predicate.isNotNull(id)) {
                    messages.push({
                      type: "item_reference",
                      id
                    });
                    break;
                  }
                  messages.push({
                    id: id,
                    type: "message",
                    role: "assistant",
                    status: part.options.openai?.status ?? "completed",
                    content: [{
                      type: "output_text",
                      text: part.text,
                      annotations: part.options.openai?.annotations ?? [],
                      logprobs: []
                    }]
                  });
                  break;
                }
              case "reasoning":
                {
                  const id = getItemId(part);
                  const encryptedContent = getEncryptedContent(part);
                  if (hasConversation && Predicate.isNotNull(id)) {
                    break;
                  }
                  // Reasoning parts without an id are dropped entirely.
                  if (Predicate.isNotNull(id)) {
                    const message = reasoningMessages[id];
                    if (config.store === true) {
                      // Use item references to refer to reasoning (single reference)
                      // when the first part is encountered
                      if (Predicate.isUndefined(message)) {
                        messages.push({
                          type: "item_reference",
                          id
                        });
                        // Store unused reasoning message to mark its id as used
                        reasoningMessages[id] = {
                          type: "reasoning",
                          id,
                          summary: []
                        };
                      }
                    } else {
                      const summaryParts = [];
                      if (part.text.length > 0) {
                        summaryParts.push({
                          type: "summary_text",
                          text: part.text
                        });
                      }
                      if (Predicate.isUndefined(message)) {
                        reasoningMessages[id] = {
                          type: "reasoning",
                          id,
                          summary: summaryParts,
                          encrypted_content: encryptedContent ?? null
                        };
                        messages.push(reasoningMessages[id]);
                      } else {
                        message.summary.push(...summaryParts);
                        // Update encrypted content to enable setting it in the
                        // last summary part
                        if (Predicate.isNotNull(encryptedContent)) {
                          message.encrypted_content = encryptedContent;
                        }
                      }
                    }
                  }
                  break;
                }
              case "tool-call":
                {
                  const id = getItemId(part);
                  const status = getStatus(part);
                  if (hasConversation && Predicate.isNotNull(id)) {
                    break;
                  }
                  if (config.store && Predicate.isNotNull(id)) {
                    messages.push({
                      type: "item_reference",
                      id
                    });
                    break;
                  }
                  // Provider-executed calls are replayed by OpenAI itself.
                  if (part.providerExecuted) {
                    break;
                  }
                  // Translate back to the provider-facing tool name.
                  const toolName = toolNameMapper.getProviderName(part.name);
                  messages.push({
                    type: "function_call",
                    name: toolName,
                    call_id: part.id,
                    // @effect-diagnostics-next-line preferSchemaOverJson:off
                    arguments: JSON.stringify(part.params),
                    ...(Predicate.isNotNull(id) ? {
                      id
                    } : {}),
                    ...(Predicate.isNotNull(status) ? {
                      status
                    } : {})
                  });
                  break;
                }
              // Assistant tool-result parts are always provider executed
              case "tool-result":
                {
                  // Skip execution denied results - these have no corresponding
                  // item in OpenAI's store
                  if (Predicate.hasProperty(part.result, "type") && part.result.type === "execution-denied") {
                    break;
                  }
                  if (hasConversation) {
                    break;
                  }
                  if (config.store === true) {
                    const id = getItemId(part) ?? part.id;
                    messages.push({
                      type: "item_reference",
                      id
                    });
                  }
                }
            }
          }
          break;
        }
      case "tool":
        {
          for (const part of message.content) {
            // Approval responses are control-flow only; nothing to send.
            if (part.type === "tool-approval-response") {
              continue;
            }
            const status = getStatus(part);
            messages.push({
              type: "function_call_output",
              call_id: part.id,
              // @effect-diagnostics-next-line preferSchemaOverJson:off
              output: typeof part.result === "string" ? part.result : JSON.stringify(part.result),
              ...(Predicate.isNotNull(status) ? {
                status
              } : {})
            });
          }
          break;
        }
    }
  }
  return messages;
});
454
// =============================================================================
// HTTP Details
// =============================================================================
/**
 * Captures the outgoing HTTP request as plain data for response-metadata
 * parts, redacting sensitive header values.
 */
const buildHttpRequestDetails = request => {
  const { hash, headers, method, url, urlParams } = request;
  return {
    method,
    url,
    urlParams: Array.from(urlParams),
    hash,
    headers: Redactable.redact(headers)
  };
};
464
/**
 * Captures the HTTP response status plus redacted headers for finish parts.
 */
const buildHttpResponseDetails = response => {
  return {
    status: response.status,
    headers: Redactable.redact(response.headers)
  };
};
468
/**
 * Converts a non-streaming Chat Completions response into response parts:
 * response-metadata, optional text, tool-call parts (with securely parsed
 * JSON arguments), and a trailing finish part with usage and service tier.
 *
 * Only the first choice is inspected.
 */
const makeResponse = /*#__PURE__*/Effect.fnUntraced(function* ({
  rawResponse,
  response,
  toolNameMapper
}) {
  let hasToolCalls = false;
  const parts = [];
  // `created` is a unix timestamp in seconds.
  const createdAt = new Date(rawResponse.created * 1000);
  parts.push({
    type: "response-metadata",
    id: rawResponse.id,
    modelId: rawResponse.model,
    timestamp: DateTime.formatIso(DateTime.fromDateUnsafe(createdAt)),
    request: buildHttpRequestDetails(response.request)
  });
  const choice = rawResponse.choices[0];
  const message = choice?.message;
  if (message !== undefined) {
    // Emit a text part only for non-empty content.
    if (message.content !== undefined && Predicate.isNotNull(message.content) && message.content.length > 0) {
      parts.push({
        type: "text",
        text: message.content
      });
    }
    if (message.tool_calls !== undefined) {
      for (const [index, toolCall] of message.tool_calls.entries()) {
        // Synthesize a stable id when the provider omits one.
        const toolId = toolCall.id ?? `${rawResponse.id}_tool_${index}`;
        const toolName = toolNameMapper.getCustomName(toolCall.function?.name ?? "unknown_tool");
        const toolParams = toolCall.function?.arguments ?? "{}";
        const params = yield* Effect.try({
          try: () => Tool.unsafeSecureJsonParse(toolParams),
          catch: cause => AiError.make({
            module: "OpenAiLanguageModel",
            method: "makeResponse",
            reason: new AiError.ToolParameterValidationError({
              toolName,
              toolParams: {},
              description: `Failed to securely JSON parse tool parameters: ${cause}`
            })
          })
        });
        hasToolCalls = true;
        parts.push({
          type: "tool-call",
          id: toolId,
          name: toolName,
          params,
          metadata: {
            openai: {
              ...makeItemIdMetadata(toolCall.id)
            }
          }
        });
      }
    }
  }
  const finishReason = InternalUtilities.resolveFinishReason(choice?.finish_reason, hasToolCalls);
  const serviceTier = normalizeServiceTier(rawResponse.service_tier);
  parts.push({
    type: "finish",
    reason: finishReason,
    usage: getUsage(rawResponse.usage),
    response: buildHttpResponseDetails(response),
    // Service tier metadata is attached only when present/normalizable.
    ...(serviceTier !== undefined && {
      metadata: {
        openai: {
          serviceTier
        }
      }
    })
  });
  return parts;
});
541
/**
 * Converts a streaming Chat Completions event stream into response parts.
 *
 * Emits response-metadata on the first event, text-start/text-delta/text-end
 * around streamed content, tool-params-start/delta/end plus a final tool-call
 * for each streamed tool invocation, and a finish part on "[DONE]".
 *
 * Fixes over the previous revision:
 * - Continuation chunks for a tool call typically omit `function.name`; the
 *   recorded name is now only updated when a name is actually present,
 *   instead of being clobbered with the "unknown_tool" fallback.
 * - `tool-params-delta` parts now reuse the id recorded at
 *   tool-params-start, so all parts for one tool call correlate even when
 *   later chunks omit `id`.
 */
const makeStreamResponse = /*#__PURE__*/Effect.fnUntraced(function* ({
  stream,
  response,
  toolNameMapper
}) {
  let serviceTier = undefined;
  let usage = undefined;
  let finishReason = undefined;
  let metadataEmitted = false;
  let textStarted = false;
  let textId = "";
  let hasToolCalls = false;
  // Accumulates in-flight tool calls keyed by their chunk index.
  const activeToolCalls = {};
  return stream.pipe(Stream.mapEffect(Effect.fnUntraced(function* (event) {
    const parts = [];
    if (event === "[DONE]") {
      // Close out any open text block.
      if (textStarted) {
        parts.push({
          type: "text-end",
          id: textId,
          metadata: {
            openai: {
              ...makeItemIdMetadata(textId)
            }
          }
        });
      }
      // Finalize accumulated tool calls: parse arguments and emit
      // tool-params-end + tool-call for each.
      for (const toolCall of Object.values(activeToolCalls)) {
        const toolParams = toolCall.arguments.length > 0 ? toolCall.arguments : "{}";
        const params = yield* Effect.try({
          try: () => Tool.unsafeSecureJsonParse(toolParams),
          catch: cause => AiError.make({
            module: "OpenAiLanguageModel",
            method: "makeStreamResponse",
            reason: new AiError.ToolParameterValidationError({
              toolName: toolCall.name,
              toolParams: {},
              description: `Failed to securely JSON parse tool parameters: ${cause}`
            })
          })
        });
        parts.push({
          type: "tool-params-end",
          id: toolCall.id
        });
        parts.push({
          type: "tool-call",
          id: toolCall.id,
          name: toolCall.name,
          params,
          metadata: {
            openai: {
              ...makeItemIdMetadata(toolCall.id)
            }
          }
        });
        hasToolCalls = true;
      }
      const normalizedServiceTier = normalizeServiceTier(serviceTier);
      parts.push({
        type: "finish",
        reason: InternalUtilities.resolveFinishReason(finishReason, hasToolCalls),
        usage: getUsage(usage),
        response: buildHttpResponseDetails(response),
        ...(normalizedServiceTier !== undefined ? {
          metadata: {
            openai: {
              serviceTier: normalizedServiceTier
            }
          }
        } : undefined)
      });
      return parts;
    }
    if (event.service_tier !== undefined) {
      serviceTier = event.service_tier;
    }
    if (event.usage !== undefined && Predicate.isNotNull(event.usage)) {
      usage = event.usage;
    }
    // Emit response metadata exactly once, on the first event.
    if (!metadataEmitted) {
      metadataEmitted = true;
      textId = `${event.id}_message`;
      parts.push({
        type: "response-metadata",
        id: event.id,
        modelId: event.model,
        timestamp: DateTime.formatIso(DateTime.fromDateUnsafe(new Date(event.created * 1000))),
        request: buildHttpRequestDetails(response.request)
      });
    }
    const choice = event.choices[0];
    if (Predicate.isUndefined(choice)) {
      return parts;
    }
    if (choice.delta?.content !== undefined && Predicate.isNotNull(choice.delta.content)) {
      if (!textStarted) {
        textStarted = true;
        parts.push({
          type: "text-start",
          id: textId,
          metadata: {
            openai: {
              ...makeItemIdMetadata(textId)
            }
          }
        });
      }
      parts.push({
        type: "text-delta",
        id: textId,
        delta: choice.delta.content
      });
    }
    if (choice.delta?.tool_calls !== undefined) {
      hasToolCalls = hasToolCalls || choice.delta.tool_calls.length > 0;
      choice.delta.tool_calls.forEach((deltaTool, indexInChunk) => {
        const toolIndex = deltaTool.index ?? indexInChunk;
        const toolId = deltaTool.id ?? `${event.id}_tool_${toolIndex}`;
        const providerToolName = deltaTool.function?.name;
        const toolName = toolNameMapper.getCustomName(providerToolName ?? "unknown_tool");
        const argumentsDelta = deltaTool.function?.arguments ?? "";
        const activeToolCall = activeToolCalls[toolIndex];
        if (Predicate.isUndefined(activeToolCall)) {
          activeToolCalls[toolIndex] = {
            id: toolId,
            name: toolName,
            arguments: argumentsDelta
          };
          parts.push({
            type: "tool-params-start",
            id: toolId,
            name: toolName
          });
        } else {
          // Continuation chunks normally omit the function name; only
          // update it when the provider actually sent one, so a later
          // chunk cannot clobber the resolved name with "unknown_tool".
          if (providerToolName !== undefined) {
            activeToolCall.name = toolName;
          }
          activeToolCall.arguments = `${activeToolCall.arguments}${argumentsDelta}`;
        }
        if (argumentsDelta.length > 0) {
          parts.push({
            type: "tool-params-delta",
            // Correlate with the id recorded at tool-params-start even
            // when this chunk omitted `id`.
            id: activeToolCalls[toolIndex].id,
            delta: argumentsDelta
          });
        }
      });
    }
    if (choice.finish_reason !== undefined && Predicate.isNotNull(choice.finish_reason)) {
      finishReason = choice.finish_reason;
    }
    return parts;
  })), Stream.flattenIterable);
});
694
// =============================================================================
// Telemetry
// =============================================================================
/** Annotates the span with GenAI request attributes before dispatch. */
const annotateRequest = (span, request) => {
  const genAiRequest = {
    model: request.model,
    temperature: request.temperature,
    topP: request.top_p,
    maxTokens: request.max_tokens
  };
  const openAiRequest = {
    responseFormat: request.response_format?.type,
    serviceTier: request.service_tier
  };
  addGenAIAnnotations(span, {
    system: "openai",
    operation: { name: "chat" },
    request: genAiRequest,
    openai: { request: openAiRequest }
  });
};
717
/** Annotates the span with GenAI response and token-usage attributes. */
const annotateResponse = (span, response) => {
  const firstChoice = response.choices[0];
  const finishReason = firstChoice?.finish_reason ?? undefined;
  const finishReasons = finishReason === undefined ? undefined : [finishReason];
  addGenAIAnnotations(span, {
    response: {
      id: response.id,
      model: response.model,
      finishReasons
    },
    usage: {
      inputTokens: response.usage?.prompt_tokens,
      outputTokens: response.usage?.completion_tokens
    },
    openai: {
      response: {
        serviceTier: response.service_tier
      }
    }
  });
};
736
/**
 * Annotates the span incrementally from streamed response parts: ids/model
 * from "response-metadata", finish reason + usage from "finish". Other part
 * types are ignored.
 */
const annotateStreamResponse = (span, part) => {
  switch (part.type) {
    case "response-metadata": {
      addGenAIAnnotations(span, {
        response: {
          id: part.id,
          model: part.modelId
        }
      });
      break;
    }
    case "finish": {
      const serviceTier = part.metadata?.openai?.serviceTier;
      addGenAIAnnotations(span, {
        response: {
          finishReasons: [part.reason]
        },
        usage: {
          inputTokens: part.usage.inputTokens.total,
          outputTokens: part.usage.outputTokens.total
        },
        openai: {
          response: { serviceTier }
        }
      });
      break;
    }
  }
};
763
/** Wraps a schema-conversion failure into a structured AiError. */
const unsupportedSchemaError = (error, method) => {
  const description = error instanceof Error ? error.message : String(error);
  return AiError.make({
    module: "OpenAiLanguageModel",
    method,
    reason: new AiError.UnsupportedSchemaError({ description })
  });
};
770
// Converts an arbitrary Schema into OpenAI-compatible JSON Schema, mapping
// any conversion failure to an UnsupportedSchemaError tagged with `method`.
const tryJsonSchema = (schema, method) => Effect.try({
  try: () => Tool.getJsonSchemaFromSchema(schema, {
    transformer: toCodecOpenAI
  }),
  catch: error => unsupportedSchemaError(error, method)
});
776
// Converts a Tool's parameter schema into OpenAI-compatible JSON Schema,
// mapping any conversion failure to an UnsupportedSchemaError.
const tryToolJsonSchema = (tool, method) => Effect.try({
  try: () => Tool.getJsonSchema(tool, {
    transformer: toCodecOpenAI
  }),
  catch: error => unsupportedSchemaError(error, method)
});
782
/**
 * Converts the toolkit and tool-choice options into OpenAI function tools
 * plus a `tool_choice` value. Returns `{ tools: undefined, toolChoice:
 * undefined }` when the toolkit is empty.
 */
const prepareTools = /*#__PURE__*/Effect.fnUntraced(function* ({
  config,
  options,
  toolNameMapper
}) {
  // Return immediately if no tools are in the toolkit
  if (options.tools.length === 0) {
    return {
      tools: undefined,
      toolChoice: undefined
    };
  }
  const tools = [];
  let toolChoice = undefined;
  // Filter the incoming tools down to the set of allowed tools as indicated by
  // the tool choice. This must be done here given that there is no tool name
  // in OpenAI's provider-defined tools, so there would be no way to perform
  // this filter otherwise
  let allowedTools = options.tools;
  if (typeof options.toolChoice === "object" && "oneOf" in options.toolChoice) {
    const allowedToolNames = new Set(options.toolChoice.oneOf);
    allowedTools = options.tools.filter(tool => allowedToolNames.has(tool.name));
    toolChoice = options.toolChoice.mode === "required" ? "required" : "auto";
  }
  // Convert the tools in the toolkit to the provider-defined format
  for (const tool of allowedTools) {
    if (Tool.isUserDefined(tool)) {
      // Per-tool strict mode wins over config; defaults to strict.
      const strict = Tool.getStrictMode(tool) ?? config.strictJsonSchema ?? true;
      const parameters = yield* tryToolJsonSchema(tool, "prepareTools");
      tools.push({
        type: "function",
        name: tool.name,
        description: Tool.getDescription(tool) ?? null,
        parameters: parameters,
        strict
      });
    }
    if (Tool.isProviderDefined(tool)) {
      // NOTE(review): provider-defined tools use Tool.getJsonSchema without
      // the OpenAI codec transformer, unlike user-defined tools above —
      // confirm this asymmetry is intentional.
      tools.push({
        type: "function",
        name: tool.providerName,
        description: Tool.getDescription(tool) ?? null,
        parameters: Tool.getJsonSchema(tool),
        strict: config.strictJsonSchema ?? true
      });
    }
  }
  if (options.toolChoice === "auto" || options.toolChoice === "none" || options.toolChoice === "required") {
    toolChoice = options.toolChoice;
  }
  // A `{ tool: name }` choice forces a specific function; prefer the mapped
  // provider name when the mapper knows it.
  if (typeof options.toolChoice === "object" && "tool" in options.toolChoice) {
    const toolName = toolNameMapper.getProviderName(options.toolChoice.tool);
    const providerNames = toolNameMapper.providerNames;
    if (providerNames.includes(toolName)) {
      toolChoice = {
        type: "function",
        name: toolName
      };
    } else {
      toolChoice = {
        type: "function",
        name: options.toolChoice.tool
      };
    }
  }
  return {
    tools,
    toolChoice
  };
});
852
/**
 * Lowers a Responses-API-shaped payload into a Chat Completions request.
 * Optional fields appear on the result only when present on the payload;
 * `max_output_tokens` maps to `max_tokens`.
 */
const toChatCompletionsRequest = payload => {
  const messages = toChatMessages(payload.input);
  const responseFormat = toChatResponseFormat(payload.text?.format);
  const mappedTools = payload.tools === undefined
    ? []
    : payload.tools.map(toChatTool).filter(tool => tool !== undefined);
  const toolChoice = toChatToolChoice(payload.tool_choice);
  // The API rejects an empty `messages` array, so pad with an empty user turn.
  const baseMessages = messages.length > 0 ? messages : [{ role: "user", content: "" }];
  return {
    model: payload.model ?? "",
    messages: baseMessages,
    ...(payload.temperature !== undefined && { temperature: payload.temperature }),
    ...(payload.top_p !== undefined && { top_p: payload.top_p }),
    ...(payload.max_output_tokens !== undefined && { max_tokens: payload.max_output_tokens }),
    ...(payload.user !== undefined && { user: payload.user }),
    ...(payload.seed !== undefined && { seed: payload.seed }),
    ...(payload.parallel_tool_calls !== undefined && { parallel_tool_calls: payload.parallel_tool_calls }),
    ...(payload.service_tier !== undefined && { service_tier: payload.service_tier }),
    ...(responseFormat !== undefined && { response_format: responseFormat }),
    ...(mappedTools.length > 0 && { tools: mappedTools }),
    ...(toolChoice !== undefined && { tool_choice: toolChoice })
  };
};
895
/**
 * Maps a Responses-API text format onto the Chat Completions
 * `response_format`. Absent or unrecognized formats yield `undefined`.
 */
const toChatResponseFormat = format => {
  if (format === undefined || format === null) {
    return undefined;
  }
  if (format.type === "json_object") {
    return { type: "json_object" };
  }
  if (format.type === "json_schema") {
    return {
      type: "json_schema",
      json_schema: {
        name: format.name,
        schema: format.schema,
        ...(format.description !== undefined && { description: format.description }),
        ...(format.strict !== undefined && format.strict !== null && { strict: format.strict })
      }
    };
  }
  return undefined;
};
928
/**
 * Maps a Responses-API tool choice onto the Chat Completions equivalent.
 * String choices pass through; "allowed_tools" collapses to its mode; any
 * other object falls back to a function choice named after its `name`
 * (when a string) or its `type`.
 */
const toChatToolChoice = toolChoice => {
  if (toolChoice === undefined) {
    return undefined;
  }
  if (typeof toolChoice === "string") {
    return toolChoice;
  }
  switch (toolChoice.type) {
    case "allowed_tools":
      return toolChoice.mode;
    case "function":
      return { type: "function", function: { name: toolChoice.name } };
    default: {
      const hasStringName = "name" in toolChoice && typeof toolChoice.name === "string";
      const functionName = hasStringName ? toolChoice.name : toolChoice.type;
      return { type: "function", function: { name: functionName } };
    }
  }
};
954
/**
 * Maps a Responses-API tool definition onto a Chat Completions function
 * tool. Custom (freeform) tools get a permissive object schema; anything
 * other than "function"/"custom" maps to `undefined` and is filtered out
 * by the caller.
 */
const toChatTool = tool => {
  switch (tool.type) {
    case "function": {
      const fn = {
        name: tool.name,
        ...(tool.description !== undefined && { description: tool.description }),
        ...(tool.parameters != null && { parameters: tool.parameters }),
        ...(tool.strict != null && { strict: tool.strict })
      };
      return { type: "function", function: fn };
    }
    case "custom": {
      return {
        type: "function",
        function: {
          name: tool.name,
          parameters: {
            type: "object",
            additionalProperties: true
          }
        }
      };
    }
    default:
      return undefined;
  }
};
986
// Coerce prompt input into an array of chat messages. A bare string becomes
// a single user message; an iterable of structured items is expanded
// item-by-item via toChatMessagesFromItem.
const toChatMessages = input => {
  if (input === undefined) {
    return [];
  }
  if (typeof input === "string") {
    return [{ role: "user", content: input }];
  }
  return [...input].flatMap(item => toChatMessagesFromItem(item));
};
1002
// Expand a single prompt item into zero or more chat messages. Items tagged
// `"message"` carry assistant-style content parts; other items with a `role`
// carry user-style content; tool invocations and results map onto
// `tool_calls` and `tool` messages respectively. Anything else is ignored.
const toChatMessagesFromItem = item => {
  // Mirrors Predicate.hasProperty: a property lookup guarded by an
  // object/function check so primitives never match.
  const has = prop =>
    ((typeof item === "object" && item !== null) || typeof item === "function") && prop in item;
  if (has("type") && item.type === "message") {
    return [{ role: item.role, content: toAssistantChatMessageContent(item.content) }];
  }
  if (has("role")) {
    return [{ role: item.role, content: toChatMessageContent(item.content) }];
  }
  if (item.type === "function_call") {
    // Assistant tool invocation: content is null, call goes in tool_calls.
    return [{
      role: "assistant",
      content: null,
      tool_calls: [{
        id: item.call_id,
        type: "function",
        function: { name: item.name, arguments: item.arguments }
      }]
    }];
  }
  if (item.type === "function_call_output") {
    return [{ role: "tool", tool_call_id: item.call_id, content: stringifyJson(item.output) }];
  }
  return [];
};
1045
// Flatten assistant content parts into a single string, concatenating text
// and refusal segments in order. Returns null when nothing textual remains,
// matching the API's nullable assistant `content` field.
const toAssistantChatMessageContent = content => {
  const pieces = [];
  for (const part of content) {
    if (part.type === "output_text" && typeof part.text === "string") {
      pieces.push(part.text);
    }
    if (part.type === "refusal" && typeof part.refusal === "string") {
      pieces.push(part.refusal);
    }
  }
  const text = pieces.join("");
  return text.length === 0 ? null : text;
};
1057
// Convert user content (a string or an array of typed parts) into Chat
// Completions message content. Text-only results collapse into a single
// newline-joined string; mixed content stays an array of typed parts. Files
// have no native chat representation, so their URL/data/id is surfaced as
// plain text.
const toChatMessageContent = content => {
  if (typeof content === "string") {
    return content;
  }
  const converted = [];
  for (const part of content) {
    if (part.type === "input_text") {
      converted.push({ type: "text", text: part.text });
    } else if (part.type === "input_image") {
      // Prefer an explicit URL; otherwise reference the uploaded file by id.
      const url = part.image_url !== undefined
        ? part.image_url
        : part.file_id !== undefined
          ? `openai://file/${part.file_id}`
          : undefined;
      if (url != null) {
        converted.push({
          type: "image_url",
          image_url: {
            url,
            ...(part.detail != null ? { detail: part.detail } : undefined)
          }
        });
      }
    } else if (part.type === "input_file") {
      // Surface whichever file reference is available, in priority order:
      // url, then inline data, then file id.
      const text = part.file_url !== undefined
        ? part.file_url
        : part.file_data !== undefined
          ? part.file_data
          : part.file_id !== undefined
            ? `openai://file/${part.file_id}`
            : undefined;
      if (text !== undefined) {
        converted.push({ type: "text", text });
      }
    }
  }
  if (converted.length === 0) {
    return "";
  }
  return converted.every(p => p.type === "text")
    ? converted.map(p => p.text).join("\n")
    : converted;
};
1118
// Tool outputs may already be serialized; only stringify non-string values.
const stringifyJson = value => {
  if (typeof value === "string") {
    return value;
  }
  return JSON.stringify(value);
};
1119
+ // =============================================================================
1120
+ // Utilities
1121
+ // =============================================================================
1122
// A string counts as a file id only when the client config declares prefix
// markers and the string starts with one of them.
const isFileId = (data, config) => {
  const prefixes = config.fileIdPrefixes;
  if (prefixes == null) {
    return false;
  }
  return prefixes.some(prefix => data.startsWith(prefix));
};
1123
// Provider metadata: the item id attached to this part, or null when absent.
const getItemId = part => {
  const metadata = part.options.openai;
  return metadata?.itemId ?? null;
};
1124
// Provider metadata: the status recorded on this part, or null when absent.
const getStatus = part => {
  const metadata = part.options.openai;
  return metadata?.status ?? null;
};
1125
// Provider metadata: encrypted reasoning content, or null when absent.
const getEncryptedContent = part => {
  const metadata = part.options.openai;
  return metadata?.encryptedContent ?? null;
};
1126
// Provider metadata: requested image detail level, defaulting to "auto".
const getImageDetail = part => {
  const metadata = part.options.openai;
  return metadata?.imageDetail ?? "auto";
};
1127
// Wrap an item id in a provider-metadata object, omitting the wrapper
// entirely when there is no id to record.
const makeItemIdMetadata = itemId => {
  if (itemId === undefined) {
    return undefined;
  }
  return { itemId };
};
1130
// Validate the requested service tier: known tiers pass through, `undefined`
// stays "not specified", and anything unrecognized maps to `null` so callers
// can distinguish "unset" from "unsupported".
const normalizeServiceTier = serviceTier => {
  if (serviceTier === undefined) {
    return undefined;
  }
  const knownTiers = ["default", "auto", "flex", "scale", "priority"];
  return knownTiers.includes(serviceTier) ? serviceTier : null;
};
1144
// Builds the structured-output response format for a request. When the caller
// requests JSON output, the schema is compiled to JSON Schema and wrapped in
// a named `json_schema` format; otherwise a plain text format is returned.
//
// NOTE(review): relies on module-level helpers (`Effect`, `tryJsonSchema`,
// `AST`) defined elsewhere in this file; their exact failure semantics are
// documented from this call site only.
const prepareResponseFormat = /*#__PURE__*/Effect.fnUntraced(function* ({
  config,
  options
}) {
  if (options.responseFormat.type === "json") {
    const name = options.responseFormat.objectName;
    const schema = options.responseFormat.schema;
    // Schema compilation can fail; tryJsonSchema surfaces that as an Effect error.
    const jsonSchema = yield* tryJsonSchema(schema, "prepareResponseFormat");
    return {
      type: "json_schema",
      name,
      // Fall back to a generic description when the schema declares none.
      description: AST.resolveDescription(schema.ast) ?? "Response with a JSON object",
      schema: jsonSchema,
      // Strict schema validation is enabled unless the config opts out.
      strict: config.strictJsonSchema ?? true
    };
  }
  return {
    type: "text"
  };
});
1164
// Derive per-model capability flags from the model id prefix. Uses an
// allowlist so unknown, fine-tuned, or third-party models default to the
// conservative non-reasoning path with a classic `system` message.
const getModelCapabilities = modelId => {
  const startsWithAny = (...prefixes) => prefixes.some(prefix => modelId.startsWith(prefix));
  // GPT-5 family minus the chat-tuned variants.
  const isGpt5NonChat = startsWithAny("gpt-5") && !startsWithAny("gpt-5-chat");
  const supportsFlexProcessing = startsWithAny("o3", "o4-mini") || isGpt5NonChat;
  const supportsPriorityProcessing =
    startsWithAny("gpt-4", "gpt-5-mini", "o3", "o4-mini") ||
    (isGpt5NonChat && !startsWithAny("gpt-5-nano"));
  // Only known reasoning models get the `developer` role; this avoids
  // breaking fine-tuned, third-party, and custom models.
  const isReasoningModel =
    startsWithAny("o1", "o3", "o4-mini", "codex-mini", "computer-use-preview") || isGpt5NonChat;
  // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
  // GPT-5.1 and GPT-5.2 support temperature, topP, logProbs when reasoningEffort is none.
  const supportsNonReasoningParameters = startsWithAny("gpt-5.1", "gpt-5.2");
  return {
    supportsFlexProcessing,
    supportsPriorityProcessing,
    isReasoningModel,
    systemMessageMode: isReasoningModel ? "developer" : "system",
    supportsNonReasoningParameters
  };
};
1182
// Normalize raw OpenAI usage accounting into the provider-agnostic token
// shape. Missing usage yields a fully-undefined structure rather than zeros
// so "unreported" stays distinguishable from "zero tokens". Cached prompt
// tokens and reasoning tokens are read from the optional detail objects.
const getUsage = usage => {
  if (usage == null) {
    return {
      inputTokens: {
        uncached: undefined,
        total: undefined,
        cacheRead: undefined,
        cacheWrite: undefined
      },
      outputTokens: {
        total: undefined,
        text: undefined,
        reasoning: undefined
      }
    };
  }
  // Detail objects are loosely typed upstream; read numeric fields defensively.
  const detailNumber = (details, field) => {
    if (typeof details !== "object" || details === null) {
      return undefined;
    }
    const value = details[field];
    return typeof value === "number" ? value : undefined;
  };
  const promptTokens = usage.prompt_tokens;
  const completionTokens = usage.completion_tokens;
  const cached = detailNumber(usage.prompt_tokens_details, "cached_tokens") ?? 0;
  const reasoning = detailNumber(usage.completion_tokens_details, "reasoning_tokens") ?? 0;
  return {
    inputTokens: {
      uncached: promptTokens - cached,
      total: promptTokens,
      cacheRead: cached,
      cacheWrite: undefined
    },
    outputTokens: {
      total: completionTokens,
      text: completionTokens - reasoning,
      reasoning
    }
  };
};
1216
// Safely extract a numeric field from a loosely-typed usage detail object.
// Returns undefined for missing/non-object details and non-numeric values.
const getUsageDetailNumber = (details, field) => {
  if (details === null || typeof details !== "object") {
    return undefined;
  }
  const candidate = details[field];
  return typeof candidate === "number" ? candidate : undefined;
};
1223
+ //# sourceMappingURL=OpenAiLanguageModel.js.map