@effect/ai-openai 0.37.1 → 4.0.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (117)
  1. package/dist/Generated.d.ts +70887 -0
  2. package/dist/Generated.d.ts.map +1 -0
  3. package/dist/Generated.js +4 -0
  4. package/dist/Generated.js.map +1 -0
  5. package/dist/OpenAiClient.d.ts +124 -0
  6. package/dist/OpenAiClient.d.ts.map +1 -0
  7. package/dist/OpenAiClient.js +128 -0
  8. package/dist/OpenAiClient.js.map +1 -0
  9. package/dist/{dts/OpenAiConfig.d.ts → OpenAiConfig.d.ts} +9 -9
  10. package/dist/OpenAiConfig.d.ts.map +1 -0
  11. package/dist/{esm/OpenAiConfig.js → OpenAiConfig.js} +8 -5
  12. package/dist/OpenAiConfig.js.map +1 -0
  13. package/dist/OpenAiError.d.ts +98 -0
  14. package/dist/OpenAiError.d.ts.map +1 -0
  15. package/dist/OpenAiError.js +10 -0
  16. package/dist/OpenAiError.js.map +1 -0
  17. package/dist/OpenAiLanguageModel.d.ts +318 -0
  18. package/dist/OpenAiLanguageModel.d.ts.map +1 -0
  19. package/dist/OpenAiLanguageModel.js +2207 -0
  20. package/dist/OpenAiLanguageModel.js.map +1 -0
  21. package/dist/{dts/OpenAiTelemetry.d.ts → OpenAiTelemetry.d.ts} +31 -13
  22. package/dist/OpenAiTelemetry.d.ts.map +1 -0
  23. package/dist/{esm/OpenAiTelemetry.js → OpenAiTelemetry.js} +11 -6
  24. package/dist/OpenAiTelemetry.js.map +1 -0
  25. package/dist/OpenAiTool.d.ts +479 -0
  26. package/dist/OpenAiTool.d.ts.map +1 -0
  27. package/dist/OpenAiTool.js +231 -0
  28. package/dist/OpenAiTool.js.map +1 -0
  29. package/dist/index.d.ts +58 -0
  30. package/dist/index.d.ts.map +1 -0
  31. package/dist/index.js +59 -0
  32. package/dist/index.js.map +1 -0
  33. package/dist/internal/errors.d.ts +2 -0
  34. package/dist/internal/errors.d.ts.map +1 -0
  35. package/dist/internal/errors.js +316 -0
  36. package/dist/internal/errors.js.map +1 -0
  37. package/dist/{dts/internal → internal}/utilities.d.ts.map +1 -1
  38. package/dist/{esm/internal → internal}/utilities.js +4 -3
  39. package/dist/internal/utilities.js.map +1 -0
  40. package/package.json +45 -97
  41. package/src/Generated.ts +28521 -20036
  42. package/src/OpenAiClient.ts +222 -1810
  43. package/src/OpenAiConfig.ts +20 -34
  44. package/src/OpenAiError.ts +107 -0
  45. package/src/OpenAiLanguageModel.ts +1807 -638
  46. package/src/OpenAiTelemetry.ts +24 -19
  47. package/src/OpenAiTool.ts +216 -70
  48. package/src/index.ts +35 -8
  49. package/src/internal/errors.ts +347 -0
  50. package/src/internal/utilities.ts +7 -5
  51. package/Generated/package.json +0 -6
  52. package/OpenAiClient/package.json +0 -6
  53. package/OpenAiConfig/package.json +0 -6
  54. package/OpenAiEmbeddingModel/package.json +0 -6
  55. package/OpenAiLanguageModel/package.json +0 -6
  56. package/OpenAiTelemetry/package.json +0 -6
  57. package/OpenAiTokenizer/package.json +0 -6
  58. package/OpenAiTool/package.json +0 -6
  59. package/README.md +0 -5
  60. package/dist/cjs/Generated.js +0 -7150
  61. package/dist/cjs/Generated.js.map +0 -1
  62. package/dist/cjs/OpenAiClient.js +0 -1562
  63. package/dist/cjs/OpenAiClient.js.map +0 -1
  64. package/dist/cjs/OpenAiConfig.js +0 -30
  65. package/dist/cjs/OpenAiConfig.js.map +0 -1
  66. package/dist/cjs/OpenAiEmbeddingModel.js +0 -155
  67. package/dist/cjs/OpenAiEmbeddingModel.js.map +0 -1
  68. package/dist/cjs/OpenAiLanguageModel.js +0 -1147
  69. package/dist/cjs/OpenAiLanguageModel.js.map +0 -1
  70. package/dist/cjs/OpenAiTelemetry.js +0 -38
  71. package/dist/cjs/OpenAiTelemetry.js.map +0 -1
  72. package/dist/cjs/OpenAiTokenizer.js +0 -83
  73. package/dist/cjs/OpenAiTokenizer.js.map +0 -1
  74. package/dist/cjs/OpenAiTool.js +0 -93
  75. package/dist/cjs/OpenAiTool.js.map +0 -1
  76. package/dist/cjs/index.js +0 -24
  77. package/dist/cjs/index.js.map +0 -1
  78. package/dist/cjs/internal/utilities.js +0 -32
  79. package/dist/cjs/internal/utilities.js.map +0 -1
  80. package/dist/dts/Generated.d.ts +0 -40661
  81. package/dist/dts/Generated.d.ts.map +0 -1
  82. package/dist/dts/OpenAiClient.d.ts +0 -3119
  83. package/dist/dts/OpenAiClient.d.ts.map +0 -1
  84. package/dist/dts/OpenAiConfig.d.ts.map +0 -1
  85. package/dist/dts/OpenAiEmbeddingModel.d.ts +0 -109
  86. package/dist/dts/OpenAiEmbeddingModel.d.ts.map +0 -1
  87. package/dist/dts/OpenAiLanguageModel.d.ts +0 -235
  88. package/dist/dts/OpenAiLanguageModel.d.ts.map +0 -1
  89. package/dist/dts/OpenAiTelemetry.d.ts.map +0 -1
  90. package/dist/dts/OpenAiTokenizer.d.ts +0 -17
  91. package/dist/dts/OpenAiTokenizer.d.ts.map +0 -1
  92. package/dist/dts/OpenAiTool.d.ts +0 -200
  93. package/dist/dts/OpenAiTool.d.ts.map +0 -1
  94. package/dist/dts/index.d.ts +0 -33
  95. package/dist/dts/index.d.ts.map +0 -1
  96. package/dist/esm/Generated.js +0 -7150
  97. package/dist/esm/Generated.js.map +0 -1
  98. package/dist/esm/OpenAiClient.js +0 -1499
  99. package/dist/esm/OpenAiClient.js.map +0 -1
  100. package/dist/esm/OpenAiConfig.js.map +0 -1
  101. package/dist/esm/OpenAiEmbeddingModel.js +0 -143
  102. package/dist/esm/OpenAiEmbeddingModel.js.map +0 -1
  103. package/dist/esm/OpenAiLanguageModel.js +0 -1134
  104. package/dist/esm/OpenAiLanguageModel.js.map +0 -1
  105. package/dist/esm/OpenAiTelemetry.js.map +0 -1
  106. package/dist/esm/OpenAiTokenizer.js +0 -73
  107. package/dist/esm/OpenAiTokenizer.js.map +0 -1
  108. package/dist/esm/OpenAiTool.js +0 -84
  109. package/dist/esm/OpenAiTool.js.map +0 -1
  110. package/dist/esm/index.js +0 -33
  111. package/dist/esm/index.js.map +0 -1
  112. package/dist/esm/internal/utilities.js.map +0 -1
  113. package/dist/esm/package.json +0 -4
  114. package/index/package.json +0 -6
  115. package/src/OpenAiEmbeddingModel.ts +0 -243
  116. package/src/OpenAiTokenizer.ts +0 -70
  117. /package/dist/{dts/internal → internal}/utilities.d.ts +0 -0
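The hunks that follow are from package/src/OpenAiLanguageModel.ts and carry the headline changes of the 4.0.0-beta line: the AI primitives move from @effect/ai/* into effect/unstable/ai/*, the Config tag is rebuilt on effect/ServiceMap, the tokenizer integration is dropped (left as a TODO), and new configuration fields such as strictJsonSchema and text.verbosity appear. As orientation before reading the diff, a minimal setup sketch under those assumptions (the model id is a placeholder, the root-namespace import shape is assumed, and an OpenAiClient layer must still be provided, per the Layer.Layer<LanguageModel.LanguageModel, never, OpenAiClient> return type of layer):

import { OpenAiLanguageModel } from "@effect/ai-openai"

// Sketch only: "gpt-5-mini" is a placeholder model id, not taken from this diff.
// The layer constructor and the text.verbosity / strictJsonSchema config fields
// are the ones added in the hunks below; the OpenAiClient dependency is
// satisfied by a separate layer that is not shown here.
const LanguageModelLayer = OpenAiLanguageModel.layer({
  model: "gpt-5-mini",
  config: {
    text: { verbosity: "low" },
    strictJsonSchema: true
  }
})
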
@@ -1,114 +1,117 @@
1
1
  /**
2
+ * OpenAI Language Model implementation.
3
+ *
4
+ * Provides a LanguageModel implementation for OpenAI's responses API,
5
+ * supporting text generation, structured output, tool calling, and streaming.
6
+ *
2
7
  * @since 1.0.0
3
8
  */
4
- import * as AiError from "@effect/ai/AiError"
5
- import * as IdGenerator from "@effect/ai/IdGenerator"
6
- import * as LanguageModel from "@effect/ai/LanguageModel"
7
- import * as AiModel from "@effect/ai/Model"
8
- import type * as Prompt from "@effect/ai/Prompt"
9
- import type * as Response from "@effect/ai/Response"
10
- import type * as Tokenizer from "@effect/ai/Tokenizer"
11
- import * as Tool from "@effect/ai/Tool"
12
- import * as Context from "effect/Context"
13
9
  import * as DateTime from "effect/DateTime"
14
10
  import * as Effect from "effect/Effect"
15
- import * as Encoding from "effect/Encoding"
11
+ import * as Base64 from "effect/encoding/Base64"
16
12
  import { dual } from "effect/Function"
17
13
  import * as Layer from "effect/Layer"
18
14
  import * as Predicate from "effect/Predicate"
15
+ import * as Redactable from "effect/Redactable"
16
+ import * as Schema from "effect/Schema"
17
+ import * as AST from "effect/SchemaAST"
18
+ import * as ServiceMap from "effect/ServiceMap"
19
19
  import * as Stream from "effect/Stream"
20
20
  import type { Span } from "effect/Tracer"
21
- import type { DeepMutable, Mutable, Simplify } from "effect/Types"
22
- import type * as Generated from "./Generated.js"
23
- import * as InternalUtilities from "./internal/utilities.js"
24
- import type { ResponseStreamEvent } from "./OpenAiClient.js"
25
- import { OpenAiClient } from "./OpenAiClient.js"
26
- import { addGenAIAnnotations } from "./OpenAiTelemetry.js"
27
- import * as OpenAiTokenizer from "./OpenAiTokenizer.js"
28
- import * as OpenAiTool from "./OpenAiTool.js"
21
+ import type { DeepMutable, Simplify } from "effect/Types"
22
+ import * as AiError from "effect/unstable/ai/AiError"
23
+ import * as IdGenerator from "effect/unstable/ai/IdGenerator"
24
+ import * as LanguageModel from "effect/unstable/ai/LanguageModel"
25
+ import * as AiModel from "effect/unstable/ai/Model"
26
+ import type * as Prompt from "effect/unstable/ai/Prompt"
27
+ import type * as Response from "effect/unstable/ai/Response"
28
+ import * as Tool from "effect/unstable/ai/Tool"
29
+ import type * as HttpClientRequest from "effect/unstable/http/HttpClientRequest"
30
+ import type * as HttpClientResponse from "effect/unstable/http/HttpClientResponse"
31
+ import * as Generated from "./Generated.ts"
32
+ import * as InternalUtilities from "./internal/utilities.ts"
33
+ import { OpenAiClient } from "./OpenAiClient.ts"
34
+ import { addGenAIAnnotations } from "./OpenAiTelemetry.ts"
35
+ import type * as OpenAiTool from "./OpenAiTool.ts"
36
+
37
+ const ResponseModelIds = Generated.ModelIdsResponses.members[1]
38
+ const SharedModelIds = Generated.ModelIdsShared.members[1]
29
39
 
30
40
  /**
31
41
  * @since 1.0.0
32
- * @category Models
42
+ * @category models
33
43
  */
34
- export type Model = typeof Generated.ChatModel.Encoded | typeof Generated.ModelIdsResponsesEnum.Encoded
44
+ export type Model = typeof ResponseModelIds.Encoded | typeof SharedModelIds.Encoded
45
+
46
+ /**
47
+ * Image detail level for vision requests.
48
+ */
49
+ type ImageDetail = "auto" | "low" | "high"
35
50
 
36
51
  // =============================================================================
37
52
  // Configuration
38
53
  // =============================================================================
39
54
 
40
55
  /**
56
+ * Service definition for OpenAI language model configuration.
57
+ *
41
58
  * @since 1.0.0
42
- * @category Context
59
+ * @category services
43
60
  */
44
- export class Config extends Context.Tag("@effect/ai-openai/OpenAiLanguageModel/Config")<
61
+ export class Config extends ServiceMap.Service<
45
62
  Config,
46
- Config.Service
47
- >() {
48
- /**
49
- * @since 1.0.0
50
- */
51
- static readonly getOrUndefined: Effect.Effect<Config.Service | undefined> = Effect.map(
52
- Effect.context<never>(),
53
- (context) => context.unsafeMap.get(Config.key)
54
- )
55
- }
56
-
57
- /**
58
- * @since 1.0.0
59
- */
60
- export declare namespace Config {
61
- /**
62
- * @since 1.0.0
63
- * @category Models
64
- */
65
- export interface Service extends
66
- Simplify<
67
- Partial<
68
- Omit<
69
- typeof Generated.CreateResponse.Encoded,
70
- "input" | "tools" | "tool_choice" | "stream" | "text"
71
- >
63
+ Simplify<
64
+ & Partial<
65
+ Omit<
66
+ typeof Generated.CreateResponse.Encoded,
67
+ "input" | "tools" | "tool_choice" | "stream" | "text"
72
68
  >
73
69
  >
74
- {
75
- /**
76
- * File ID prefixes used to identify file IDs in Responses API.
77
- * When undefined, all file data is treated as base64 content.
78
- *
79
- * Examples:
80
- * - OpenAI: ['file-'] for IDs like 'file-abc123'
81
- * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
82
- */
83
- readonly fileIdPrefixes?: ReadonlyArray<string>
84
- /**
85
- * Configuration options for a text response from the model.
86
- */
87
- readonly text?: {
70
+ & {
71
+ /**
72
+ * File ID prefixes used to identify file IDs in Responses API.
73
+ * When undefined, all file data is treated as base64 content.
74
+ *
75
+ * Examples:
76
+ * - OpenAI: ['file-'] for IDs like 'file-abc123'
77
+ * - Azure OpenAI: ['assistant-'] for IDs like 'assistant-abc123'
78
+ */
79
+ readonly fileIdPrefixes?: ReadonlyArray<string> | undefined
80
+ /**
81
+ * Configuration options for a text response from the model.
82
+ */
83
+ readonly text?: {
84
+ /**
85
+ * Constrains the verbosity of the model's response. Lower values will
86
+ * result in more concise responses, while higher values will result in
87
+ * more verbose responses.
88
+ *
89
+ * Defaults to `"medium"`.
90
+ */
91
+ readonly verbosity?: "low" | "medium" | "high" | undefined
92
+ } | undefined
88
93
  /**
89
- * Constrains the verbosity of the model's response. Lower values will
90
- * result in more concise responses, while higher values will result in
91
- * more verbose responses.
94
+ * Whether to use strict JSON schema validation.
92
95
  *
93
- * Defaults to `"medium"`.
96
+ * Defaults to `true`.
94
97
  */
95
- readonly verbosity?: "low" | "medium" | "high"
98
+ readonly strictJsonSchema?: boolean | undefined
96
99
  }
97
- }
98
- }
100
+ >
101
+ >()("@effect/ai-openai/OpenAiLanguageModel/Config") {}
99
102
 
100
103
  // =============================================================================
101
- // OpenAI Provider Options / Metadata
104
+ // Provider Options / Metadata
102
105
  // =============================================================================
103
106
 
104
- declare module "@effect/ai/Prompt" {
107
+ declare module "effect/unstable/ai/Prompt" {
105
108
  export interface FilePartOptions extends ProviderOptions {
106
109
  readonly openai?: {
107
110
  /**
108
111
  * The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`.
109
112
  */
110
- readonly imageDetail?: typeof Generated.ImageDetail.Encoded | undefined
111
- } | undefined
113
+ readonly imageDetail?: ImageDetail | null
114
+ } | null
112
115
  }
113
116
 
114
117
  export interface ReasoningPartOptions extends ProviderOptions {
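
The hunk above replaces the Context.Tag-plus-namespace definition of Config with a single ServiceMap.Service keyed at "@effect/ai-openai/OpenAiLanguageModel/Config", inlining the former Config.Service interface as the service shape and adding the strictJsonSchema flag. Overrides are applied per effect via withConfigOverride (changed later in this diff), which merges the given fields over any Config already in context before providing the service. A hedged usage sketch, assuming a pre-existing effect that performs a generation:

import * as Effect from "effect/Effect"
import { OpenAiLanguageModel } from "@effect/ai-openai"

// Hypothetical effect that calls the language model; not part of this diff.
declare const generate: Effect.Effect<string>

// withConfigOverride merges these fields over any Config service already in
// context, then provides the merged Config to the wrapped effect.
const concise = generate.pipe(
  OpenAiLanguageModel.withConfigOverride({
    text: { verbosity: "low" }
  })
)
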
@@ -116,14 +119,14 @@ declare module "@effect/ai/Prompt" {
116
119
  /**
117
120
  * The ID of the item to reference.
118
121
  */
119
- readonly itemId?: string | undefined
122
+ readonly itemId?: string | null
120
123
  /**
121
124
  * The encrypted content of the reasoning item - populated when a response
122
125
  * is generated with `reasoning.encrypted_content` in the `include`
123
126
  * parameter.
124
127
  */
125
- readonly encryptedContent?: string | undefined
126
- } | undefined
128
+ readonly encryptedContent?: string | null
129
+ } | null
127
130
  }
128
131
 
129
132
  export interface ToolCallPartOptions extends ProviderOptions {
@@ -131,8 +134,33 @@ declare module "@effect/ai/Prompt" {
131
134
  /**
132
135
  * The ID of the item to reference.
133
136
  */
134
- readonly itemId?: string | undefined
135
- } | undefined
137
+ readonly itemId?: string | null
138
+ /**
139
+ * The status of item.
140
+ */
141
+ readonly status?: typeof Generated.Message.Encoded["status"] | null
142
+ /**
143
+ * The ID of the approval request.
144
+ */
145
+ readonly approvalRequestId?: string | null
146
+ } | null
147
+ }
148
+
149
+ export interface ToolResultPartOptions extends ProviderOptions {
150
+ readonly openai?: {
151
+ /**
152
+ * The ID of the item to reference.
153
+ */
154
+ readonly itemId?: string | null
155
+ /**
156
+ * The status of item.
157
+ */
158
+ readonly status?: typeof Generated.Message.Encoded["status"] | null
159
+ /**
160
+ * The ID of the approval request.
161
+ */
162
+ readonly approvalId?: string | null
163
+ } | null
136
164
  }
137
165
 
138
166
  export interface TextPartOptions extends ProviderOptions {
@@ -140,71 +168,122 @@ declare module "@effect/ai/Prompt" {
140
168
  /**
141
169
  * The ID of the item to reference.
142
170
  */
143
- readonly itemId?: string | undefined
144
- } | undefined
171
+ readonly itemId?: string | null
172
+ /**
173
+ * The status of item.
174
+ */
175
+ readonly status?: typeof Generated.Message.Encoded["status"] | null
176
+ /**
177
+ * A list of annotations that apply to the output text.
178
+ */
179
+ readonly annotations?: ReadonlyArray<typeof Generated.Annotation.Encoded> | null
180
+ } | null
145
181
  }
146
182
  }
147
183
 
148
- declare module "@effect/ai/Response" {
184
+ declare module "effect/unstable/ai/Response" {
149
185
  export interface TextPartMetadata extends ProviderMetadata {
150
186
  readonly openai?: {
151
- readonly itemId?: string | undefined
187
+ readonly itemId?: string | null
152
188
  /**
153
189
  * If the model emits a refusal content part, the refusal explanation
154
190
  * from the model will be contained in the metadata of an empty text
155
191
  * part.
156
192
  */
157
- readonly refusal?: string | undefined
158
- } | undefined
193
+ readonly refusal?: string | null
194
+ /**
195
+ * The status of item.
196
+ */
197
+ readonly status?: typeof Generated.Message.Encoded["status"] | null
198
+ /**
199
+ * The text content part annotations.
200
+ */
201
+ readonly annotations?: ReadonlyArray<typeof Generated.Annotation.Encoded> | null
202
+ }
159
203
  }
160
204
 
161
205
  export interface TextStartPartMetadata extends ProviderMetadata {
162
206
  readonly openai?: {
163
- readonly itemId?: string | undefined
164
- } | undefined
207
+ readonly itemId?: string | null
208
+ } | null
209
+ }
210
+
211
+ export interface TextEndPartMetadata extends ProviderMetadata {
212
+ readonly openai?: {
213
+ readonly itemId?: string | null
214
+ readonly annotations?: ReadonlyArray<typeof Generated.Annotation.Encoded> | null
215
+ } | null
165
216
  }
166
217
 
167
218
  export interface ReasoningPartMetadata extends ProviderMetadata {
168
219
  readonly openai?: {
169
- readonly itemId?: string | undefined
170
- readonly encryptedContent?: string | undefined
171
- } | undefined
220
+ readonly itemId?: string | null
221
+ readonly encryptedContent?: string | null
222
+ } | null
172
223
  }
173
224
 
174
225
  export interface ReasoningStartPartMetadata extends ProviderMetadata {
175
226
  readonly openai?: {
176
- readonly itemId?: string | undefined
177
- readonly encryptedContent?: string | undefined
178
- } | undefined
227
+ readonly itemId?: string | null
228
+ readonly encryptedContent?: string | null
229
+ } | null
179
230
  }
180
231
 
181
232
  export interface ReasoningDeltaPartMetadata extends ProviderMetadata {
182
233
  readonly openai?: {
183
- readonly itemId?: string | undefined
184
- } | undefined
234
+ readonly itemId?: string | null
235
+ } | null
185
236
  }
186
237
 
187
238
  export interface ReasoningEndPartMetadata extends ProviderMetadata {
188
239
  readonly openai?: {
189
- readonly itemId?: string | undefined
190
- readonly encryptedContent?: string | undefined
191
- } | undefined
240
+ readonly itemId?: string | null
241
+ readonly encryptedContent?: string
242
+ } | null
192
243
  }
193
244
 
194
245
  export interface ToolCallPartMetadata extends ProviderMetadata {
195
246
  readonly openai?: {
196
- readonly itemId?: string | undefined
197
- } | undefined
247
+ readonly itemId?: string | null
248
+ } | null
198
249
  }
199
250
 
200
251
  export interface DocumentSourcePartMetadata extends ProviderMetadata {
201
- readonly openai?: {
202
- readonly type: "file_citation"
203
- /**
204
- * The index of the file in the list of files.
205
- */
206
- readonly index: number
207
- } | undefined
252
+ readonly openai?:
253
+ | {
254
+ readonly type: "file_citation"
255
+ /**
256
+ * The index of the file in the list of files.
257
+ */
258
+ readonly index: number
259
+ /**
260
+ * The ID of the file.
261
+ */
262
+ readonly fileId: string
263
+ }
264
+ | {
265
+ readonly type: "file_path"
266
+ /**
267
+ * The index of the file in the list of files.
268
+ */
269
+ readonly index: number
270
+ /**
271
+ * The ID of the file.
272
+ */
273
+ readonly fileId: string
274
+ }
275
+ | {
276
+ readonly type: "container_file_citation"
277
+ /**
278
+ * The ID of the file.
279
+ */
280
+ readonly fileId: string
281
+ /**
282
+ * The ID of the container file.
283
+ */
284
+ readonly containerId: string
285
+ }
286
+ | null
208
287
  }
209
288
 
210
289
  export interface UrlSourcePartMetadata extends ProviderMetadata {
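
The hunks above widen the Prompt provider options: file, reasoning, tool-call, tool-result, and text parts can now carry OpenAI-specific itemId, status, annotations, and approval identifiers, and the optional fields switch from "| undefined" to "| null". A shape-only sketch of the file-part options enabled by FilePartOptions (how the options object is attached to a prompt part belongs to the Prompt module, not this diff):

// Shape sketch only: the openai provider options a prompt file part can carry
// after this change. Per the doc comment above, the detail level defaults to
// "auto" when omitted.
const fileProviderOptions: {
  readonly openai?: { readonly imageDetail?: "auto" | "low" | "high" | null } | null
} = {
  openai: { imageDetail: "low" }
}
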
@@ -218,82 +297,92 @@ declare module "@effect/ai/Response" {
218
297
  * The index of the last character of the URL citation in the message.
219
298
  */
220
299
  readonly endIndex: number
221
- } | undefined
300
+ } | null
222
301
  }
223
302
 
224
303
  export interface FinishPartMetadata extends ProviderMetadata {
225
304
  readonly openai?: {
226
- readonly serviceTier?: "default" | "auto" | "flex" | "scale" | "priority" | undefined
227
- } | undefined
228
- }
229
- }
230
-
231
- /**
232
- * @since 1.0.0
233
- */
234
- export declare namespace ProviderMetadata {
235
- /**
236
- * @since 1.0.0
237
- * @category Provider Metadata
238
- */
239
- export interface Service {
240
- "source": {} | {}
305
+ readonly serviceTier?: "default" | "auto" | "flex" | "scale" | "priority" | null
306
+ } | null
241
307
  }
242
308
  }
243
309
 
244
310
  // =============================================================================
245
- // OpenAI Language Model
311
+ // Language Model
246
312
  // =============================================================================
247
313
 
248
314
  /**
249
315
  * @since 1.0.0
250
- * @category Ai Models
316
+ * @category constructors
251
317
  */
252
318
  export const model = (
253
319
  model: (string & {}) | Model,
254
- config?: Omit<Config.Service, "model">
320
+ config?: Omit<typeof Config.Service, "model">
255
321
  ): AiModel.Model<"openai", LanguageModel.LanguageModel, OpenAiClient> =>
256
322
  AiModel.make("openai", layer({ model, config }))
257
323
 
258
- /**
259
- * @since 1.0.0
260
- * @category Ai Models
261
- */
262
- export const modelWithTokenizer = (
263
- model: (string & {}) | Model,
264
- config?: Omit<Config.Service, "model">
265
- ): AiModel.Model<"openai", LanguageModel.LanguageModel | Tokenizer.Tokenizer, OpenAiClient> =>
266
- AiModel.make("openai", layerWithTokenizer({ model, config }))
324
+ // TODO
325
+ // /**
326
+ // * @since 1.0.0
327
+ // * @category constructors
328
+ // */
329
+ // export const modelWithTokenizer = (
330
+ // model: (string & {}) | Model,
331
+ // config?: Omit<typeof Config.Service, "model">
332
+ // ): AiModel.Model<"openai", LanguageModel.LanguageModel | Tokenizer.Tokenizer, OpenAiClient> =>
333
+ // AiModel.make("openai", layerWithTokenizer({ model, config }))
267
334
 
268
335
  /**
336
+ * Creates an OpenAI language model service.
337
+ *
269
338
  * @since 1.0.0
270
- * @category Constructors
339
+ * @category constructors
271
340
  */
272
- export const make = Effect.fnUntraced(function*(options: {
341
+ export const make = Effect.fnUntraced(function*({ model, config: providerConfig }: {
273
342
  readonly model: (string & {}) | Model
274
- readonly config?: Omit<Config.Service, "model">
275
- }) {
343
+ readonly config?: Omit<typeof Config.Service, "model"> | undefined
344
+ }): Effect.fn.Return<LanguageModel.Service, never, OpenAiClient> {
276
345
  const client = yield* OpenAiClient
277
346
 
278
- const makeRequest: (providerOptions: LanguageModel.ProviderOptions) => Effect.Effect<
279
- typeof Generated.CreateResponse.Encoded,
280
- AiError.AiError
281
- > = Effect.fnUntraced(
282
- function*(providerOptions) {
283
- const context = yield* Effect.context<never>()
284
- const config = { model: options.model, ...options.config, ...context.unsafeMap.get(Config.key) }
285
- const messages = yield* prepareMessages(providerOptions, config)
286
- const { toolChoice, tools } = yield* prepareTools(providerOptions)
287
- const include = prepareInclude(providerOptions, config)
288
- const responseFormat = prepareResponseFormat(providerOptions)
289
- const verbosity = config.text?.verbosity
347
+ const makeConfig = Effect.gen(function*() {
348
+ const services = yield* Effect.services<never>()
349
+ return { model, ...providerConfig, ...services.mapUnsafe.get(Config.key) }
350
+ })
351
+
352
+ const makeRequest = Effect.fnUntraced(
353
+ function*<Tools extends ReadonlyArray<Tool.Any>>({ config, options, toolNameMapper }: {
354
+ readonly config: typeof Config.Service
355
+ readonly options: LanguageModel.ProviderOptions
356
+ readonly toolNameMapper: Tool.NameMapper<Tools>
357
+ }): Effect.fn.Return<typeof Generated.CreateResponse.Encoded, AiError.AiError> {
358
+ const include = new Set<typeof Generated.IncludeEnum.Encoded>()
359
+ const capabilities = getModelCapabilities(config.model!)
360
+ const messages = yield* prepareMessages({
361
+ config,
362
+ options,
363
+ capabilities,
364
+ include,
365
+ toolNameMapper
366
+ })
367
+ const { toolChoice, tools } = yield* prepareTools({
368
+ config,
369
+ options,
370
+ toolNameMapper
371
+ })
372
+ const responseFormat = prepareResponseFormat({
373
+ config,
374
+ options
375
+ })
290
376
  const request: typeof Generated.CreateResponse.Encoded = {
291
377
  ...config,
292
378
  input: messages,
293
- include,
294
- text: { format: responseFormat, verbosity },
295
- tools,
296
- tool_choice: toolChoice
379
+ include: include.size > 0 ? Array.from(include) : null,
380
+ text: {
381
+ verbosity: config.text?.verbosity ?? null,
382
+ format: responseFormat
383
+ },
384
+ ...(Predicate.isNotUndefined(tools) ? { tools } : undefined),
385
+ ...(Predicate.isNotUndefined(toolChoice) ? { tool_choice: toolChoice } : undefined)
297
386
  }
298
387
  return request
299
388
  }
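
This hunk also introduces the model constructor, which wraps layer in an AiModel.Model<"openai", LanguageModel.LanguageModel, OpenAiClient>, and makeConfig, which merges settings with increasing precedence: the base model id, then the constructor config, then any Config service found in the ambient services. A construction sketch under those assumptions:

import { OpenAiLanguageModel } from "@effect/ai-openai"

// Sketch: model() pairs a model id with optional config and defers to layer().
// The id below is a placeholder; any string is accepted alongside the
// generated Model union.
const Gpt = OpenAiLanguageModel.model("gpt-5-mini", {
  text: { verbosity: "medium" }
})
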
@@ -302,22 +391,37 @@ export const make = Effect.fnUntraced(function*(options: {
302
391
  return yield* LanguageModel.make({
303
392
  generateText: Effect.fnUntraced(
304
393
  function*(options) {
305
- const request = yield* makeRequest(options)
394
+ const config = yield* makeConfig
395
+ const toolNameMapper = new Tool.NameMapper(options.tools)
396
+ const request = yield* makeRequest({ config, options, toolNameMapper })
306
397
  annotateRequest(options.span, request)
307
- const rawResponse = yield* client.createResponse(request)
398
+ const [rawResponse, response] = yield* client.createResponse(request)
308
399
  annotateResponse(options.span, rawResponse)
309
- return yield* makeResponse(rawResponse, options)
400
+ return yield* makeResponse({
401
+ options,
402
+ rawResponse,
403
+ response,
404
+ toolNameMapper
405
+ })
310
406
  }
311
407
  ),
312
408
  streamText: Effect.fnUntraced(
313
409
  function*(options) {
314
- const request = yield* makeRequest(options)
410
+ const config = yield* makeConfig
411
+ const toolNameMapper = new Tool.NameMapper(options.tools)
412
+ const request = yield* makeRequest({ config, options, toolNameMapper })
315
413
  annotateRequest(options.span, request)
316
- return client.createResponseStream(request)
414
+ const [response, stream] = yield* client.createResponseStream(request)
415
+ return yield* makeStreamResponse({
416
+ stream,
417
+ response,
418
+ config,
419
+ options,
420
+ toolNameMapper
421
+ })
317
422
  },
318
423
  (effect, options) =>
319
424
  effect.pipe(
320
- Effect.flatMap((stream) => makeStreamResponse(stream, options)),
321
425
  Stream.unwrap,
322
426
  Stream.map((response) => {
323
427
  annotateStreamResponse(options.span, response)
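
In this hunk, generateText and streamText each build the per-call config and a Tool.NameMapper before issuing the request, and the client now returns the raw HTTP response alongside the decoded payload (the [rawResponse, response] and [response, stream] tuples). From the consumer side this stays behind the LanguageModel service; a hedged sketch, assuming the LanguageModel.generateText accessor and its response.text getter carry over from @effect/ai into effect/unstable/ai:

import * as Effect from "effect/Effect"
import * as LanguageModel from "effect/unstable/ai/LanguageModel"

// Assumed consumer API: generateText existed as a LanguageModel accessor in
// @effect/ai, and the import path above is the one this diff migrates to. The
// options and result shapes are assumptions based on earlier versions.
const summarize = Effect.gen(function*() {
  const response = yield* LanguageModel.generateText({
    prompt: "Summarize the changes in this release"
  })
  return response.text
})
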
@@ -329,55 +433,61 @@ export const make = Effect.fnUntraced(function*(options: {
329
433
  })
330
434
 
331
435
  /**
436
+ * Creates a layer for the OpenAI language model.
437
+ *
332
438
  * @since 1.0.0
333
- * @category Layers
439
+ * @category layers
334
440
  */
335
441
  export const layer = (options: {
336
442
  readonly model: (string & {}) | Model
337
- readonly config?: Omit<Config.Service, "model">
443
+ readonly config?: Omit<typeof Config.Service, "model"> | undefined
338
444
  }): Layer.Layer<LanguageModel.LanguageModel, never, OpenAiClient> =>
339
- Layer.effect(LanguageModel.LanguageModel, make({ model: options.model, config: options.config }))
340
-
341
- /**
342
- * @since 1.0.0
343
- * @category Layers
344
- */
345
- export const layerWithTokenizer = (options: {
346
- readonly model: (string & {}) | Model
347
- readonly config?: Omit<Config.Service, "model">
348
- }): Layer.Layer<LanguageModel.LanguageModel | Tokenizer.Tokenizer, never, OpenAiClient> =>
349
- Layer.merge(layer(options), OpenAiTokenizer.layer(options))
445
+ Layer.effect(LanguageModel.LanguageModel, make(options))
350
446
 
351
447
  /**
448
+ * Provides config overrides for OpenAI language model operations.
449
+ *
352
450
  * @since 1.0.0
353
- * @category Configuration
451
+ * @category configuration
354
452
  */
355
453
  export const withConfigOverride: {
356
454
  /**
455
+ * Provides config overrides for OpenAI language model operations.
456
+ *
357
457
  * @since 1.0.0
358
- * @category Configuration
458
+ * @category configuration
359
459
  */
360
- (overrides: Config.Service): <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>
460
+ (overrides: typeof Config.Service): <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, Exclude<R, Config>>
361
461
  /**
462
+ * Provides config overrides for OpenAI language model operations.
463
+ *
362
464
  * @since 1.0.0
363
- * @category Configuration
465
+ * @category configuration
364
466
  */
365
- <A, E, R>(self: Effect.Effect<A, E, R>, overrides: Config.Service): Effect.Effect<A, E, R>
467
+ <A, E, R>(self: Effect.Effect<A, E, R>, overrides: typeof Config.Service): Effect.Effect<A, E, Exclude<R, Config>>
366
468
  } = dual<
367
469
  /**
470
+ * Provides config overrides for OpenAI language model operations.
471
+ *
368
472
  * @since 1.0.0
369
- * @category Configuration
473
+ * @category configuration
370
474
  */
371
- (overrides: Config.Service) => <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>,
475
+ (overrides: typeof Config.Service) => <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, Exclude<R, Config>>,
372
476
  /**
477
+ * Provides config overrides for OpenAI language model operations.
478
+ *
373
479
  * @since 1.0.0
374
- * @category Configuration
480
+ * @category configuration
375
481
  */
376
- <A, E, R>(self: Effect.Effect<A, E, R>, overrides: Config.Service) => Effect.Effect<A, E, R>
482
+ <A, E, R>(self: Effect.Effect<A, E, R>, overrides: typeof Config.Service) => Effect.Effect<A, E, Exclude<R, Config>>
377
483
  >(2, (self, overrides) =>
378
484
  Effect.flatMap(
379
- Config.getOrUndefined,
380
- (config) => Effect.provideService(self, Config, { ...config, ...overrides })
485
+ Effect.serviceOption(Config),
486
+ (config) =>
487
+ Effect.provideService(self, Config, {
488
+ ...(config._tag === "Some" ? config.value : {}),
489
+ ...overrides
490
+ })
381
491
  ))
382
492
 
383
493
  // =============================================================================
@@ -392,467 +502,898 @@ const getSystemMessageMode = (model: string): "system" | "developer" =>
392
502
  ? "developer"
393
503
  : "system"
394
504
 
395
- const prepareMessages: (
396
- options: LanguageModel.ProviderOptions,
397
- config: Config.Service
398
- ) => Effect.Effect<
399
- ReadonlyArray<typeof Generated.InputItem.Encoded>,
400
- AiError.AiError
401
- > = Effect.fnUntraced(function*(options, config) {
402
- const messages: Array<typeof Generated.InputItem.Encoded> = []
403
-
404
- for (const message of options.prompt.content) {
405
- switch (message.role) {
406
- case "system": {
407
- messages.push({
408
- role: getSystemMessageMode(config.model!),
409
- content: message.content
410
- })
411
- break
412
- }
505
+ const prepareMessages = Effect.fnUntraced(
506
+ function*<Tools extends ReadonlyArray<Tool.Any>>({
507
+ config,
508
+ options,
509
+ capabilities,
510
+ include,
511
+ toolNameMapper
512
+ }: {
513
+ readonly config: typeof Config.Service
514
+ readonly options: LanguageModel.ProviderOptions
515
+ readonly include: Set<typeof Generated.IncludeEnum.Encoded>
516
+ readonly capabilities: ModelCapabilities
517
+ readonly toolNameMapper: Tool.NameMapper<Tools>
518
+ }): Effect.fn.Return<ReadonlyArray<typeof Generated.InputItem.Encoded>, AiError.AiError> {
519
+ const processedApprovalIds = new Set<string>()
520
+
521
+ const hasConversation = Predicate.isNotNullish(config.conversation)
522
+
523
+ // Provider-Defined Tools
524
+ const applyPatchTool = options.tools.find((tool): tool is ReturnType<typeof OpenAiTool.ApplyPatch> =>
525
+ Tool.isProviderDefined(tool) && tool.name === "OpenAiApplyPatch"
526
+ )
527
+ const codeInterpreterTool = options.tools.find((tool): tool is ReturnType<typeof OpenAiTool.CodeInterpreter> =>
528
+ Tool.isProviderDefined(tool) && tool.name === "OpenAiCodeInterpreter"
529
+ )
530
+ const shellTool = options.tools.find((tool): tool is ReturnType<typeof OpenAiTool.Shell> =>
531
+ Tool.isProviderDefined(tool) && tool.name === "OpenAiFunctionShell"
532
+ )
533
+ const localShellTool = options.tools.find((tool): tool is ReturnType<typeof OpenAiTool.LocalShell> =>
534
+ Tool.isProviderDefined(tool) && tool.name === "OpenAiLocalShell"
535
+ )
536
+ const webSearchTool = options.tools.find((tool): tool is ReturnType<typeof OpenAiTool.WebSearch> =>
537
+ Tool.isProviderDefined(tool) && tool.name === "OpenAiWebSearch"
538
+ )
539
+ const webSearchPreviewTool = options.tools.find((tool): tool is ReturnType<typeof OpenAiTool.WebSearchPreview> =>
540
+ Tool.isProviderDefined(tool) && tool.name === "OpenAiWebSearchPreview"
541
+ )
413
542
 
414
- case "user": {
415
- const content: Array<typeof Generated.InputContent.Encoded> = []
543
+ // Handle Included Features
544
+ if (Predicate.isNotUndefined(config.top_logprobs)) {
545
+ include.add("message.output_text.logprobs")
546
+ }
547
+ if (config.store === false && capabilities.isReasoningModel) {
548
+ include.add("reasoning.encrypted_content")
549
+ }
550
+ if (Predicate.isNotUndefined(codeInterpreterTool)) {
551
+ include.add("code_interpreter_call.outputs")
552
+ }
553
+ if (Predicate.isNotUndefined(webSearchTool) || Predicate.isNotUndefined(webSearchPreviewTool)) {
554
+ include.add("web_search_call.action.sources")
555
+ }
416
556
 
417
- for (let index = 0; index < message.content.length; index++) {
418
- const part = message.content[index]
557
+ const messages: Array<typeof Generated.InputItem.Encoded> = []
419
558
 
420
- switch (part.type) {
421
- case "text": {
422
- content.push({ type: "input_text", text: part.text })
423
- break
424
- }
559
+ for (const message of options.prompt.content) {
560
+ switch (message.role) {
561
+ case "system": {
562
+ messages.push({
563
+ role: getSystemMessageMode(config.model!),
564
+ content: message.content
565
+ })
566
+ break
567
+ }
425
568
 
426
- case "file": {
427
- if (part.mediaType.startsWith("image/")) {
428
- const detail = getImageDetail(part)
429
- const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
569
+ case "user": {
570
+ const content: Array<typeof Generated.InputContent.Encoded> = []
430
571
 
431
- if (typeof part.data === "string" && isFileId(part.data, config)) {
432
- content.push({ type: "input_image", file_id: part.data, detail })
433
- }
572
+ for (let index = 0; index < message.content.length; index++) {
573
+ const part = message.content[index]
434
574
 
435
- if (part.data instanceof URL) {
436
- content.push({ type: "input_image", image_url: part.data.toString(), detail })
437
- }
575
+ switch (part.type) {
576
+ case "text": {
577
+ content.push({ type: "input_text", text: part.text })
578
+ break
579
+ }
438
580
 
439
- if (part.data instanceof Uint8Array) {
440
- const base64 = Encoding.encodeBase64(part.data)
441
- const imageUrl = `data:${mediaType};base64,${base64}`
442
- content.push({ type: "input_image", image_url: imageUrl, detail })
443
- }
444
- } else if (part.mediaType === "application/pdf") {
445
- if (typeof part.data === "string" && isFileId(part.data, config)) {
446
- content.push({ type: "input_file", file_id: part.data })
447
- }
581
+ case "file": {
582
+ if (part.mediaType.startsWith("image/")) {
583
+ const detail = getImageDetail(part)
584
+ const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType
448
585
 
449
- if (part.data instanceof URL) {
450
- content.push({ type: "input_file", file_url: part.data.toString() })
451
- }
586
+ if (typeof part.data === "string" && isFileId(part.data, config)) {
587
+ content.push({ type: "input_image", file_id: part.data, detail })
588
+ }
589
+
590
+ if (part.data instanceof URL) {
591
+ content.push({ type: "input_image", image_url: part.data.toString(), detail })
592
+ }
593
+
594
+ if (part.data instanceof Uint8Array) {
595
+ const base64 = Base64.encode(part.data)
596
+ const imageUrl = `data:${mediaType};base64,${base64}`
597
+ content.push({ type: "input_image", image_url: imageUrl, detail })
598
+ }
599
+ } else if (part.mediaType === "application/pdf") {
600
+ if (typeof part.data === "string" && isFileId(part.data, config)) {
601
+ content.push({ type: "input_file", file_id: part.data })
602
+ }
452
603
 
453
- if (part.data instanceof Uint8Array) {
454
- const base64 = Encoding.encodeBase64(part.data)
455
- const fileName = part.fileName ?? `part-${index}.pdf`
456
- const fileData = `data:application/pdf;base64,${base64}`
457
- content.push({ type: "input_file", filename: fileName, file_data: fileData })
604
+ if (part.data instanceof URL) {
605
+ content.push({ type: "input_file", file_url: part.data.toString() })
606
+ }
607
+
608
+ if (part.data instanceof Uint8Array) {
609
+ const base64 = Base64.encode(part.data)
610
+ const fileName = part.fileName ?? `part-${index}.pdf`
611
+ const fileData = `data:application/pdf;base64,${base64}`
612
+ content.push({ type: "input_file", filename: fileName, file_data: fileData })
613
+ }
614
+ } else {
615
+ return yield* AiError.make({
616
+ module: "OpenAiLanguageModel",
617
+ method: "prepareMessages",
618
+ reason: new AiError.InvalidRequestError({
619
+ description: `Detected unsupported media type for file: '${part.mediaType}'`
620
+ })
621
+ })
458
622
  }
459
- } else {
460
- return yield* new AiError.MalformedInput({
461
- module: "OpenAiLanguageModel",
462
- method: "prepareMessages",
463
- description: `Detected unsupported media type for file: '${part.mediaType}'`
464
- })
465
623
  }
466
624
  }
467
625
  }
626
+
627
+ messages.push({ role: "user", content })
628
+
629
+ break
468
630
  }
469
631
 
470
- messages.push({ role: "user", content })
632
+ case "assistant": {
633
+ const reasoningMessages: Record<string, DeepMutable<typeof Generated.ReasoningItem.Encoded>> = {}
471
634
 
472
- break
473
- }
635
+ for (const part of message.content) {
636
+ switch (part.type) {
637
+ case "text": {
638
+ const id = getItemId(part)
474
639
 
475
- case "assistant": {
476
- const reasoningMessages: Record<string, DeepMutable<typeof Generated.ReasoningItem.Encoded>> = {}
640
+ // When in conversation mode, skip items that already exist in the
641
+ // conversation context to avoid "Duplicate item found" errors
642
+ if (hasConversation && Predicate.isNotNull(id)) {
643
+ break
644
+ }
477
645
 
478
- for (const part of message.content) {
479
- switch (part.type) {
480
- case "text": {
481
- messages.push({
482
- role: "assistant",
483
- content: [{ type: "output_text", text: part.text }],
484
- id: getItemId(part)
485
- })
486
- break
487
- }
646
+ if (config.store === true && Predicate.isNotNull(id)) {
647
+ messages.push({ type: "item_reference", id })
648
+ break
649
+ }
488
650
 
489
- case "reasoning": {
490
- const options = part.options.openai
651
+ messages.push({
652
+ id: id!,
653
+ type: "message",
654
+ role: "assistant",
655
+ status: part.options.openai?.status ?? "completed",
656
+ content: [{
657
+ type: "output_text",
658
+ text: part.text,
659
+ annotations: part.options.openai?.annotations ?? [],
660
+ logprobs: []
661
+ }]
662
+ })
663
+
664
+ break
665
+ }
491
666
 
492
- if (Predicate.isNotUndefined(options?.itemId)) {
493
- const reasoningMessage = reasoningMessages[options.itemId]
494
- const summaryParts: Mutable<typeof Generated.ReasoningItem.fields.summary.Encoded> = []
667
+ case "reasoning": {
668
+ const id = getItemId(part)
669
+ const encryptedContent = getEncryptedContent(part)
495
670
 
496
- if (part.text.length > 0) {
497
- summaryParts.push({ type: "summary_text", text: part.text })
671
+ if (hasConversation && Predicate.isNotNull(id)) {
672
+ break
498
673
  }
499
674
 
500
- if (Predicate.isUndefined(reasoningMessage)) {
501
- reasoningMessages[options.itemId] = {
502
- id: options.itemId,
503
- type: "reasoning",
504
- summary: summaryParts,
505
- encrypted_content: options.encryptedContent
506
- }
507
- messages.push(reasoningMessages[options.itemId])
508
- } else {
509
- for (const summaryPart of summaryParts) {
510
- reasoningMessage.summary.push(summaryPart)
675
+ if (Predicate.isNotNull(id)) {
676
+ const message = reasoningMessages[id]
677
+
678
+ if (config.store === true) {
679
+ // Use item references to refer to reasoning (single reference)
680
+ // when the first part is encountered
681
+ if (Predicate.isUndefined(message)) {
682
+ messages.push({ type: "item_reference", id })
683
+
684
+ // Store unused reasoning message to mark its id as used
685
+ reasoningMessages[id] = {
686
+ type: "reasoning",
687
+ id,
688
+ summary: []
689
+ }
690
+ }
691
+ } else {
692
+ const summaryParts: Array<typeof Generated.SummaryTextContent.Encoded> = []
693
+
694
+ if (part.text.length > 0) {
695
+ summaryParts.push({ type: "summary_text", text: part.text })
696
+ }
697
+
698
+ if (Predicate.isUndefined(message)) {
699
+ reasoningMessages[id] = {
700
+ type: "reasoning",
701
+ id,
702
+ summary: summaryParts,
703
+ encrypted_content: encryptedContent ?? null
704
+ }
705
+
706
+ messages.push(reasoningMessages[id])
707
+ } else {
708
+ message.summary.push(...summaryParts)
709
+
710
+ // Update encrypted content to enable setting it in the
711
+ // last summary part
712
+ if (Predicate.isNotNull(encryptedContent)) {
713
+ message.encrypted_content = encryptedContent
714
+ }
715
+ }
511
716
  }
512
717
  }
718
+
719
+ break
513
720
  }
514
721
 
515
- break
516
- }
722
+ case "tool-call": {
723
+ const id = getItemId(part)
724
+ const status = getStatus(part)
725
+
726
+ if (hasConversation && Predicate.isNotNull(id)) {
727
+ break
728
+ }
729
+
730
+ if (config.store && Predicate.isNotNull(id)) {
731
+ messages.push({ type: "item_reference", id })
732
+ break
733
+ }
734
+
735
+ if (part.providerExecuted) {
736
+ break
737
+ }
738
+
739
+ const toolName = toolNameMapper.getProviderName(part.name)
740
+
741
+ if (Predicate.isNotUndefined(localShellTool) && toolName === "local_shell") {
742
+ const params = yield* Schema.decodeUnknownEffect(localShellTool.parametersSchema)(part.params).pipe(
743
+ Effect.mapError((error) =>
744
+ AiError.make({
745
+ module: "OpenAiLanguageModel",
746
+ method: "prepareMessages",
747
+ reason: new AiError.ToolParameterValidationError({
748
+ toolName: "local_shell",
749
+ toolParams: part.params as Schema.Json,
750
+ description: error.message
751
+ })
752
+ })
753
+ )
754
+ )
755
+
756
+ messages.push({
757
+ id: id!,
758
+ type: "local_shell_call",
759
+ call_id: part.id,
760
+ status: status ?? "completed",
761
+ action: params.action
762
+ })
763
+
764
+ break
765
+ }
766
+
767
+ if (Predicate.isNotUndefined(shellTool) && toolName === "shell") {
768
+ const params = yield* Schema.decodeUnknownEffect(shellTool.parametersSchema)(part.params).pipe(
769
+ Effect.mapError((error) =>
770
+ AiError.make({
771
+ module: "OpenAiLanguageModel",
772
+ method: "prepareMessages",
773
+ reason: new AiError.ToolParameterValidationError({
774
+ toolName: "shell",
775
+ toolParams: part.params as Schema.Json,
776
+ description: error.message
777
+ })
778
+ })
779
+ )
780
+ )
781
+
782
+ messages.push({
783
+ id: id!,
784
+ type: "shell_call",
785
+ call_id: part.id,
786
+ status: status ?? "completed",
787
+ action: params.action
788
+ })
789
+
790
+ break
791
+ }
517
792
 
518
- case "tool-call": {
519
- if (!part.providerExecuted) {
520
793
  messages.push({
521
- id: getItemId(part),
522
794
  type: "function_call",
795
+ name: toolName,
523
796
  call_id: part.id,
524
- name: part.name,
525
- arguments: JSON.stringify(part.params)
797
+ arguments: JSON.stringify(part.params),
798
+ ...(Predicate.isNotNull(id) ? { id } : {}),
799
+ ...(Predicate.isNotNull(status) ? { status } : {})
526
800
  })
801
+
802
+ break
527
803
  }
528
804
 
529
- break
805
+ // Assistant tool-result parts are always provider executed
806
+ case "tool-result": {
807
+ // Skip execution denied results - these have no corresponding
808
+ // item in OpenAI's store
809
+ if (
810
+ Predicate.hasProperty(part.result, "type") &&
811
+ part.result.type === "execution-denied"
812
+ ) {
813
+ break
814
+ }
815
+
816
+ if (hasConversation) {
817
+ break
818
+ }
819
+
820
+ if (config.store === true) {
821
+ const id = getItemId(part) ?? part.id
822
+ messages.push({ type: "item_reference", id })
823
+ }
824
+ }
530
825
  }
531
826
  }
827
+
828
+ break
532
829
  }
533
830
 
534
- break
535
- }
831
+ case "tool": {
832
+ for (const part of message.content) {
833
+ if (part.type === "tool-approval-response") {
834
+ if (processedApprovalIds.has(part.approvalId)) {
835
+ continue
836
+ }
536
837
 
537
- case "tool": {
538
- for (const part of message.content) {
539
- messages.push({
540
- type: "function_call_output",
541
- call_id: part.id,
542
- output: JSON.stringify(part.result)
543
- })
544
- }
838
+ processedApprovalIds.add(part.approvalId)
839
+
840
+ if (config.store === true) {
841
+ messages.push({ type: "item_reference", id: part.approvalId })
842
+ }
843
+
844
+ messages.push({
845
+ type: "mcp_approval_response",
846
+ approval_request_id: part.approvalId,
847
+ approve: part.approved
848
+ } as any)
849
+
850
+ continue
851
+ }
852
+
853
+ // Skip execution-denied results that already have an approvalId -
854
+ // this indicates that the part was already handled via tool-approval-response
855
+ if (
856
+ Predicate.hasProperty(part.result, "type") &&
857
+ part.result.type === "execution-denied"
858
+ ) {
859
+ if (Predicate.isNotNullish(part.options.openai?.approvalId)) {
860
+ continue
861
+ }
862
+ }
863
+
864
+ const id = getItemId(part) ?? part.id
865
+ const status = getStatus(part)
866
+ const toolName = toolNameMapper.getProviderName(part.name)
867
+
868
+ if (Predicate.isNotUndefined(applyPatchTool) && toolName === "apply_patch") {
869
+ messages.push({
870
+ id,
871
+ type: "apply_patch_call_output",
872
+ call_id: part.id,
873
+ ...(part.result as any)
874
+ })
875
+ }
876
+
877
+ if (Predicate.isNotUndefined(shellTool) && toolName === "shell") {
878
+ messages.push({
879
+ id,
880
+ type: "shell_call_output",
881
+ call_id: part.id,
882
+ output: part.result as any,
883
+ ...(Predicate.isNotNull(status) ? { status } : {})
884
+ })
885
+ }
886
+
887
+ if (Predicate.isNotUndefined(localShellTool) && toolName === "local_shell") {
888
+ messages.push({
889
+ id,
890
+ type: "local_shell_call_output",
891
+ call_id: part.id,
892
+ output: part.result as any,
893
+ ...(Predicate.isNotNull(status) ? { status } : {})
894
+ })
895
+ }
896
+
897
+ messages.push({
898
+ type: "function_call_output",
899
+ call_id: part.id,
900
+ output: JSON.stringify(part.result),
901
+ ...(Predicate.isNotNull(status) ? { status } : {})
902
+ })
903
+ }
545
904
 
546
- break
905
+ break
906
+ }
547
907
  }
548
908
  }
909
+
910
+ return messages
549
911
  }
912
+ )
913
+
914
+ // =============================================================================
915
+ // HTTP Details
916
+ // =============================================================================
917
+
918
+ const buildHttpRequestDetails = (
919
+ request: HttpClientRequest.HttpClientRequest
920
+ ): typeof Response.HttpRequestDetails.Type => ({
921
+ method: request.method,
922
+ url: request.url,
923
+ urlParams: Array.from(request.urlParams),
924
+ hash: request.hash,
925
+ headers: Redactable.redact(request.headers) as Record<string, string>
926
+ })
550
927
 
551
- return messages
928
+ const buildHttpResponseDetails = (
929
+ response: HttpClientResponse.HttpClientResponse
930
+ ): typeof Response.HttpResponseDetails.Type => ({
931
+ status: response.status,
932
+ headers: Redactable.redact(response.headers) as Record<string, string>
552
933
  })
553
934
 
554
935
  // =============================================================================
555
936
  // Response Conversion
556
937
  // =============================================================================
557
938
 
558
- const makeResponse: (
559
- response: Generated.Response,
560
- options: LanguageModel.ProviderOptions
561
- ) => Effect.Effect<
562
- Array<Response.PartEncoded>,
563
- AiError.AiError,
564
- IdGenerator.IdGenerator
565
- > = Effect.fnUntraced(
566
- function*(response, options) {
939
+ type ResponseStreamEvent = typeof Generated.ResponseStreamEvent.Type
940
+
941
+ const makeResponse = Effect.fnUntraced(
942
+ function*<Tools extends ReadonlyArray<Tool.Any>>({
943
+ options,
944
+ rawResponse,
945
+ response,
946
+ toolNameMapper
947
+ }: {
948
+ readonly options: LanguageModel.ProviderOptions
949
+ readonly rawResponse: Generated.Response
950
+ readonly response: HttpClientResponse.HttpClientResponse
951
+ readonly toolNameMapper: Tool.NameMapper<Tools>
952
+ }): Effect.fn.Return<
953
+ Array<Response.PartEncoded>,
954
+ AiError.AiError,
955
+ IdGenerator.IdGenerator
956
+ > {
567
957
  const idGenerator = yield* IdGenerator.IdGenerator
568
958
 
959
+ const approvalRequests = getApprovalRequestIdMapping(options.prompt)
960
+
569
961
  const webSearchTool = options.tools.find((tool) =>
570
962
  Tool.isProviderDefined(tool) &&
571
- (tool.id === "openai.web_search" ||
572
- tool.id === "openai.web_search_preview")
963
+ (tool.name === "OpenAiWebSearch" ||
964
+ tool.name === "OpenAiWebSearchPreview")
573
965
  ) as Tool.AnyProviderDefined | undefined
574
966
 
575
967
  let hasToolCalls = false
576
968
  const parts: Array<Response.PartEncoded> = []
577
969
 
578
- const createdAt = new Date(response.created_at * 1000)
970
+ const createdAt = new Date(rawResponse.created_at * 1000)
579
971
  parts.push({
580
972
  type: "response-metadata",
581
- id: response.id,
582
- modelId: response.model,
583
- timestamp: DateTime.formatIso(DateTime.unsafeFromDate(createdAt))
973
+ id: rawResponse.id,
974
+ modelId: rawResponse.model as string,
975
+ timestamp: DateTime.formatIso(DateTime.fromDateUnsafe(createdAt)),
976
+ request: buildHttpRequestDetails(response.request)
584
977
  })
585
978
 
586
- for (const part of response.output) {
979
+ for (const part of rawResponse.output) {
587
980
  switch (part.type) {
588
- case "message": {
589
- for (const contentPart of part.content) {
590
- switch (contentPart.type) {
591
- case "output_text": {
592
- parts.push({
593
- type: "text",
594
- text: contentPart.text,
595
- metadata: { openai: { itemId: part.id } }
596
- })
981
+ case "apply_patch_call": {
982
+ const toolName = toolNameMapper.getCustomName("apply_patch")
983
+ parts.push({
984
+ type: "tool-call",
985
+ id: part.call_id,
986
+ name: toolName,
987
+ params: { call_id: part.call_id, operation: part.operation },
988
+ metadata: { openai: { ...makeItemIdMetadata(part.id) } }
989
+ })
990
+ break
991
+ }
597
992
 
598
- for (const annotation of contentPart.annotations) {
599
- if (annotation.type === "file_citation") {
600
- const metadata = {
601
- type: annotation.type,
602
- index: annotation.index
603
- }
604
-
605
- parts.push({
606
- type: "source",
607
- sourceType: "document",
608
- id: yield* idGenerator.generateId(),
609
- mediaType: "text/plain",
610
- title: annotation.filename ?? "Untitled Document",
611
- metadata: { openai: metadata }
612
- })
613
- }
614
-
615
- if (annotation.type === "url_citation") {
616
- const metadata = {
617
- type: annotation.type,
618
- startIndex: annotation.start_index,
619
- endIndex: annotation.end_index
620
- }
621
-
622
- parts.push({
623
- type: "source",
624
- sourceType: "url",
625
- id: yield* idGenerator.generateId(),
626
- url: annotation.url,
627
- title: annotation.title,
628
- metadata: { openai: metadata }
629
- })
630
- }
631
- }
632
-
633
- break
634
- }
635
- case "refusal": {
636
- parts.push({
637
- type: "text",
638
- text: "",
639
- metadata: { openai: { refusal: contentPart.refusal } }
640
- })
641
-
642
- break
643
- }
644
- }
645
- }
993
+ case "code_interpreter_call": {
994
+ const toolName = toolNameMapper.getCustomName("code_interpreter")
995
+ parts.push({
996
+ type: "tool-call",
997
+ id: part.id,
998
+ name: toolName,
999
+ params: { code: part.code, container_id: part.container_id },
1000
+ providerExecuted: true
1001
+ })
1002
+ parts.push({
1003
+ type: "tool-result",
1004
+ id: part.id,
1005
+ name: toolName,
1006
+ isFailure: false,
1007
+ result: { outputs: part.outputs },
1008
+ providerExecuted: true
1009
+ })
1010
+ break
1011
+ }
646
1012
 
1013
+ case "file_search_call": {
1014
+ const toolName = toolNameMapper.getCustomName("file_search")
1015
+ parts.push({
1016
+ type: "tool-call",
1017
+ id: part.id,
1018
+ name: toolName,
1019
+ params: {},
1020
+ providerExecuted: true
1021
+ })
1022
+ parts.push({
1023
+ type: "tool-result",
1024
+ id: part.id,
1025
+ name: toolName,
1026
+ isFailure: false,
1027
+ result: {
1028
+ status: part.status,
1029
+ queries: part.queries,
1030
+ results: part.results ?? null
1031
+ },
1032
+ providerExecuted: true
1033
+ })
647
1034
  break
648
1035
  }
649
1036
 
650
1037
  case "function_call": {
651
1038
  hasToolCalls = true
652
-
653
1039
  const toolName = part.name
654
1040
  const toolParams = part.arguments
655
-
656
1041
  const params = yield* Effect.try({
657
1042
  try: () => Tool.unsafeSecureJsonParse(toolParams),
658
1043
  catch: (cause) =>
659
- new AiError.MalformedOutput({
1044
+ AiError.make({
660
1045
  module: "OpenAiLanguageModel",
661
1046
  method: "makeResponse",
662
- description: "Failed to securely parse tool call parameters " +
663
- `for tool '${toolName}':\nParameters: ${toolParams}`,
664
- cause
1047
+ reason: new AiError.ToolParameterValidationError({
1048
+ toolName,
1049
+ toolParams: {},
1050
+ description: `Failed to securely JSON parse tool parameters: ${cause}`
1051
+ })
665
1052
  })
666
1053
  })
667
-
668
1054
  parts.push({
669
1055
  type: "tool-call",
670
1056
  id: part.call_id,
671
1057
  name: toolName,
672
1058
  params,
673
- metadata: { openai: { itemId: part.id } }
1059
+ metadata: { openai: { ...makeItemIdMetadata(part.id) } }
674
1060
  })
675
-
676
1061
  break
677
1062
  }
678
1063
 
679
- case "code_interpreter_call": {
1064
+ case "image_generation_call": {
1065
+ const toolName = toolNameMapper.getCustomName("image_generation")
680
1066
  parts.push({
681
1067
  type: "tool-call",
682
1068
  id: part.id,
683
- name: "OpenAiCodeInterpreter",
684
- params: { code: part.code, container_id: part.container_id },
685
- providerName: "code_interpreter",
1069
+ name: toolName,
1070
+ params: {},
686
1071
  providerExecuted: true
687
1072
  })
688
-
689
1073
  parts.push({
690
1074
  type: "tool-result",
691
1075
  id: part.id,
692
- name: "OpenAiCodeInterpreter",
1076
+ name: toolName,
693
1077
  isFailure: false,
694
- result: part.outputs,
695
- providerName: "code_interpreter",
696
- providerExecuted: true
1078
+ result: { result: part.result }
697
1079
  })
1080
+ break
1081
+ }
698
1082
 
1083
+ case "local_shell_call": {
1084
+ const toolName = toolNameMapper.getCustomName("local_shell")
1085
+ parts.push({
1086
+ type: "tool-call",
1087
+ id: part.call_id,
1088
+ name: toolName,
1089
+ params: { action: part.action },
1090
+ metadata: { openai: { ...makeItemIdMetadata(part.id) } }
1091
+ })
699
1092
  break
700
1093
  }
701
1094
 
702
- case "file_search_call": {
1095
+ case "mcp_call": {
1096
+ const toolId = Predicate.isNotNullish(part.approval_request_id)
1097
+ ? (approvalRequests.get(part.approval_request_id) ?? part.id)
1098
+ : part.id
1099
+
1100
+ const toolName = `mcp.${part.name}`
1101
+
703
1102
  parts.push({
704
1103
  type: "tool-call",
705
- id: part.id,
706
- name: "OpenAiFileSearch",
707
- params: {},
708
- providerName: "file_search",
1104
+ id: toolId,
1105
+ name: toolName,
1106
+ params: part.arguments,
709
1107
  providerExecuted: true
710
1108
  })
711
1109
 
712
1110
  parts.push({
713
1111
  type: "tool-result",
714
- id: part.id,
715
- name: "OpenAiFileSearch",
1112
+ id: toolId,
1113
+ name: toolName,
716
1114
  isFailure: false,
1115
+ providerExecuted: true,
717
1116
  result: {
718
- status: part.status,
719
- queries: part.queries,
720
- ...(part.results && { results: part.results })
1117
+ type: "call",
1118
+ name: part.name,
1119
+ arguments: part.arguments,
1120
+ server_label: part.server_label,
1121
+ ...(Predicate.isNotNullish(part.output) ? { output: part.output } : undefined),
1122
+ ...(Predicate.isNotNullish(part.error) ? { error: part.error } : undefined)
721
1123
  },
722
- providerName: "file_search",
723
- providerExecuted: true
1124
+ metadata: { openai: { ...makeItemIdMetadata(part.id) } }
724
1125
  })
725
1126
 
726
1127
  break
727
1128
  }
728
1129
 
729
- case "web_search_call": {
1130
+ case "mcp_list_tools": {
1131
+ // Skip
1132
+ break
1133
+ }
1134
+
1135
+ case "mcp_approval_request": {
1136
+ const approvalRequestId = (part as any).approval_request_id ?? part.id
1137
+ const toolId = yield* idGenerator.generateId()
1138
+ const toolName = `mcp.${part.name}`
1139
+
1140
+ const params = yield* Effect.try({
1141
+ try: () => Tool.unsafeSecureJsonParse(part.arguments),
1142
+ catch: (cause) =>
1143
+ AiError.make({
1144
+ module: "OpenAiLanguageModel",
1145
+ method: "makeResponse",
1146
+ reason: new AiError.ToolParameterValidationError({
1147
+ toolName,
1148
+ toolParams: {},
1149
+ description: `Failed to securely JSON parse tool parameters: ${cause}`
1150
+ })
1151
+ })
1152
+ })
1153
+
730
1154
  parts.push({
731
1155
  type: "tool-call",
732
- id: part.id,
733
- name: webSearchTool?.name ?? "OpenAiWebSearch",
734
- params: { action: part.action },
735
- providerName: webSearchTool?.providerName ?? "web_search",
1156
+ id: toolId,
1157
+ name: toolName,
1158
+ params,
736
1159
  providerExecuted: true
737
1160
  })
738
1161
 
739
1162
  parts.push({
740
- type: "tool-result",
741
- id: part.id,
742
- name: webSearchTool?.name ?? "OpenAiWebSearch",
743
- isFailure: false,
744
- result: { status: part.status },
745
- providerName: webSearchTool?.providerName ?? "web_search",
746
- providerExecuted: true
1163
+ type: "tool-approval-request",
1164
+ toolCallId: toolId,
1165
+ approvalId: approvalRequestId
747
1166
  })
748
1167
 
749
1168
  break
750
1169
  }
751
1170
 
752
- // TODO(Max): support computer use
753
- // case "computer_call": {
754
- // parts.push({
755
- // type: "tool-call",
756
- // id: part.id,
757
- // name: "OpenAiComputerUse",
758
- // params: { action: part.action },
759
- // providerName: webSearchTool?.providerName ?? "web_search",
760
- // providerExecuted: true
761
- // })
762
- //
763
- // parts.push({
764
- // type: "tool-result",
765
- // id: part.id,
766
- // name: webSearchTool?.name ?? "OpenAiWebSearch",
767
- // result: { status: part.status },
768
- // providerName: webSearchTool?.providerName ?? "web_search",
769
- // providerExecuted: true
770
- // })
771
- // break
772
- // }
1171
+ case "message": {
1172
+ for (const contentPart of part.content) {
1173
+ switch (contentPart.type) {
1174
+ case "output_text": {
1175
+ const annotations = contentPart.annotations.length > 0
1176
+ ? { annotations: contentPart.annotations as any }
1177
+ : undefined
1178
+
1179
+ parts.push({
1180
+ type: "text",
1181
+ text: contentPart.text,
1182
+ metadata: {
1183
+ openai: {
1184
+ ...makeItemIdMetadata(part.id),
1185
+ ...annotations
1186
+ }
1187
+ }
1188
+ })
1189
+ for (const annotation of contentPart.annotations) {
1190
+ if (annotation.type === "container_file_citation") {
1191
+ parts.push({
1192
+ type: "source",
1193
+ sourceType: "document",
1194
+ id: yield* idGenerator.generateId(),
1195
+ mediaType: "text/plain",
1196
+ title: annotation.filename,
1197
+ fileName: annotation.filename,
1198
+ metadata: {
1199
+ openai: {
1200
+ type: annotation.type,
1201
+ fileId: annotation.file_id,
1202
+ containerId: annotation.container_id
1203
+ }
1204
+ }
1205
+ })
1206
+ }
1207
+ if (annotation.type === "file_citation") {
1208
+ parts.push({
1209
+ type: "source",
1210
+ sourceType: "document",
1211
+ id: yield* idGenerator.generateId(),
1212
+ mediaType: "text/plain",
1213
+ title: annotation.filename,
1214
+ fileName: annotation.filename,
1215
+ metadata: {
1216
+ openai: {
1217
+ type: annotation.type,
1218
+ fileId: annotation.file_id,
1219
+ index: annotation.index
1220
+ }
1221
+ }
1222
+ })
1223
+ }
1224
+ if (annotation.type === "file_path") {
1225
+ parts.push({
1226
+ type: "source",
1227
+ sourceType: "document",
1228
+ id: yield* idGenerator.generateId(),
1229
+ mediaType: "application/octet-stream",
1230
+ title: annotation.file_id,
1231
+ fileName: annotation.file_id,
1232
+ metadata: {
1233
+ openai: {
1234
+ type: annotation.type,
1235
+ fileId: annotation.file_id,
1236
+ index: annotation.index
1237
+ }
1238
+ }
1239
+ })
1240
+ }
1241
+ if (annotation.type === "url_citation") {
1242
+ parts.push({
1243
+ type: "source",
1244
+ sourceType: "url",
1245
+ id: yield* idGenerator.generateId(),
1246
+ url: annotation.url,
1247
+ title: annotation.title,
1248
+ metadata: {
1249
+ openai: {
1250
+ type: annotation.type,
1251
+ startIndex: annotation.start_index,
1252
+ endIndex: annotation.end_index
1253
+ }
1254
+ }
1255
+ })
1256
+ }
1257
+ }
1258
+ break
1259
+ }
1260
+ case "refusal": {
1261
+ parts.push({
1262
+ type: "text",
1263
+ text: "",
1264
+ metadata: { openai: { refusal: contentPart.refusal } }
1265
+ })
1266
+ break
1267
+ }
1268
+ }
1269
+ }
1270
+ break
1271
+ }
773
1272
 
774
1273
  case "reasoning": {
1274
+ const metadata = {
1275
+ openai: {
1276
+ ...makeItemIdMetadata(part.id),
1277
+ ...makeEncryptedContentMetadata(part.encrypted_content)
1278
+ }
1279
+ }
775
1280
  // If there are no summary parts, we have to add an empty one to
776
- // propagate the part identifier
1281
+ // propagate the part identifier and encrypted content
777
1282
  if (part.summary.length === 0) {
778
- parts.push({
779
- type: "reasoning",
780
- text: "",
781
- metadata: { openai: { itemId: part.id } }
782
- })
1283
+ parts.push({ type: "reasoning", text: "", metadata })
783
1284
  } else {
784
1285
  for (const summary of part.summary) {
785
- const metadata = {
786
- itemId: part.id,
787
- encryptedContent: part.encrypted_content ?? undefined
788
- }
789
- parts.push({
790
- type: "reasoning",
791
- text: summary.text,
792
- metadata: { openai: metadata }
793
- })
1286
+ parts.push({ type: "reasoning", text: summary.text, metadata })
794
1287
  }
795
1288
  }
1289
+ break
1290
+ }
796
1291
 
1292
+ case "shell_call": {
1293
+ const toolName = toolNameMapper.getCustomName("shell")
1294
+ parts.push({
1295
+ type: "tool-call",
1296
+ id: part.call_id,
1297
+ name: toolName,
1298
+ params: { action: part.action },
1299
+ metadata: { openai: { ...makeItemIdMetadata(part.id) } }
1300
+ })
1301
+ break
1302
+ }
1303
+
1304
+ case "web_search_call": {
1305
+ const toolName = toolNameMapper.getCustomName(
1306
+ webSearchTool?.name ?? "web_search"
1307
+ )
1308
+ parts.push({
1309
+ type: "tool-call",
1310
+ id: part.id,
1311
+ name: toolName,
1312
+ params: {},
1313
+ providerExecuted: true
1314
+ })
1315
+ parts.push({
1316
+ type: "tool-result",
1317
+ id: part.id,
1318
+ name: toolName,
1319
+ isFailure: false,
1320
+ result: { action: part.action, status: part.status },
1321
+ providerExecuted: true
1322
+ })
797
1323
  break
798
1324
  }
799
1325
  }
800
1326
  }
801
1327
 
802
1328
  const finishReason = InternalUtilities.resolveFinishReason(
803
- response.incomplete_details?.reason,
1329
+ rawResponse.incomplete_details?.reason,
804
1330
  hasToolCalls
805
1331
  )
806
1332
 
807
- const metadata = {
808
- serviceTier: response.service_tier
809
- }
810
-
811
1333
  parts.push({
812
1334
  type: "finish",
813
1335
  reason: finishReason,
814
- usage: {
815
- inputTokens: response.usage?.input_tokens,
816
- outputTokens: response.usage?.output_tokens,
817
- totalTokens: (response.usage?.input_tokens ?? 0) + (response.usage?.output_tokens ?? 0),
818
- reasoningTokens: response.usage?.output_tokens_details?.reasoning_tokens,
819
- cachedInputTokens: response.usage?.input_tokens_details?.cached_tokens
820
- },
821
- metadata: { openai: metadata }
1336
+ usage: getUsage(rawResponse.usage),
1337
+ response: buildHttpResponseDetails(response),
1338
+ ...(rawResponse.service_tier && { metadata: { openai: { serviceTier: rawResponse.service_tier } } })
822
1339
  })
823
1340
 
824
1341
  return parts
825
1342
  }
826
1343
  )
827
1344
 
828
- const makeStreamResponse: (
829
- stream: Stream.Stream<ResponseStreamEvent, AiError.AiError>,
830
- options: LanguageModel.ProviderOptions
831
- ) => Effect.Effect<
832
- Stream.Stream<Response.StreamPartEncoded, AiError.AiError>,
833
- never,
834
- IdGenerator.IdGenerator
835
- > = Effect.fnUntraced(
836
- function*(stream, options) {
1345
+ const makeStreamResponse = Effect.fnUntraced(
1346
+ function*<Tools extends ReadonlyArray<Tool.Any>>({
1347
+ stream,
1348
+ response,
1349
+ config,
1350
+ options,
1351
+ toolNameMapper
1352
+ }: {
1353
+ readonly config: typeof Config.Service
1354
+ readonly stream: Stream.Stream<ResponseStreamEvent, AiError.AiError>
1355
+ readonly response: HttpClientResponse.HttpClientResponse
1356
+ readonly options: LanguageModel.ProviderOptions
1357
+ readonly toolNameMapper: Tool.NameMapper<Tools>
1358
+ }): Effect.fn.Return<
1359
+ Stream.Stream<Response.StreamPartEncoded, AiError.AiError>,
1360
+ AiError.AiError,
1361
+ IdGenerator.IdGenerator
1362
+ > {
837
1363
  const idGenerator = yield* IdGenerator.IdGenerator
838
1364
 
1365
+ const approvalRequests = getApprovalRequestIdMapping(options.prompt)
1366
+ const streamApprovalRequests = new Map<string, string>()
1367
+
839
1368
  let hasToolCalls = false
840
1369
 
1370
+ // Track annotations for the current message to include in text-end metadata
1371
+ const activeAnnotations: Array<typeof Generated.Annotation.Encoded> = []
1372
+
1373
+ // Track active reasoning items with a state machine for proper concluding logic
841
1374
  const activeReasoning: Record<string, {
842
- readonly summaryParts: Array<number>
843
1375
  readonly encryptedContent: string | undefined
1376
+ readonly summaryParts: Record<number, "active" | "can-conclude" | "concluded">
844
1377
  }> = {}
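// Editorial sketch of the summary-part lifecycle implied by the handlers in this diff:
// each `${itemId}:${summaryIndex}` part starts as "active" when
// `response.reasoning_summary_part.added` fires; on `response.reasoning_summary_part.done`
// it becomes "concluded" when `config.store === true` (no encrypted content is needed) or
// "can-conclude" otherwise; "can-conclude" parts are closed either when the next summary
// part starts or when the reasoning item completes in `response.output_item.done`, which
// carries the encrypted content.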
845
1378
 
1379
+ // Track active tool calls with optional provider-specific state
846
1380
  const activeToolCalls: Record<number, {
847
1381
  readonly id: string
848
1382
  readonly name: string
1383
+ readonly applyPatch?: {
1384
+ hasDiff: boolean
1385
+ endEmitted: boolean
1386
+ }
1387
+ readonly codeInterpreter?: {
1388
+ readonly containerId: string
1389
+ }
849
1390
  }> = {}
850
1391
 
851
1392
  const webSearchTool = options.tools.find((tool) =>
852
1393
  Tool.isProviderDefined(tool) &&
853
- (tool.id === "openai.web_search" ||
854
- tool.id === "openai.web_search_preview")
855
- ) as Tool.AnyProviderDefined | undefined
1394
+ (tool.name === "OpenAiWebSearch" ||
1395
+ tool.name === "OpenAiWebSearchPreview")
1396
+ ) as ReturnType<typeof OpenAiTool.WebSearch> | ReturnType<typeof OpenAiTool.WebSearchPreview> | undefined
856
1397
 
857
1398
  return stream.pipe(
858
1399
  Stream.mapEffect(Effect.fnUntraced(function*(event) {
@@ -865,7 +1406,8 @@ const makeStreamResponse: (
865
1406
  type: "response-metadata",
866
1407
  id: event.response.id,
867
1408
  modelId: event.response.model,
868
- timestamp: DateTime.formatIso(DateTime.unsafeFromDate(createdAt))
1409
+ timestamp: DateTime.formatIso(DateTime.fromDateUnsafe(createdAt)),
1410
+ request: buildHttpRequestDetails(response.request)
869
1411
  })
870
1412
  break
871
1413
  }
@@ -884,35 +1426,101 @@ const makeStreamResponse: (
884
1426
  event.response.incomplete_details?.reason,
885
1427
  hasToolCalls
886
1428
  ),
887
- usage: {
888
- inputTokens: event.response.usage?.input_tokens,
889
- outputTokens: event.response.usage?.output_tokens,
890
- totalTokens: (event.response.usage?.input_tokens ?? 0) + (event.response.usage?.output_tokens ?? 0),
891
- reasoningTokens: event.response.usage?.output_tokens_details?.reasoning_tokens,
892
- cachedInputTokens: event.response.usage?.input_tokens_details?.cached_tokens
893
- },
894
- metadata: { openai: { serviceTier: event.response.service_tier } }
1429
+ usage: getUsage(event.response.usage),
1430
+ response: buildHttpResponseDetails(response),
1431
+ ...(event.response.service_tier && { metadata: { openai: { serviceTier: event.response.service_tier } } })
895
1432
  })
896
1433
  break
897
1434
  }
898
1435
 
899
1436
  case "response.output_item.added": {
900
1437
  switch (event.item.type) {
901
- case "computer_call": {
902
- // TODO(Max): support computer use
1438
+ case "apply_patch_call": {
1439
+ const toolId = event.item.call_id
1440
+ const toolName = toolNameMapper.getCustomName("apply_patch")
1441
+ const operation = event.item.operation
1442
+ activeToolCalls[event.output_index] = {
1443
+ id: toolId,
1444
+ name: toolName,
1445
+ applyPatch: {
1446
+ hasDiff: operation.type !== "delete_file",
1447
+ endEmitted: operation.type === "delete_file"
1448
+ }
1449
+ }
1450
+ parts.push({
1451
+ type: "tool-params-start",
1452
+ id: toolId,
1453
+ name: toolName
1454
+ })
1455
+
1456
+ if (operation.type === "delete_file") {
1457
+ parts.push({
1458
+ type: "tool-params-delta",
1459
+ id: toolId,
1460
+ delta: JSON.stringify({
1461
+ call_id: toolId,
1462
+ operation: operation
1463
+ })
1464
+ })
1465
+ parts.push({
1466
+ type: "tool-params-end",
1467
+ id: toolId
1468
+ })
1469
+ } else {
1470
+ parts.push({
1471
+ type: "tool-params-delta",
1472
+ id: toolId,
1473
+ delta: `{"call_id":"${InternalUtilities.escapeJSONDelta(toolId)}",` +
1474
+ `"operation":{"type":"${InternalUtilities.escapeJSONDelta(operation.type)}",` +
1475
+ `"path":"${InternalUtilities.escapeJSONDelta(operation.path)}","diff":"`
1476
+ })
1477
+ }
903
1478
  break
904
1479
  }
905
1480
 
906
- case "file_search_call": {
1481
+ case "code_interpreter_call": {
1482
+ const toolName = toolNameMapper.getCustomName("code_interpreter")
1483
+ activeToolCalls[event.output_index] = {
1484
+ id: event.item.id,
1485
+ name: toolName,
1486
+ codeInterpreter: { containerId: event.item.container_id }
1487
+ }
1488
+ parts.push({
1489
+ type: "tool-params-start",
1490
+ id: event.item.id,
1491
+ name: toolName,
1492
+ providerExecuted: true
1493
+ })
1494
+ parts.push({
1495
+ type: "tool-params-delta",
1496
+ id: event.item.id,
1497
+ delta: `{"containerId":"${event.item.container_id}","code":"`
1498
+ })
1499
+ break
1500
+ }
1501
+
1502
+ case "computer_call": {
1503
+ const toolName = toolNameMapper.getCustomName("computer_use")
907
1504
  activeToolCalls[event.output_index] = {
908
1505
  id: event.item.id,
909
- name: "OpenAiFileSearch"
1506
+ name: toolName
910
1507
  }
911
1508
  parts.push({
912
1509
  type: "tool-params-start",
913
1510
  id: event.item.id,
914
- name: "OpenAiFileSearch",
915
- providerName: "file_search",
1511
+ name: toolName,
1512
+ providerExecuted: true
1513
+ })
1514
+ break
1515
+ }
1516
+
1517
+ case "file_search_call": {
1518
+ const toolName = toolNameMapper.getCustomName("file_search")
1519
+ parts.push({
1520
+ type: "tool-call",
1521
+ id: event.item.id,
1522
+ name: toolName,
1523
+ params: {},
916
1524
  providerExecuted: true
917
1525
  })
918
1526
  break
@@ -931,43 +1539,89 @@ const makeStreamResponse: (
931
1539
  break
932
1540
  }
933
1541
 
1542
+ case "image_generation_call": {
1543
+ const toolName = toolNameMapper.getCustomName("image_generation")
1544
+ parts.push({
1545
+ type: "tool-call",
1546
+ id: event.item.id,
1547
+ name: toolName,
1548
+ params: {},
1549
+ providerExecuted: true
1550
+ })
1551
+ break
1552
+ }
1553
+
1554
+ case "mcp_call":
1555
+ case "mcp_list_tools":
1556
+ case "mcp_approval_request": {
1557
+ // We emit MCP tool calls / approvals on `output_item.done` to facilitate:
1558
+ // - Aliasing tool call identifiers when an approval request id exists
1559
+ // - Emitting a proper tool-approval-request part for MCP approvals
1560
+ break
1561
+ }
1562
+
934
1563
  case "message": {
1564
+ // Clear annotations for new message
1565
+ activeAnnotations.length = 0
935
1566
  parts.push({
936
1567
  type: "text-start",
937
1568
  id: event.item.id,
938
- metadata: { openai: { itemId: event.item.id } }
1569
+ metadata: { openai: { ...makeItemIdMetadata(event.item.id) } }
939
1570
  })
940
1571
  break
941
1572
  }
942
1573
 
943
1574
  case "reasoning": {
1575
+ const encryptedContent = event.item.encrypted_content ?? undefined
944
1576
  activeReasoning[event.item.id] = {
945
- summaryParts: [0],
946
- encryptedContent: event.item.encrypted_content
1577
+ encryptedContent,
1578
+ summaryParts: { 0: "active" }
947
1579
  }
948
1580
  parts.push({
949
1581
  type: "reasoning-start",
950
1582
  id: `${event.item.id}:0`,
951
1583
  metadata: {
952
1584
  openai: {
953
- itemId: event.item.id,
954
- encryptedContent: event.item.encrypted_content
1585
+ ...makeItemIdMetadata(event.item.id),
1586
+ ...makeEncryptedContentMetadata(event.item.encrypted_content)
955
1587
  }
956
1588
  }
957
1589
  })
958
1590
  break
959
1591
  }
960
1592
 
1593
+ case "shell_call": {
1594
+ const toolName = toolNameMapper.getCustomName("shell")
1595
+ activeToolCalls[event.output_index] = {
1596
+ id: event.item.id,
1597
+ name: toolName
1598
+ }
1599
+ break
1600
+ }
1601
+
961
1602
  case "web_search_call": {
1603
+ const toolName = toolNameMapper.getCustomName(
1604
+ webSearchTool?.providerName ?? "web_search"
1605
+ )
962
1606
  activeToolCalls[event.output_index] = {
963
1607
  id: event.item.id,
964
- name: webSearchTool?.name ?? "OpenAiWebSearch"
1608
+ name: toolName
965
1609
  }
966
1610
  parts.push({
967
1611
  type: "tool-params-start",
968
1612
  id: event.item.id,
969
1613
  name: webSearchTool?.name ?? "OpenAiWebSearch",
970
- providerName: webSearchTool?.providerName ?? "web_search",
1614
+ providerExecuted: true
1615
+ })
1616
+ parts.push({
1617
+ type: "tool-params-end",
1618
+ id: event.item.id
1619
+ })
1620
+ parts.push({
1621
+ type: "tool-call",
1622
+ id: event.item.id,
1623
+ name: toolName,
1624
+ params: {},
971
1625
  providerExecuted: true
972
1626
  })
973
1627
  break
@@ -979,34 +1633,63 @@ const makeStreamResponse: (
979
1633
 
980
1634
  case "response.output_item.done": {
981
1635
  switch (event.item.type) {
982
- case "code_interpreter_call": {
983
- parts.push({
984
- type: "tool-call",
985
- id: event.item.id,
986
- name: "OpenAiCodeInterpreter",
987
- params: { code: event.item.code, container_id: event.item.container_id },
988
- providerName: "code_interpreter",
989
- providerExecuted: true
990
- })
1636
+ case "apply_patch_call": {
1637
+ const toolCall = activeToolCalls[event.output_index]
1638
+ if (
1639
+ Predicate.isNotUndefined(toolCall.applyPatch) &&
1640
+ !toolCall.applyPatch.endEmitted &&
1641
+ event.item.operation.type !== "delete_file"
1642
+ ) {
1643
+ if (!toolCall.applyPatch.hasDiff) {
1644
+ parts.push({
1645
+ type: "tool-params-delta",
1646
+ id: toolCall.id,
1647
+ delta: InternalUtilities.escapeJSONDelta(event.item.operation.diff)
1648
+ })
1649
+ }
1650
+ parts.push({
1651
+ type: "tool-params-delta",
1652
+ id: toolCall.id,
1653
+ delta: `"}}`
1654
+ })
1655
+ parts.push({
1656
+ type: "tool-params-end",
1657
+ id: toolCall.id
1658
+ })
1659
+ toolCall.applyPatch.endEmitted = true
1660
+ }
1661
+ // Emit the final tool call with the complete diff when the status is completed
1662
+ if (Predicate.isNotUndefined(toolCall) && event.item.status === "completed") {
1663
+ const toolName = toolNameMapper.getCustomName("apply_patch")
1664
+ parts.push({
1665
+ type: "tool-call",
1666
+ id: toolCall.id,
1667
+ name: toolName,
1668
+ params: { call_id: event.item.call_id, operation: event.item.operation },
1669
+ metadata: { openai: { ...makeItemIdMetadata(event.item.id) } }
1670
+ })
1671
+ }
1672
+ delete activeToolCalls[event.output_index]
1673
+ break
1674
+ }
1675
+
1676
+ case "code_interpreter_call": {
1677
+ delete activeToolCalls[event.output_index]
1678
+ const toolName = toolNameMapper.getCustomName("code_interpreter")
991
1679
  parts.push({
992
1680
  type: "tool-result",
993
1681
  id: event.item.id,
994
- name: "OpenAiCodeInterpreter",
1682
+ name: toolName,
995
1683
  isFailure: false,
996
1684
  result: { outputs: event.item.outputs },
997
- providerName: "code_interpreter",
998
1685
  providerExecuted: true
999
1686
  })
1000
1687
  break
1001
1688
  }
1002
1689
 
1003
- // TODO(Max): support computer use
1004
1690
  case "computer_call": {
1005
- break
1006
- }
1007
-
1008
- case "file_search_call": {
1009
1691
  delete activeToolCalls[event.output_index]
1692
+ const toolName = toolNameMapper.getCustomName("computer_use")
1010
1693
  parts.push({
1011
1694
  type: "tool-params-end",
1012
1695
  id: event.item.id
@@ -1014,110 +1697,214 @@ const makeStreamResponse: (
1014
1697
  parts.push({
1015
1698
  type: "tool-call",
1016
1699
  id: event.item.id,
1017
- name: "OpenAiFileSearch",
1700
+ name: toolName,
1018
1701
  params: {},
1019
- providerName: "file_search",
1020
1702
  providerExecuted: true
1021
1703
  })
1022
1704
  parts.push({
1023
1705
  type: "tool-result",
1024
1706
  id: event.item.id,
1025
- name: "OpenAiFileSearch",
1707
+ name: toolName,
1026
1708
  isFailure: false,
1027
- result: {
1028
- status: event.item.status,
1029
- queries: event.item.queries,
1030
- ...(event.item.results && { results: event.item.results })
1031
- },
1032
- providerName: "file_search",
1709
+ result: { status: event.item.status ?? "completed" }
1710
+ })
1711
+ break
1712
+ }
1713
+
1714
+ case "file_search_call": {
1715
+ delete activeToolCalls[event.output_index]
1716
+ const toolName = toolNameMapper.getCustomName("file_search")
1717
+ const results = Predicate.isNotNullish(event.item.results)
1718
+ ? { results: event.item.results }
1719
+ : undefined
1720
+ parts.push({
1721
+ type: "tool-result",
1722
+ id: event.item.id,
1723
+ name: toolName,
1724
+ isFailure: false,
1725
+ result: { ...results, status: event.item.status, queries: event.item.queries },
1033
1726
  providerExecuted: true
1034
1727
  })
1035
1728
  break
1036
1729
  }
1037
1730
 
1038
1731
  case "function_call": {
1732
+ delete activeToolCalls[event.output_index]
1039
1733
  hasToolCalls = true
1040
-
1041
1734
  const toolName = event.item.name
1042
1735
  const toolParams = event.item.arguments
1043
-
1044
1736
  const params = yield* Effect.try({
1045
1737
  try: () => Tool.unsafeSecureJsonParse(toolParams),
1046
1738
  catch: (cause) =>
1047
- new AiError.MalformedOutput({
1739
+ AiError.make({
1048
1740
  module: "OpenAiLanguageModel",
1049
1741
  method: "makeStreamResponse",
1050
- description: "Failed to securely parse tool call parameters " +
1051
- `for tool '${toolName}':\nParameters: ${toolParams}`,
1052
- cause
1742
+ reason: new AiError.ToolParameterValidationError({
1743
+ toolName,
1744
+ toolParams: {},
1745
+ description: `Failed to securely JSON parse tool parameters: ${cause}`
1746
+ })
1053
1747
  })
1054
1748
  })
1055
-
1056
1749
  parts.push({
1057
1750
  type: "tool-params-end",
1058
1751
  id: event.item.call_id
1059
1752
  })
1060
-
1061
1753
  parts.push({
1062
1754
  type: "tool-call",
1063
1755
  id: event.item.call_id,
1064
1756
  name: toolName,
1065
1757
  params,
1066
- metadata: { openai: { itemId: event.item.id } }
1758
+ metadata: { openai: { ...makeItemIdMetadata(event.item.id) } }
1759
+ })
1760
+ break
1761
+ }
1762
+
1763
+ case "image_generation_call": {
1764
+ const toolName = toolNameMapper.getCustomName("image_generation")
1765
+ parts.push({
1766
+ type: "tool-result",
1767
+ id: event.item.id,
1768
+ name: toolName,
1769
+ isFailure: false,
1770
+ result: { result: event.item.result },
1771
+ providerExecuted: true
1067
1772
  })
1773
+ break
1774
+ }
1068
1775
 
1069
- delete activeToolCalls[event.output_index]
1776
+ case "local_shell_call": {
1777
+ const toolName = toolNameMapper.getCustomName("local_shell")
1778
+ parts.push({
1779
+ type: "tool-call",
1780
+ id: event.item.call_id,
1781
+ name: toolName,
1782
+ params: { action: event.item.action },
1783
+ metadata: { openai: { ...makeItemIdMetadata(event.item.id) } }
1784
+ })
1785
+ break
1786
+ }
1787
+
1788
+ case "mcp_call": {
1789
+ const approvalRequestId = event.item.approval_request_id
1790
+
1791
+ // Track approval with our own tool call identifiers
1792
+ const toolId = Predicate.isNotNullish(approvalRequestId)
1793
+ ? (streamApprovalRequests.get(approvalRequestId) ?? approvalRequests.get(approvalRequestId) ??
1794
+ event.item.id)
1795
+ : event.item.id
1796
+
1797
+ const toolName = `mcp.${event.item.name}`
1798
+
1799
+ parts.push({
1800
+ type: "tool-call",
1801
+ id: toolId,
1802
+ name: toolName,
1803
+ params: event.item.arguments,
1804
+ providerExecuted: true
1805
+ })
1806
+
1807
+ parts.push({
1808
+ type: "tool-result",
1809
+ id: toolId,
1810
+ name: toolName,
1811
+ isFailure: false,
1812
+ providerExecuted: true,
1813
+ result: {
1814
+ type: "call",
1815
+ name: event.item.name,
1816
+ arguments: event.item.arguments,
1817
+ server_label: event.item.server_label,
1818
+ ...(Predicate.isNotNullish(event.item.output) ? { output: event.item.output } : undefined),
1819
+ ...(Predicate.isNotNullish(event.item.error) ? { error: event.item.error } : undefined)
1820
+ },
1821
+ metadata: { openai: { ...makeItemIdMetadata(event.item.id) } }
1822
+ })
1823
+
1824
+ break
1825
+ }
1826
+
1827
+ case "mcp_list_tools": {
1828
+ // Skip
1829
+ break
1830
+ }
1070
1831
 
1832
+ case "mcp_approval_request": {
1833
+ const toolId = yield* idGenerator.generateId()
1834
+ const approvalRequestId = (event.item as any).approval_request_id ?? event.item.id
1835
+ streamApprovalRequests.set(approvalRequestId, toolId)
1836
+ const toolName = `mcp.${event.item.name}`
1837
+ parts.push({
1838
+ type: "tool-call",
1839
+ id: toolId,
1840
+ name: toolName,
1841
+ params: event.item.arguments,
1842
+ providerExecuted: true
1843
+ })
1844
+ parts.push({
1845
+ type: "tool-approval-request",
1846
+ approvalId: approvalRequestId,
1847
+ toolCallId: toolId
1848
+ })
1071
1849
  break
1072
1850
  }
1073
1851
 
1074
1852
  case "message": {
1853
+ const annotations = activeAnnotations.length > 0
1854
+ ? { annotations: activeAnnotations.slice() }
1855
+ : undefined
1075
1856
  parts.push({
1076
1857
  type: "text-end",
1077
- id: event.item.id
1858
+ id: event.item.id,
1859
+ metadata: { openai: { ...annotations, ...makeItemIdMetadata(event.item.id) } }
1078
1860
  })
1079
1861
  break
1080
1862
  }
1081
1863
 
1082
1864
  case "reasoning": {
1083
1865
  const reasoningPart = activeReasoning[event.item.id]
1084
- for (const summaryIndex of reasoningPart.summaryParts) {
1085
- parts.push({
1086
- type: "reasoning-end",
1087
- id: `${event.item.id}:${summaryIndex}`,
1088
- metadata: {
1089
- openai: {
1090
- itemId: event.item.id,
1091
- encryptedContent: event.item.encrypted_content
1866
+ for (const [summaryIndex, status] of Object.entries(reasoningPart.summaryParts)) {
1867
+ if (status === "active" || status === "can-conclude") {
1868
+ parts.push({
1869
+ type: "reasoning-end",
1870
+ id: `${event.item.id}:${summaryIndex}`,
1871
+ metadata: {
1872
+ openai: {
1873
+ ...makeItemIdMetadata(event.item.id),
1874
+ ...makeEncryptedContentMetadata(event.item.encrypted_content)
1875
+ }
1092
1876
  }
1093
- }
1094
- })
1877
+ })
1878
+ }
1095
1879
  }
1096
1880
  delete activeReasoning[event.item.id]
1097
1881
  break
1098
1882
  }
1099
1883
 
1100
- case "web_search_call": {
1884
+ case "shell_call": {
1101
1885
  delete activeToolCalls[event.output_index]
1102
- parts.push({
1103
- type: "tool-params-end",
1104
- id: event.item.id
1105
- })
1886
+ const toolName = toolNameMapper.getCustomName("shell")
1106
1887
  parts.push({
1107
1888
  type: "tool-call",
1108
1889
  id: event.item.id,
1109
- name: "OpenAiWebSearch",
1890
+ name: toolName,
1110
1891
  params: { action: event.item.action },
1111
- providerName: "web_search",
1112
- providerExecuted: true
1892
+ metadata: { openai: { ...makeItemIdMetadata(event.item.id) } }
1113
1893
  })
1894
+ break
1895
+ }
1896
+
1897
+ case "web_search_call": {
1898
+ delete activeToolCalls[event.output_index]
1899
+ const toolName = toolNameMapper.getCustomName(
1900
+ webSearchTool?.name ?? "web_search"
1901
+ )
1114
1902
  parts.push({
1115
1903
  type: "tool-result",
1116
1904
  id: event.item.id,
1117
- name: "OpenAiWebSearch",
1905
+ name: toolName,
1118
1906
  isFailure: false,
1119
- result: { status: event.item.status },
1120
- providerName: "web_search",
1907
+ result: { action: event.item.action, status: event.item.status },
1121
1908
  providerExecuted: true
1122
1909
  })
1123
1910
  break
@@ -1137,23 +1924,71 @@ const makeStreamResponse: (
1137
1924
  }
1138
1925
 
1139
1926
  case "response.output_text.annotation.added": {
1140
- if (event.annotation.type === "file_citation") {
1927
+ const annotation = event.annotation as typeof Generated.Annotation.Encoded
1928
+ // Track annotation for text-end metadata
1929
+ activeAnnotations.push(annotation)
1930
+ if (annotation.type === "container_file_citation") {
1141
1931
  parts.push({
1142
1932
  type: "source",
1143
1933
  sourceType: "document",
1144
1934
  id: yield* idGenerator.generateId(),
1145
1935
  mediaType: "text/plain",
1146
- title: event.annotation.filename ?? "Untitled Document",
1147
- fileName: event.annotation.filename ?? event.annotation.file_id
1936
+ title: annotation.filename,
1937
+ fileName: annotation.filename,
1938
+ metadata: {
1939
+ openai: {
1940
+ type: annotation.type,
1941
+ fileId: annotation.file_id,
1942
+ containerId: annotation.container_id
1943
+ }
1944
+ }
1148
1945
  })
1149
- }
1150
- if (event.annotation.type === "url_citation") {
1946
+ } else if (annotation.type === "file_citation") {
1947
+ parts.push({
1948
+ type: "source",
1949
+ sourceType: "document",
1950
+ id: yield* idGenerator.generateId(),
1951
+ mediaType: "text/plain",
1952
+ title: annotation.filename,
1953
+ fileName: annotation.filename,
1954
+ metadata: {
1955
+ openai: {
1956
+ type: annotation.type,
1957
+ fileId: annotation.file_id,
1958
+ index: annotation.index
1959
+ }
1960
+ }
1961
+ })
1962
+ } else if (annotation.type === "file_path") {
1963
+ parts.push({
1964
+ type: "source",
1965
+ sourceType: "document",
1966
+ id: yield* idGenerator.generateId(),
1967
+ mediaType: "application/octet-stream",
1968
+ title: annotation.file_id,
1969
+ fileName: annotation.file_id,
1970
+ metadata: {
1971
+ openai: {
1972
+ type: annotation.type,
1973
+ fileId: annotation.file_id,
1974
+ index: annotation.index
1975
+ }
1976
+ }
1977
+ })
1978
+ } else if (annotation.type === "url_citation") {
1151
1979
  parts.push({
1152
1980
  type: "source",
1153
1981
  sourceType: "url",
1154
1982
  id: yield* idGenerator.generateId(),
1155
- url: event.annotation.url,
1156
- title: event.annotation.title
1983
+ url: annotation.url,
1984
+ title: annotation.title,
1985
+ metadata: {
1986
+ openai: {
1987
+ type: annotation.type,
1988
+ startIndex: annotation.start_index,
1989
+ endIndex: annotation.end_index
1990
+ }
1991
+ }
1157
1992
  })
1158
1993
  }
1159
1994
  break
@@ -1171,20 +2006,124 @@ const makeStreamResponse: (
1171
2006
  break
1172
2007
  }
1173
2008
 
2009
+ case "response.apply_patch_call_operation_diff.delta": {
2010
+ const toolCall = activeToolCalls[event.output_index]
2011
+ if (Predicate.isNotUndefined(toolCall?.applyPatch)) {
2012
+ parts.push({
2013
+ type: "tool-params-delta",
2014
+ id: toolCall.id,
2015
+ delta: InternalUtilities.escapeJSONDelta(event.delta)
2016
+ })
2017
+ toolCall.applyPatch.hasDiff = true
2018
+ }
2019
+ break
2020
+ }
2021
+
2022
+ case "response.apply_patch_call_operation_diff.done": {
2023
+ const toolCall = activeToolCalls[event.output_index]
2024
+ if (Predicate.isNotUndefined(toolCall?.applyPatch) && !toolCall.applyPatch.endEmitted) {
2025
+ if (!toolCall.applyPatch.hasDiff && Predicate.isNotUndefined(event.delta)) {
2026
+ parts.push({
2027
+ type: "tool-params-delta",
2028
+ id: toolCall.id,
2029
+ delta: InternalUtilities.escapeJSONDelta(event.delta)
2030
+ })
2031
+ toolCall.applyPatch.hasDiff = true
2032
+ }
2033
+ parts.push({
2034
+ type: "tool-params-delta",
2035
+ id: toolCall.id,
2036
+ delta: `"}}`
2037
+ })
2038
+ parts.push({
2039
+ type: "tool-params-end",
2040
+ id: toolCall.id
2041
+ })
2042
+ toolCall.applyPatch.endEmitted = true
2043
+ }
2044
+ break
2045
+ }
2046
+
2047
+ case "response.code_interpreter_call_code.delta": {
2048
+ const toolCall = activeToolCalls[event.output_index]
2049
+ if (Predicate.isNotUndefined(toolCall)) {
2050
+ parts.push({
2051
+ type: "tool-params-delta",
2052
+ id: toolCall.id,
2053
+ delta: InternalUtilities.escapeJSONDelta(event.delta)
2054
+ })
2055
+ }
2056
+ break
2057
+ }
2058
+
2059
+ case "response.code_interpreter_call_code.done": {
2060
+ const toolCall = activeToolCalls[event.output_index]
2061
+ if (Predicate.isNotUndefined(toolCall) && Predicate.isNotUndefined(toolCall.codeInterpreter)) {
2062
+ const toolName = toolNameMapper.getCustomName("code_interpreter")
2063
+ parts.push({
2064
+ type: "tool-params-delta",
2065
+ id: toolCall.id,
2066
+ delta: "\"}"
2067
+ })
2068
+ parts.push({ type: "tool-params-end", id: toolCall.id })
2069
+ parts.push({
2070
+ type: "tool-call",
2071
+ id: toolCall.id,
2072
+ name: toolName,
2073
+ params: {
2074
+ code: event.code,
2075
+ container_id: toolCall.codeInterpreter.containerId
2076
+ },
2077
+ providerExecuted: true
2078
+ })
2079
+ }
2080
+ break
2081
+ }
2082
+
2083
+ case "response.image_generation_call.partial_image": {
2084
+ const toolName = toolNameMapper.getCustomName("image_generation")
2085
+ parts.push({
2086
+ type: "tool-result",
2087
+ id: event.item_id,
2088
+ name: toolName,
2089
+ isFailure: false,
2090
+ providerExecuted: false,
2091
+ result: { result: event.partial_image_b64 },
2092
+ preliminary: true
2093
+ })
2094
+ break
2095
+ }
2096
+
1174
2097
  case "response.reasoning_summary_part.added": {
1175
2098
  // The first reasoning start is pushed in the `response.output_item.added` block
1176
2099
  if (event.summary_index > 0) {
1177
2100
  const reasoningPart = activeReasoning[event.item_id]
1178
2101
  if (Predicate.isNotUndefined(reasoningPart)) {
1179
- reasoningPart.summaryParts.push(event.summary_index)
2102
+ // Conclude all can-conclude parts before starting a new one
2103
+ for (const [summaryIndex, status] of Object.entries(reasoningPart.summaryParts)) {
2104
+ if (status === "can-conclude") {
2105
+ parts.push({
2106
+ type: "reasoning-end",
2107
+ id: `${event.item_id}:${summaryIndex}`,
2108
+ metadata: {
2109
+ openai: {
2110
+ ...makeItemIdMetadata(event.item_id),
2111
+ ...makeEncryptedContentMetadata(reasoningPart.encryptedContent)
2112
+ }
2113
+ }
2114
+ })
2115
+ reasoningPart.summaryParts[Number(summaryIndex)] = "concluded"
2116
+ }
2117
+ }
2118
+ reasoningPart.summaryParts[event.summary_index] = "active"
1180
2119
  }
1181
2120
  parts.push({
1182
2121
  type: "reasoning-start",
1183
2122
  id: `${event.item_id}:${event.summary_index}`,
1184
2123
  metadata: {
1185
2124
  openai: {
1186
- itemId: event.item_id,
1187
- encryptedContent: reasoningPart?.encryptedContent
2125
+ ...makeItemIdMetadata(event.item_id),
2126
+ ...makeEncryptedContentMetadata(reasoningPart.encryptedContent)
1188
2127
  }
1189
2128
  }
1190
2129
  })
@@ -1197,15 +2136,34 @@ const makeStreamResponse: (
1197
2136
  type: "reasoning-delta",
1198
2137
  id: `${event.item_id}:${event.summary_index}`,
1199
2138
  delta: event.delta,
1200
- metadata: { openai: { itemId: event.item_id } }
2139
+ metadata: { openai: { ...makeItemIdMetadata(event.item_id) } }
1201
2140
  })
1202
2141
  break
1203
2142
  }
2143
+
2144
+ case "response.reasoning_summary_part.done": {
2145
+ // When OpenAI stores message data, we can immediately conclude the
2146
+ // reasoning part given that we do not need the encrypted content
2147
+ if (config.store === true) {
2148
+ parts.push({
2149
+ type: "reasoning-end",
2150
+ id: `${event.item_id}:${event.summary_index}`,
2151
+ metadata: { openai: { ...makeItemIdMetadata(event.item_id) } }
2152
+ })
2153
+ // Mark the summary part concluded
2154
+ activeReasoning[event.item_id].summaryParts[event.summary_index] = "concluded"
2155
+ } else {
2156
+ // Mark the summary part as can-conclude given we still need a
2157
+ // final summary part with the encrypted content
2158
+ activeReasoning[event.item_id].summaryParts[event.summary_index] = "can-conclude"
2159
+ }
2160
+ break
2161
+ }
1204
2162
  }
1205
2163
 
1206
2164
  return parts
1207
2165
  })),
1208
- Stream.flattenIterables
2166
+ Stream.flattenIterable
1209
2167
  )
1210
2168
  }
1211
2169
  )
@@ -1222,35 +2180,35 @@ const annotateRequest = (
1222
2180
  system: "openai",
1223
2181
  operation: { name: "chat" },
1224
2182
  request: {
1225
- model: request.model,
1226
- temperature: request.temperature,
1227
- topP: request.top_p,
1228
- maxTokens: request.max_output_tokens
2183
+ model: request.model as string,
2184
+ temperature: request.temperature as number | undefined,
2185
+ topP: request.top_p as number | undefined,
2186
+ maxTokens: request.max_output_tokens as number | undefined
1229
2187
  },
1230
2188
  openai: {
1231
2189
  request: {
1232
- responseFormat: request.text?.format?.type,
1233
- serviceTier: request.service_tier
2190
+ responseFormat: (request.text as any)?.format?.type,
2191
+ serviceTier: request.service_tier as string | undefined
1234
2192
  }
1235
2193
  }
1236
2194
  })
1237
2195
  }
1238
2196
 
1239
2197
  const annotateResponse = (span: Span, response: Generated.Response): void => {
1240
- const finishReason = response.incomplete_details?.reason
2198
+ const finishReason = response.incomplete_details?.reason as string | undefined
1241
2199
  addGenAIAnnotations(span, {
1242
2200
  response: {
1243
2201
  id: response.id,
1244
- model: response.model,
2202
+ model: response.model as string,
1245
2203
  finishReasons: Predicate.isNotUndefined(finishReason) ? [finishReason] : undefined
1246
2204
  },
1247
2205
  usage: {
1248
- inputTokens: response.usage?.input_tokens,
1249
- outputTokens: response.usage?.output_tokens
2206
+ inputTokens: response.usage?.input_tokens as number | undefined,
2207
+ outputTokens: response.usage?.output_tokens as number | undefined
1250
2208
  },
1251
2209
  openai: {
1252
2210
  response: {
1253
- serviceTier: response.service_tier
2211
+ serviceTier: response.service_tier as string | undefined
1254
2212
  }
1255
2213
  }
1256
2214
  })
@@ -1266,14 +2224,14 @@ const annotateStreamResponse = (span: Span, part: Response.StreamPartEncoded) =>
1266
2224
  })
1267
2225
  }
1268
2226
  if (part.type === "finish") {
1269
- const serviceTier = part.metadata?.openai?.serviceTier as string | undefined
2227
+ const serviceTier = (part.metadata as any)?.openai?.serviceTier as string | undefined
1270
2228
  addGenAIAnnotations(span, {
1271
2229
  response: {
1272
2230
  finishReasons: [part.reason]
1273
2231
  },
1274
2232
  usage: {
1275
- inputTokens: part.usage.inputTokens,
1276
- outputTokens: part.usage.outputTokens
2233
+ inputTokens: part.usage.inputTokens.total,
2234
+ outputTokens: part.usage.outputTokens.total
1277
2235
  },
1278
2236
  openai: {
1279
2237
  response: { serviceTier }
@@ -1283,15 +2241,23 @@ const annotateStreamResponse = (span: Span, part: Response.StreamPartEncoded) =>
1283
2241
  }
1284
2242
 
1285
2243
  // =============================================================================
1286
- // Tool Calling
2244
+ // Tool Conversion
1287
2245
  // =============================================================================
1288
2246
 
1289
- type OpenAiToolChoice = typeof Generated.CreateResponse.fields.tool_choice.from.Encoded
1290
-
1291
- const prepareTools: (options: LanguageModel.ProviderOptions) => Effect.Effect<{
2247
+ type OpenAiToolChoice = typeof Generated.CreateResponse.Encoded["tool_choice"]
2248
+
2249
+ const prepareTools = Effect.fnUntraced(function*<Tools extends ReadonlyArray<Tool.Any>>({
2250
+ config,
2251
+ options,
2252
+ toolNameMapper
2253
+ }: {
2254
+ readonly config: typeof Config.Service
2255
+ readonly options: LanguageModel.ProviderOptions
2256
+ readonly toolNameMapper: Tool.NameMapper<Tools>
2257
+ }): Effect.fn.Return<{
1292
2258
  readonly tools: ReadonlyArray<typeof Generated.Tool.Encoded> | undefined
1293
2259
  readonly toolChoice: OpenAiToolChoice | undefined
1294
- }, AiError.AiError> = Effect.fnUntraced(function*(options) {
2260
+ }, AiError.AiError> {
1295
2261
  // Return immediately if no tools are in the toolkit
1296
2262
  if (options.tools.length === 0) {
1297
2263
  return { tools: undefined, toolChoice: undefined }
@@ -1314,50 +2280,152 @@ const prepareTools: (options: LanguageModel.ProviderOptions) => Effect.Effect<{
1314
2280
  // Convert the tools in the toolkit to the provider-defined format
1315
2281
  for (const tool of allowedTools) {
1316
2282
  if (Tool.isUserDefined(tool)) {
2283
+ const strict = Tool.getStrictMode(tool) ?? config.strictJsonSchema ?? true
1317
2284
  tools.push({
1318
2285
  type: "function",
1319
2286
  name: tool.name,
1320
- description: Tool.getDescription(tool as any),
1321
- parameters: Tool.getJsonSchema(tool as any) as any,
1322
- strict: true
2287
+ description: Tool.getDescription(tool) ?? null,
2288
+ parameters: Tool.getJsonSchema(tool) as { readonly [x: string]: Schema.Json },
2289
+ strict
1323
2290
  })
1324
2291
  }
1325
2292
 
1326
2293
  if (Tool.isProviderDefined(tool)) {
1327
- switch (tool.id) {
1328
- case "openai.code_interpreter": {
2294
+ const openAiTool = tool as OpenAiTool.OpenAiTool
2295
+ switch (openAiTool.name) {
2296
+ case "OpenAiApplyPatch": {
2297
+ tools.push({ type: "apply_patch" })
2298
+ break
2299
+ }
2300
+ case "OpenAiCodeInterpreter": {
2301
+ const args = yield* Schema.decodeUnknownEffect(openAiTool.argsSchema)(tool.args).pipe(
2302
+ Effect.mapError((error) =>
2303
+ AiError.make({
2304
+ module: "OpenAiLanguageModel",
2305
+ method: "prepareTools",
2306
+ reason: new AiError.ToolConfigurationError({
2307
+ toolName: openAiTool.name,
2308
+ description: error.message
2309
+ })
2310
+ })
2311
+ )
2312
+ )
1329
2313
  tools.push({
1330
- ...tool.args,
2314
+ ...args,
1331
2315
  type: "code_interpreter"
1332
2316
  })
1333
2317
  break
1334
2318
  }
1335
- case "openai.file_search": {
2319
+ case "OpenAiFileSearch": {
2320
+ const args = yield* Schema.decodeUnknownEffect(openAiTool.argsSchema)(tool.args).pipe(
2321
+ Effect.mapError((error) =>
2322
+ AiError.make({
2323
+ module: "OpenAiLanguageModel",
2324
+ method: "prepareTools",
2325
+ reason: new AiError.ToolConfigurationError({
2326
+ toolName: openAiTool.name,
2327
+ description: error.message
2328
+ })
2329
+ })
2330
+ )
2331
+ )
1336
2332
  tools.push({
1337
- ...tool.args,
2333
+ ...args,
1338
2334
  type: "file_search"
1339
2335
  })
1340
2336
  break
1341
2337
  }
1342
- case "openai.web_search": {
2338
+ case "OpenAiShell": {
2339
+ tools.push({ type: "shell" })
2340
+ break
2341
+ }
2342
+ case "OpenAiImageGeneration": {
2343
+ const args = yield* Schema.decodeUnknownEffect(openAiTool.argsSchema)(tool.args).pipe(
2344
+ Effect.mapError((error) =>
2345
+ AiError.make({
2346
+ module: "OpenAiLanguageModel",
2347
+ method: "prepareTools",
2348
+ reason: new AiError.ToolConfigurationError({
2349
+ toolName: openAiTool.name,
2350
+ description: error.message
2351
+ })
2352
+ })
2353
+ )
2354
+ )
2355
+ tools.push({
2356
+ ...args,
2357
+ type: "image_generation"
2358
+ })
2359
+ break
2360
+ }
2361
+ case "OpenAiLocalShell": {
2362
+ tools.push({ type: "local_shell" })
2363
+ break
2364
+ }
2365
+ case "OpenAiMcp": {
2366
+ const args = yield* Schema.decodeUnknownEffect(openAiTool.argsSchema)(tool.args).pipe(
2367
+ Effect.mapError((error) =>
2368
+ AiError.make({
2369
+ module: "OpenAiLanguageModel",
2370
+ method: "prepareTools",
2371
+ reason: new AiError.ToolConfigurationError({
2372
+ toolName: openAiTool.name,
2373
+ description: error.message
2374
+ })
2375
+ })
2376
+ )
2377
+ )
2378
+ tools.push({
2379
+ ...args,
2380
+ type: "mcp"
2381
+ })
2382
+ break
2383
+ }
2384
+ case "OpenAiWebSearch": {
2385
+ const args = yield* Schema.decodeUnknownEffect(openAiTool.argsSchema)(tool.args).pipe(
2386
+ Effect.mapError((error) =>
2387
+ AiError.make({
2388
+ module: "OpenAiLanguageModel",
2389
+ method: "prepareTools",
2390
+ reason: new AiError.ToolConfigurationError({
2391
+ toolName: openAiTool.name,
2392
+ description: error.message
2393
+ })
2394
+ })
2395
+ )
2396
+ )
1343
2397
  tools.push({
1344
- ...tool.args,
2398
+ ...args,
1345
2399
  type: "web_search"
1346
2400
  })
1347
2401
  break
1348
2402
  }
1349
- case "openai.web_search_preview": {
2403
+ case "OpenAiWebSearchPreview": {
2404
+ const args = yield* Schema.decodeUnknownEffect(openAiTool.argsSchema)(tool.args).pipe(
2405
+ Effect.mapError((error) =>
2406
+ AiError.make({
2407
+ module: "OpenAiLanguageModel",
2408
+ method: "prepareTools",
2409
+ reason: new AiError.ToolConfigurationError({
2410
+ toolName: openAiTool.name,
2411
+ description: error.message
2412
+ })
2413
+ })
2414
+ )
2415
+ )
1350
2416
  tools.push({
1351
- ...tool.args,
2417
+ ...args,
1352
2418
  type: "web_search_preview"
1353
2419
  })
1354
2420
  break
1355
2421
  }
1356
2422
  default: {
1357
- return yield* new AiError.MalformedInput({
1358
- module: "AnthropicLanguageModel",
2423
+ return yield* AiError.make({
2424
+ module: "OpenAiLanguageModel",
1359
2425
  method: "prepareTools",
1360
- description: `Received request to call unknown provider-defined tool '${tool.name}'`
2426
+ reason: new AiError.InvalidRequestError({
2427
+ description: `Unknown provider-defined tool '${tool.name}'`
2428
+ })
1361
2429
  })
1362
2430
  }
1363
2431
  }
@@ -1369,9 +2437,13 @@ const prepareTools: (options: LanguageModel.ProviderOptions) => Effect.Effect<{
1369
2437
  }
1370
2438
 
1371
2439
  if (typeof options.toolChoice === "object" && "tool" in options.toolChoice) {
1372
- toolChoice = Predicate.isUndefined(OpenAiTool.getProviderDefinedToolName(options.toolChoice.tool))
1373
- ? { type: "function", name: options.toolChoice.tool }
1374
- : { type: options.toolChoice.tool }
2440
+ const toolName = toolNameMapper.getProviderName(options.toolChoice.tool)
2441
+ const providerNames = toolNameMapper.providerNames
2442
+ if (providerNames.includes(toolName)) {
2443
+ toolChoice = { type: toolName as any }
2444
+ } else {
2445
+ toolChoice = { type: "function", name: options.toolChoice.tool }
2446
+ }
1375
2447
  }
1376
2448
 
1377
2449
  return { tools, toolChoice }
@@ -1381,59 +2453,156 @@ const prepareTools: (options: LanguageModel.ProviderOptions) => Effect.Effect<{
1381
2453
  // Utilities
1382
2454
  // =============================================================================
1383
2455
 
1384
- const isFileId = (data: string, config: Config.Service): boolean =>
1385
- Predicate.isNotUndefined(config.fileIdPrefixes) && config.fileIdPrefixes.some((prefix) => data.startsWith(prefix))
2456
+ const isFileId = (data: string, config: typeof Config.Service): boolean =>
2457
+ config.fileIdPrefixes != null && config.fileIdPrefixes.some((prefix) => data.startsWith(prefix))
1386
2458
 
1387
2459
  const getItemId = (
2460
+ part:
2461
+ | Prompt.TextPart
2462
+ | Prompt.ReasoningPart
2463
+ | Prompt.ToolCallPart
2464
+ | Prompt.ToolResultPart
2465
+ ): string | null => part.options.openai?.itemId ?? null
2466
+ const getStatus = (
1388
2467
  part:
1389
2468
  | Prompt.TextPart
1390
2469
  | Prompt.ToolCallPart
1391
- ): string | undefined => part.options.openai?.itemId
2470
+ | Prompt.ToolResultPart
2471
+ ): typeof Generated.Message.Encoded["status"] | null => part.options.openai?.status ?? null
2472
+ const getEncryptedContent = (
2473
+ part: Prompt.ReasoningPart
2474
+ ): string | null => part.options.openai?.encryptedContent ?? null
2475
+
2476
+ const getImageDetail = (part: Prompt.FilePart): ImageDetail => part.options.openai?.imageDetail ?? "auto"
1392
2477
 
1393
- const getImageDetail = (part: Prompt.FilePart): typeof Generated.ImageDetail.Encoded =>
1394
- part.options.openai?.imageDetail ?? "auto"
2478
+ const makeItemIdMetadata = (itemId: string | undefined) => Predicate.isNotUndefined(itemId) ? { itemId } : undefined
1395
2479
 
1396
- const prepareInclude = (
1397
- options: LanguageModel.ProviderOptions,
1398
- config: Config.Service
1399
- ): ReadonlyArray<typeof Generated.IncludeEnum.Encoded> => {
1400
- const include: Set<typeof Generated.IncludeEnum.Encoded> = new Set(config.include ?? [])
2480
+ const makeEncryptedContentMetadata = (encryptedContent: string | null | undefined) =>
2481
+ Predicate.isNotNullish(encryptedContent) ? { encryptedContent } : undefined
2482
+
2483
+ const prepareResponseFormat = ({ config, options }: {
2484
+ readonly config: typeof Config.Service
2485
+ readonly options: LanguageModel.ProviderOptions
2486
+ }): typeof Generated.TextResponseFormatConfiguration.Encoded => {
2487
+ if (options.responseFormat.type === "json") {
2488
+ const name = options.responseFormat.objectName
2489
+ const schema = options.responseFormat.schema
2490
+ return {
2491
+ type: "json_schema",
2492
+ name,
2493
+ description: AST.resolveDescription(schema.ast) ?? "Response with a JSON object",
2494
+ schema: Tool.getJsonSchemaFromSchema(schema) as any,
2495
+ strict: config.strictJsonSchema ?? true
2496
+ }
2497
+ }
2498
+ return { type: "text" }
2499
+ }
1401
2500
 
1402
- const codeInterpreterTool = options.tools.find((tool) =>
1403
- Tool.isProviderDefined(tool) &&
1404
- tool.id === "openai.code_interpreter"
1405
- ) as Tool.AnyProviderDefined | undefined
2501
+ interface ModelCapabilities {
2502
+ readonly isReasoningModel: boolean
2503
+ readonly systemMessageMode: "remove" | "system" | "developer"
2504
+ readonly supportsFlexProcessing: boolean
2505
+ readonly supportsPriorityProcessing: boolean
2506
+ /**
2507
+ * Allow temperature, topP, logProbs when reasoningEffort is none.
2508
+ */
2509
+ readonly supportsNonReasoningParameters: boolean
2510
+ }
1406
2511
 
1407
- if (Predicate.isNotUndefined(codeInterpreterTool)) {
1408
- include.add("code_interpreter_call.outputs")
2512
+ const getModelCapabilities = (modelId: string): ModelCapabilities => {
2513
+ const supportsFlexProcessing = modelId.startsWith("o3") ||
2514
+ modelId.startsWith("o4-mini") ||
2515
+ (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"))
2516
+
2517
+ const supportsPriorityProcessing = modelId.startsWith("gpt-4") ||
2518
+ modelId.startsWith("gpt-5-mini") ||
2519
+ (modelId.startsWith("gpt-5") &&
2520
+ !modelId.startsWith("gpt-5-nano") &&
2521
+ !modelId.startsWith("gpt-5-chat")) ||
2522
+ modelId.startsWith("o3") ||
2523
+ modelId.startsWith("o4-mini")
2524
+
2525
+ // Use an allowlist approach: only known reasoning models should use the 'developer' role
2526
+ // This prevents issues with fine-tuned models, third-party models, and custom models
2527
+ const isReasoningModel = modelId.startsWith("o1") ||
2528
+ modelId.startsWith("o3") ||
2529
+ modelId.startsWith("o4-mini") ||
2530
+ modelId.startsWith("codex-mini") ||
2531
+ modelId.startsWith("computer-use-preview") ||
2532
+ (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"))
2533
+
2534
+ // https://platform.openai.com/docs/guides/latest-model#gpt-5-1-parameter-compatibility
2535
+ // GPT-5.1 and GPT-5.2 support temperature, topP, logProbs when reasoningEffort is none
2536
+ const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2")
2537
+
2538
+ const systemMessageMode = isReasoningModel ? "developer" : "system"
2539
+
2540
+ return {
2541
+ supportsFlexProcessing,
2542
+ supportsPriorityProcessing,
2543
+ isReasoningModel,
2544
+ systemMessageMode,
2545
+ supportsNonReasoningParameters
1409
2546
  }
2547
+ }
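// Editorial examples derived from the prefix rules above (illustrative, not exhaustive):
// "o4-mini" -> reasoning model, 'developer' system messages, flex and priority processing;
// "gpt-5.1" -> reasoning model that additionally allows temperature/topP/logProbs when
// reasoningEffort is "none"; "gpt-5-chat-latest" -> not a reasoning model, 'system'
// messages; "gpt-4o" -> priority processing only, 'system' messages.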
2548
+
2549
+ const getApprovalRequestIdMapping = (prompt: Prompt.Prompt): ReadonlyMap<string, string> => {
2550
+ const mapping = new Map<string, string>()
2551
+
2552
+ for (const message of prompt.content) {
2553
+ if (message.role !== "assistant") {
2554
+ continue
2555
+ }
1410
2556
 
1411
- const webSearchTool = options.tools.find((tool) =>
1412
- Tool.isProviderDefined(tool) &&
1413
- (tool.id === "openai.web_search" ||
1414
- tool.id === "openai.web_search_preview")
1415
- ) as Tool.AnyProviderDefined | undefined
2557
+ for (const part of message.content) {
2558
+ if (part.type !== "tool-call") {
2559
+ continue
2560
+ }
2561
+
2562
+ const approvalRequestId = part.options.openai?.approvalRequestId
1416
2563
 
1417
- if (Predicate.isNotUndefined(webSearchTool)) {
1418
- include.add("web_search_call.action.sources")
2564
+ if (Predicate.isNotNullish(approvalRequestId)) {
2565
+ mapping.set(approvalRequestId, part.id)
2566
+ }
2567
+ }
1419
2568
  }
1420
2569
 
1421
- return Array.from(include)
2570
+ return mapping
1422
2571
  }
1423
2572
 
1424
- const prepareResponseFormat = (
1425
- options: LanguageModel.ProviderOptions
1426
- ): typeof Generated.TextResponseFormatConfiguration.Encoded => {
1427
- if (options.responseFormat.type === "json") {
1428
- const name = options.responseFormat.objectName
1429
- const schema = options.responseFormat.schema
2573
+ const getUsage = (usage: Generated.ResponseUsage | null | undefined): Response.Usage => {
2574
+ if (Predicate.isNullish(usage)) {
1430
2575
  return {
1431
- type: "json_schema",
1432
- name,
1433
- description: Tool.getDescriptionFromSchemaAst(schema.ast) ?? "Response with a JSON object",
1434
- schema: Tool.getJsonSchemaFromSchemaAst(schema.ast) as any,
1435
- strict: true
2576
+ inputTokens: {
2577
+ uncached: undefined,
2578
+ total: undefined,
2579
+ cacheRead: undefined,
2580
+ cacheWrite: undefined
2581
+ },
2582
+ outputTokens: {
2583
+ total: undefined,
2584
+ text: undefined,
2585
+ reasoning: undefined
2586
+ }
2587
+ }
2588
+ }
2589
+
2590
+ const inputTokens = usage.input_tokens
2591
+ const outputTokens = usage.output_tokens
2592
+ const cachedTokens = usage.input_tokens_details.cached_tokens
2593
+ const reasoningTokens = usage.output_tokens_details.reasoning_tokens
2594
+
2595
+ return {
2596
+ inputTokens: {
2597
+ uncached: inputTokens - cachedTokens,
2598
+ total: inputTokens,
2599
+ cacheRead: cachedTokens,
2600
+ cacheWrite: undefined
2601
+ },
2602
+ outputTokens: {
2603
+ total: outputTokens,
2604
+ text: outputTokens - reasoningTokens,
2605
+ reasoning: reasoningTokens
1436
2606
  }
1437
2607
  }
1438
- return { type: "text" }
1439
2608
  }
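// Editorial worked example for getUsage (hypothetical numbers, not part of the diff): with
// usage = { input_tokens: 1200, output_tokens: 500,
//           input_tokens_details: { cached_tokens: 200 },
//           output_tokens_details: { reasoning_tokens: 150 } }
// the mapped Response.Usage is
//   inputTokens:  { total: 1200, cacheRead: 200, uncached: 1000, cacheWrite: undefined }
//   outputTokens: { total: 500, reasoning: 150, text: 350 }
// i.e. uncached = input_tokens - cached_tokens and text = output_tokens - reasoning_tokens.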