opencodekit 0.20.8 → 0.21.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/dist/index.js +1 -1
  2. package/dist/template/.opencode/AGENTS.md +12 -0
  3. package/dist/template/.opencode/memory.db +0 -0
  4. package/dist/template/.opencode/memory.db-shm +0 -0
  5. package/dist/template/.opencode/memory.db-wal +0 -0
  6. package/dist/template/.opencode/opencode.json +83 -609
  7. package/dist/template/.opencode/opencodex-fast.jsonc +1 -1
  8. package/dist/template/.opencode/package.json +1 -1
  9. package/dist/template/.opencode/plugin/copilot-auth.ts +27 -12
  10. package/dist/template/.opencode/plugin/prompt-leverage.ts +193 -0
  11. package/dist/template/.opencode/plugin/prompt-leverage.ts.bak +228 -0
  12. package/dist/template/.opencode/plugin/sdk/copilot/copilot-provider.ts +14 -2
  13. package/dist/template/.opencode/plugin/sdk/copilot/index.ts +2 -2
  14. package/dist/template/.opencode/plugin/sdk/copilot/responses/convert-to-openai-responses-input.ts +335 -0
  15. package/dist/template/.opencode/plugin/sdk/copilot/responses/map-openai-responses-finish-reason.ts +22 -0
  16. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-config.ts +18 -0
  17. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-error.ts +22 -0
  18. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-api-types.ts +214 -0
  19. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-language-model.ts +1770 -0
  20. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-prepare-tools.ts +173 -0
  21. package/dist/template/.opencode/plugin/sdk/copilot/responses/openai-responses-settings.ts +1 -0
  22. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/code-interpreter.ts +87 -0
  23. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/file-search.ts +127 -0
  24. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/image-generation.ts +114 -0
  25. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/local-shell.ts +64 -0
  26. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/web-search-preview.ts +103 -0
  27. package/dist/template/.opencode/plugin/sdk/copilot/responses/tool/web-search.ts +102 -0
  28. package/dist/template/.opencode/skill/gh-address-comments/SKILL.md +29 -0
  29. package/dist/template/.opencode/skill/gh-address-comments/scripts/fetch_comments.py +237 -0
  30. package/dist/template/.opencode/skill/gh-fix-ci/SKILL.md +38 -0
  31. package/dist/template/.opencode/skill/gh-fix-ci/scripts/inspect_pr_checks.py +509 -0
  32. package/dist/template/.opencode/skill/prompt-leverage/SKILL.md +90 -0
  33. package/dist/template/.opencode/skill/prompt-leverage/references/framework.md +91 -0
  34. package/dist/template/.opencode/skill/prompt-leverage/scripts/augment_prompt.py +157 -0
  35. package/dist/template/.opencode/skill/screenshot/SKILL.md +48 -0
  36. package/dist/template/.opencode/skill/screenshot/scripts/ensure_macos_permissions.sh +54 -0
  37. package/dist/template/.opencode/skill/screenshot/scripts/macos_display_info.swift +22 -0
  38. package/dist/template/.opencode/skill/screenshot/scripts/macos_permissions.swift +40 -0
  39. package/dist/template/.opencode/skill/screenshot/scripts/macos_window_info.swift +126 -0
  40. package/dist/template/.opencode/skill/screenshot/scripts/take_screenshot.ps1 +163 -0
  41. package/dist/template/.opencode/skill/screenshot/scripts/take_screenshot.py +585 -0
  42. package/dist/template/.opencode/skill/security-threat-model/SKILL.md +36 -0
  43. package/dist/template/.opencode/skill/security-threat-model/references/prompt-template.md +255 -0
  44. package/dist/template/.opencode/skill/security-threat-model/references/security-controls-and-assets.md +32 -0
  45. package/dist/template/.opencode/skill/skill-installer/SKILL.md +58 -0
  46. package/dist/template/.opencode/skill/skill-installer/scripts/github_utils.py +21 -0
  47. package/dist/template/.opencode/skill/skill-installer/scripts/install-skill-from-github.py +313 -0
  48. package/dist/template/.opencode/skill/skill-installer/scripts/list-skills.py +106 -0
  49. package/package.json +1 -1
@@ -0,0 +1,1770 @@
1
+ import {
2
+ APICallError,
3
+ type JSONValue,
4
+ type LanguageModelV3,
5
+ type LanguageModelV3CallOptions,
6
+ type LanguageModelV3Content,
7
+ type LanguageModelV3ProviderTool,
8
+ type LanguageModelV3StreamPart,
9
+ type SharedV3ProviderMetadata,
10
+ type SharedV3Warning,
11
+ } from "@ai-sdk/provider"
12
+ import {
13
+ combineHeaders,
14
+ createEventSourceResponseHandler,
15
+ createJsonResponseHandler,
16
+ generateId,
17
+ parseProviderOptions,
18
+ type ParseResult,
19
+ postJsonToApi,
20
+ } from "@ai-sdk/provider-utils"
21
+ import { z } from "zod/v4"
22
+ import type { OpenAIConfig } from "./openai-config.js"
23
+ import { openaiFailedResponseHandler } from "./openai-error.js"
24
+ import { codeInterpreterInputSchema, codeInterpreterOutputSchema } from "./tool/code-interpreter.js"
25
+ import { fileSearchOutputSchema } from "./tool/file-search.js"
26
+ import { imageGenerationOutputSchema } from "./tool/image-generation.js"
27
+ import { convertToOpenAIResponsesInput } from "./convert-to-openai-responses-input.js"
28
+ import { mapOpenAIResponseFinishReason } from "./map-openai-responses-finish-reason.js"
29
+ import type { OpenAIResponsesIncludeOptions, OpenAIResponsesIncludeValue } from "./openai-responses-api-types.js"
30
+ import { prepareResponsesTools } from "./openai-responses-prepare-tools.js"
31
+ import type { OpenAIResponsesModelId } from "./openai-responses-settings.js"
32
+ import { localShellInputSchema } from "./tool/local-shell.js"
33
+
34
+ const webSearchCallItem = z.object({
35
+ type: z.literal("web_search_call"),
36
+ id: z.string(),
37
+ status: z.string(),
38
+ action: z
39
+ .discriminatedUnion("type", [
40
+ z.object({
41
+ type: z.literal("search"),
42
+ query: z.string().nullish(),
43
+ }),
44
+ z.object({
45
+ type: z.literal("open_page"),
46
+ url: z.string(),
47
+ }),
48
+ z.object({
49
+ type: z.literal("find"),
50
+ url: z.string(),
51
+ pattern: z.string(),
52
+ }),
53
+ ])
54
+ .nullish(),
55
+ })
56
+
57
+ const fileSearchCallItem = z.object({
58
+ type: z.literal("file_search_call"),
59
+ id: z.string(),
60
+ queries: z.array(z.string()),
61
+ results: z
62
+ .array(
63
+ z.object({
64
+ attributes: z.record(z.string(), z.unknown()),
65
+ file_id: z.string(),
66
+ filename: z.string(),
67
+ score: z.number(),
68
+ text: z.string(),
69
+ }),
70
+ )
71
+ .nullish(),
72
+ })
73
+
74
+ const codeInterpreterCallItem = z.object({
75
+ type: z.literal("code_interpreter_call"),
76
+ id: z.string(),
77
+ code: z.string().nullable(),
78
+ container_id: z.string(),
79
+ outputs: z
80
+ .array(
81
+ z.discriminatedUnion("type", [
82
+ z.object({ type: z.literal("logs"), logs: z.string() }),
83
+ z.object({ type: z.literal("image"), url: z.string() }),
84
+ ]),
85
+ )
86
+ .nullable(),
87
+ })
88
+
89
+ const localShellCallItem = z.object({
90
+ type: z.literal("local_shell_call"),
91
+ id: z.string(),
92
+ call_id: z.string(),
93
+ action: z.object({
94
+ type: z.literal("exec"),
95
+ command: z.array(z.string()),
96
+ timeout_ms: z.number().optional(),
97
+ user: z.string().optional(),
98
+ working_directory: z.string().optional(),
99
+ env: z.record(z.string(), z.string()).optional(),
100
+ }),
101
+ })
102
+
103
+ const imageGenerationCallItem = z.object({
104
+ type: z.literal("image_generation_call"),
105
+ id: z.string(),
106
+ result: z.string(),
107
+ })
108
+
109
+ /**
110
+ * `top_logprobs` request body argument can be set to an integer between
111
+ * 0 and 20 specifying the number of most likely tokens to return at each
112
+ * token position, each with an associated log probability.
113
+ *
114
+ * @see https://platform.openai.com/docs/api-reference/responses/create#responses_create-top_logprobs
115
+ */
116
+ const TOP_LOGPROBS_MAX = 20
117
+
118
+ const LOGPROBS_SCHEMA = z.array(
119
+ z.object({
120
+ token: z.string(),
121
+ logprob: z.number(),
122
+ top_logprobs: z.array(
123
+ z.object({
124
+ token: z.string(),
125
+ logprob: z.number(),
126
+ }),
127
+ ),
128
+ }),
129
+ )
130
+
131
+ export class OpenAIResponsesLanguageModel implements LanguageModelV3 {
132
+ readonly specificationVersion = "v3"
133
+
134
+ readonly modelId: OpenAIResponsesModelId
135
+
136
+ private readonly config: OpenAIConfig
137
+
138
+ constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig) {
139
+ this.modelId = modelId
140
+ this.config = config
141
+ }
142
+
143
+ readonly supportedUrls: Record<string, RegExp[]> = {
144
+ "image/*": [/^https?:\/\/.*$/],
145
+ "application/pdf": [/^https?:\/\/.*$/],
146
+ }
147
+
148
+ get provider(): string {
149
+ return this.config.provider
150
+ }
151
+
152
+ private async getArgs({
153
+ maxOutputTokens,
154
+ temperature,
155
+ stopSequences,
156
+ topP,
157
+ topK,
158
+ presencePenalty,
159
+ frequencyPenalty,
160
+ seed,
161
+ prompt,
162
+ providerOptions,
163
+ tools,
164
+ toolChoice,
165
+ responseFormat,
166
+ }: LanguageModelV3CallOptions) {
167
+ const warnings: SharedV3Warning[] = []
168
+ const modelConfig = getResponsesModelConfig(this.modelId)
169
+
170
+ if (topK != null) {
171
+ warnings.push({ type: "unsupported", feature: "topK" })
172
+ }
173
+
174
+ if (seed != null) {
175
+ warnings.push({ type: "unsupported", feature: "seed" })
176
+ }
177
+
178
+ if (presencePenalty != null) {
179
+ warnings.push({
180
+ type: "unsupported",
181
+ feature: "presencePenalty",
182
+ })
183
+ }
184
+
185
+ if (frequencyPenalty != null) {
186
+ warnings.push({
187
+ type: "unsupported",
188
+ feature: "frequencyPenalty",
189
+ })
190
+ }
191
+
192
+ if (stopSequences != null) {
193
+ warnings.push({ type: "unsupported", feature: "stopSequences" })
194
+ }
195
+
196
+ const openaiOptions = await parseProviderOptions({
197
+ provider: "copilot",
198
+ providerOptions,
199
+ schema: openaiResponsesProviderOptionsSchema,
200
+ })
201
+
202
+ const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
203
+ prompt,
204
+ systemMessageMode: modelConfig.systemMessageMode,
205
+ fileIdPrefixes: this.config.fileIdPrefixes,
206
+ store: openaiOptions?.store ?? true,
207
+ hasLocalShellTool: hasOpenAITool("openai.local_shell"),
208
+ })
209
+
210
+ warnings.push(...inputWarnings)
211
+
212
+ const strictJsonSchema = openaiOptions?.strictJsonSchema ?? false
213
+
214
+ let include: OpenAIResponsesIncludeOptions = openaiOptions?.include
215
+
216
+ function addInclude(key: OpenAIResponsesIncludeValue) {
217
+ include = include != null ? [...include, key] : [key]
218
+ }
219
+
220
+ function hasOpenAITool(id: string) {
221
+ return tools?.find((tool) => tool.type === "provider" && tool.id === id) != null
222
+ }
223
+
224
+ // when logprobs are requested, automatically include them:
225
+ const topLogprobs =
226
+ typeof openaiOptions?.logprobs === "number"
227
+ ? openaiOptions?.logprobs
228
+ : openaiOptions?.logprobs === true
229
+ ? TOP_LOGPROBS_MAX
230
+ : undefined
231
+
232
+ if (topLogprobs) {
233
+ addInclude("message.output_text.logprobs")
234
+ }
235
+
236
+ // when a web search tool is present, automatically include the sources:
237
+ const webSearchToolName = (
238
+ tools?.find(
239
+ (tool) =>
240
+ tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
241
+ ) as LanguageModelV3ProviderTool | undefined
242
+ )?.name
243
+
244
+ if (webSearchToolName) {
245
+ addInclude("web_search_call.action.sources")
246
+ }
247
+
248
+ // when a code interpreter tool is present, automatically include the outputs:
249
+ if (hasOpenAITool("openai.code_interpreter")) {
250
+ addInclude("code_interpreter_call.outputs")
251
+ }
252
+
253
+ const baseArgs = {
254
+ model: this.modelId,
255
+ input,
256
+ temperature,
257
+ top_p: topP,
258
+ max_output_tokens: maxOutputTokens,
259
+
260
+ ...((responseFormat?.type === "json" || openaiOptions?.textVerbosity) && {
261
+ text: {
262
+ ...(responseFormat?.type === "json" && {
263
+ format:
264
+ responseFormat.schema != null
265
+ ? {
266
+ type: "json_schema",
267
+ strict: strictJsonSchema,
268
+ name: responseFormat.name ?? "response",
269
+ description: responseFormat.description,
270
+ schema: responseFormat.schema,
271
+ }
272
+ : { type: "json_object" },
273
+ }),
274
+ ...(openaiOptions?.textVerbosity && {
275
+ verbosity: openaiOptions.textVerbosity,
276
+ }),
277
+ },
278
+ }),
279
+
280
+ // provider options:
281
+ max_tool_calls: openaiOptions?.maxToolCalls,
282
+ metadata: openaiOptions?.metadata,
283
+ parallel_tool_calls: openaiOptions?.parallelToolCalls,
284
+ previous_response_id: openaiOptions?.previousResponseId,
285
+ store: openaiOptions?.store,
286
+ user: openaiOptions?.user,
287
+ instructions: openaiOptions?.instructions,
288
+ service_tier: openaiOptions?.serviceTier,
289
+ include,
290
+ prompt_cache_key: openaiOptions?.promptCacheKey,
291
+ safety_identifier: openaiOptions?.safetyIdentifier,
292
+ top_logprobs: topLogprobs,
293
+
294
+ // model-specific settings:
295
+ ...(modelConfig.isReasoningModel &&
296
+ (openaiOptions?.reasoningEffort != null || openaiOptions?.reasoningSummary != null) && {
297
+ reasoning: {
298
+ ...(openaiOptions?.reasoningEffort != null && {
299
+ effort: openaiOptions.reasoningEffort,
300
+ }),
301
+ ...(openaiOptions?.reasoningSummary != null && {
302
+ summary: openaiOptions.reasoningSummary,
303
+ }),
304
+ },
305
+ }),
306
+ ...(modelConfig.requiredAutoTruncation && {
307
+ truncation: "auto",
308
+ }),
309
+ }
310
+
311
+ if (modelConfig.isReasoningModel) {
312
+ // remove unsupported settings for reasoning models
313
+ // see https://platform.openai.com/docs/guides/reasoning#limitations
314
+ if (baseArgs.temperature != null) {
315
+ baseArgs.temperature = undefined
316
+ warnings.push({
317
+ type: "unsupported",
318
+ feature: "temperature",
319
+ details: "temperature is not supported for reasoning models",
320
+ })
321
+ }
322
+
323
+ if (baseArgs.top_p != null) {
324
+ baseArgs.top_p = undefined
325
+ warnings.push({
326
+ type: "unsupported",
327
+ feature: "topP",
328
+ details: "topP is not supported for reasoning models",
329
+ })
330
+ }
331
+ } else {
332
+ if (openaiOptions?.reasoningEffort != null) {
333
+ warnings.push({
334
+ type: "unsupported",
335
+ feature: "reasoningEffort",
336
+ details: "reasoningEffort is not supported for non-reasoning models",
337
+ })
338
+ }
339
+
340
+ if (openaiOptions?.reasoningSummary != null) {
341
+ warnings.push({
342
+ type: "unsupported",
343
+ feature: "reasoningSummary",
344
+ details: "reasoningSummary is not supported for non-reasoning models",
345
+ })
346
+ }
347
+ }
348
+
349
+ // Validate flex processing support
350
+ if (openaiOptions?.serviceTier === "flex" && !modelConfig.supportsFlexProcessing) {
351
+ warnings.push({
352
+ type: "unsupported",
353
+ feature: "serviceTier",
354
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
355
+ })
356
+ // Remove from args if not supported
357
+ delete (baseArgs as any).service_tier
358
+ }
359
+
360
+ // Validate priority processing support
361
+ if (openaiOptions?.serviceTier === "priority" && !modelConfig.supportsPriorityProcessing) {
362
+ warnings.push({
363
+ type: "unsupported",
364
+ feature: "serviceTier",
365
+ details:
366
+ "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
367
+ })
368
+ // Remove from args if not supported
369
+ delete (baseArgs as any).service_tier
370
+ }
371
+
372
+ const {
373
+ tools: openaiTools,
374
+ toolChoice: openaiToolChoice,
375
+ toolWarnings,
376
+ } = prepareResponsesTools({
377
+ tools,
378
+ toolChoice,
379
+ strictJsonSchema,
380
+ })
381
+
382
+ return {
383
+ webSearchToolName,
384
+ args: {
385
+ ...baseArgs,
386
+ tools: openaiTools,
387
+ tool_choice: openaiToolChoice,
388
+ },
389
+ warnings: [...warnings, ...toolWarnings],
390
+ }
391
+ }
392
+
393
+ async doGenerate(options: LanguageModelV3CallOptions) {
394
+ const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
395
+ const url = this.config.url({
396
+ path: "/responses",
397
+ modelId: this.modelId,
398
+ })
399
+
400
+ const {
401
+ responseHeaders,
402
+ value: response,
403
+ rawValue: rawResponse,
404
+ } = await postJsonToApi({
405
+ url,
406
+ headers: combineHeaders(this.config.headers(), options.headers),
407
+ body,
408
+ failedResponseHandler: openaiFailedResponseHandler,
409
+ successfulResponseHandler: createJsonResponseHandler(
410
+ z.object({
411
+ id: z.string(),
412
+ created_at: z.number(),
413
+ error: z
414
+ .object({
415
+ code: z.string(),
416
+ message: z.string(),
417
+ })
418
+ .nullish(),
419
+ model: z.string(),
420
+ output: z.array(
421
+ z.discriminatedUnion("type", [
422
+ z.object({
423
+ type: z.literal("message"),
424
+ role: z.literal("assistant"),
425
+ id: z.string(),
426
+ content: z.array(
427
+ z.object({
428
+ type: z.literal("output_text"),
429
+ text: z.string(),
430
+ logprobs: LOGPROBS_SCHEMA.nullish(),
431
+ annotations: z.array(
432
+ z.discriminatedUnion("type", [
433
+ z.object({
434
+ type: z.literal("url_citation"),
435
+ start_index: z.number(),
436
+ end_index: z.number(),
437
+ url: z.string(),
438
+ title: z.string(),
439
+ }),
440
+ z.object({
441
+ type: z.literal("file_citation"),
442
+ file_id: z.string(),
443
+ filename: z.string().nullish(),
444
+ index: z.number().nullish(),
445
+ start_index: z.number().nullish(),
446
+ end_index: z.number().nullish(),
447
+ quote: z.string().nullish(),
448
+ }),
449
+ z.object({
450
+ type: z.literal("container_file_citation"),
451
+ }),
452
+ ]),
453
+ ),
454
+ }),
455
+ ),
456
+ }),
457
+ webSearchCallItem,
458
+ fileSearchCallItem,
459
+ codeInterpreterCallItem,
460
+ imageGenerationCallItem,
461
+ localShellCallItem,
462
+ z.object({
463
+ type: z.literal("function_call"),
464
+ call_id: z.string(),
465
+ name: z.string(),
466
+ arguments: z.string(),
467
+ id: z.string(),
468
+ }),
469
+ z.object({
470
+ type: z.literal("computer_call"),
471
+ id: z.string(),
472
+ status: z.string().optional(),
473
+ }),
474
+ z.object({
475
+ type: z.literal("reasoning"),
476
+ id: z.string(),
477
+ encrypted_content: z.string().nullish(),
478
+ summary: z.array(
479
+ z.object({
480
+ type: z.literal("summary_text"),
481
+ text: z.string(),
482
+ }),
483
+ ),
484
+ }),
485
+ ]),
486
+ ),
487
+ service_tier: z.string().nullish(),
488
+ incomplete_details: z.object({ reason: z.string() }).nullish(),
489
+ usage: usageSchema,
490
+ }),
491
+ ),
492
+ abortSignal: options.abortSignal,
493
+ fetch: this.config.fetch,
494
+ })
495
+
496
+ if (response.error) {
497
+ throw new APICallError({
498
+ message: response.error.message,
499
+ url,
500
+ requestBodyValues: body,
501
+ statusCode: 400,
502
+ responseHeaders,
503
+ responseBody: rawResponse as string,
504
+ isRetryable: false,
505
+ })
506
+ }
507
+
508
+ const content: Array<LanguageModelV3Content> = []
509
+ const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
510
+
511
+ // flag that checks if there have been client-side tool calls (not executed by openai)
512
+ let hasFunctionCall = false
513
+
514
+ // map response content to content array
515
+ for (const part of response.output) {
516
+ switch (part.type) {
517
+ case "reasoning": {
518
+ // when there are no summary parts, we need to add an empty reasoning part:
519
+ if (part.summary.length === 0) {
520
+ part.summary.push({ type: "summary_text", text: "" })
521
+ }
522
+
523
+ for (const summary of part.summary) {
524
+ content.push({
525
+ type: "reasoning" as const,
526
+ text: summary.text,
527
+ providerMetadata: {
528
+ openai: {
529
+ itemId: part.id,
530
+ reasoningEncryptedContent: part.encrypted_content ?? null,
531
+ },
532
+ },
533
+ })
534
+ }
535
+ break
536
+ }
537
+
538
+ case "image_generation_call": {
539
+ content.push({
540
+ type: "tool-call",
541
+ toolCallId: part.id,
542
+ toolName: "image_generation",
543
+ input: "{}",
544
+ providerExecuted: true,
545
+ })
546
+
547
+ content.push({
548
+ type: "tool-result",
549
+ toolCallId: part.id,
550
+ toolName: "image_generation",
551
+ result: {
552
+ result: part.result,
553
+ } satisfies z.infer<typeof imageGenerationOutputSchema>,
554
+ })
555
+
556
+ break
557
+ }
558
+
559
+ case "local_shell_call": {
560
+ content.push({
561
+ type: "tool-call",
562
+ toolCallId: part.call_id,
563
+ toolName: "local_shell",
564
+ input: JSON.stringify({ action: part.action } satisfies z.infer<typeof localShellInputSchema>),
565
+ providerMetadata: {
566
+ openai: {
567
+ itemId: part.id,
568
+ },
569
+ },
570
+ })
571
+
572
+ break
573
+ }
574
+
575
+ case "message": {
576
+ for (const contentPart of part.content) {
577
+ if (options.providerOptions?.openai?.logprobs && contentPart.logprobs) {
578
+ logprobs.push(contentPart.logprobs)
579
+ }
580
+
581
+ content.push({
582
+ type: "text",
583
+ text: contentPart.text,
584
+ providerMetadata: {
585
+ openai: {
586
+ itemId: part.id,
587
+ },
588
+ },
589
+ })
590
+
591
+ for (const annotation of contentPart.annotations) {
592
+ if (annotation.type === "url_citation") {
593
+ content.push({
594
+ type: "source",
595
+ sourceType: "url",
596
+ id: this.config.generateId?.() ?? generateId(),
597
+ url: annotation.url,
598
+ title: annotation.title,
599
+ })
600
+ } else if (annotation.type === "file_citation") {
601
+ content.push({
602
+ type: "source",
603
+ sourceType: "document",
604
+ id: this.config.generateId?.() ?? generateId(),
605
+ mediaType: "text/plain",
606
+ title: annotation.quote ?? annotation.filename ?? "Document",
607
+ filename: annotation.filename ?? annotation.file_id,
608
+ })
609
+ }
610
+ }
611
+ }
612
+
613
+ break
614
+ }
615
+
616
+ case "function_call": {
617
+ hasFunctionCall = true
618
+
619
+ content.push({
620
+ type: "tool-call",
621
+ toolCallId: part.call_id,
622
+ toolName: part.name,
623
+ input: part.arguments,
624
+ providerMetadata: {
625
+ openai: {
626
+ itemId: part.id,
627
+ },
628
+ },
629
+ })
630
+ break
631
+ }
632
+
633
+ case "web_search_call": {
634
+ content.push({
635
+ type: "tool-call",
636
+ toolCallId: part.id,
637
+ toolName: webSearchToolName ?? "web_search",
638
+ input: JSON.stringify({ action: part.action }),
639
+ providerExecuted: true,
640
+ })
641
+
642
+ content.push({
643
+ type: "tool-result",
644
+ toolCallId: part.id,
645
+ toolName: webSearchToolName ?? "web_search",
646
+ result: { status: part.status },
647
+ })
648
+
649
+ break
650
+ }
651
+
652
+ case "computer_call": {
653
+ content.push({
654
+ type: "tool-call",
655
+ toolCallId: part.id,
656
+ toolName: "computer_use",
657
+ input: "",
658
+ providerExecuted: true,
659
+ })
660
+
661
+ content.push({
662
+ type: "tool-result",
663
+ toolCallId: part.id,
664
+ toolName: "computer_use",
665
+ result: {
666
+ type: "computer_use_tool_result",
667
+ status: part.status || "completed",
668
+ },
669
+ })
670
+ break
671
+ }
672
+
673
+ case "file_search_call": {
674
+ content.push({
675
+ type: "tool-call",
676
+ toolCallId: part.id,
677
+ toolName: "file_search",
678
+ input: "{}",
679
+ providerExecuted: true,
680
+ })
681
+
682
+ content.push({
683
+ type: "tool-result",
684
+ toolCallId: part.id,
685
+ toolName: "file_search",
686
+ result: {
687
+ queries: part.queries,
688
+ results:
689
+ part.results?.map((result) => ({
690
+ attributes: result.attributes as Record<string, JSONValue>,
691
+ fileId: result.file_id,
692
+ filename: result.filename,
693
+ score: result.score,
694
+ text: result.text,
695
+ })) ?? null,
696
+ } satisfies z.infer<typeof fileSearchOutputSchema>,
697
+ })
698
+ break
699
+ }
700
+
701
+ case "code_interpreter_call": {
702
+ content.push({
703
+ type: "tool-call",
704
+ toolCallId: part.id,
705
+ toolName: "code_interpreter",
706
+ input: JSON.stringify({
707
+ code: part.code,
708
+ containerId: part.container_id,
709
+ } satisfies z.infer<typeof codeInterpreterInputSchema>),
710
+ providerExecuted: true,
711
+ })
712
+
713
+ content.push({
714
+ type: "tool-result",
715
+ toolCallId: part.id,
716
+ toolName: "code_interpreter",
717
+ result: {
718
+ outputs: part.outputs,
719
+ } satisfies z.infer<typeof codeInterpreterOutputSchema>,
720
+ })
721
+ break
722
+ }
723
+ }
724
+ }
725
+
726
+ const providerMetadata: SharedV3ProviderMetadata = {
727
+ openai: { responseId: response.id },
728
+ }
729
+
730
+ if (logprobs.length > 0) {
731
+ providerMetadata.openai.logprobs = logprobs
732
+ }
733
+
734
+ if (typeof response.service_tier === "string") {
735
+ providerMetadata.openai.serviceTier = response.service_tier
736
+ }
737
+
738
+ return {
739
+ content,
740
+ finishReason: {
741
+ unified: mapOpenAIResponseFinishReason({
742
+ finishReason: response.incomplete_details?.reason,
743
+ hasFunctionCall,
744
+ }),
745
+ raw: response.incomplete_details?.reason,
746
+ },
747
+ usage: {
748
+ inputTokens: {
749
+ total: response.usage.input_tokens,
750
+ noCache:
751
+ response.usage.input_tokens_details?.cached_tokens != null
752
+ ? response.usage.input_tokens - response.usage.input_tokens_details.cached_tokens
753
+ : undefined,
754
+ cacheRead: response.usage.input_tokens_details?.cached_tokens ?? undefined,
755
+ cacheWrite: undefined,
756
+ },
757
+ outputTokens: {
758
+ total: response.usage.output_tokens,
759
+ text: undefined,
760
+ reasoning: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
761
+ },
762
+ raw: response.usage,
763
+ },
764
+ request: { body },
765
+ response: {
766
+ id: response.id,
767
+ timestamp: new Date(response.created_at * 1000),
768
+ modelId: response.model,
769
+ headers: responseHeaders,
770
+ body: rawResponse,
771
+ },
772
+ providerMetadata,
773
+ warnings,
774
+ }
775
+ }
776
+
777
+ async doStream(options: LanguageModelV3CallOptions) {
778
+ const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
779
+
780
+ const { responseHeaders, value: response } = await postJsonToApi({
781
+ url: this.config.url({
782
+ path: "/responses",
783
+ modelId: this.modelId,
784
+ }),
785
+ headers: combineHeaders(this.config.headers(), options.headers),
786
+ body: {
787
+ ...body,
788
+ stream: true,
789
+ },
790
+ failedResponseHandler: openaiFailedResponseHandler,
791
+ successfulResponseHandler: createEventSourceResponseHandler(openaiResponsesChunkSchema),
792
+ abortSignal: options.abortSignal,
793
+ fetch: this.config.fetch,
794
+ })
795
+
796
+ // oxlint-disable-next-line no-this-alias -- needed for closure scope inside generator
797
+ const self = this
798
+
799
+ let finishReason: {
800
+ unified: ReturnType<typeof mapOpenAIResponseFinishReason>
801
+ raw: string | undefined
802
+ } = {
803
+ unified: "other",
804
+ raw: undefined,
805
+ }
806
+ const usage: {
807
+ inputTokens: number | undefined
808
+ outputTokens: number | undefined
809
+ totalTokens: number | undefined
810
+ reasoningTokens: number | undefined
811
+ cachedInputTokens: number | undefined
812
+ } = {
813
+ inputTokens: undefined,
814
+ outputTokens: undefined,
815
+ totalTokens: undefined,
816
+ reasoningTokens: undefined,
817
+ cachedInputTokens: undefined,
818
+ }
819
+ const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
820
+ let responseId: string | null = null
821
+ const ongoingToolCalls: Record<
822
+ number,
823
+ | {
824
+ toolName: string
825
+ toolCallId: string
826
+ codeInterpreter?: {
827
+ containerId: string
828
+ }
829
+ }
830
+ | undefined
831
+ > = {}
832
+
833
+ // flag that checks if there have been client-side tool calls (not executed by openai)
834
+ let hasFunctionCall = false
835
+
836
+ // Track reasoning by output_index instead of item_id
837
+ // GitHub Copilot rotates encrypted item IDs on every event
838
+ const activeReasoning: Record<
839
+ number,
840
+ {
841
+ canonicalId: string // the item.id from output_item.added
842
+ encryptedContent?: string | null
843
+ summaryParts: number[]
844
+ }
845
+ > = {}
846
+
847
+ // Track current active reasoning output_index for correlating summary events
848
+ let currentReasoningOutputIndex: number | null = null
849
+
850
+ // Track a stable text part id for the current assistant message.
851
+ // Copilot may change item_id across text deltas; normalize to one id.
852
+ let currentTextId: string | null = null
853
+
854
+ let serviceTier: string | undefined
855
+
856
+ return {
857
+ stream: response.pipeThrough(
858
+ new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV3StreamPart>({
859
+ start(controller) {
860
+ controller.enqueue({ type: "stream-start", warnings })
861
+ },
862
+
863
+ transform(chunk, controller) {
864
+ if (options.includeRawChunks) {
865
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue })
866
+ }
867
+
868
+ // handle failed chunk parsing / validation:
869
+ if (!chunk.success) {
870
+ finishReason = {
871
+ unified: "error",
872
+ raw: undefined,
873
+ }
874
+ controller.enqueue({ type: "error", error: chunk.error })
875
+ return
876
+ }
877
+
878
+ const value = chunk.value
879
+
880
+ if (isResponseOutputItemAddedChunk(value)) {
881
+ if (value.item.type === "function_call") {
882
+ ongoingToolCalls[value.output_index] = {
883
+ toolName: value.item.name,
884
+ toolCallId: value.item.call_id,
885
+ }
886
+
887
+ controller.enqueue({
888
+ type: "tool-input-start",
889
+ id: value.item.call_id,
890
+ toolName: value.item.name,
891
+ })
892
+ } else if (value.item.type === "web_search_call") {
893
+ ongoingToolCalls[value.output_index] = {
894
+ toolName: webSearchToolName ?? "web_search",
895
+ toolCallId: value.item.id,
896
+ }
897
+
898
+ controller.enqueue({
899
+ type: "tool-input-start",
900
+ id: value.item.id,
901
+ toolName: webSearchToolName ?? "web_search",
902
+ })
903
+ } else if (value.item.type === "computer_call") {
904
+ ongoingToolCalls[value.output_index] = {
905
+ toolName: "computer_use",
906
+ toolCallId: value.item.id,
907
+ }
908
+
909
+ controller.enqueue({
910
+ type: "tool-input-start",
911
+ id: value.item.id,
912
+ toolName: "computer_use",
913
+ })
914
+ } else if (value.item.type === "code_interpreter_call") {
915
+ ongoingToolCalls[value.output_index] = {
916
+ toolName: "code_interpreter",
917
+ toolCallId: value.item.id,
918
+ codeInterpreter: {
919
+ containerId: value.item.container_id,
920
+ },
921
+ }
922
+
923
+ controller.enqueue({
924
+ type: "tool-input-start",
925
+ id: value.item.id,
926
+ toolName: "code_interpreter",
927
+ })
928
+
929
+ controller.enqueue({
930
+ type: "tool-input-delta",
931
+ id: value.item.id,
932
+ delta: `{"containerId":"${value.item.container_id}","code":"`,
933
+ })
934
+ } else if (value.item.type === "file_search_call") {
935
+ controller.enqueue({
936
+ type: "tool-call",
937
+ toolCallId: value.item.id,
938
+ toolName: "file_search",
939
+ input: "{}",
940
+ providerExecuted: true,
941
+ })
942
+ } else if (value.item.type === "image_generation_call") {
943
+ controller.enqueue({
944
+ type: "tool-call",
945
+ toolCallId: value.item.id,
946
+ toolName: "image_generation",
947
+ input: "{}",
948
+ providerExecuted: true,
949
+ })
950
+ } else if (value.item.type === "message") {
951
+ // Start a stable text part for this assistant message
952
+ currentTextId = value.item.id
953
+ controller.enqueue({
954
+ type: "text-start",
955
+ id: value.item.id,
956
+ providerMetadata: {
957
+ openai: {
958
+ itemId: value.item.id,
959
+ },
960
+ },
961
+ })
962
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
963
+ activeReasoning[value.output_index] = {
964
+ canonicalId: value.item.id,
965
+ encryptedContent: value.item.encrypted_content,
966
+ summaryParts: [0],
967
+ }
968
+ currentReasoningOutputIndex = value.output_index
969
+
970
+ controller.enqueue({
971
+ type: "reasoning-start",
972
+ id: `${value.item.id}:0`,
973
+ providerMetadata: {
974
+ openai: {
975
+ itemId: value.item.id,
976
+ reasoningEncryptedContent: value.item.encrypted_content ?? null,
977
+ },
978
+ },
979
+ })
980
+ }
981
+ } else if (isResponseOutputItemDoneChunk(value)) {
982
+ if (value.item.type === "function_call") {
983
+ ongoingToolCalls[value.output_index] = undefined
984
+ hasFunctionCall = true
985
+
986
+ controller.enqueue({
987
+ type: "tool-input-end",
988
+ id: value.item.call_id,
989
+ })
990
+
991
+ controller.enqueue({
992
+ type: "tool-call",
993
+ toolCallId: value.item.call_id,
994
+ toolName: value.item.name,
995
+ input: value.item.arguments,
996
+ providerMetadata: {
997
+ openai: {
998
+ itemId: value.item.id,
999
+ },
1000
+ },
1001
+ })
1002
+ } else if (value.item.type === "web_search_call") {
1003
+ ongoingToolCalls[value.output_index] = undefined
1004
+
1005
+ controller.enqueue({
1006
+ type: "tool-input-end",
1007
+ id: value.item.id,
1008
+ })
1009
+
1010
+ controller.enqueue({
1011
+ type: "tool-call",
1012
+ toolCallId: value.item.id,
1013
+ toolName: "web_search",
1014
+ input: JSON.stringify({ action: value.item.action }),
1015
+ providerExecuted: true,
1016
+ })
1017
+
1018
+ controller.enqueue({
1019
+ type: "tool-result",
1020
+ toolCallId: value.item.id,
1021
+ toolName: "web_search",
1022
+ result: { status: value.item.status },
1023
+ })
1024
+ } else if (value.item.type === "computer_call") {
1025
+ ongoingToolCalls[value.output_index] = undefined
1026
+
1027
+ controller.enqueue({
1028
+ type: "tool-input-end",
1029
+ id: value.item.id,
1030
+ })
1031
+
1032
+ controller.enqueue({
1033
+ type: "tool-call",
1034
+ toolCallId: value.item.id,
1035
+ toolName: "computer_use",
1036
+ input: "",
1037
+ providerExecuted: true,
1038
+ })
1039
+
1040
+ controller.enqueue({
1041
+ type: "tool-result",
1042
+ toolCallId: value.item.id,
1043
+ toolName: "computer_use",
1044
+ result: {
1045
+ type: "computer_use_tool_result",
1046
+ status: value.item.status || "completed",
1047
+ },
1048
+ })
1049
+ } else if (value.item.type === "file_search_call") {
1050
+ ongoingToolCalls[value.output_index] = undefined
1051
+
1052
+ controller.enqueue({
1053
+ type: "tool-result",
1054
+ toolCallId: value.item.id,
1055
+ toolName: "file_search",
1056
+ result: {
1057
+ queries: value.item.queries,
1058
+ results:
1059
+ value.item.results?.map((result) => ({
1060
+ attributes: result.attributes as Record<string, JSONValue>,
1061
+ fileId: result.file_id,
1062
+ filename: result.filename,
1063
+ score: result.score,
1064
+ text: result.text,
1065
+ })) ?? null,
1066
+ } satisfies z.infer<typeof fileSearchOutputSchema>,
1067
+ })
1068
+ } else if (value.item.type === "code_interpreter_call") {
1069
+ ongoingToolCalls[value.output_index] = undefined
1070
+
1071
+ controller.enqueue({
1072
+ type: "tool-result",
1073
+ toolCallId: value.item.id,
1074
+ toolName: "code_interpreter",
1075
+ result: {
1076
+ outputs: value.item.outputs,
1077
+ } satisfies z.infer<typeof codeInterpreterOutputSchema>,
1078
+ })
1079
+ } else if (value.item.type === "image_generation_call") {
1080
+ controller.enqueue({
1081
+ type: "tool-result",
1082
+ toolCallId: value.item.id,
1083
+ toolName: "image_generation",
1084
+ result: {
1085
+ result: value.item.result,
1086
+ } satisfies z.infer<typeof imageGenerationOutputSchema>,
1087
+ })
1088
+ } else if (value.item.type === "local_shell_call") {
1089
+ ongoingToolCalls[value.output_index] = undefined
1090
+
1091
+ controller.enqueue({
1092
+ type: "tool-call",
1093
+ toolCallId: value.item.call_id,
1094
+ toolName: "local_shell",
1095
+ input: JSON.stringify({
1096
+ action: {
1097
+ type: "exec",
1098
+ command: value.item.action.command,
1099
+ timeoutMs: value.item.action.timeout_ms,
1100
+ user: value.item.action.user,
1101
+ workingDirectory: value.item.action.working_directory,
1102
+ env: value.item.action.env,
1103
+ },
1104
+ } satisfies z.infer<typeof localShellInputSchema>),
1105
+ providerMetadata: {
1106
+ openai: { itemId: value.item.id },
1107
+ },
1108
+ })
1109
+ } else if (value.item.type === "message") {
1110
+ if (currentTextId) {
1111
+ controller.enqueue({
1112
+ type: "text-end",
1113
+ id: currentTextId,
1114
+ })
1115
+ currentTextId = null
1116
+ }
1117
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
1118
+ const activeReasoningPart = activeReasoning[value.output_index]
1119
+ if (activeReasoningPart) {
1120
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
1121
+ controller.enqueue({
1122
+ type: "reasoning-end",
1123
+ id: `${activeReasoningPart.canonicalId}:${summaryIndex}`,
1124
+ providerMetadata: {
1125
+ openai: {
1126
+ itemId: activeReasoningPart.canonicalId,
1127
+ reasoningEncryptedContent: value.item.encrypted_content ?? null,
1128
+ },
1129
+ },
1130
+ })
1131
+ }
1132
+ delete activeReasoning[value.output_index]
1133
+ if (currentReasoningOutputIndex === value.output_index) {
1134
+ currentReasoningOutputIndex = null
1135
+ }
1136
+ }
1137
+ }
1138
+ } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
1139
+ const toolCall = ongoingToolCalls[value.output_index]
1140
+
1141
+ if (toolCall != null) {
1142
+ controller.enqueue({
1143
+ type: "tool-input-delta",
1144
+ id: toolCall.toolCallId,
1145
+ delta: value.delta,
1146
+ })
1147
+ }
1148
+ } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
1149
+ controller.enqueue({
1150
+ type: "tool-result",
1151
+ toolCallId: value.item_id,
1152
+ toolName: "image_generation",
1153
+ result: {
1154
+ result: value.partial_image_b64,
1155
+ } satisfies z.infer<typeof imageGenerationOutputSchema>,
1156
+ })
1157
+ } else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
1158
+ const toolCall = ongoingToolCalls[value.output_index]
1159
+
1160
+ if (toolCall != null) {
1161
+ controller.enqueue({
1162
+ type: "tool-input-delta",
1163
+ id: toolCall.toolCallId,
1164
+ // The delta is code, which is embedding in a JSON string.
1165
+ // To escape it, we use JSON.stringify and slice to remove the outer quotes.
1166
+ delta: JSON.stringify(value.delta).slice(1, -1),
1167
+ })
1168
+ }
1169
+ } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
1170
+ const toolCall = ongoingToolCalls[value.output_index]
1171
+
1172
+ if (toolCall != null) {
1173
+ controller.enqueue({
1174
+ type: "tool-input-delta",
1175
+ id: toolCall.toolCallId,
1176
+ delta: '"}',
1177
+ })
1178
+
1179
+ controller.enqueue({
1180
+ type: "tool-input-end",
1181
+ id: toolCall.toolCallId,
1182
+ })
1183
+
1184
+ // immediately send the tool call after the input end:
1185
+ controller.enqueue({
1186
+ type: "tool-call",
1187
+ toolCallId: toolCall.toolCallId,
1188
+ toolName: "code_interpreter",
1189
+ input: JSON.stringify({
1190
+ code: value.code,
1191
+ containerId: toolCall.codeInterpreter!.containerId,
1192
+ } satisfies z.infer<typeof codeInterpreterInputSchema>),
1193
+ providerExecuted: true,
1194
+ })
1195
+ }
1196
+ } else if (isResponseCreatedChunk(value)) {
1197
+ responseId = value.response.id
1198
+ controller.enqueue({
1199
+ type: "response-metadata",
1200
+ id: value.response.id,
1201
+ timestamp: new Date(value.response.created_at * 1000),
1202
+ modelId: value.response.model,
1203
+ })
1204
+ } else if (isTextDeltaChunk(value)) {
1205
+ // Ensure a text-start exists, and normalize deltas to a stable id
1206
+ if (!currentTextId) {
1207
+ currentTextId = value.item_id
1208
+ controller.enqueue({
1209
+ type: "text-start",
1210
+ id: currentTextId,
1211
+ providerMetadata: {
1212
+ openai: { itemId: value.item_id },
1213
+ },
1214
+ })
1215
+ }
1216
+
1217
+ controller.enqueue({
1218
+ type: "text-delta",
1219
+ id: currentTextId,
1220
+ delta: value.delta,
1221
+ })
1222
+
1223
+ if (options.providerOptions?.openai?.logprobs && value.logprobs) {
1224
+ logprobs.push(value.logprobs)
1225
+ }
1226
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
1227
+ const activeItem =
1228
+ currentReasoningOutputIndex !== null ? activeReasoning[currentReasoningOutputIndex] : null
1229
+
1230
+ // the first reasoning start is pushed in isResponseOutputItemAddedReasoningChunk.
1231
+ if (activeItem && value.summary_index > 0) {
1232
+ activeItem.summaryParts.push(value.summary_index)
1233
+
1234
+ controller.enqueue({
1235
+ type: "reasoning-start",
1236
+ id: `${activeItem.canonicalId}:${value.summary_index}`,
1237
+ providerMetadata: {
1238
+ openai: {
1239
+ itemId: activeItem.canonicalId,
1240
+ reasoningEncryptedContent: activeItem.encryptedContent ?? null,
1241
+ },
1242
+ },
1243
+ })
1244
+ }
1245
+ } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
1246
+ const activeItem =
1247
+ currentReasoningOutputIndex !== null ? activeReasoning[currentReasoningOutputIndex] : null
1248
+
1249
+ if (activeItem) {
1250
+ controller.enqueue({
1251
+ type: "reasoning-delta",
1252
+ id: `${activeItem.canonicalId}:${value.summary_index}`,
1253
+ delta: value.delta,
1254
+ providerMetadata: {
1255
+ openai: {
1256
+ itemId: activeItem.canonicalId,
1257
+ },
1258
+ },
1259
+ })
1260
+ }
1261
+ } else if (isResponseFinishedChunk(value)) {
1262
+ finishReason = {
1263
+ unified: mapOpenAIResponseFinishReason({
1264
+ finishReason: value.response.incomplete_details?.reason,
1265
+ hasFunctionCall,
1266
+ }),
1267
+ raw: value.response.incomplete_details?.reason ?? undefined,
1268
+ }
1269
+ usage.inputTokens = value.response.usage.input_tokens
1270
+ usage.outputTokens = value.response.usage.output_tokens
1271
+ usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens
1272
+ usage.reasoningTokens = value.response.usage.output_tokens_details?.reasoning_tokens ?? undefined
1273
+ usage.cachedInputTokens = value.response.usage.input_tokens_details?.cached_tokens ?? undefined
1274
+ if (typeof value.response.service_tier === "string") {
1275
+ serviceTier = value.response.service_tier
1276
+ }
1277
+ } else if (isResponseAnnotationAddedChunk(value)) {
1278
+ if (value.annotation.type === "url_citation") {
1279
+ controller.enqueue({
1280
+ type: "source",
1281
+ sourceType: "url",
1282
+ id: self.config.generateId?.() ?? generateId(),
1283
+ url: value.annotation.url,
1284
+ title: value.annotation.title,
1285
+ })
1286
+ } else if (value.annotation.type === "file_citation") {
1287
+ controller.enqueue({
1288
+ type: "source",
1289
+ sourceType: "document",
1290
+ id: self.config.generateId?.() ?? generateId(),
1291
+ mediaType: "text/plain",
1292
+ title: value.annotation.quote ?? value.annotation.filename ?? "Document",
1293
+ filename: value.annotation.filename ?? value.annotation.file_id,
1294
+ })
1295
+ }
1296
+ } else if (isErrorChunk(value)) {
1297
+ controller.enqueue({ type: "error", error: value })
1298
+ }
1299
+ },
1300
+
1301
+ flush(controller) {
1302
+ // Close any dangling text part
1303
+ if (currentTextId) {
1304
+ controller.enqueue({ type: "text-end", id: currentTextId })
1305
+ currentTextId = null
1306
+ }
1307
+
1308
+ const providerMetadata: SharedV3ProviderMetadata = {
1309
+ openai: {
1310
+ responseId,
1311
+ },
1312
+ }
1313
+
1314
+ if (logprobs.length > 0) {
1315
+ providerMetadata.openai.logprobs = logprobs
1316
+ }
1317
+
1318
+ if (serviceTier !== undefined) {
1319
+ providerMetadata.openai.serviceTier = serviceTier
1320
+ }
1321
+
1322
+ controller.enqueue({
1323
+ type: "finish",
1324
+ finishReason,
1325
+ usage: {
1326
+ inputTokens: {
1327
+ total: usage.inputTokens,
1328
+ noCache:
1329
+ usage.inputTokens != null && usage.cachedInputTokens != null
1330
+ ? usage.inputTokens - usage.cachedInputTokens
1331
+ : undefined,
1332
+ cacheRead: usage.cachedInputTokens,
1333
+ cacheWrite: undefined,
1334
+ },
1335
+ outputTokens: {
1336
+ total: usage.outputTokens,
1337
+ text: undefined,
1338
+ reasoning: usage.reasoningTokens,
1339
+ },
1340
+ raw: {
1341
+ input_tokens: usage.inputTokens,
1342
+ output_tokens: usage.outputTokens,
1343
+ total_tokens: usage.totalTokens,
1344
+ },
1345
+ },
1346
+ providerMetadata,
1347
+ })
1348
+ },
1349
+ }),
1350
+ ),
1351
+ request: { body },
1352
+ response: { headers: responseHeaders },
1353
+ }
1354
+ }
1355
+ }
1356
+
1357
// Token-usage payload carried by `response.completed` / `response.incomplete`.
const usageSchema = z.object({
  input_tokens: z.number(),
  // cached_tokens: portion of input served from the prompt cache (may be absent).
  input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
  output_tokens: z.number(),
  // reasoning_tokens: portion of output spent on reasoning (may be absent).
  output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish(),
})
1363
+
1364
// `response.output_text.delta`: incremental assistant text for item `item_id`.
const textDeltaChunkSchema = z.object({
  type: z.literal("response.output_text.delta"),
  item_id: z.string(),
  delta: z.string(),
  // Optional; only collected when logprobs were requested via provider options.
  logprobs: LOGPROBS_SCHEMA.nullish(),
})
1370
+
1371
// `error`: server-side stream error; forwarded as an `error` stream part.
const errorChunkSchema = z.object({
  type: z.literal("error"),
  code: z.string(),
  message: z.string(),
  param: z.string().nullish(),
  sequence_number: z.number(),
})
1378
+
1379
// Terminal events: `response.completed` (normal end) or `response.incomplete`
// (stopped early — see `incomplete_details.reason`). Both carry final usage.
const responseFinishedChunkSchema = z.object({
  type: z.enum(["response.completed", "response.incomplete"]),
  response: z.object({
    incomplete_details: z.object({ reason: z.string() }).nullish(),
    usage: usageSchema,
    service_tier: z.string().nullish(),
  }),
})
1387
+
1388
// `response.created`: first event of a stream; supplies the response id,
// creation timestamp (epoch seconds) and resolved model id.
const responseCreatedChunkSchema = z.object({
  type: z.literal("response.created"),
  response: z.object({
    id: z.string(),
    created_at: z.number(),
    model: z.string(),
    service_tier: z.string().nullish(),
  }),
})
1397
+
1398
// `response.output_item.added`: a new output item began streaming at
// `output_index`. Only item variants the transform handles are modeled;
// unknown chunk types fall through to the loose fallback in
// `openaiResponsesChunkSchema`.
const responseOutputItemAddedSchema = z.object({
  type: z.literal("response.output_item.added"),
  output_index: z.number(),
  item: z.discriminatedUnion("type", [
    // Assistant message container; its text arrives via output_text deltas.
    z.object({
      type: z.literal("message"),
      id: z.string(),
    }),
    // Reasoning item; encrypted_content is only present when requested.
    z.object({
      type: z.literal("reasoning"),
      id: z.string(),
      encrypted_content: z.string().nullish(),
    }),
    z.object({
      type: z.literal("function_call"),
      id: z.string(),
      call_id: z.string(),
      name: z.string(),
      arguments: z.string(),
    }),
    z.object({
      type: z.literal("web_search_call"),
      id: z.string(),
      status: z.string(),
      action: z
        .object({
          type: z.literal("search"),
          query: z.string().optional(),
        })
        .nullish(),
    }),
    z.object({
      type: z.literal("computer_call"),
      id: z.string(),
      status: z.string(),
    }),
    z.object({
      type: z.literal("file_search_call"),
      id: z.string(),
    }),
    z.object({
      type: z.literal("image_generation_call"),
      id: z.string(),
    }),
    z.object({
      type: z.literal("code_interpreter_call"),
      id: z.string(),
      container_id: z.string(),
      code: z.string().nullable(),
      outputs: z
        .array(
          z.discriminatedUnion("type", [
            z.object({ type: z.literal("logs"), logs: z.string() }),
            z.object({ type: z.literal("image"), url: z.string() }),
          ]),
        )
        .nullable(),
      status: z.string(),
    }),
  ]),
})
1459
+
1460
// `response.output_item.done`: the output item at `output_index` finished
// streaming. The `*CallItem` schemas referenced below are defined earlier in
// this file.
const responseOutputItemDoneSchema = z.object({
  type: z.literal("response.output_item.done"),
  output_index: z.number(),
  item: z.discriminatedUnion("type", [
    z.object({
      type: z.literal("message"),
      id: z.string(),
    }),
    z.object({
      type: z.literal("reasoning"),
      id: z.string(),
      encrypted_content: z.string().nullish(),
    }),
    z.object({
      type: z.literal("function_call"),
      id: z.string(),
      call_id: z.string(),
      name: z.string(),
      arguments: z.string(),
      status: z.literal("completed"),
    }),
    codeInterpreterCallItem,
    imageGenerationCallItem,
    webSearchCallItem,
    fileSearchCallItem,
    localShellCallItem,
    z.object({
      type: z.literal("computer_call"),
      id: z.string(),
      status: z.literal("completed"),
    }),
  ]),
})
1493
+
1494
// `response.function_call_arguments.delta`: incremental JSON for the
// arguments of the function call at `output_index`.
const responseFunctionCallArgumentsDeltaSchema = z.object({
  type: z.literal("response.function_call_arguments.delta"),
  item_id: z.string(),
  output_index: z.number(),
  delta: z.string(),
})
1500
+
1501
// `response.image_generation_call.partial_image`: intermediate image preview,
// base64-encoded in `partial_image_b64`.
const responseImageGenerationCallPartialImageSchema = z.object({
  type: z.literal("response.image_generation_call.partial_image"),
  item_id: z.string(),
  output_index: z.number(),
  partial_image_b64: z.string(),
})
1507
+
1508
// `response.code_interpreter_call_code.delta`: incremental code text for the
// code-interpreter call at `output_index`.
const responseCodeInterpreterCallCodeDeltaSchema = z.object({
  type: z.literal("response.code_interpreter_call_code.delta"),
  item_id: z.string(),
  output_index: z.number(),
  delta: z.string(),
})
1514
+
1515
// `response.code_interpreter_call_code.done`: the complete code text for the
// code-interpreter call at `output_index`.
const responseCodeInterpreterCallCodeDoneSchema = z.object({
  type: z.literal("response.code_interpreter_call_code.done"),
  item_id: z.string(),
  output_index: z.number(),
  code: z.string(),
})
1521
+
1522
// `response.output_text.annotation.added`: a citation attached to streamed
// text — either a URL (web search) or a file (file search) citation.
const responseAnnotationAddedSchema = z.object({
  type: z.literal("response.output_text.annotation.added"),
  annotation: z.discriminatedUnion("type", [
    z.object({
      type: z.literal("url_citation"),
      url: z.string(),
      title: z.string(),
    }),
    z.object({
      type: z.literal("file_citation"),
      file_id: z.string(),
      filename: z.string().nullish(),
      index: z.number().nullish(),
      start_index: z.number().nullish(),
      end_index: z.number().nullish(),
      quote: z.string().nullish(),
    }),
  ]),
})
1541
+
1542
// `response.reasoning_summary_part.added`: a new reasoning-summary part
// (`summary_index`) started for reasoning item `item_id`.
const responseReasoningSummaryPartAddedSchema = z.object({
  type: z.literal("response.reasoning_summary_part.added"),
  item_id: z.string(),
  summary_index: z.number(),
})
1547
+
1548
// `response.reasoning_summary_text.delta`: incremental text for reasoning
// summary part `summary_index` of item `item_id`.
const responseReasoningSummaryTextDeltaSchema = z.object({
  type: z.literal("response.reasoning_summary_text.delta"),
  item_id: z.string(),
  summary_index: z.number(),
  delta: z.string(),
})
1554
+
1555
// Union of all streaming chunk shapes the transform understands. The loose
// `{ type }` object must stay LAST: it is a catch-all so unknown chunk types
// still parse (and are ignored) instead of failing validation.
const openaiResponsesChunkSchema = z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseImageGenerationCallPartialImageSchema,
  responseCodeInterpreterCallCodeDeltaSchema,
  responseCodeInterpreterCallCodeDoneSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  z.object({ type: z.string() }).loose(), // fallback for unknown chunks
])
1571
+
1572
/** Narrows a `{ type: ... }`-discriminated union `T` to the member whose `type` is `K`. */
type ExtractByType<T, K extends T extends { type: infer U } ? U : never> = T extends { type: K } ? T : never
1573
+
1574
+ function isTextDeltaChunk(
1575
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1576
+ ): chunk is z.infer<typeof textDeltaChunkSchema> {
1577
+ return chunk.type === "response.output_text.delta"
1578
+ }
1579
+
1580
+ function isResponseOutputItemDoneChunk(
1581
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1582
+ ): chunk is z.infer<typeof responseOutputItemDoneSchema> {
1583
+ return chunk.type === "response.output_item.done"
1584
+ }
1585
+
1586
+ function isResponseOutputItemDoneReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
1587
+ typeof responseOutputItemDoneSchema
1588
+ > & {
1589
+ item: ExtractByType<z.infer<typeof responseOutputItemDoneSchema>["item"], "reasoning">
1590
+ } {
1591
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning"
1592
+ }
1593
+
1594
+ function isResponseFinishedChunk(
1595
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1596
+ ): chunk is z.infer<typeof responseFinishedChunkSchema> {
1597
+ return chunk.type === "response.completed" || chunk.type === "response.incomplete"
1598
+ }
1599
+
1600
+ function isResponseCreatedChunk(
1601
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1602
+ ): chunk is z.infer<typeof responseCreatedChunkSchema> {
1603
+ return chunk.type === "response.created"
1604
+ }
1605
+
1606
+ function isResponseFunctionCallArgumentsDeltaChunk(
1607
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1608
+ ): chunk is z.infer<typeof responseFunctionCallArgumentsDeltaSchema> {
1609
+ return chunk.type === "response.function_call_arguments.delta"
1610
+ }
1611
+ function isResponseImageGenerationCallPartialImageChunk(
1612
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1613
+ ): chunk is z.infer<typeof responseImageGenerationCallPartialImageSchema> {
1614
+ return chunk.type === "response.image_generation_call.partial_image"
1615
+ }
1616
+
1617
+ function isResponseCodeInterpreterCallCodeDeltaChunk(
1618
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1619
+ ): chunk is z.infer<typeof responseCodeInterpreterCallCodeDeltaSchema> {
1620
+ return chunk.type === "response.code_interpreter_call_code.delta"
1621
+ }
1622
+
1623
+ function isResponseCodeInterpreterCallCodeDoneChunk(
1624
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1625
+ ): chunk is z.infer<typeof responseCodeInterpreterCallCodeDoneSchema> {
1626
+ return chunk.type === "response.code_interpreter_call_code.done"
1627
+ }
1628
+
1629
+ function isResponseOutputItemAddedChunk(
1630
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1631
+ ): chunk is z.infer<typeof responseOutputItemAddedSchema> {
1632
+ return chunk.type === "response.output_item.added"
1633
+ }
1634
+
1635
+ function isResponseOutputItemAddedReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
1636
+ typeof responseOutputItemAddedSchema
1637
+ > & {
1638
+ item: ExtractByType<z.infer<typeof responseOutputItemAddedSchema>["item"], "reasoning">
1639
+ } {
1640
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning"
1641
+ }
1642
+
1643
+ function isResponseAnnotationAddedChunk(
1644
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1645
+ ): chunk is z.infer<typeof responseAnnotationAddedSchema> {
1646
+ return chunk.type === "response.output_text.annotation.added"
1647
+ }
1648
+
1649
+ function isResponseReasoningSummaryPartAddedChunk(
1650
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1651
+ ): chunk is z.infer<typeof responseReasoningSummaryPartAddedSchema> {
1652
+ return chunk.type === "response.reasoning_summary_part.added"
1653
+ }
1654
+
1655
+ function isResponseReasoningSummaryTextDeltaChunk(
1656
+ chunk: z.infer<typeof openaiResponsesChunkSchema>,
1657
+ ): chunk is z.infer<typeof responseReasoningSummaryTextDeltaSchema> {
1658
+ return chunk.type === "response.reasoning_summary_text.delta"
1659
+ }
1660
+
1661
+ function isErrorChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<typeof errorChunkSchema> {
1662
+ return chunk.type === "error"
1663
+ }
1664
+
1665
// Per-model capability flags derived from the model id (see getResponsesModelConfig).
type ResponsesModelConfig = {
  // Whether the model is treated as a reasoning model.
  isReasoningModel: boolean
  // How system messages are passed: dropped, as role "system", or as role "developer".
  systemMessageMode: "remove" | "system" | "developer"
  // Whether automatic truncation must be requested for this model.
  requiredAutoTruncation: boolean
  // Whether the "flex" service tier is available for this model.
  supportsFlexProcessing: boolean
  // Whether the "priority" service tier is available for this model.
  supportsPriorityProcessing: boolean
}
1672
+
1673
+ function getResponsesModelConfig(modelId: string): ResponsesModelConfig {
1674
+ const supportsFlexProcessing =
1675
+ modelId.startsWith("o3") ||
1676
+ modelId.startsWith("o4-mini") ||
1677
+ (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"))
1678
+ const supportsPriorityProcessing =
1679
+ modelId.startsWith("gpt-4") ||
1680
+ modelId.startsWith("gpt-5-mini") ||
1681
+ (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat")) ||
1682
+ modelId.startsWith("o3") ||
1683
+ modelId.startsWith("o4-mini")
1684
+ const defaults = {
1685
+ requiredAutoTruncation: false,
1686
+ systemMessageMode: "system" as const,
1687
+ supportsFlexProcessing,
1688
+ supportsPriorityProcessing,
1689
+ }
1690
+
1691
+ // gpt-5-chat models are non-reasoning
1692
+ if (modelId.startsWith("gpt-5-chat")) {
1693
+ return {
1694
+ ...defaults,
1695
+ isReasoningModel: false,
1696
+ }
1697
+ }
1698
+
1699
+ // o series reasoning models:
1700
+ if (
1701
+ modelId.startsWith("o") ||
1702
+ modelId.startsWith("gpt-5") ||
1703
+ modelId.startsWith("codex-") ||
1704
+ modelId.startsWith("computer-use")
1705
+ ) {
1706
+ if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
1707
+ return {
1708
+ ...defaults,
1709
+ isReasoningModel: true,
1710
+ systemMessageMode: "remove",
1711
+ }
1712
+ }
1713
+
1714
+ return {
1715
+ ...defaults,
1716
+ isReasoningModel: true,
1717
+ systemMessageMode: "developer",
1718
+ }
1719
+ }
1720
+
1721
+ // gpt models:
1722
+ return {
1723
+ ...defaults,
1724
+ isReasoningModel: false,
1725
+ }
1726
+ }
1727
+
1728
// Provider options accepted under `providerOptions.openai` for the Responses API.
// TODO AI SDK 6: use optional here instead of nullish
const openaiResponsesProviderOptionsSchema = z.object({
  // Extra response fields to include (e.g. encrypted reasoning, logprobs).
  include: z
    .array(z.enum(["reasoning.encrypted_content", "file_search_call.results", "message.output_text.logprobs"]))
    .nullish(),
  // System/developer instructions passed alongside the prompt.
  instructions: z.string().nullish(),

  /**
   * Return the log probabilities of the tokens.
   *
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   *
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated.
   *
   * @see https://platform.openai.com/docs/api-reference/responses/create
   * @see https://cookbook.openai.com/examples/using_logprobs
   */
  logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),

  /**
   * The maximum number of total calls to built-in tools that can be processed in a response.
   * This maximum number applies across all built-in tool calls, not per individual tool.
   * Any further attempts to call a tool by the model will be ignored.
   */
  maxToolCalls: z.number().nullish(),

  metadata: z.any().nullish(),
  parallelToolCalls: z.boolean().nullish(),
  // Chain this request onto a previous stored response.
  previousResponseId: z.string().nullish(),
  promptCacheKey: z.string().nullish(),
  reasoningEffort: z.string().nullish(),
  reasoningSummary: z.string().nullish(),
  safetyIdentifier: z.string().nullish(),
  serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
  // Whether the response should be stored server-side.
  store: z.boolean().nullish(),
  strictJsonSchema: z.boolean().nullish(),
  textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
  user: z.string().nullish(),
})
1769
+
1770
/** Inferred TypeScript type of the `providerOptions.openai` options accepted by this model. */
export type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>