rird 1.0.200
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AGENTS.md +27 -0
- package/Dockerfile +18 -0
- package/README.md +15 -0
- package/bin/opencode +336 -0
- package/bin/pty-wrapper.js +285 -0
- package/bunfig.toml +4 -0
- package/facebook_ads_library.png +0 -0
- package/nul`nif +0 -0
- package/package.json +111 -0
- package/parsers-config.ts +239 -0
- package/rird-1.0.199.tgz +0 -0
- package/script/build-windows.ts +54 -0
- package/script/build.ts +167 -0
- package/script/postinstall.mjs +544 -0
- package/script/publish-registries.ts +187 -0
- package/script/publish.ts +72 -0
- package/script/schema.ts +47 -0
- package/src/acp/README.md +164 -0
- package/src/acp/agent.ts +1063 -0
- package/src/acp/session.ts +101 -0
- package/src/acp/types.ts +22 -0
- package/src/agent/agent.ts +367 -0
- package/src/agent/generate.txt +75 -0
- package/src/agent/prompt/compaction.txt +12 -0
- package/src/agent/prompt/explore.txt +18 -0
- package/src/agent/prompt/summary.txt +10 -0
- package/src/agent/prompt/title.txt +36 -0
- package/src/auth/index.ts +70 -0
- package/src/bun/index.ts +114 -0
- package/src/bus/bus-event.ts +43 -0
- package/src/bus/global.ts +10 -0
- package/src/bus/index.ts +105 -0
- package/src/cli/bootstrap.ts +17 -0
- package/src/cli/cmd/acp.ts +88 -0
- package/src/cli/cmd/agent.ts +256 -0
- package/src/cli/cmd/auth.ts +391 -0
- package/src/cli/cmd/cmd.ts +7 -0
- package/src/cli/cmd/debug/config.ts +15 -0
- package/src/cli/cmd/debug/file.ts +91 -0
- package/src/cli/cmd/debug/index.ts +43 -0
- package/src/cli/cmd/debug/lsp.ts +48 -0
- package/src/cli/cmd/debug/ripgrep.ts +83 -0
- package/src/cli/cmd/debug/scrap.ts +15 -0
- package/src/cli/cmd/debug/skill.ts +15 -0
- package/src/cli/cmd/debug/snapshot.ts +48 -0
- package/src/cli/cmd/export.ts +88 -0
- package/src/cli/cmd/generate.ts +38 -0
- package/src/cli/cmd/github.ts +1400 -0
- package/src/cli/cmd/import.ts +98 -0
- package/src/cli/cmd/mcp.ts +654 -0
- package/src/cli/cmd/models.ts +77 -0
- package/src/cli/cmd/pr.ts +112 -0
- package/src/cli/cmd/run.ts +368 -0
- package/src/cli/cmd/serve.ts +31 -0
- package/src/cli/cmd/session.ts +106 -0
- package/src/cli/cmd/stats.ts +298 -0
- package/src/cli/cmd/tui/app.tsx +696 -0
- package/src/cli/cmd/tui/attach.ts +30 -0
- package/src/cli/cmd/tui/component/border.tsx +21 -0
- package/src/cli/cmd/tui/component/dialog-agent.tsx +31 -0
- package/src/cli/cmd/tui/component/dialog-command.tsx +124 -0
- package/src/cli/cmd/tui/component/dialog-mcp.tsx +86 -0
- package/src/cli/cmd/tui/component/dialog-model.tsx +245 -0
- package/src/cli/cmd/tui/component/dialog-provider.tsx +224 -0
- package/src/cli/cmd/tui/component/dialog-session-list.tsx +102 -0
- package/src/cli/cmd/tui/component/dialog-session-rename.tsx +31 -0
- package/src/cli/cmd/tui/component/dialog-stash.tsx +86 -0
- package/src/cli/cmd/tui/component/dialog-status.tsx +162 -0
- package/src/cli/cmd/tui/component/dialog-tag.tsx +44 -0
- package/src/cli/cmd/tui/component/dialog-theme-list.tsx +50 -0
- package/src/cli/cmd/tui/component/did-you-know.tsx +85 -0
- package/src/cli/cmd/tui/component/logo.tsx +35 -0
- package/src/cli/cmd/tui/component/prompt/autocomplete.tsx +574 -0
- package/src/cli/cmd/tui/component/prompt/history.tsx +108 -0
- package/src/cli/cmd/tui/component/prompt/index.tsx +1090 -0
- package/src/cli/cmd/tui/component/prompt/stash.tsx +101 -0
- package/src/cli/cmd/tui/component/tips.ts +27 -0
- package/src/cli/cmd/tui/component/todo-item.tsx +32 -0
- package/src/cli/cmd/tui/context/args.tsx +14 -0
- package/src/cli/cmd/tui/context/directory.ts +13 -0
- package/src/cli/cmd/tui/context/exit.tsx +23 -0
- package/src/cli/cmd/tui/context/helper.tsx +25 -0
- package/src/cli/cmd/tui/context/keybind.tsx +101 -0
- package/src/cli/cmd/tui/context/kv.tsx +49 -0
- package/src/cli/cmd/tui/context/local.tsx +354 -0
- package/src/cli/cmd/tui/context/prompt.tsx +18 -0
- package/src/cli/cmd/tui/context/route.tsx +46 -0
- package/src/cli/cmd/tui/context/sdk.tsx +74 -0
- package/src/cli/cmd/tui/context/sync.tsx +372 -0
- package/src/cli/cmd/tui/context/theme/aura.json +69 -0
- package/src/cli/cmd/tui/context/theme/ayu.json +80 -0
- package/src/cli/cmd/tui/context/theme/catppuccin-frappe.json +233 -0
- package/src/cli/cmd/tui/context/theme/catppuccin-macchiato.json +233 -0
- package/src/cli/cmd/tui/context/theme/catppuccin.json +112 -0
- package/src/cli/cmd/tui/context/theme/cobalt2.json +228 -0
- package/src/cli/cmd/tui/context/theme/cursor.json +249 -0
- package/src/cli/cmd/tui/context/theme/dracula.json +219 -0
- package/src/cli/cmd/tui/context/theme/everforest.json +241 -0
- package/src/cli/cmd/tui/context/theme/flexoki.json +237 -0
- package/src/cli/cmd/tui/context/theme/github.json +233 -0
- package/src/cli/cmd/tui/context/theme/gruvbox.json +95 -0
- package/src/cli/cmd/tui/context/theme/kanagawa.json +77 -0
- package/src/cli/cmd/tui/context/theme/lucent-orng.json +227 -0
- package/src/cli/cmd/tui/context/theme/material.json +235 -0
- package/src/cli/cmd/tui/context/theme/matrix.json +77 -0
- package/src/cli/cmd/tui/context/theme/mercury.json +252 -0
- package/src/cli/cmd/tui/context/theme/monokai.json +221 -0
- package/src/cli/cmd/tui/context/theme/nightowl.json +221 -0
- package/src/cli/cmd/tui/context/theme/nord.json +223 -0
- package/src/cli/cmd/tui/context/theme/one-dark.json +84 -0
- package/src/cli/cmd/tui/context/theme/orng.json +245 -0
- package/src/cli/cmd/tui/context/theme/palenight.json +222 -0
- package/src/cli/cmd/tui/context/theme/rird.json +245 -0
- package/src/cli/cmd/tui/context/theme/rosepine.json +234 -0
- package/src/cli/cmd/tui/context/theme/solarized.json +223 -0
- package/src/cli/cmd/tui/context/theme/synthwave84.json +226 -0
- package/src/cli/cmd/tui/context/theme/tokyonight.json +243 -0
- package/src/cli/cmd/tui/context/theme/vercel.json +245 -0
- package/src/cli/cmd/tui/context/theme/vesper.json +218 -0
- package/src/cli/cmd/tui/context/theme/zenburn.json +223 -0
- package/src/cli/cmd/tui/context/theme.tsx +1109 -0
- package/src/cli/cmd/tui/event.ts +40 -0
- package/src/cli/cmd/tui/routes/home.tsx +138 -0
- package/src/cli/cmd/tui/routes/session/dialog-fork-from-timeline.tsx +64 -0
- package/src/cli/cmd/tui/routes/session/dialog-message.tsx +109 -0
- package/src/cli/cmd/tui/routes/session/dialog-subagent.tsx +26 -0
- package/src/cli/cmd/tui/routes/session/dialog-timeline.tsx +47 -0
- package/src/cli/cmd/tui/routes/session/footer.tsx +88 -0
- package/src/cli/cmd/tui/routes/session/header.tsx +125 -0
- package/src/cli/cmd/tui/routes/session/index.tsx +1864 -0
- package/src/cli/cmd/tui/routes/session/sidebar.tsx +318 -0
- package/src/cli/cmd/tui/spawn.ts +60 -0
- package/src/cli/cmd/tui/thread.ts +142 -0
- package/src/cli/cmd/tui/ui/dialog-alert.tsx +57 -0
- package/src/cli/cmd/tui/ui/dialog-confirm.tsx +83 -0
- package/src/cli/cmd/tui/ui/dialog-help.tsx +38 -0
- package/src/cli/cmd/tui/ui/dialog-prompt.tsx +77 -0
- package/src/cli/cmd/tui/ui/dialog-select.tsx +332 -0
- package/src/cli/cmd/tui/ui/dialog.tsx +170 -0
- package/src/cli/cmd/tui/ui/spinner.ts +368 -0
- package/src/cli/cmd/tui/ui/toast.tsx +100 -0
- package/src/cli/cmd/tui/util/clipboard.ts +127 -0
- package/src/cli/cmd/tui/util/editor.ts +32 -0
- package/src/cli/cmd/tui/util/terminal.ts +114 -0
- package/src/cli/cmd/tui/worker.ts +63 -0
- package/src/cli/cmd/uninstall.ts +344 -0
- package/src/cli/cmd/upgrade.ts +100 -0
- package/src/cli/cmd/web.ts +84 -0
- package/src/cli/error.ts +56 -0
- package/src/cli/ui.ts +84 -0
- package/src/cli/upgrade.ts +25 -0
- package/src/command/index.ts +80 -0
- package/src/command/template/initialize.txt +10 -0
- package/src/command/template/review.txt +97 -0
- package/src/config/config.ts +995 -0
- package/src/config/markdown.ts +41 -0
- package/src/env/index.ts +26 -0
- package/src/file/ignore.ts +83 -0
- package/src/file/index.ts +328 -0
- package/src/file/ripgrep.ts +393 -0
- package/src/file/time.ts +64 -0
- package/src/file/watcher.ts +103 -0
- package/src/flag/flag.ts +46 -0
- package/src/format/formatter.ts +315 -0
- package/src/format/index.ts +137 -0
- package/src/global/index.ts +52 -0
- package/src/id/id.ts +73 -0
- package/src/ide/index.ts +76 -0
- package/src/index.ts +240 -0
- package/src/installation/index.ts +239 -0
- package/src/lsp/client.ts +229 -0
- package/src/lsp/index.ts +485 -0
- package/src/lsp/language.ts +116 -0
- package/src/lsp/server.ts +1895 -0
- package/src/mcp/auth.ts +135 -0
- package/src/mcp/index.ts +690 -0
- package/src/mcp/oauth-callback.ts +200 -0
- package/src/mcp/oauth-provider.ts +154 -0
- package/src/patch/index.ts +622 -0
- package/src/permission/index.ts +199 -0
- package/src/plugin/index.ts +91 -0
- package/src/project/bootstrap.ts +31 -0
- package/src/project/instance.ts +78 -0
- package/src/project/project.ts +221 -0
- package/src/project/state.ts +65 -0
- package/src/project/vcs.ts +76 -0
- package/src/provider/auth.ts +143 -0
- package/src/provider/models-macro.ts +11 -0
- package/src/provider/models.ts +106 -0
- package/src/provider/provider.ts +1071 -0
- package/src/provider/sdk/openai-compatible/src/README.md +5 -0
- package/src/provider/sdk/openai-compatible/src/index.ts +2 -0
- package/src/provider/sdk/openai-compatible/src/openai-compatible-provider.ts +100 -0
- package/src/provider/sdk/openai-compatible/src/responses/convert-to-openai-responses-input.ts +303 -0
- package/src/provider/sdk/openai-compatible/src/responses/map-openai-responses-finish-reason.ts +22 -0
- package/src/provider/sdk/openai-compatible/src/responses/openai-config.ts +18 -0
- package/src/provider/sdk/openai-compatible/src/responses/openai-error.ts +22 -0
- package/src/provider/sdk/openai-compatible/src/responses/openai-responses-api-types.ts +207 -0
- package/src/provider/sdk/openai-compatible/src/responses/openai-responses-language-model.ts +1713 -0
- package/src/provider/sdk/openai-compatible/src/responses/openai-responses-prepare-tools.ts +177 -0
- package/src/provider/sdk/openai-compatible/src/responses/openai-responses-settings.ts +1 -0
- package/src/provider/sdk/openai-compatible/src/responses/tool/code-interpreter.ts +88 -0
- package/src/provider/sdk/openai-compatible/src/responses/tool/file-search.ts +128 -0
- package/src/provider/sdk/openai-compatible/src/responses/tool/image-generation.ts +115 -0
- package/src/provider/sdk/openai-compatible/src/responses/tool/local-shell.ts +65 -0
- package/src/provider/sdk/openai-compatible/src/responses/tool/web-search-preview.ts +104 -0
- package/src/provider/sdk/openai-compatible/src/responses/tool/web-search.ts +103 -0
- package/src/provider/transform.ts +455 -0
- package/src/pty/index.ts +231 -0
- package/src/security/guardrails.test.ts +341 -0
- package/src/security/guardrails.ts +558 -0
- package/src/security/index.ts +19 -0
- package/src/server/error.ts +36 -0
- package/src/server/project.ts +79 -0
- package/src/server/server.ts +2642 -0
- package/src/server/tui.ts +71 -0
- package/src/session/compaction.ts +223 -0
- package/src/session/index.ts +461 -0
- package/src/session/llm.ts +201 -0
- package/src/session/message-v2.ts +690 -0
- package/src/session/message.ts +189 -0
- package/src/session/processor.ts +409 -0
- package/src/session/prompt/act-switch.txt +5 -0
- package/src/session/prompt/anthropic-20250930.txt +166 -0
- package/src/session/prompt/anthropic.txt +85 -0
- package/src/session/prompt/anthropic_spoof.txt +1 -0
- package/src/session/prompt/beast.txt +103 -0
- package/src/session/prompt/codex.txt +304 -0
- package/src/session/prompt/copilot-gpt-5.txt +138 -0
- package/src/session/prompt/gemini.txt +85 -0
- package/src/session/prompt/max-steps.txt +16 -0
- package/src/session/prompt/plan-reminder-anthropic.txt +35 -0
- package/src/session/prompt/plan.txt +24 -0
- package/src/session/prompt/polaris.txt +84 -0
- package/src/session/prompt/qwen.txt +106 -0
- package/src/session/prompt.ts +1509 -0
- package/src/session/retry.ts +86 -0
- package/src/session/revert.ts +108 -0
- package/src/session/sensitive-filter.test.ts +327 -0
- package/src/session/sensitive-filter.ts +466 -0
- package/src/session/status.ts +76 -0
- package/src/session/summary.ts +194 -0
- package/src/session/system.ts +120 -0
- package/src/session/todo.ts +37 -0
- package/src/share/share-next.ts +194 -0
- package/src/share/share.ts +87 -0
- package/src/shell/shell.ts +67 -0
- package/src/skill/index.ts +1 -0
- package/src/skill/skill.ts +83 -0
- package/src/snapshot/index.ts +197 -0
- package/src/storage/storage.ts +226 -0
- package/src/tests/agent.test.ts +308 -0
- package/src/tests/build-guards.test.ts +267 -0
- package/src/tests/config.test.ts +664 -0
- package/src/tests/tool-registry.test.ts +589 -0
- package/src/tool/bash.ts +317 -0
- package/src/tool/bash.txt +158 -0
- package/src/tool/batch.ts +175 -0
- package/src/tool/batch.txt +24 -0
- package/src/tool/codesearch.ts +168 -0
- package/src/tool/codesearch.txt +12 -0
- package/src/tool/edit.ts +675 -0
- package/src/tool/edit.txt +10 -0
- package/src/tool/glob.ts +65 -0
- package/src/tool/glob.txt +6 -0
- package/src/tool/grep.ts +121 -0
- package/src/tool/grep.txt +8 -0
- package/src/tool/invalid.ts +17 -0
- package/src/tool/ls.ts +110 -0
- package/src/tool/ls.txt +1 -0
- package/src/tool/lsp-diagnostics.ts +26 -0
- package/src/tool/lsp-diagnostics.txt +1 -0
- package/src/tool/lsp-hover.ts +31 -0
- package/src/tool/lsp-hover.txt +1 -0
- package/src/tool/lsp.ts +87 -0
- package/src/tool/lsp.txt +19 -0
- package/src/tool/multiedit.ts +46 -0
- package/src/tool/multiedit.txt +41 -0
- package/src/tool/patch.ts +233 -0
- package/src/tool/patch.txt +1 -0
- package/src/tool/read.ts +219 -0
- package/src/tool/read.txt +12 -0
- package/src/tool/registry.ts +162 -0
- package/src/tool/skill.ts +100 -0
- package/src/tool/task.ts +136 -0
- package/src/tool/task.txt +51 -0
- package/src/tool/todo.ts +39 -0
- package/src/tool/todoread.txt +14 -0
- package/src/tool/todowrite.txt +167 -0
- package/src/tool/tool.ts +71 -0
- package/src/tool/webfetch.ts +198 -0
- package/src/tool/webfetch.txt +13 -0
- package/src/tool/websearch.ts +180 -0
- package/src/tool/websearch.txt +11 -0
- package/src/tool/write.ts +110 -0
- package/src/tool/write.txt +8 -0
- package/src/util/archive.ts +16 -0
- package/src/util/color.ts +19 -0
- package/src/util/context.ts +25 -0
- package/src/util/defer.ts +12 -0
- package/src/util/eventloop.ts +20 -0
- package/src/util/filesystem.ts +83 -0
- package/src/util/fn.ts +11 -0
- package/src/util/iife.ts +3 -0
- package/src/util/keybind.ts +102 -0
- package/src/util/lazy.ts +11 -0
- package/src/util/license.ts +325 -0
- package/src/util/locale.ts +81 -0
- package/src/util/lock.ts +98 -0
- package/src/util/log.ts +180 -0
- package/src/util/queue.ts +32 -0
- package/src/util/rpc.ts +42 -0
- package/src/util/scrap.ts +10 -0
- package/src/util/signal.ts +12 -0
- package/src/util/timeout.ts +14 -0
- package/src/util/token.ts +7 -0
- package/src/util/wildcard.ts +54 -0
- package/sst-env.d.ts +9 -0
- package/test/agent/agent.test.ts +146 -0
- package/test/bun.test.ts +53 -0
- package/test/cli/github-remote.test.ts +80 -0
- package/test/config/agent-color.test.ts +66 -0
- package/test/config/config.test.ts +535 -0
- package/test/config/markdown.test.ts +89 -0
- package/test/file/ignore.test.ts +10 -0
- package/test/fixture/fixture.ts +36 -0
- package/test/fixture/lsp/fake-lsp-server.js +77 -0
- package/test/ide/ide.test.ts +82 -0
- package/test/keybind.test.ts +421 -0
- package/test/lsp/client.test.ts +95 -0
- package/test/mcp/headers.test.ts +153 -0
- package/test/patch/patch.test.ts +348 -0
- package/test/preload.ts +57 -0
- package/test/project/project.test.ts +72 -0
- package/test/provider/provider.test.ts +1809 -0
- package/test/provider/transform.test.ts +411 -0
- package/test/session/retry.test.ts +111 -0
- package/test/session/session.test.ts +71 -0
- package/test/skill/skill.test.ts +131 -0
- package/test/snapshot/snapshot.test.ts +939 -0
- package/test/tool/__snapshots__/tool.test.ts.snap +9 -0
- package/test/tool/bash.test.ts +434 -0
- package/test/tool/grep.test.ts +108 -0
- package/test/tool/patch.test.ts +259 -0
- package/test/tool/read.test.ts +42 -0
- package/test/util/iife.test.ts +36 -0
- package/test/util/lazy.test.ts +50 -0
- package/test/util/timeout.test.ts +21 -0
- package/test/util/wildcard.test.ts +55 -0
- package/tsconfig.json +16 -0
|
@@ -0,0 +1,1713 @@
|
|
|
1
|
+
import {
|
|
2
|
+
APICallError,
|
|
3
|
+
type LanguageModelV2,
|
|
4
|
+
type LanguageModelV2CallWarning,
|
|
5
|
+
type LanguageModelV2Content,
|
|
6
|
+
type LanguageModelV2FinishReason,
|
|
7
|
+
type LanguageModelV2ProviderDefinedTool,
|
|
8
|
+
type LanguageModelV2StreamPart,
|
|
9
|
+
type LanguageModelV2Usage,
|
|
10
|
+
type SharedV2ProviderMetadata,
|
|
11
|
+
} from "@ai-sdk/provider"
|
|
12
|
+
import {
|
|
13
|
+
combineHeaders,
|
|
14
|
+
createEventSourceResponseHandler,
|
|
15
|
+
createJsonResponseHandler,
|
|
16
|
+
generateId,
|
|
17
|
+
parseProviderOptions,
|
|
18
|
+
type ParseResult,
|
|
19
|
+
postJsonToApi,
|
|
20
|
+
} from "@ai-sdk/provider-utils"
|
|
21
|
+
import { z } from "zod/v4"
|
|
22
|
+
import type { OpenAIConfig } from "./openai-config"
|
|
23
|
+
import { openaiFailedResponseHandler } from "./openai-error"
|
|
24
|
+
import { codeInterpreterInputSchema, codeInterpreterOutputSchema } from "./tool/code-interpreter"
|
|
25
|
+
import { fileSearchOutputSchema } from "./tool/file-search"
|
|
26
|
+
import { imageGenerationOutputSchema } from "./tool/image-generation"
|
|
27
|
+
import { convertToOpenAIResponsesInput } from "./convert-to-openai-responses-input"
|
|
28
|
+
import { mapOpenAIResponseFinishReason } from "./map-openai-responses-finish-reason"
|
|
29
|
+
import type { OpenAIResponsesIncludeOptions, OpenAIResponsesIncludeValue } from "./openai-responses-api-types"
|
|
30
|
+
import { prepareResponsesTools } from "./openai-responses-prepare-tools"
|
|
31
|
+
import type { OpenAIResponsesModelId } from "./openai-responses-settings"
|
|
32
|
+
import { localShellInputSchema } from "./tool/local-shell"
|
|
33
|
+
|
|
34
|
+
const webSearchCallItem = z.object({
|
|
35
|
+
type: z.literal("web_search_call"),
|
|
36
|
+
id: z.string(),
|
|
37
|
+
status: z.string(),
|
|
38
|
+
action: z
|
|
39
|
+
.discriminatedUnion("type", [
|
|
40
|
+
z.object({
|
|
41
|
+
type: z.literal("search"),
|
|
42
|
+
query: z.string().nullish(),
|
|
43
|
+
}),
|
|
44
|
+
z.object({
|
|
45
|
+
type: z.literal("open_page"),
|
|
46
|
+
url: z.string(),
|
|
47
|
+
}),
|
|
48
|
+
z.object({
|
|
49
|
+
type: z.literal("find"),
|
|
50
|
+
url: z.string(),
|
|
51
|
+
pattern: z.string(),
|
|
52
|
+
}),
|
|
53
|
+
])
|
|
54
|
+
.nullish(),
|
|
55
|
+
})
|
|
56
|
+
|
|
57
|
+
const fileSearchCallItem = z.object({
|
|
58
|
+
type: z.literal("file_search_call"),
|
|
59
|
+
id: z.string(),
|
|
60
|
+
queries: z.array(z.string()),
|
|
61
|
+
results: z
|
|
62
|
+
.array(
|
|
63
|
+
z.object({
|
|
64
|
+
attributes: z.record(z.string(), z.unknown()),
|
|
65
|
+
file_id: z.string(),
|
|
66
|
+
filename: z.string(),
|
|
67
|
+
score: z.number(),
|
|
68
|
+
text: z.string(),
|
|
69
|
+
}),
|
|
70
|
+
)
|
|
71
|
+
.nullish(),
|
|
72
|
+
})
|
|
73
|
+
|
|
74
|
+
const codeInterpreterCallItem = z.object({
|
|
75
|
+
type: z.literal("code_interpreter_call"),
|
|
76
|
+
id: z.string(),
|
|
77
|
+
code: z.string().nullable(),
|
|
78
|
+
container_id: z.string(),
|
|
79
|
+
outputs: z
|
|
80
|
+
.array(
|
|
81
|
+
z.discriminatedUnion("type", [
|
|
82
|
+
z.object({ type: z.literal("logs"), logs: z.string() }),
|
|
83
|
+
z.object({ type: z.literal("image"), url: z.string() }),
|
|
84
|
+
]),
|
|
85
|
+
)
|
|
86
|
+
.nullable(),
|
|
87
|
+
})
|
|
88
|
+
|
|
89
|
+
const localShellCallItem = z.object({
|
|
90
|
+
type: z.literal("local_shell_call"),
|
|
91
|
+
id: z.string(),
|
|
92
|
+
call_id: z.string(),
|
|
93
|
+
action: z.object({
|
|
94
|
+
type: z.literal("exec"),
|
|
95
|
+
command: z.array(z.string()),
|
|
96
|
+
timeout_ms: z.number().optional(),
|
|
97
|
+
user: z.string().optional(),
|
|
98
|
+
working_directory: z.string().optional(),
|
|
99
|
+
env: z.record(z.string(), z.string()).optional(),
|
|
100
|
+
}),
|
|
101
|
+
})
|
|
102
|
+
|
|
103
|
+
const imageGenerationCallItem = z.object({
|
|
104
|
+
type: z.literal("image_generation_call"),
|
|
105
|
+
id: z.string(),
|
|
106
|
+
result: z.string(),
|
|
107
|
+
})
|
|
108
|
+
|
|
109
|
+
/**
|
|
110
|
+
* `top_logprobs` request body argument can be set to an integer between
|
|
111
|
+
* 0 and 20 specifying the number of most likely tokens to return at each
|
|
112
|
+
* token position, each with an associated log probability.
|
|
113
|
+
*
|
|
114
|
+
* @see https://platform.openai.com/docs/api-reference/responses/create#responses_create-top_logprobs
|
|
115
|
+
*/
|
|
116
|
+
const TOP_LOGPROBS_MAX = 20
|
|
117
|
+
|
|
118
|
+
const LOGPROBS_SCHEMA = z.array(
|
|
119
|
+
z.object({
|
|
120
|
+
token: z.string(),
|
|
121
|
+
logprob: z.number(),
|
|
122
|
+
top_logprobs: z.array(
|
|
123
|
+
z.object({
|
|
124
|
+
token: z.string(),
|
|
125
|
+
logprob: z.number(),
|
|
126
|
+
}),
|
|
127
|
+
),
|
|
128
|
+
}),
|
|
129
|
+
)
|
|
130
|
+
|
|
131
|
+
export class OpenAIResponsesLanguageModel implements LanguageModelV2 {
|
|
132
|
+
readonly specificationVersion = "v2"
|
|
133
|
+
|
|
134
|
+
readonly modelId: OpenAIResponsesModelId
|
|
135
|
+
|
|
136
|
+
private readonly config: OpenAIConfig
|
|
137
|
+
|
|
138
|
+
constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig) {
|
|
139
|
+
this.modelId = modelId
|
|
140
|
+
this.config = config
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
readonly supportedUrls: Record<string, RegExp[]> = {
|
|
144
|
+
"image/*": [/^https?:\/\/.*$/],
|
|
145
|
+
"application/pdf": [/^https?:\/\/.*$/],
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
get provider(): string {
|
|
149
|
+
return this.config.provider
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
private async getArgs({
|
|
153
|
+
maxOutputTokens,
|
|
154
|
+
temperature,
|
|
155
|
+
stopSequences,
|
|
156
|
+
topP,
|
|
157
|
+
topK,
|
|
158
|
+
presencePenalty,
|
|
159
|
+
frequencyPenalty,
|
|
160
|
+
seed,
|
|
161
|
+
prompt,
|
|
162
|
+
providerOptions,
|
|
163
|
+
tools,
|
|
164
|
+
toolChoice,
|
|
165
|
+
responseFormat,
|
|
166
|
+
}: Parameters<LanguageModelV2["doGenerate"]>[0]) {
|
|
167
|
+
const warnings: LanguageModelV2CallWarning[] = []
|
|
168
|
+
const modelConfig = getResponsesModelConfig(this.modelId)
|
|
169
|
+
|
|
170
|
+
if (topK != null) {
|
|
171
|
+
warnings.push({ type: "unsupported-setting", setting: "topK" })
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
if (seed != null) {
|
|
175
|
+
warnings.push({ type: "unsupported-setting", setting: "seed" })
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
if (presencePenalty != null) {
|
|
179
|
+
warnings.push({
|
|
180
|
+
type: "unsupported-setting",
|
|
181
|
+
setting: "presencePenalty",
|
|
182
|
+
})
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
if (frequencyPenalty != null) {
|
|
186
|
+
warnings.push({
|
|
187
|
+
type: "unsupported-setting",
|
|
188
|
+
setting: "frequencyPenalty",
|
|
189
|
+
})
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
if (stopSequences != null) {
|
|
193
|
+
warnings.push({ type: "unsupported-setting", setting: "stopSequences" })
|
|
194
|
+
}
|
|
195
|
+
|
|
196
|
+
const openaiOptions = await parseProviderOptions({
|
|
197
|
+
provider: "openai",
|
|
198
|
+
providerOptions,
|
|
199
|
+
schema: openaiResponsesProviderOptionsSchema,
|
|
200
|
+
})
|
|
201
|
+
|
|
202
|
+
const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
|
|
203
|
+
prompt,
|
|
204
|
+
systemMessageMode: modelConfig.systemMessageMode,
|
|
205
|
+
fileIdPrefixes: this.config.fileIdPrefixes,
|
|
206
|
+
store: openaiOptions?.store ?? true,
|
|
207
|
+
hasLocalShellTool: hasOpenAITool("openai.local_shell"),
|
|
208
|
+
})
|
|
209
|
+
|
|
210
|
+
warnings.push(...inputWarnings)
|
|
211
|
+
|
|
212
|
+
const strictJsonSchema = openaiOptions?.strictJsonSchema ?? false
|
|
213
|
+
|
|
214
|
+
let include: OpenAIResponsesIncludeOptions = openaiOptions?.include
|
|
215
|
+
|
|
216
|
+
function addInclude(key: OpenAIResponsesIncludeValue) {
|
|
217
|
+
include = include != null ? [...include, key] : [key]
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
function hasOpenAITool(id: string) {
|
|
221
|
+
return tools?.find((tool) => tool.type === "provider-defined" && tool.id === id) != null
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
// when logprobs are requested, automatically include them:
|
|
225
|
+
const topLogprobs =
|
|
226
|
+
typeof openaiOptions?.logprobs === "number"
|
|
227
|
+
? openaiOptions?.logprobs
|
|
228
|
+
: openaiOptions?.logprobs === true
|
|
229
|
+
? TOP_LOGPROBS_MAX
|
|
230
|
+
: undefined
|
|
231
|
+
|
|
232
|
+
if (topLogprobs) {
|
|
233
|
+
addInclude("message.output_text.logprobs")
|
|
234
|
+
}
|
|
235
|
+
|
|
236
|
+
// when a web search tool is present, automatically include the sources:
|
|
237
|
+
const webSearchToolName = (
|
|
238
|
+
tools?.find(
|
|
239
|
+
(tool) =>
|
|
240
|
+
tool.type === "provider-defined" &&
|
|
241
|
+
(tool.id === "openai.web_search" || tool.id === "openai.web_search_preview"),
|
|
242
|
+
) as LanguageModelV2ProviderDefinedTool | undefined
|
|
243
|
+
)?.name
|
|
244
|
+
|
|
245
|
+
if (webSearchToolName) {
|
|
246
|
+
addInclude("web_search_call.action.sources")
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
// when a code interpreter tool is present, automatically include the outputs:
|
|
250
|
+
if (hasOpenAITool("openai.code_interpreter")) {
|
|
251
|
+
addInclude("code_interpreter_call.outputs")
|
|
252
|
+
}
|
|
253
|
+
|
|
254
|
+
const baseArgs = {
|
|
255
|
+
model: this.modelId,
|
|
256
|
+
input,
|
|
257
|
+
temperature,
|
|
258
|
+
top_p: topP,
|
|
259
|
+
max_output_tokens: maxOutputTokens,
|
|
260
|
+
|
|
261
|
+
...((responseFormat?.type === "json" || openaiOptions?.textVerbosity) && {
|
|
262
|
+
text: {
|
|
263
|
+
...(responseFormat?.type === "json" && {
|
|
264
|
+
format:
|
|
265
|
+
responseFormat.schema != null
|
|
266
|
+
? {
|
|
267
|
+
type: "json_schema",
|
|
268
|
+
strict: strictJsonSchema,
|
|
269
|
+
name: responseFormat.name ?? "response",
|
|
270
|
+
description: responseFormat.description,
|
|
271
|
+
schema: responseFormat.schema,
|
|
272
|
+
}
|
|
273
|
+
: { type: "json_object" },
|
|
274
|
+
}),
|
|
275
|
+
...(openaiOptions?.textVerbosity && {
|
|
276
|
+
verbosity: openaiOptions.textVerbosity,
|
|
277
|
+
}),
|
|
278
|
+
},
|
|
279
|
+
}),
|
|
280
|
+
|
|
281
|
+
// provider options:
|
|
282
|
+
max_tool_calls: openaiOptions?.maxToolCalls,
|
|
283
|
+
metadata: openaiOptions?.metadata,
|
|
284
|
+
parallel_tool_calls: openaiOptions?.parallelToolCalls,
|
|
285
|
+
previous_response_id: openaiOptions?.previousResponseId,
|
|
286
|
+
store: openaiOptions?.store,
|
|
287
|
+
user: openaiOptions?.user,
|
|
288
|
+
instructions: openaiOptions?.instructions,
|
|
289
|
+
service_tier: openaiOptions?.serviceTier,
|
|
290
|
+
include,
|
|
291
|
+
prompt_cache_key: openaiOptions?.promptCacheKey,
|
|
292
|
+
safety_identifier: openaiOptions?.safetyIdentifier,
|
|
293
|
+
top_logprobs: topLogprobs,
|
|
294
|
+
|
|
295
|
+
// model-specific settings:
|
|
296
|
+
...(modelConfig.isReasoningModel &&
|
|
297
|
+
(openaiOptions?.reasoningEffort != null || openaiOptions?.reasoningSummary != null) && {
|
|
298
|
+
reasoning: {
|
|
299
|
+
...(openaiOptions?.reasoningEffort != null && {
|
|
300
|
+
effort: openaiOptions.reasoningEffort,
|
|
301
|
+
}),
|
|
302
|
+
...(openaiOptions?.reasoningSummary != null && {
|
|
303
|
+
summary: openaiOptions.reasoningSummary,
|
|
304
|
+
}),
|
|
305
|
+
},
|
|
306
|
+
}),
|
|
307
|
+
...(modelConfig.requiredAutoTruncation && {
|
|
308
|
+
truncation: "auto",
|
|
309
|
+
}),
|
|
310
|
+
}
|
|
311
|
+
|
|
312
|
+
if (modelConfig.isReasoningModel) {
|
|
313
|
+
// remove unsupported settings for reasoning models
|
|
314
|
+
// see https://platform.openai.com/docs/guides/reasoning#limitations
|
|
315
|
+
if (baseArgs.temperature != null) {
|
|
316
|
+
baseArgs.temperature = undefined
|
|
317
|
+
warnings.push({
|
|
318
|
+
type: "unsupported-setting",
|
|
319
|
+
setting: "temperature",
|
|
320
|
+
details: "temperature is not supported for reasoning models",
|
|
321
|
+
})
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
if (baseArgs.top_p != null) {
|
|
325
|
+
baseArgs.top_p = undefined
|
|
326
|
+
warnings.push({
|
|
327
|
+
type: "unsupported-setting",
|
|
328
|
+
setting: "topP",
|
|
329
|
+
details: "topP is not supported for reasoning models",
|
|
330
|
+
})
|
|
331
|
+
}
|
|
332
|
+
} else {
|
|
333
|
+
if (openaiOptions?.reasoningEffort != null) {
|
|
334
|
+
warnings.push({
|
|
335
|
+
type: "unsupported-setting",
|
|
336
|
+
setting: "reasoningEffort",
|
|
337
|
+
details: "reasoningEffort is not supported for non-reasoning models",
|
|
338
|
+
})
|
|
339
|
+
}
|
|
340
|
+
|
|
341
|
+
if (openaiOptions?.reasoningSummary != null) {
|
|
342
|
+
warnings.push({
|
|
343
|
+
type: "unsupported-setting",
|
|
344
|
+
setting: "reasoningSummary",
|
|
345
|
+
details: "reasoningSummary is not supported for non-reasoning models",
|
|
346
|
+
})
|
|
347
|
+
}
|
|
348
|
+
}
|
|
349
|
+
|
|
350
|
+
// Validate flex processing support
|
|
351
|
+
if (openaiOptions?.serviceTier === "flex" && !modelConfig.supportsFlexProcessing) {
|
|
352
|
+
warnings.push({
|
|
353
|
+
type: "unsupported-setting",
|
|
354
|
+
setting: "serviceTier",
|
|
355
|
+
details: "flex processing is only available for o3, o4-mini, and gpt-5 models",
|
|
356
|
+
})
|
|
357
|
+
// Remove from args if not supported
|
|
358
|
+
delete (baseArgs as any).service_tier
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
// Validate priority processing support
|
|
362
|
+
if (openaiOptions?.serviceTier === "priority" && !modelConfig.supportsPriorityProcessing) {
|
|
363
|
+
warnings.push({
|
|
364
|
+
type: "unsupported-setting",
|
|
365
|
+
setting: "serviceTier",
|
|
366
|
+
details:
|
|
367
|
+
"priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported",
|
|
368
|
+
})
|
|
369
|
+
// Remove from args if not supported
|
|
370
|
+
delete (baseArgs as any).service_tier
|
|
371
|
+
}
|
|
372
|
+
|
|
373
|
+
const {
|
|
374
|
+
tools: openaiTools,
|
|
375
|
+
toolChoice: openaiToolChoice,
|
|
376
|
+
toolWarnings,
|
|
377
|
+
} = prepareResponsesTools({
|
|
378
|
+
tools,
|
|
379
|
+
toolChoice,
|
|
380
|
+
strictJsonSchema,
|
|
381
|
+
})
|
|
382
|
+
|
|
383
|
+
return {
|
|
384
|
+
webSearchToolName,
|
|
385
|
+
args: {
|
|
386
|
+
...baseArgs,
|
|
387
|
+
tools: openaiTools,
|
|
388
|
+
tool_choice: openaiToolChoice,
|
|
389
|
+
},
|
|
390
|
+
warnings: [...warnings, ...toolWarnings],
|
|
391
|
+
}
|
|
392
|
+
}
|
|
393
|
+
|
|
394
|
+
async doGenerate(
|
|
395
|
+
options: Parameters<LanguageModelV2["doGenerate"]>[0],
|
|
396
|
+
): Promise<Awaited<ReturnType<LanguageModelV2["doGenerate"]>>> {
|
|
397
|
+
const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
|
|
398
|
+
const url = this.config.url({
|
|
399
|
+
path: "/responses",
|
|
400
|
+
modelId: this.modelId,
|
|
401
|
+
})
|
|
402
|
+
|
|
403
|
+
const {
|
|
404
|
+
responseHeaders,
|
|
405
|
+
value: response,
|
|
406
|
+
rawValue: rawResponse,
|
|
407
|
+
} = await postJsonToApi({
|
|
408
|
+
url,
|
|
409
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
410
|
+
body,
|
|
411
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
412
|
+
successfulResponseHandler: createJsonResponseHandler(
|
|
413
|
+
z.object({
|
|
414
|
+
id: z.string(),
|
|
415
|
+
created_at: z.number(),
|
|
416
|
+
error: z
|
|
417
|
+
.object({
|
|
418
|
+
code: z.string(),
|
|
419
|
+
message: z.string(),
|
|
420
|
+
})
|
|
421
|
+
.nullish(),
|
|
422
|
+
model: z.string(),
|
|
423
|
+
output: z.array(
|
|
424
|
+
z.discriminatedUnion("type", [
|
|
425
|
+
z.object({
|
|
426
|
+
type: z.literal("message"),
|
|
427
|
+
role: z.literal("assistant"),
|
|
428
|
+
id: z.string(),
|
|
429
|
+
content: z.array(
|
|
430
|
+
z.object({
|
|
431
|
+
type: z.literal("output_text"),
|
|
432
|
+
text: z.string(),
|
|
433
|
+
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
434
|
+
annotations: z.array(
|
|
435
|
+
z.discriminatedUnion("type", [
|
|
436
|
+
z.object({
|
|
437
|
+
type: z.literal("url_citation"),
|
|
438
|
+
start_index: z.number(),
|
|
439
|
+
end_index: z.number(),
|
|
440
|
+
url: z.string(),
|
|
441
|
+
title: z.string(),
|
|
442
|
+
}),
|
|
443
|
+
z.object({
|
|
444
|
+
type: z.literal("file_citation"),
|
|
445
|
+
file_id: z.string(),
|
|
446
|
+
filename: z.string().nullish(),
|
|
447
|
+
index: z.number().nullish(),
|
|
448
|
+
start_index: z.number().nullish(),
|
|
449
|
+
end_index: z.number().nullish(),
|
|
450
|
+
quote: z.string().nullish(),
|
|
451
|
+
}),
|
|
452
|
+
z.object({
|
|
453
|
+
type: z.literal("container_file_citation"),
|
|
454
|
+
}),
|
|
455
|
+
]),
|
|
456
|
+
),
|
|
457
|
+
}),
|
|
458
|
+
),
|
|
459
|
+
}),
|
|
460
|
+
webSearchCallItem,
|
|
461
|
+
fileSearchCallItem,
|
|
462
|
+
codeInterpreterCallItem,
|
|
463
|
+
imageGenerationCallItem,
|
|
464
|
+
localShellCallItem,
|
|
465
|
+
z.object({
|
|
466
|
+
type: z.literal("function_call"),
|
|
467
|
+
call_id: z.string(),
|
|
468
|
+
name: z.string(),
|
|
469
|
+
arguments: z.string(),
|
|
470
|
+
id: z.string(),
|
|
471
|
+
}),
|
|
472
|
+
z.object({
|
|
473
|
+
type: z.literal("computer_call"),
|
|
474
|
+
id: z.string(),
|
|
475
|
+
status: z.string().optional(),
|
|
476
|
+
}),
|
|
477
|
+
z.object({
|
|
478
|
+
type: z.literal("reasoning"),
|
|
479
|
+
id: z.string(),
|
|
480
|
+
encrypted_content: z.string().nullish(),
|
|
481
|
+
summary: z.array(
|
|
482
|
+
z.object({
|
|
483
|
+
type: z.literal("summary_text"),
|
|
484
|
+
text: z.string(),
|
|
485
|
+
}),
|
|
486
|
+
),
|
|
487
|
+
}),
|
|
488
|
+
]),
|
|
489
|
+
),
|
|
490
|
+
service_tier: z.string().nullish(),
|
|
491
|
+
incomplete_details: z.object({ reason: z.string() }).nullish(),
|
|
492
|
+
usage: usageSchema,
|
|
493
|
+
}),
|
|
494
|
+
),
|
|
495
|
+
abortSignal: options.abortSignal,
|
|
496
|
+
fetch: this.config.fetch,
|
|
497
|
+
})
|
|
498
|
+
|
|
499
|
+
if (response.error) {
|
|
500
|
+
throw new APICallError({
|
|
501
|
+
message: response.error.message,
|
|
502
|
+
url,
|
|
503
|
+
requestBodyValues: body,
|
|
504
|
+
statusCode: 400,
|
|
505
|
+
responseHeaders,
|
|
506
|
+
responseBody: rawResponse as string,
|
|
507
|
+
isRetryable: false,
|
|
508
|
+
})
|
|
509
|
+
}
|
|
510
|
+
|
|
511
|
+
const content: Array<LanguageModelV2Content> = []
|
|
512
|
+
const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
|
|
513
|
+
|
|
514
|
+
// flag that checks if there have been client-side tool calls (not executed by openai)
|
|
515
|
+
let hasFunctionCall = false
|
|
516
|
+
|
|
517
|
+
// map response content to content array
|
|
518
|
+
for (const part of response.output) {
|
|
519
|
+
switch (part.type) {
|
|
520
|
+
case "reasoning": {
|
|
521
|
+
// when there are no summary parts, we need to add an empty reasoning part:
|
|
522
|
+
if (part.summary.length === 0) {
|
|
523
|
+
part.summary.push({ type: "summary_text", text: "" })
|
|
524
|
+
}
|
|
525
|
+
|
|
526
|
+
for (const summary of part.summary) {
|
|
527
|
+
content.push({
|
|
528
|
+
type: "reasoning" as const,
|
|
529
|
+
text: summary.text,
|
|
530
|
+
providerMetadata: {
|
|
531
|
+
openai: {
|
|
532
|
+
itemId: part.id,
|
|
533
|
+
reasoningEncryptedContent: part.encrypted_content ?? null,
|
|
534
|
+
},
|
|
535
|
+
},
|
|
536
|
+
})
|
|
537
|
+
}
|
|
538
|
+
break
|
|
539
|
+
}
|
|
540
|
+
|
|
541
|
+
case "image_generation_call": {
|
|
542
|
+
content.push({
|
|
543
|
+
type: "tool-call",
|
|
544
|
+
toolCallId: part.id,
|
|
545
|
+
toolName: "image_generation",
|
|
546
|
+
input: "{}",
|
|
547
|
+
providerExecuted: true,
|
|
548
|
+
})
|
|
549
|
+
|
|
550
|
+
content.push({
|
|
551
|
+
type: "tool-result",
|
|
552
|
+
toolCallId: part.id,
|
|
553
|
+
toolName: "image_generation",
|
|
554
|
+
result: {
|
|
555
|
+
result: part.result,
|
|
556
|
+
} satisfies z.infer<typeof imageGenerationOutputSchema>,
|
|
557
|
+
providerExecuted: true,
|
|
558
|
+
})
|
|
559
|
+
|
|
560
|
+
break
|
|
561
|
+
}
|
|
562
|
+
|
|
563
|
+
case "local_shell_call": {
|
|
564
|
+
content.push({
|
|
565
|
+
type: "tool-call",
|
|
566
|
+
toolCallId: part.call_id,
|
|
567
|
+
toolName: "local_shell",
|
|
568
|
+
input: JSON.stringify({ action: part.action } satisfies z.infer<typeof localShellInputSchema>),
|
|
569
|
+
providerMetadata: {
|
|
570
|
+
openai: {
|
|
571
|
+
itemId: part.id,
|
|
572
|
+
},
|
|
573
|
+
},
|
|
574
|
+
})
|
|
575
|
+
|
|
576
|
+
break
|
|
577
|
+
}
|
|
578
|
+
|
|
579
|
+
case "message": {
|
|
580
|
+
for (const contentPart of part.content) {
|
|
581
|
+
if (options.providerOptions?.openai?.logprobs && contentPart.logprobs) {
|
|
582
|
+
logprobs.push(contentPart.logprobs)
|
|
583
|
+
}
|
|
584
|
+
|
|
585
|
+
content.push({
|
|
586
|
+
type: "text",
|
|
587
|
+
text: contentPart.text,
|
|
588
|
+
providerMetadata: {
|
|
589
|
+
openai: {
|
|
590
|
+
itemId: part.id,
|
|
591
|
+
},
|
|
592
|
+
},
|
|
593
|
+
})
|
|
594
|
+
|
|
595
|
+
for (const annotation of contentPart.annotations) {
|
|
596
|
+
if (annotation.type === "url_citation") {
|
|
597
|
+
content.push({
|
|
598
|
+
type: "source",
|
|
599
|
+
sourceType: "url",
|
|
600
|
+
id: this.config.generateId?.() ?? generateId(),
|
|
601
|
+
url: annotation.url,
|
|
602
|
+
title: annotation.title,
|
|
603
|
+
})
|
|
604
|
+
} else if (annotation.type === "file_citation") {
|
|
605
|
+
content.push({
|
|
606
|
+
type: "source",
|
|
607
|
+
sourceType: "document",
|
|
608
|
+
id: this.config.generateId?.() ?? generateId(),
|
|
609
|
+
mediaType: "text/plain",
|
|
610
|
+
title: annotation.quote ?? annotation.filename ?? "Document",
|
|
611
|
+
filename: annotation.filename ?? annotation.file_id,
|
|
612
|
+
})
|
|
613
|
+
}
|
|
614
|
+
}
|
|
615
|
+
}
|
|
616
|
+
|
|
617
|
+
break
|
|
618
|
+
}
|
|
619
|
+
|
|
620
|
+
case "function_call": {
|
|
621
|
+
hasFunctionCall = true
|
|
622
|
+
|
|
623
|
+
content.push({
|
|
624
|
+
type: "tool-call",
|
|
625
|
+
toolCallId: part.call_id,
|
|
626
|
+
toolName: part.name,
|
|
627
|
+
input: part.arguments,
|
|
628
|
+
providerMetadata: {
|
|
629
|
+
openai: {
|
|
630
|
+
itemId: part.id,
|
|
631
|
+
},
|
|
632
|
+
},
|
|
633
|
+
})
|
|
634
|
+
break
|
|
635
|
+
}
|
|
636
|
+
|
|
637
|
+
case "web_search_call": {
|
|
638
|
+
content.push({
|
|
639
|
+
type: "tool-call",
|
|
640
|
+
toolCallId: part.id,
|
|
641
|
+
toolName: webSearchToolName ?? "web_search",
|
|
642
|
+
input: JSON.stringify({ action: part.action }),
|
|
643
|
+
providerExecuted: true,
|
|
644
|
+
})
|
|
645
|
+
|
|
646
|
+
content.push({
|
|
647
|
+
type: "tool-result",
|
|
648
|
+
toolCallId: part.id,
|
|
649
|
+
toolName: webSearchToolName ?? "web_search",
|
|
650
|
+
result: { status: part.status },
|
|
651
|
+
providerExecuted: true,
|
|
652
|
+
})
|
|
653
|
+
|
|
654
|
+
break
|
|
655
|
+
}
|
|
656
|
+
|
|
657
|
+
case "computer_call": {
|
|
658
|
+
content.push({
|
|
659
|
+
type: "tool-call",
|
|
660
|
+
toolCallId: part.id,
|
|
661
|
+
toolName: "computer_use",
|
|
662
|
+
input: "",
|
|
663
|
+
providerExecuted: true,
|
|
664
|
+
})
|
|
665
|
+
|
|
666
|
+
content.push({
|
|
667
|
+
type: "tool-result",
|
|
668
|
+
toolCallId: part.id,
|
|
669
|
+
toolName: "computer_use",
|
|
670
|
+
result: {
|
|
671
|
+
type: "computer_use_tool_result",
|
|
672
|
+
status: part.status || "completed",
|
|
673
|
+
},
|
|
674
|
+
providerExecuted: true,
|
|
675
|
+
})
|
|
676
|
+
break
|
|
677
|
+
}
|
|
678
|
+
|
|
679
|
+
case "file_search_call": {
|
|
680
|
+
content.push({
|
|
681
|
+
type: "tool-call",
|
|
682
|
+
toolCallId: part.id,
|
|
683
|
+
toolName: "file_search",
|
|
684
|
+
input: "{}",
|
|
685
|
+
providerExecuted: true,
|
|
686
|
+
})
|
|
687
|
+
|
|
688
|
+
content.push({
|
|
689
|
+
type: "tool-result",
|
|
690
|
+
toolCallId: part.id,
|
|
691
|
+
toolName: "file_search",
|
|
692
|
+
result: {
|
|
693
|
+
queries: part.queries,
|
|
694
|
+
results:
|
|
695
|
+
part.results?.map((result) => ({
|
|
696
|
+
attributes: result.attributes,
|
|
697
|
+
fileId: result.file_id,
|
|
698
|
+
filename: result.filename,
|
|
699
|
+
score: result.score,
|
|
700
|
+
text: result.text,
|
|
701
|
+
})) ?? null,
|
|
702
|
+
} satisfies z.infer<typeof fileSearchOutputSchema>,
|
|
703
|
+
providerExecuted: true,
|
|
704
|
+
})
|
|
705
|
+
break
|
|
706
|
+
}
|
|
707
|
+
|
|
708
|
+
case "code_interpreter_call": {
|
|
709
|
+
content.push({
|
|
710
|
+
type: "tool-call",
|
|
711
|
+
toolCallId: part.id,
|
|
712
|
+
toolName: "code_interpreter",
|
|
713
|
+
input: JSON.stringify({
|
|
714
|
+
code: part.code,
|
|
715
|
+
containerId: part.container_id,
|
|
716
|
+
} satisfies z.infer<typeof codeInterpreterInputSchema>),
|
|
717
|
+
providerExecuted: true,
|
|
718
|
+
})
|
|
719
|
+
|
|
720
|
+
content.push({
|
|
721
|
+
type: "tool-result",
|
|
722
|
+
toolCallId: part.id,
|
|
723
|
+
toolName: "code_interpreter",
|
|
724
|
+
result: {
|
|
725
|
+
outputs: part.outputs,
|
|
726
|
+
} satisfies z.infer<typeof codeInterpreterOutputSchema>,
|
|
727
|
+
providerExecuted: true,
|
|
728
|
+
})
|
|
729
|
+
break
|
|
730
|
+
}
|
|
731
|
+
}
|
|
732
|
+
}
|
|
733
|
+
|
|
734
|
+
const providerMetadata: SharedV2ProviderMetadata = {
|
|
735
|
+
openai: { responseId: response.id },
|
|
736
|
+
}
|
|
737
|
+
|
|
738
|
+
if (logprobs.length > 0) {
|
|
739
|
+
providerMetadata.openai.logprobs = logprobs
|
|
740
|
+
}
|
|
741
|
+
|
|
742
|
+
if (typeof response.service_tier === "string") {
|
|
743
|
+
providerMetadata.openai.serviceTier = response.service_tier
|
|
744
|
+
}
|
|
745
|
+
|
|
746
|
+
return {
|
|
747
|
+
content,
|
|
748
|
+
finishReason: mapOpenAIResponseFinishReason({
|
|
749
|
+
finishReason: response.incomplete_details?.reason,
|
|
750
|
+
hasFunctionCall,
|
|
751
|
+
}),
|
|
752
|
+
usage: {
|
|
753
|
+
inputTokens: response.usage.input_tokens,
|
|
754
|
+
outputTokens: response.usage.output_tokens,
|
|
755
|
+
totalTokens: response.usage.input_tokens + response.usage.output_tokens,
|
|
756
|
+
reasoningTokens: response.usage.output_tokens_details?.reasoning_tokens ?? undefined,
|
|
757
|
+
cachedInputTokens: response.usage.input_tokens_details?.cached_tokens ?? undefined,
|
|
758
|
+
},
|
|
759
|
+
request: { body },
|
|
760
|
+
response: {
|
|
761
|
+
id: response.id,
|
|
762
|
+
timestamp: new Date(response.created_at * 1000),
|
|
763
|
+
modelId: response.model,
|
|
764
|
+
headers: responseHeaders,
|
|
765
|
+
body: rawResponse,
|
|
766
|
+
},
|
|
767
|
+
providerMetadata,
|
|
768
|
+
warnings,
|
|
769
|
+
}
|
|
770
|
+
}
|
|
771
|
+
|
|
772
|
+
async doStream(
|
|
773
|
+
options: Parameters<LanguageModelV2["doStream"]>[0],
|
|
774
|
+
): Promise<Awaited<ReturnType<LanguageModelV2["doStream"]>>> {
|
|
775
|
+
const { args: body, warnings, webSearchToolName } = await this.getArgs(options)
|
|
776
|
+
|
|
777
|
+
const { responseHeaders, value: response } = await postJsonToApi({
|
|
778
|
+
url: this.config.url({
|
|
779
|
+
path: "/responses",
|
|
780
|
+
modelId: this.modelId,
|
|
781
|
+
}),
|
|
782
|
+
headers: combineHeaders(this.config.headers(), options.headers),
|
|
783
|
+
body: {
|
|
784
|
+
...body,
|
|
785
|
+
stream: true,
|
|
786
|
+
},
|
|
787
|
+
failedResponseHandler: openaiFailedResponseHandler,
|
|
788
|
+
successfulResponseHandler: createEventSourceResponseHandler(openaiResponsesChunkSchema),
|
|
789
|
+
abortSignal: options.abortSignal,
|
|
790
|
+
fetch: this.config.fetch,
|
|
791
|
+
})
|
|
792
|
+
|
|
793
|
+
const self = this
|
|
794
|
+
|
|
795
|
+
let finishReason: LanguageModelV2FinishReason = "unknown"
|
|
796
|
+
const usage: LanguageModelV2Usage = {
|
|
797
|
+
inputTokens: undefined,
|
|
798
|
+
outputTokens: undefined,
|
|
799
|
+
totalTokens: undefined,
|
|
800
|
+
}
|
|
801
|
+
const logprobs: Array<z.infer<typeof LOGPROBS_SCHEMA>> = []
|
|
802
|
+
let responseId: string | null = null
|
|
803
|
+
const ongoingToolCalls: Record<
|
|
804
|
+
number,
|
|
805
|
+
| {
|
|
806
|
+
toolName: string
|
|
807
|
+
toolCallId: string
|
|
808
|
+
codeInterpreter?: {
|
|
809
|
+
containerId: string
|
|
810
|
+
}
|
|
811
|
+
}
|
|
812
|
+
| undefined
|
|
813
|
+
> = {}
|
|
814
|
+
|
|
815
|
+
// flag that checks if there have been client-side tool calls (not executed by openai)
|
|
816
|
+
let hasFunctionCall = false
|
|
817
|
+
|
|
818
|
+
const activeReasoning: Record<
|
|
819
|
+
string,
|
|
820
|
+
{
|
|
821
|
+
encryptedContent?: string | null
|
|
822
|
+
summaryParts: number[]
|
|
823
|
+
}
|
|
824
|
+
> = {}
|
|
825
|
+
|
|
826
|
+
// Track a stable text part id for the current assistant message.
|
|
827
|
+
// Copilot may change item_id across text deltas; normalize to one id.
|
|
828
|
+
let currentTextId: string | null = null
|
|
829
|
+
|
|
830
|
+
let serviceTier: string | undefined
|
|
831
|
+
|
|
832
|
+
return {
|
|
833
|
+
stream: response.pipeThrough(
|
|
834
|
+
new TransformStream<ParseResult<z.infer<typeof openaiResponsesChunkSchema>>, LanguageModelV2StreamPart>({
|
|
835
|
+
start(controller) {
|
|
836
|
+
controller.enqueue({ type: "stream-start", warnings })
|
|
837
|
+
},
|
|
838
|
+
|
|
839
|
+
transform(chunk, controller) {
|
|
840
|
+
if (options.includeRawChunks) {
|
|
841
|
+
controller.enqueue({ type: "raw", rawValue: chunk.rawValue })
|
|
842
|
+
}
|
|
843
|
+
|
|
844
|
+
// handle failed chunk parsing / validation:
|
|
845
|
+
if (!chunk.success) {
|
|
846
|
+
finishReason = "error"
|
|
847
|
+
controller.enqueue({ type: "error", error: chunk.error })
|
|
848
|
+
return
|
|
849
|
+
}
|
|
850
|
+
|
|
851
|
+
const value = chunk.value
|
|
852
|
+
|
|
853
|
+
if (isResponseOutputItemAddedChunk(value)) {
|
|
854
|
+
if (value.item.type === "function_call") {
|
|
855
|
+
ongoingToolCalls[value.output_index] = {
|
|
856
|
+
toolName: value.item.name,
|
|
857
|
+
toolCallId: value.item.call_id,
|
|
858
|
+
}
|
|
859
|
+
|
|
860
|
+
controller.enqueue({
|
|
861
|
+
type: "tool-input-start",
|
|
862
|
+
id: value.item.call_id,
|
|
863
|
+
toolName: value.item.name,
|
|
864
|
+
})
|
|
865
|
+
} else if (value.item.type === "web_search_call") {
|
|
866
|
+
ongoingToolCalls[value.output_index] = {
|
|
867
|
+
toolName: webSearchToolName ?? "web_search",
|
|
868
|
+
toolCallId: value.item.id,
|
|
869
|
+
}
|
|
870
|
+
|
|
871
|
+
controller.enqueue({
|
|
872
|
+
type: "tool-input-start",
|
|
873
|
+
id: value.item.id,
|
|
874
|
+
toolName: webSearchToolName ?? "web_search",
|
|
875
|
+
})
|
|
876
|
+
} else if (value.item.type === "computer_call") {
|
|
877
|
+
ongoingToolCalls[value.output_index] = {
|
|
878
|
+
toolName: "computer_use",
|
|
879
|
+
toolCallId: value.item.id,
|
|
880
|
+
}
|
|
881
|
+
|
|
882
|
+
controller.enqueue({
|
|
883
|
+
type: "tool-input-start",
|
|
884
|
+
id: value.item.id,
|
|
885
|
+
toolName: "computer_use",
|
|
886
|
+
})
|
|
887
|
+
} else if (value.item.type === "code_interpreter_call") {
|
|
888
|
+
ongoingToolCalls[value.output_index] = {
|
|
889
|
+
toolName: "code_interpreter",
|
|
890
|
+
toolCallId: value.item.id,
|
|
891
|
+
codeInterpreter: {
|
|
892
|
+
containerId: value.item.container_id,
|
|
893
|
+
},
|
|
894
|
+
}
|
|
895
|
+
|
|
896
|
+
controller.enqueue({
|
|
897
|
+
type: "tool-input-start",
|
|
898
|
+
id: value.item.id,
|
|
899
|
+
toolName: "code_interpreter",
|
|
900
|
+
})
|
|
901
|
+
|
|
902
|
+
controller.enqueue({
|
|
903
|
+
type: "tool-input-delta",
|
|
904
|
+
id: value.item.id,
|
|
905
|
+
delta: `{"containerId":"${value.item.container_id}","code":"`,
|
|
906
|
+
})
|
|
907
|
+
} else if (value.item.type === "file_search_call") {
|
|
908
|
+
controller.enqueue({
|
|
909
|
+
type: "tool-call",
|
|
910
|
+
toolCallId: value.item.id,
|
|
911
|
+
toolName: "file_search",
|
|
912
|
+
input: "{}",
|
|
913
|
+
providerExecuted: true,
|
|
914
|
+
})
|
|
915
|
+
} else if (value.item.type === "image_generation_call") {
|
|
916
|
+
controller.enqueue({
|
|
917
|
+
type: "tool-call",
|
|
918
|
+
toolCallId: value.item.id,
|
|
919
|
+
toolName: "image_generation",
|
|
920
|
+
input: "{}",
|
|
921
|
+
providerExecuted: true,
|
|
922
|
+
})
|
|
923
|
+
} else if (value.item.type === "message") {
|
|
924
|
+
// Start a stable text part for this assistant message
|
|
925
|
+
currentTextId = value.item.id
|
|
926
|
+
controller.enqueue({
|
|
927
|
+
type: "text-start",
|
|
928
|
+
id: value.item.id,
|
|
929
|
+
providerMetadata: {
|
|
930
|
+
openai: {
|
|
931
|
+
itemId: value.item.id,
|
|
932
|
+
},
|
|
933
|
+
},
|
|
934
|
+
})
|
|
935
|
+
} else if (isResponseOutputItemAddedReasoningChunk(value)) {
|
|
936
|
+
activeReasoning[value.item.id] = {
|
|
937
|
+
encryptedContent: value.item.encrypted_content,
|
|
938
|
+
summaryParts: [0],
|
|
939
|
+
}
|
|
940
|
+
|
|
941
|
+
controller.enqueue({
|
|
942
|
+
type: "reasoning-start",
|
|
943
|
+
id: `${value.item.id}:0`,
|
|
944
|
+
providerMetadata: {
|
|
945
|
+
openai: {
|
|
946
|
+
itemId: value.item.id,
|
|
947
|
+
reasoningEncryptedContent: value.item.encrypted_content ?? null,
|
|
948
|
+
},
|
|
949
|
+
},
|
|
950
|
+
})
|
|
951
|
+
}
|
|
952
|
+
} else if (isResponseOutputItemDoneChunk(value)) {
|
|
953
|
+
if (value.item.type === "function_call") {
|
|
954
|
+
ongoingToolCalls[value.output_index] = undefined
|
|
955
|
+
hasFunctionCall = true
|
|
956
|
+
|
|
957
|
+
controller.enqueue({
|
|
958
|
+
type: "tool-input-end",
|
|
959
|
+
id: value.item.call_id,
|
|
960
|
+
})
|
|
961
|
+
|
|
962
|
+
controller.enqueue({
|
|
963
|
+
type: "tool-call",
|
|
964
|
+
toolCallId: value.item.call_id,
|
|
965
|
+
toolName: value.item.name,
|
|
966
|
+
input: value.item.arguments,
|
|
967
|
+
providerMetadata: {
|
|
968
|
+
openai: {
|
|
969
|
+
itemId: value.item.id,
|
|
970
|
+
},
|
|
971
|
+
},
|
|
972
|
+
})
|
|
973
|
+
} else if (value.item.type === "web_search_call") {
|
|
974
|
+
ongoingToolCalls[value.output_index] = undefined
|
|
975
|
+
|
|
976
|
+
controller.enqueue({
|
|
977
|
+
type: "tool-input-end",
|
|
978
|
+
id: value.item.id,
|
|
979
|
+
})
|
|
980
|
+
|
|
981
|
+
controller.enqueue({
|
|
982
|
+
type: "tool-call",
|
|
983
|
+
toolCallId: value.item.id,
|
|
984
|
+
toolName: "web_search",
|
|
985
|
+
input: JSON.stringify({ action: value.item.action }),
|
|
986
|
+
providerExecuted: true,
|
|
987
|
+
})
|
|
988
|
+
|
|
989
|
+
controller.enqueue({
|
|
990
|
+
type: "tool-result",
|
|
991
|
+
toolCallId: value.item.id,
|
|
992
|
+
toolName: "web_search",
|
|
993
|
+
result: { status: value.item.status },
|
|
994
|
+
providerExecuted: true,
|
|
995
|
+
})
|
|
996
|
+
} else if (value.item.type === "computer_call") {
|
|
997
|
+
ongoingToolCalls[value.output_index] = undefined
|
|
998
|
+
|
|
999
|
+
controller.enqueue({
|
|
1000
|
+
type: "tool-input-end",
|
|
1001
|
+
id: value.item.id,
|
|
1002
|
+
})
|
|
1003
|
+
|
|
1004
|
+
controller.enqueue({
|
|
1005
|
+
type: "tool-call",
|
|
1006
|
+
toolCallId: value.item.id,
|
|
1007
|
+
toolName: "computer_use",
|
|
1008
|
+
input: "",
|
|
1009
|
+
providerExecuted: true,
|
|
1010
|
+
})
|
|
1011
|
+
|
|
1012
|
+
controller.enqueue({
|
|
1013
|
+
type: "tool-result",
|
|
1014
|
+
toolCallId: value.item.id,
|
|
1015
|
+
toolName: "computer_use",
|
|
1016
|
+
result: {
|
|
1017
|
+
type: "computer_use_tool_result",
|
|
1018
|
+
status: value.item.status || "completed",
|
|
1019
|
+
},
|
|
1020
|
+
providerExecuted: true,
|
|
1021
|
+
})
|
|
1022
|
+
} else if (value.item.type === "file_search_call") {
|
|
1023
|
+
ongoingToolCalls[value.output_index] = undefined
|
|
1024
|
+
|
|
1025
|
+
controller.enqueue({
|
|
1026
|
+
type: "tool-result",
|
|
1027
|
+
toolCallId: value.item.id,
|
|
1028
|
+
toolName: "file_search",
|
|
1029
|
+
result: {
|
|
1030
|
+
queries: value.item.queries,
|
|
1031
|
+
results:
|
|
1032
|
+
value.item.results?.map((result) => ({
|
|
1033
|
+
attributes: result.attributes,
|
|
1034
|
+
fileId: result.file_id,
|
|
1035
|
+
filename: result.filename,
|
|
1036
|
+
score: result.score,
|
|
1037
|
+
text: result.text,
|
|
1038
|
+
})) ?? null,
|
|
1039
|
+
} satisfies z.infer<typeof fileSearchOutputSchema>,
|
|
1040
|
+
providerExecuted: true,
|
|
1041
|
+
})
|
|
1042
|
+
} else if (value.item.type === "code_interpreter_call") {
|
|
1043
|
+
ongoingToolCalls[value.output_index] = undefined
|
|
1044
|
+
|
|
1045
|
+
controller.enqueue({
|
|
1046
|
+
type: "tool-result",
|
|
1047
|
+
toolCallId: value.item.id,
|
|
1048
|
+
toolName: "code_interpreter",
|
|
1049
|
+
result: {
|
|
1050
|
+
outputs: value.item.outputs,
|
|
1051
|
+
} satisfies z.infer<typeof codeInterpreterOutputSchema>,
|
|
1052
|
+
providerExecuted: true,
|
|
1053
|
+
})
|
|
1054
|
+
} else if (value.item.type === "image_generation_call") {
|
|
1055
|
+
controller.enqueue({
|
|
1056
|
+
type: "tool-result",
|
|
1057
|
+
toolCallId: value.item.id,
|
|
1058
|
+
toolName: "image_generation",
|
|
1059
|
+
result: {
|
|
1060
|
+
result: value.item.result,
|
|
1061
|
+
} satisfies z.infer<typeof imageGenerationOutputSchema>,
|
|
1062
|
+
providerExecuted: true,
|
|
1063
|
+
})
|
|
1064
|
+
} else if (value.item.type === "local_shell_call") {
|
|
1065
|
+
ongoingToolCalls[value.output_index] = undefined
|
|
1066
|
+
|
|
1067
|
+
controller.enqueue({
|
|
1068
|
+
type: "tool-call",
|
|
1069
|
+
toolCallId: value.item.call_id,
|
|
1070
|
+
toolName: "local_shell",
|
|
1071
|
+
input: JSON.stringify({
|
|
1072
|
+
action: {
|
|
1073
|
+
type: "exec",
|
|
1074
|
+
command: value.item.action.command,
|
|
1075
|
+
timeoutMs: value.item.action.timeout_ms,
|
|
1076
|
+
user: value.item.action.user,
|
|
1077
|
+
workingDirectory: value.item.action.working_directory,
|
|
1078
|
+
env: value.item.action.env,
|
|
1079
|
+
},
|
|
1080
|
+
} satisfies z.infer<typeof localShellInputSchema>),
|
|
1081
|
+
providerMetadata: {
|
|
1082
|
+
openai: { itemId: value.item.id },
|
|
1083
|
+
},
|
|
1084
|
+
})
|
|
1085
|
+
} else if (value.item.type === "message") {
|
|
1086
|
+
if (currentTextId) {
|
|
1087
|
+
controller.enqueue({
|
|
1088
|
+
type: "text-end",
|
|
1089
|
+
id: currentTextId,
|
|
1090
|
+
})
|
|
1091
|
+
currentTextId = null
|
|
1092
|
+
}
|
|
1093
|
+
} else if (isResponseOutputItemDoneReasoningChunk(value)) {
|
|
1094
|
+
const activeReasoningPart = activeReasoning[value.item.id]
|
|
1095
|
+
if (activeReasoningPart) {
|
|
1096
|
+
for (const summaryIndex of activeReasoningPart.summaryParts) {
|
|
1097
|
+
controller.enqueue({
|
|
1098
|
+
type: "reasoning-end",
|
|
1099
|
+
id: `${value.item.id}:${summaryIndex}`,
|
|
1100
|
+
providerMetadata: {
|
|
1101
|
+
openai: {
|
|
1102
|
+
itemId: value.item.id,
|
|
1103
|
+
reasoningEncryptedContent: value.item.encrypted_content ?? null,
|
|
1104
|
+
},
|
|
1105
|
+
},
|
|
1106
|
+
})
|
|
1107
|
+
}
|
|
1108
|
+
}
|
|
1109
|
+
delete activeReasoning[value.item.id]
|
|
1110
|
+
}
|
|
1111
|
+
} else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
|
|
1112
|
+
const toolCall = ongoingToolCalls[value.output_index]
|
|
1113
|
+
|
|
1114
|
+
if (toolCall != null) {
|
|
1115
|
+
controller.enqueue({
|
|
1116
|
+
type: "tool-input-delta",
|
|
1117
|
+
id: toolCall.toolCallId,
|
|
1118
|
+
delta: value.delta,
|
|
1119
|
+
})
|
|
1120
|
+
}
|
|
1121
|
+
} else if (isResponseImageGenerationCallPartialImageChunk(value)) {
|
|
1122
|
+
controller.enqueue({
|
|
1123
|
+
type: "tool-result",
|
|
1124
|
+
toolCallId: value.item_id,
|
|
1125
|
+
toolName: "image_generation",
|
|
1126
|
+
result: {
|
|
1127
|
+
result: value.partial_image_b64,
|
|
1128
|
+
} satisfies z.infer<typeof imageGenerationOutputSchema>,
|
|
1129
|
+
providerExecuted: true,
|
|
1130
|
+
})
|
|
1131
|
+
} else if (isResponseCodeInterpreterCallCodeDeltaChunk(value)) {
|
|
1132
|
+
const toolCall = ongoingToolCalls[value.output_index]
|
|
1133
|
+
|
|
1134
|
+
if (toolCall != null) {
|
|
1135
|
+
controller.enqueue({
|
|
1136
|
+
type: "tool-input-delta",
|
|
1137
|
+
id: toolCall.toolCallId,
|
|
1138
|
+
// The delta is code, which is embedding in a JSON string.
|
|
1139
|
+
// To escape it, we use JSON.stringify and slice to remove the outer quotes.
|
|
1140
|
+
delta: JSON.stringify(value.delta).slice(1, -1),
|
|
1141
|
+
})
|
|
1142
|
+
}
|
|
1143
|
+
} else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
|
|
1144
|
+
const toolCall = ongoingToolCalls[value.output_index]
|
|
1145
|
+
|
|
1146
|
+
if (toolCall != null) {
|
|
1147
|
+
controller.enqueue({
|
|
1148
|
+
type: "tool-input-delta",
|
|
1149
|
+
id: toolCall.toolCallId,
|
|
1150
|
+
delta: '"}',
|
|
1151
|
+
})
|
|
1152
|
+
|
|
1153
|
+
controller.enqueue({
|
|
1154
|
+
type: "tool-input-end",
|
|
1155
|
+
id: toolCall.toolCallId,
|
|
1156
|
+
})
|
|
1157
|
+
|
|
1158
|
+
// immediately send the tool call after the input end:
|
|
1159
|
+
controller.enqueue({
|
|
1160
|
+
type: "tool-call",
|
|
1161
|
+
toolCallId: toolCall.toolCallId,
|
|
1162
|
+
toolName: "code_interpreter",
|
|
1163
|
+
input: JSON.stringify({
|
|
1164
|
+
code: value.code,
|
|
1165
|
+
containerId: toolCall.codeInterpreter!.containerId,
|
|
1166
|
+
} satisfies z.infer<typeof codeInterpreterInputSchema>),
|
|
1167
|
+
providerExecuted: true,
|
|
1168
|
+
})
|
|
1169
|
+
}
|
|
1170
|
+
} else if (isResponseCreatedChunk(value)) {
|
|
1171
|
+
responseId = value.response.id
|
|
1172
|
+
controller.enqueue({
|
|
1173
|
+
type: "response-metadata",
|
|
1174
|
+
id: value.response.id,
|
|
1175
|
+
timestamp: new Date(value.response.created_at * 1000),
|
|
1176
|
+
modelId: value.response.model,
|
|
1177
|
+
})
|
|
1178
|
+
} else if (isTextDeltaChunk(value)) {
|
|
1179
|
+
// Ensure a text-start exists, and normalize deltas to a stable id
|
|
1180
|
+
if (!currentTextId) {
|
|
1181
|
+
currentTextId = value.item_id
|
|
1182
|
+
controller.enqueue({
|
|
1183
|
+
type: "text-start",
|
|
1184
|
+
id: currentTextId,
|
|
1185
|
+
providerMetadata: {
|
|
1186
|
+
openai: { itemId: value.item_id },
|
|
1187
|
+
},
|
|
1188
|
+
})
|
|
1189
|
+
}
|
|
1190
|
+
|
|
1191
|
+
controller.enqueue({
|
|
1192
|
+
type: "text-delta",
|
|
1193
|
+
id: currentTextId,
|
|
1194
|
+
delta: value.delta,
|
|
1195
|
+
})
|
|
1196
|
+
|
|
1197
|
+
if (options.providerOptions?.openai?.logprobs && value.logprobs) {
|
|
1198
|
+
logprobs.push(value.logprobs)
|
|
1199
|
+
}
|
|
1200
|
+
} else if (isResponseReasoningSummaryPartAddedChunk(value)) {
|
|
1201
|
+
// the first reasoning start is pushed in isResponseOutputItemAddedReasoningChunk.
|
|
1202
|
+
if (value.summary_index > 0) {
|
|
1203
|
+
activeReasoning[value.item_id]?.summaryParts.push(value.summary_index)
|
|
1204
|
+
|
|
1205
|
+
controller.enqueue({
|
|
1206
|
+
type: "reasoning-start",
|
|
1207
|
+
id: `${value.item_id}:${value.summary_index}`,
|
|
1208
|
+
providerMetadata: {
|
|
1209
|
+
openai: {
|
|
1210
|
+
itemId: value.item_id,
|
|
1211
|
+
reasoningEncryptedContent: activeReasoning[value.item_id]?.encryptedContent ?? null,
|
|
1212
|
+
},
|
|
1213
|
+
},
|
|
1214
|
+
})
|
|
1215
|
+
}
|
|
1216
|
+
} else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
|
|
1217
|
+
controller.enqueue({
|
|
1218
|
+
type: "reasoning-delta",
|
|
1219
|
+
id: `${value.item_id}:${value.summary_index}`,
|
|
1220
|
+
delta: value.delta,
|
|
1221
|
+
providerMetadata: {
|
|
1222
|
+
openai: {
|
|
1223
|
+
itemId: value.item_id,
|
|
1224
|
+
},
|
|
1225
|
+
},
|
|
1226
|
+
})
|
|
1227
|
+
} else if (isResponseFinishedChunk(value)) {
|
|
1228
|
+
finishReason = mapOpenAIResponseFinishReason({
|
|
1229
|
+
finishReason: value.response.incomplete_details?.reason,
|
|
1230
|
+
hasFunctionCall,
|
|
1231
|
+
})
|
|
1232
|
+
usage.inputTokens = value.response.usage.input_tokens
|
|
1233
|
+
usage.outputTokens = value.response.usage.output_tokens
|
|
1234
|
+
usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens
|
|
1235
|
+
usage.reasoningTokens = value.response.usage.output_tokens_details?.reasoning_tokens ?? undefined
|
|
1236
|
+
usage.cachedInputTokens = value.response.usage.input_tokens_details?.cached_tokens ?? undefined
|
|
1237
|
+
if (typeof value.response.service_tier === "string") {
|
|
1238
|
+
serviceTier = value.response.service_tier
|
|
1239
|
+
}
|
|
1240
|
+
} else if (isResponseAnnotationAddedChunk(value)) {
|
|
1241
|
+
if (value.annotation.type === "url_citation") {
|
|
1242
|
+
controller.enqueue({
|
|
1243
|
+
type: "source",
|
|
1244
|
+
sourceType: "url",
|
|
1245
|
+
id: self.config.generateId?.() ?? generateId(),
|
|
1246
|
+
url: value.annotation.url,
|
|
1247
|
+
title: value.annotation.title,
|
|
1248
|
+
})
|
|
1249
|
+
} else if (value.annotation.type === "file_citation") {
|
|
1250
|
+
controller.enqueue({
|
|
1251
|
+
type: "source",
|
|
1252
|
+
sourceType: "document",
|
|
1253
|
+
id: self.config.generateId?.() ?? generateId(),
|
|
1254
|
+
mediaType: "text/plain",
|
|
1255
|
+
title: value.annotation.quote ?? value.annotation.filename ?? "Document",
|
|
1256
|
+
filename: value.annotation.filename ?? value.annotation.file_id,
|
|
1257
|
+
})
|
|
1258
|
+
}
|
|
1259
|
+
} else if (isErrorChunk(value)) {
|
|
1260
|
+
controller.enqueue({ type: "error", error: value })
|
|
1261
|
+
}
|
|
1262
|
+
},
|
|
1263
|
+
|
|
1264
|
+
flush(controller) {
|
|
1265
|
+
// Close any dangling text part
|
|
1266
|
+
if (currentTextId) {
|
|
1267
|
+
controller.enqueue({ type: "text-end", id: currentTextId })
|
|
1268
|
+
currentTextId = null
|
|
1269
|
+
}
|
|
1270
|
+
|
|
1271
|
+
const providerMetadata: SharedV2ProviderMetadata = {
|
|
1272
|
+
openai: {
|
|
1273
|
+
responseId,
|
|
1274
|
+
},
|
|
1275
|
+
}
|
|
1276
|
+
|
|
1277
|
+
if (logprobs.length > 0) {
|
|
1278
|
+
providerMetadata.openai.logprobs = logprobs
|
|
1279
|
+
}
|
|
1280
|
+
|
|
1281
|
+
if (serviceTier !== undefined) {
|
|
1282
|
+
providerMetadata.openai.serviceTier = serviceTier
|
|
1283
|
+
}
|
|
1284
|
+
|
|
1285
|
+
controller.enqueue({
|
|
1286
|
+
type: "finish",
|
|
1287
|
+
finishReason,
|
|
1288
|
+
usage,
|
|
1289
|
+
providerMetadata,
|
|
1290
|
+
})
|
|
1291
|
+
},
|
|
1292
|
+
}),
|
|
1293
|
+
),
|
|
1294
|
+
request: { body },
|
|
1295
|
+
response: { headers: responseHeaders },
|
|
1296
|
+
}
|
|
1297
|
+
}
|
|
1298
|
+
}
|
|
1299
|
+
|
|
1300
|
+
const usageSchema = z.object({
|
|
1301
|
+
input_tokens: z.number(),
|
|
1302
|
+
input_tokens_details: z.object({ cached_tokens: z.number().nullish() }).nullish(),
|
|
1303
|
+
output_tokens: z.number(),
|
|
1304
|
+
output_tokens_details: z.object({ reasoning_tokens: z.number().nullish() }).nullish(),
|
|
1305
|
+
})
|
|
1306
|
+
|
|
1307
|
+
const textDeltaChunkSchema = z.object({
|
|
1308
|
+
type: z.literal("response.output_text.delta"),
|
|
1309
|
+
item_id: z.string(),
|
|
1310
|
+
delta: z.string(),
|
|
1311
|
+
logprobs: LOGPROBS_SCHEMA.nullish(),
|
|
1312
|
+
})
|
|
1313
|
+
|
|
1314
|
+
const errorChunkSchema = z.object({
|
|
1315
|
+
type: z.literal("error"),
|
|
1316
|
+
code: z.string(),
|
|
1317
|
+
message: z.string(),
|
|
1318
|
+
param: z.string().nullish(),
|
|
1319
|
+
sequence_number: z.number(),
|
|
1320
|
+
})
|
|
1321
|
+
|
|
1322
|
+
// Terminal stream event: the response either completed normally
// ("response.completed") or stopped early ("response.incomplete", with the
// reason in `incomplete_details`). Carries final usage numbers.
const responseFinishedChunkSchema = z.object({
  type: z.enum(["response.completed", "response.incomplete"]),
  response: z.object({
    incomplete_details: z.object({ reason: z.string() }).nullish(),
    usage: usageSchema,
    service_tier: z.string().nullish(),
  }),
})

// Initial stream event announcing the response id, creation time and model.
const responseCreatedChunkSchema = z.object({
  type: z.literal("response.created"),
  response: z.object({
    id: z.string(),
    created_at: z.number(),
    model: z.string(),
    service_tier: z.string().nullish(),
  }),
})
// Event announcing that a new output item (message, reasoning, or one of the
// tool-call kinds) was appended to the response at `output_index`. The item
// payload is discriminated on its `type` tag.
const responseOutputItemAddedSchema = z.object({
  type: z.literal("response.output_item.added"),
  output_index: z.number(),
  item: z.discriminatedUnion("type", [
    z.object({
      type: z.literal("message"),
      id: z.string(),
    }),
    z.object({
      type: z.literal("reasoning"),
      id: z.string(),
      // Opaque encrypted reasoning payload; may be absent.
      encrypted_content: z.string().nullish(),
    }),
    z.object({
      type: z.literal("function_call"),
      id: z.string(),
      call_id: z.string(),
      name: z.string(),
      // JSON-encoded arguments as a raw string (not parsed here).
      arguments: z.string(),
    }),
    z.object({
      type: z.literal("web_search_call"),
      id: z.string(),
      status: z.string(),
      action: z
        .object({
          type: z.literal("search"),
          query: z.string().optional(),
        })
        .nullish(),
    }),
    z.object({
      type: z.literal("computer_call"),
      id: z.string(),
      status: z.string(),
    }),
    z.object({
      type: z.literal("file_search_call"),
      id: z.string(),
    }),
    z.object({
      type: z.literal("image_generation_call"),
      id: z.string(),
    }),
    z.object({
      type: z.literal("code_interpreter_call"),
      id: z.string(),
      container_id: z.string(),
      // `code` is null until/unless the interpreter has code to report.
      code: z.string().nullable(),
      outputs: z
        .array(
          z.discriminatedUnion("type", [
            z.object({ type: z.literal("logs"), logs: z.string() }),
            z.object({ type: z.literal("image"), url: z.string() }),
          ]),
        )
        .nullable(),
      status: z.string(),
    }),
  ]),
})
// Event marking an output item as finished. Unlike the "added" event, several
// members reuse shared item schemas declared elsewhere in this file
// (codeInterpreterCallItem, imageGenerationCallItem, webSearchCallItem,
// fileSearchCallItem, localShellCallItem), and call statuses narrow to the
// literal "completed".
const responseOutputItemDoneSchema = z.object({
  type: z.literal("response.output_item.done"),
  output_index: z.number(),
  item: z.discriminatedUnion("type", [
    z.object({
      type: z.literal("message"),
      id: z.string(),
    }),
    z.object({
      type: z.literal("reasoning"),
      id: z.string(),
      // Opaque encrypted reasoning payload; may be absent.
      encrypted_content: z.string().nullish(),
    }),
    z.object({
      type: z.literal("function_call"),
      id: z.string(),
      call_id: z.string(),
      name: z.string(),
      arguments: z.string(),
      status: z.literal("completed"),
    }),
    codeInterpreterCallItem,
    imageGenerationCallItem,
    webSearchCallItem,
    fileSearchCallItem,
    localShellCallItem,
    z.object({
      type: z.literal("computer_call"),
      id: z.string(),
      status: z.literal("completed"),
    }),
  ]),
})
// Incremental JSON-argument text for an in-progress function call item.
const responseFunctionCallArgumentsDeltaSchema = z.object({
  type: z.literal("response.function_call_arguments.delta"),
  item_id: z.string(),
  output_index: z.number(),
  delta: z.string(),
})

// Progressive image payload from an image generation call (base64-encoded).
const responseImageGenerationCallPartialImageSchema = z.object({
  type: z.literal("response.image_generation_call.partial_image"),
  item_id: z.string(),
  output_index: z.number(),
  partial_image_b64: z.string(),
})

// Incremental code text for a code interpreter call.
const responseCodeInterpreterCallCodeDeltaSchema = z.object({
  type: z.literal("response.code_interpreter_call_code.delta"),
  item_id: z.string(),
  output_index: z.number(),
  delta: z.string(),
})

// Final, complete code text for a code interpreter call.
const responseCodeInterpreterCallCodeDoneSchema = z.object({
  type: z.literal("response.code_interpreter_call_code.done"),
  item_id: z.string(),
  output_index: z.number(),
  code: z.string(),
})
// Citation annotation (URL or file) attached to already-streamed output text.
const responseAnnotationAddedSchema = z.object({
  type: z.literal("response.output_text.annotation.added"),
  annotation: z.discriminatedUnion("type", [
    z.object({
      type: z.literal("url_citation"),
      url: z.string(),
      title: z.string(),
    }),
    z.object({
      type: z.literal("file_citation"),
      file_id: z.string(),
      filename: z.string().nullish(),
      index: z.number().nullish(),
      start_index: z.number().nullish(),
      end_index: z.number().nullish(),
      quote: z.string().nullish(),
    }),
  ]),
})

// A new reasoning-summary part was opened for the reasoning item `item_id`.
const responseReasoningSummaryPartAddedSchema = z.object({
  type: z.literal("response.reasoning_summary_part.added"),
  item_id: z.string(),
  summary_index: z.number(),
})

// Incremental reasoning-summary text for a given (item_id, summary_index).
const responseReasoningSummaryTextDeltaSchema = z.object({
  type: z.literal("response.reasoning_summary_text.delta"),
  item_id: z.string(),
  summary_index: z.number(),
  delta: z.string(),
})
// Union of every stream chunk this provider understands. The final loose
// object acts as a catch-all so unknown chunk types still parse (and can be
// ignored) instead of failing the whole stream.
const openaiResponsesChunkSchema = z.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseImageGenerationCallPartialImageSchema,
  responseCodeInterpreterCallCodeDeltaSchema,
  responseCodeInterpreterCallCodeDoneSchema,
  responseAnnotationAddedSchema,
  responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  z.object({ type: z.string() }).loose(), // fallback for unknown chunks
])
type ExtractByType<T, K extends T extends { type: infer U } ? U : never> = T extends { type: K } ? T : never
|
|
1516
|
+
|
|
1517
|
+
function isTextDeltaChunk(
|
|
1518
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1519
|
+
): chunk is z.infer<typeof textDeltaChunkSchema> {
|
|
1520
|
+
return chunk.type === "response.output_text.delta"
|
|
1521
|
+
}
|
|
1522
|
+
|
|
1523
|
+
function isResponseOutputItemDoneChunk(
|
|
1524
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1525
|
+
): chunk is z.infer<typeof responseOutputItemDoneSchema> {
|
|
1526
|
+
return chunk.type === "response.output_item.done"
|
|
1527
|
+
}
|
|
1528
|
+
|
|
1529
|
+
function isResponseOutputItemDoneReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
|
|
1530
|
+
typeof responseOutputItemDoneSchema
|
|
1531
|
+
> & {
|
|
1532
|
+
item: ExtractByType<z.infer<typeof responseOutputItemDoneSchema>["item"], "reasoning">
|
|
1533
|
+
} {
|
|
1534
|
+
return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning"
|
|
1535
|
+
}
|
|
1536
|
+
|
|
1537
|
+
function isResponseFinishedChunk(
|
|
1538
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1539
|
+
): chunk is z.infer<typeof responseFinishedChunkSchema> {
|
|
1540
|
+
return chunk.type === "response.completed" || chunk.type === "response.incomplete"
|
|
1541
|
+
}
|
|
1542
|
+
|
|
1543
|
+
function isResponseCreatedChunk(
|
|
1544
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1545
|
+
): chunk is z.infer<typeof responseCreatedChunkSchema> {
|
|
1546
|
+
return chunk.type === "response.created"
|
|
1547
|
+
}
|
|
1548
|
+
|
|
1549
|
+
function isResponseFunctionCallArgumentsDeltaChunk(
|
|
1550
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1551
|
+
): chunk is z.infer<typeof responseFunctionCallArgumentsDeltaSchema> {
|
|
1552
|
+
return chunk.type === "response.function_call_arguments.delta"
|
|
1553
|
+
}
|
|
1554
|
+
function isResponseImageGenerationCallPartialImageChunk(
|
|
1555
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1556
|
+
): chunk is z.infer<typeof responseImageGenerationCallPartialImageSchema> {
|
|
1557
|
+
return chunk.type === "response.image_generation_call.partial_image"
|
|
1558
|
+
}
|
|
1559
|
+
|
|
1560
|
+
function isResponseCodeInterpreterCallCodeDeltaChunk(
|
|
1561
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1562
|
+
): chunk is z.infer<typeof responseCodeInterpreterCallCodeDeltaSchema> {
|
|
1563
|
+
return chunk.type === "response.code_interpreter_call_code.delta"
|
|
1564
|
+
}
|
|
1565
|
+
|
|
1566
|
+
function isResponseCodeInterpreterCallCodeDoneChunk(
|
|
1567
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1568
|
+
): chunk is z.infer<typeof responseCodeInterpreterCallCodeDoneSchema> {
|
|
1569
|
+
return chunk.type === "response.code_interpreter_call_code.done"
|
|
1570
|
+
}
|
|
1571
|
+
|
|
1572
|
+
function isResponseOutputItemAddedChunk(
|
|
1573
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1574
|
+
): chunk is z.infer<typeof responseOutputItemAddedSchema> {
|
|
1575
|
+
return chunk.type === "response.output_item.added"
|
|
1576
|
+
}
|
|
1577
|
+
|
|
1578
|
+
function isResponseOutputItemAddedReasoningChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<
|
|
1579
|
+
typeof responseOutputItemAddedSchema
|
|
1580
|
+
> & {
|
|
1581
|
+
item: ExtractByType<z.infer<typeof responseOutputItemAddedSchema>["item"], "reasoning">
|
|
1582
|
+
} {
|
|
1583
|
+
return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning"
|
|
1584
|
+
}
|
|
1585
|
+
|
|
1586
|
+
function isResponseAnnotationAddedChunk(
|
|
1587
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1588
|
+
): chunk is z.infer<typeof responseAnnotationAddedSchema> {
|
|
1589
|
+
return chunk.type === "response.output_text.annotation.added"
|
|
1590
|
+
}
|
|
1591
|
+
|
|
1592
|
+
function isResponseReasoningSummaryPartAddedChunk(
|
|
1593
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1594
|
+
): chunk is z.infer<typeof responseReasoningSummaryPartAddedSchema> {
|
|
1595
|
+
return chunk.type === "response.reasoning_summary_part.added"
|
|
1596
|
+
}
|
|
1597
|
+
|
|
1598
|
+
function isResponseReasoningSummaryTextDeltaChunk(
|
|
1599
|
+
chunk: z.infer<typeof openaiResponsesChunkSchema>,
|
|
1600
|
+
): chunk is z.infer<typeof responseReasoningSummaryTextDeltaSchema> {
|
|
1601
|
+
return chunk.type === "response.reasoning_summary_text.delta"
|
|
1602
|
+
}
|
|
1603
|
+
|
|
1604
|
+
function isErrorChunk(chunk: z.infer<typeof openaiResponsesChunkSchema>): chunk is z.infer<typeof errorChunkSchema> {
|
|
1605
|
+
return chunk.type === "error"
|
|
1606
|
+
}
|
|
1607
|
+
|
|
1608
|
+
// Per-model behavior flags used when building Responses API requests.
type ResponsesModelConfig = {
  // True for reasoning families (o-series, gpt-5 non-chat, codex, computer-use).
  isReasoningModel: boolean
  // How system messages are delivered: dropped entirely, sent with the
  // "system" role, or sent with the "developer" role.
  systemMessageMode: "remove" | "system" | "developer"
  // NOTE(review): never set to true in this chunk — confirm the consumer's
  // semantics (presumably forces truncation: "auto" on requests).
  requiredAutoTruncation: boolean
  // Whether the model accepts serviceTier: "flex".
  supportsFlexProcessing: boolean
  // Whether the model accepts serviceTier: "priority".
  supportsPriorityProcessing: boolean
}
function getResponsesModelConfig(modelId: string): ResponsesModelConfig {
|
|
1617
|
+
const supportsFlexProcessing =
|
|
1618
|
+
modelId.startsWith("o3") ||
|
|
1619
|
+
modelId.startsWith("o4-mini") ||
|
|
1620
|
+
(modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"))
|
|
1621
|
+
const supportsPriorityProcessing =
|
|
1622
|
+
modelId.startsWith("gpt-4") ||
|
|
1623
|
+
modelId.startsWith("gpt-5-mini") ||
|
|
1624
|
+
(modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat")) ||
|
|
1625
|
+
modelId.startsWith("o3") ||
|
|
1626
|
+
modelId.startsWith("o4-mini")
|
|
1627
|
+
const defaults = {
|
|
1628
|
+
requiredAutoTruncation: false,
|
|
1629
|
+
systemMessageMode: "system" as const,
|
|
1630
|
+
supportsFlexProcessing,
|
|
1631
|
+
supportsPriorityProcessing,
|
|
1632
|
+
}
|
|
1633
|
+
|
|
1634
|
+
// gpt-5-chat models are non-reasoning
|
|
1635
|
+
if (modelId.startsWith("gpt-5-chat")) {
|
|
1636
|
+
return {
|
|
1637
|
+
...defaults,
|
|
1638
|
+
isReasoningModel: false,
|
|
1639
|
+
}
|
|
1640
|
+
}
|
|
1641
|
+
|
|
1642
|
+
// o series reasoning models:
|
|
1643
|
+
if (
|
|
1644
|
+
modelId.startsWith("o") ||
|
|
1645
|
+
modelId.startsWith("gpt-5") ||
|
|
1646
|
+
modelId.startsWith("codex-") ||
|
|
1647
|
+
modelId.startsWith("computer-use")
|
|
1648
|
+
) {
|
|
1649
|
+
if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
|
|
1650
|
+
return {
|
|
1651
|
+
...defaults,
|
|
1652
|
+
isReasoningModel: true,
|
|
1653
|
+
systemMessageMode: "remove",
|
|
1654
|
+
}
|
|
1655
|
+
}
|
|
1656
|
+
|
|
1657
|
+
return {
|
|
1658
|
+
...defaults,
|
|
1659
|
+
isReasoningModel: true,
|
|
1660
|
+
systemMessageMode: "developer",
|
|
1661
|
+
}
|
|
1662
|
+
}
|
|
1663
|
+
|
|
1664
|
+
// gpt models:
|
|
1665
|
+
return {
|
|
1666
|
+
...defaults,
|
|
1667
|
+
isReasoningModel: false,
|
|
1668
|
+
}
|
|
1669
|
+
}
|
|
1670
|
+
|
|
1671
|
+
// TODO AI SDK 6: use optional here instead of nullish
// Provider-specific options accepted for the Responses API (passed through
// under the provider's options key). Most fields are optional passthroughs
// to the OpenAI request body.
const openaiResponsesProviderOptionsSchema = z.object({
  // Extra response data to request (encrypted reasoning content, file-search
  // results, output-text logprobs).
  include: z
    .array(z.enum(["reasoning.encrypted_content", "file_search_call.results", "message.output_text.logprobs"]))
    .nullish(),
  instructions: z.string().nullish(),

  /**
   * Return the log probabilities of the tokens.
   *
   * Setting to true will return the log probabilities of the tokens that
   * were generated.
   *
   * Setting to a number will return the log probabilities of the top n
   * tokens that were generated (capped at TOP_LOGPROBS_MAX).
   *
   * @see https://platform.openai.com/docs/api-reference/responses/create
   * @see https://cookbook.openai.com/examples/using_logprobs
   */
  logprobs: z.union([z.boolean(), z.number().min(1).max(TOP_LOGPROBS_MAX)]).optional(),

  /**
   * The maximum number of total calls to built-in tools that can be processed in a response.
   * This maximum number applies across all built-in tool calls, not per individual tool.
   * Any further attempts to call a tool by the model will be ignored.
   */
  maxToolCalls: z.number().nullish(),

  metadata: z.any().nullish(),
  parallelToolCalls: z.boolean().nullish(),
  // Continue from a prior response (server-side conversation state).
  previousResponseId: z.string().nullish(),
  promptCacheKey: z.string().nullish(),
  reasoningEffort: z.string().nullish(),
  reasoningSummary: z.string().nullish(),
  safetyIdentifier: z.string().nullish(),
  serviceTier: z.enum(["auto", "flex", "priority"]).nullish(),
  store: z.boolean().nullish(),
  strictJsonSchema: z.boolean().nullish(),
  textVerbosity: z.enum(["low", "medium", "high"]).nullish(),
  user: z.string().nullish(),
})

// Public type for consumers supplying provider options.
export type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>