@otto-assistant/bridge 0.4.92
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin.js +2 -0
- package/dist/agent-model.e2e.test.js +755 -0
- package/dist/ai-tool-to-genai.js +233 -0
- package/dist/ai-tool-to-genai.test.js +267 -0
- package/dist/ai-tool.js +6 -0
- package/dist/anthropic-auth-plugin.js +728 -0
- package/dist/anthropic-auth-plugin.test.js +125 -0
- package/dist/anthropic-auth-state.js +231 -0
- package/dist/bin.js +90 -0
- package/dist/channel-management.js +227 -0
- package/dist/cli-parsing.test.js +137 -0
- package/dist/cli-send-thread.e2e.test.js +356 -0
- package/dist/cli.js +3276 -0
- package/dist/commands/abort.js +65 -0
- package/dist/commands/action-buttons.js +245 -0
- package/dist/commands/add-project.js +113 -0
- package/dist/commands/agent.js +335 -0
- package/dist/commands/ask-question.js +274 -0
- package/dist/commands/btw.js +116 -0
- package/dist/commands/compact.js +120 -0
- package/dist/commands/context-usage.js +140 -0
- package/dist/commands/create-new-project.js +130 -0
- package/dist/commands/diff.js +63 -0
- package/dist/commands/file-upload.js +275 -0
- package/dist/commands/fork.js +220 -0
- package/dist/commands/gemini-apikey.js +70 -0
- package/dist/commands/login.js +885 -0
- package/dist/commands/mcp.js +239 -0
- package/dist/commands/memory-snapshot.js +24 -0
- package/dist/commands/mention-mode.js +44 -0
- package/dist/commands/merge-worktree.js +159 -0
- package/dist/commands/model-variant.js +364 -0
- package/dist/commands/model.js +776 -0
- package/dist/commands/new-worktree.js +366 -0
- package/dist/commands/paginated-select.js +57 -0
- package/dist/commands/permissions.js +274 -0
- package/dist/commands/queue.js +206 -0
- package/dist/commands/remove-project.js +115 -0
- package/dist/commands/restart-opencode-server.js +127 -0
- package/dist/commands/resume.js +149 -0
- package/dist/commands/run-command.js +79 -0
- package/dist/commands/screenshare.js +303 -0
- package/dist/commands/screenshare.test.js +20 -0
- package/dist/commands/session-id.js +78 -0
- package/dist/commands/session.js +176 -0
- package/dist/commands/share.js +80 -0
- package/dist/commands/tasks.js +205 -0
- package/dist/commands/types.js +2 -0
- package/dist/commands/undo-redo.js +305 -0
- package/dist/commands/unset-model.js +138 -0
- package/dist/commands/upgrade.js +42 -0
- package/dist/commands/user-command.js +155 -0
- package/dist/commands/verbosity.js +125 -0
- package/dist/commands/worktree-settings.js +43 -0
- package/dist/commands/worktrees.js +410 -0
- package/dist/condense-memory.js +33 -0
- package/dist/config.js +94 -0
- package/dist/context-awareness-plugin.js +363 -0
- package/dist/context-awareness-plugin.test.js +124 -0
- package/dist/critique-utils.js +95 -0
- package/dist/database.js +1310 -0
- package/dist/db.js +251 -0
- package/dist/db.test.js +138 -0
- package/dist/debounce-timeout.js +28 -0
- package/dist/debounced-process-flush.js +77 -0
- package/dist/discord-bot.js +1008 -0
- package/dist/discord-command-registration.js +524 -0
- package/dist/discord-urls.js +81 -0
- package/dist/discord-utils.js +591 -0
- package/dist/discord-utils.test.js +134 -0
- package/dist/errors.js +157 -0
- package/dist/escape-backticks.test.js +429 -0
- package/dist/event-stream-real-capture.e2e.test.js +533 -0
- package/dist/eventsource-parser.test.js +327 -0
- package/dist/exec-async.js +26 -0
- package/dist/external-opencode-sync.js +480 -0
- package/dist/format-tables.js +302 -0
- package/dist/format-tables.test.js +308 -0
- package/dist/forum-sync/config.js +79 -0
- package/dist/forum-sync/discord-operations.js +154 -0
- package/dist/forum-sync/index.js +5 -0
- package/dist/forum-sync/markdown.js +113 -0
- package/dist/forum-sync/sync-to-discord.js +417 -0
- package/dist/forum-sync/sync-to-files.js +190 -0
- package/dist/forum-sync/types.js +53 -0
- package/dist/forum-sync/watchers.js +307 -0
- package/dist/gateway-proxy-reconnect.e2e.test.js +394 -0
- package/dist/gateway-proxy.e2e.test.js +483 -0
- package/dist/genai-worker-wrapper.js +111 -0
- package/dist/genai-worker.js +311 -0
- package/dist/genai.js +232 -0
- package/dist/generated/browser.js +17 -0
- package/dist/generated/client.js +37 -0
- package/dist/generated/commonInputTypes.js +10 -0
- package/dist/generated/enums.js +52 -0
- package/dist/generated/internal/class.js +49 -0
- package/dist/generated/internal/prismaNamespace.js +253 -0
- package/dist/generated/internal/prismaNamespaceBrowser.js +223 -0
- package/dist/generated/models/bot_api_keys.js +1 -0
- package/dist/generated/models/bot_tokens.js +1 -0
- package/dist/generated/models/channel_agents.js +1 -0
- package/dist/generated/models/channel_directories.js +1 -0
- package/dist/generated/models/channel_mention_mode.js +1 -0
- package/dist/generated/models/channel_models.js +1 -0
- package/dist/generated/models/channel_verbosity.js +1 -0
- package/dist/generated/models/channel_worktrees.js +1 -0
- package/dist/generated/models/forum_sync_configs.js +1 -0
- package/dist/generated/models/global_models.js +1 -0
- package/dist/generated/models/ipc_requests.js +1 -0
- package/dist/generated/models/part_messages.js +1 -0
- package/dist/generated/models/scheduled_tasks.js +1 -0
- package/dist/generated/models/session_agents.js +1 -0
- package/dist/generated/models/session_events.js +1 -0
- package/dist/generated/models/session_models.js +1 -0
- package/dist/generated/models/session_start_sources.js +1 -0
- package/dist/generated/models/thread_sessions.js +1 -0
- package/dist/generated/models/thread_worktrees.js +1 -0
- package/dist/generated/models.js +1 -0
- package/dist/heap-monitor.js +122 -0
- package/dist/hrana-server.js +263 -0
- package/dist/hrana-server.test.js +370 -0
- package/dist/html-actions.js +123 -0
- package/dist/html-actions.test.js +70 -0
- package/dist/html-components.js +117 -0
- package/dist/html-components.test.js +34 -0
- package/dist/image-optimizer-plugin.js +153 -0
- package/dist/image-utils.js +112 -0
- package/dist/interaction-handler.js +397 -0
- package/dist/ipc-polling.js +252 -0
- package/dist/ipc-tools-plugin.js +193 -0
- package/dist/kimaki-digital-twin.e2e.test.js +161 -0
- package/dist/kimaki-opencode-plugin-loading.e2e.test.js +87 -0
- package/dist/kimaki-opencode-plugin.js +17 -0
- package/dist/kimaki-opencode-plugin.test.js +98 -0
- package/dist/limit-heading-depth.js +25 -0
- package/dist/limit-heading-depth.test.js +105 -0
- package/dist/logger.js +165 -0
- package/dist/markdown.js +342 -0
- package/dist/markdown.test.js +257 -0
- package/dist/message-finish-field.e2e.test.js +165 -0
- package/dist/message-formatting.js +413 -0
- package/dist/message-formatting.test.js +73 -0
- package/dist/message-preprocessing.js +330 -0
- package/dist/onboarding-tutorial.js +172 -0
- package/dist/onboarding-welcome.js +37 -0
- package/dist/openai-realtime.js +224 -0
- package/dist/opencode-command-detection.js +65 -0
- package/dist/opencode-command-detection.test.js +240 -0
- package/dist/opencode-command.js +129 -0
- package/dist/opencode-command.test.js +48 -0
- package/dist/opencode-interrupt-plugin.js +361 -0
- package/dist/opencode-interrupt-plugin.test.js +458 -0
- package/dist/opencode.js +861 -0
- package/dist/otto/branding.js +22 -0
- package/dist/otto/index.js +21 -0
- package/dist/parse-permission-rules.test.js +117 -0
- package/dist/patch-text-parser.js +97 -0
- package/dist/plugin-logger.js +59 -0
- package/dist/privacy-sanitizer.js +105 -0
- package/dist/queue-advanced-abort.e2e.test.js +293 -0
- package/dist/queue-advanced-action-buttons.e2e.test.js +206 -0
- package/dist/queue-advanced-e2e-setup.js +786 -0
- package/dist/queue-advanced-footer.e2e.test.js +472 -0
- package/dist/queue-advanced-model-switch.e2e.test.js +299 -0
- package/dist/queue-advanced-permissions-typing.e2e.test.js +180 -0
- package/dist/queue-advanced-question.e2e.test.js +261 -0
- package/dist/queue-advanced-typing-interrupt.e2e.test.js +114 -0
- package/dist/queue-advanced-typing.e2e.test.js +153 -0
- package/dist/queue-drain-after-interactive-ui.e2e.test.js +119 -0
- package/dist/queue-interrupt-drain.e2e.test.js +135 -0
- package/dist/queue-question-select-drain.e2e.test.js +120 -0
- package/dist/runtime-idle-sweeper.js +52 -0
- package/dist/runtime-lifecycle.e2e.test.js +508 -0
- package/dist/sentry.js +23 -0
- package/dist/session-handler/agent-utils.js +67 -0
- package/dist/session-handler/event-stream-state.js +420 -0
- package/dist/session-handler/event-stream-state.test.js +563 -0
- package/dist/session-handler/model-utils.js +124 -0
- package/dist/session-handler/opencode-session-event-log.js +94 -0
- package/dist/session-handler/thread-runtime-state.js +104 -0
- package/dist/session-handler/thread-session-runtime.js +3258 -0
- package/dist/session-handler.js +9 -0
- package/dist/session-search.js +100 -0
- package/dist/session-search.test.js +40 -0
- package/dist/session-title-rename.test.js +80 -0
- package/dist/startup-service.js +153 -0
- package/dist/startup-time.e2e.test.js +296 -0
- package/dist/store.js +17 -0
- package/dist/system-message.js +613 -0
- package/dist/system-message.test.js +602 -0
- package/dist/task-runner.js +295 -0
- package/dist/task-schedule.js +209 -0
- package/dist/task-schedule.test.js +71 -0
- package/dist/test-utils.js +299 -0
- package/dist/thinking-utils.js +35 -0
- package/dist/thread-message-queue.e2e.test.js +999 -0
- package/dist/tools.js +357 -0
- package/dist/undo-redo.e2e.test.js +161 -0
- package/dist/unnest-code-blocks.js +146 -0
- package/dist/unnest-code-blocks.test.js +673 -0
- package/dist/upgrade.js +114 -0
- package/dist/utils.js +144 -0
- package/dist/voice-attachment.js +34 -0
- package/dist/voice-handler.js +646 -0
- package/dist/voice-message.e2e.test.js +1021 -0
- package/dist/voice.js +447 -0
- package/dist/voice.test.js +235 -0
- package/dist/wait-session.js +94 -0
- package/dist/websockify.js +69 -0
- package/dist/worker-types.js +4 -0
- package/dist/worktree-lifecycle.e2e.test.js +308 -0
- package/dist/worktree-utils.js +3 -0
- package/dist/worktrees.js +929 -0
- package/dist/worktrees.test.js +189 -0
- package/dist/xml.js +92 -0
- package/dist/xml.test.js +32 -0
- package/package.json +98 -0
- package/schema.prisma +295 -0
- package/skills/batch/SKILL.md +87 -0
- package/skills/critique/SKILL.md +112 -0
- package/skills/egaki/SKILL.md +100 -0
- package/skills/errore/SKILL.md +647 -0
- package/skills/event-sourcing-state/SKILL.md +252 -0
- package/skills/gitchamber/SKILL.md +93 -0
- package/skills/goke/SKILL.md +644 -0
- package/skills/jitter/EDITOR.md +219 -0
- package/skills/jitter/EXPORT-INTERNALS.md +309 -0
- package/skills/jitter/SKILL.md +158 -0
- package/skills/jitter/jitter-clipboard.json +1042 -0
- package/skills/jitter/package.json +14 -0
- package/skills/jitter/tsconfig.json +15 -0
- package/skills/jitter/utils/actions.ts +212 -0
- package/skills/jitter/utils/export.ts +114 -0
- package/skills/jitter/utils/index.ts +141 -0
- package/skills/jitter/utils/snapshot.ts +154 -0
- package/skills/jitter/utils/traverse.ts +246 -0
- package/skills/jitter/utils/types.ts +279 -0
- package/skills/jitter/utils/wait.ts +133 -0
- package/skills/lintcn/SKILL.md +873 -0
- package/skills/new-skill/SKILL.md +211 -0
- package/skills/npm-package/SKILL.md +239 -0
- package/skills/playwriter/SKILL.md +35 -0
- package/skills/proxyman/SKILL.md +215 -0
- package/skills/security-review/SKILL.md +208 -0
- package/skills/simplify/SKILL.md +58 -0
- package/skills/spiceflow/SKILL.md +14 -0
- package/skills/termcast/SKILL.md +945 -0
- package/skills/tuistory/SKILL.md +250 -0
- package/skills/usecomputer/SKILL.md +264 -0
- package/skills/x-articles/SKILL.md +554 -0
- package/skills/zele/SKILL.md +112 -0
- package/skills/zustand-centralized-state/SKILL.md +1004 -0
- package/src/agent-model.e2e.test.ts +976 -0
- package/src/ai-tool-to-genai.test.ts +296 -0
- package/src/ai-tool-to-genai.ts +283 -0
- package/src/ai-tool.ts +39 -0
- package/src/anthropic-auth-plugin.test.ts +159 -0
- package/src/anthropic-auth-plugin.ts +861 -0
- package/src/anthropic-auth-state.ts +282 -0
- package/src/bin.ts +111 -0
- package/src/channel-management.ts +334 -0
- package/src/cli-parsing.test.ts +195 -0
- package/src/cli-send-thread.e2e.test.ts +464 -0
- package/src/cli.ts +4581 -0
- package/src/commands/abort.ts +89 -0
- package/src/commands/action-buttons.ts +364 -0
- package/src/commands/add-project.ts +149 -0
- package/src/commands/agent.ts +473 -0
- package/src/commands/ask-question.ts +390 -0
- package/src/commands/btw.ts +164 -0
- package/src/commands/compact.ts +157 -0
- package/src/commands/context-usage.ts +199 -0
- package/src/commands/create-new-project.ts +190 -0
- package/src/commands/diff.ts +91 -0
- package/src/commands/file-upload.ts +389 -0
- package/src/commands/fork.ts +321 -0
- package/src/commands/gemini-apikey.ts +104 -0
- package/src/commands/login.ts +1173 -0
- package/src/commands/mcp.ts +307 -0
- package/src/commands/memory-snapshot.ts +30 -0
- package/src/commands/mention-mode.ts +68 -0
- package/src/commands/merge-worktree.ts +223 -0
- package/src/commands/model-variant.ts +483 -0
- package/src/commands/model.ts +1053 -0
- package/src/commands/new-worktree.ts +510 -0
- package/src/commands/paginated-select.ts +81 -0
- package/src/commands/permissions.ts +397 -0
- package/src/commands/queue.ts +271 -0
- package/src/commands/remove-project.ts +155 -0
- package/src/commands/restart-opencode-server.ts +162 -0
- package/src/commands/resume.ts +230 -0
- package/src/commands/run-command.ts +123 -0
- package/src/commands/screenshare.test.ts +30 -0
- package/src/commands/screenshare.ts +366 -0
- package/src/commands/session-id.ts +109 -0
- package/src/commands/session.ts +227 -0
- package/src/commands/share.ts +106 -0
- package/src/commands/tasks.ts +293 -0
- package/src/commands/types.ts +25 -0
- package/src/commands/undo-redo.ts +386 -0
- package/src/commands/unset-model.ts +173 -0
- package/src/commands/upgrade.ts +52 -0
- package/src/commands/user-command.ts +198 -0
- package/src/commands/verbosity.ts +173 -0
- package/src/commands/worktree-settings.ts +70 -0
- package/src/commands/worktrees.ts +552 -0
- package/src/condense-memory.ts +36 -0
- package/src/config.ts +111 -0
- package/src/context-awareness-plugin.test.ts +142 -0
- package/src/context-awareness-plugin.ts +510 -0
- package/src/critique-utils.ts +139 -0
- package/src/database.ts +1876 -0
- package/src/db.test.ts +162 -0
- package/src/db.ts +286 -0
- package/src/debounce-timeout.ts +43 -0
- package/src/debounced-process-flush.ts +104 -0
- package/src/discord-bot.ts +1330 -0
- package/src/discord-command-registration.ts +693 -0
- package/src/discord-urls.ts +88 -0
- package/src/discord-utils.test.ts +153 -0
- package/src/discord-utils.ts +800 -0
- package/src/errors.ts +201 -0
- package/src/escape-backticks.test.ts +469 -0
- package/src/event-stream-real-capture.e2e.test.ts +692 -0
- package/src/eventsource-parser.test.ts +351 -0
- package/src/exec-async.ts +35 -0
- package/src/external-opencode-sync.ts +685 -0
- package/src/format-tables.test.ts +335 -0
- package/src/format-tables.ts +445 -0
- package/src/forum-sync/config.ts +92 -0
- package/src/forum-sync/discord-operations.ts +241 -0
- package/src/forum-sync/index.ts +9 -0
- package/src/forum-sync/markdown.ts +172 -0
- package/src/forum-sync/sync-to-discord.ts +595 -0
- package/src/forum-sync/sync-to-files.ts +294 -0
- package/src/forum-sync/types.ts +175 -0
- package/src/forum-sync/watchers.ts +454 -0
- package/src/gateway-proxy-reconnect.e2e.test.ts +523 -0
- package/src/gateway-proxy.e2e.test.ts +640 -0
- package/src/genai-worker-wrapper.ts +164 -0
- package/src/genai-worker.ts +386 -0
- package/src/genai.ts +321 -0
- package/src/generated/browser.ts +114 -0
- package/src/generated/client.ts +138 -0
- package/src/generated/commonInputTypes.ts +736 -0
- package/src/generated/enums.ts +88 -0
- package/src/generated/internal/class.ts +384 -0
- package/src/generated/internal/prismaNamespace.ts +2386 -0
- package/src/generated/internal/prismaNamespaceBrowser.ts +326 -0
- package/src/generated/models/bot_api_keys.ts +1288 -0
- package/src/generated/models/bot_tokens.ts +1656 -0
- package/src/generated/models/channel_agents.ts +1256 -0
- package/src/generated/models/channel_directories.ts +1859 -0
- package/src/generated/models/channel_mention_mode.ts +1300 -0
- package/src/generated/models/channel_models.ts +1288 -0
- package/src/generated/models/channel_verbosity.ts +1228 -0
- package/src/generated/models/channel_worktrees.ts +1300 -0
- package/src/generated/models/forum_sync_configs.ts +1452 -0
- package/src/generated/models/global_models.ts +1288 -0
- package/src/generated/models/ipc_requests.ts +1485 -0
- package/src/generated/models/part_messages.ts +1302 -0
- package/src/generated/models/scheduled_tasks.ts +2320 -0
- package/src/generated/models/session_agents.ts +1086 -0
- package/src/generated/models/session_events.ts +1439 -0
- package/src/generated/models/session_models.ts +1114 -0
- package/src/generated/models/session_start_sources.ts +1408 -0
- package/src/generated/models/thread_sessions.ts +1781 -0
- package/src/generated/models/thread_worktrees.ts +1356 -0
- package/src/generated/models.ts +30 -0
- package/src/heap-monitor.ts +152 -0
- package/src/hrana-server.test.ts +434 -0
- package/src/hrana-server.ts +314 -0
- package/src/html-actions.test.ts +87 -0
- package/src/html-actions.ts +174 -0
- package/src/html-components.test.ts +38 -0
- package/src/html-components.ts +181 -0
- package/src/image-optimizer-plugin.ts +194 -0
- package/src/image-utils.ts +149 -0
- package/src/interaction-handler.ts +576 -0
- package/src/ipc-polling.ts +326 -0
- package/src/ipc-tools-plugin.ts +236 -0
- package/src/kimaki-digital-twin.e2e.test.ts +199 -0
- package/src/kimaki-opencode-plugin-loading.e2e.test.ts +109 -0
- package/src/kimaki-opencode-plugin.test.ts +108 -0
- package/src/kimaki-opencode-plugin.ts +18 -0
- package/src/limit-heading-depth.test.ts +116 -0
- package/src/limit-heading-depth.ts +26 -0
- package/src/logger.ts +208 -0
- package/src/markdown.test.ts +308 -0
- package/src/markdown.ts +410 -0
- package/src/message-finish-field.e2e.test.ts +192 -0
- package/src/message-formatting.test.ts +81 -0
- package/src/message-formatting.ts +533 -0
- package/src/message-preprocessing.ts +455 -0
- package/src/onboarding-tutorial.ts +176 -0
- package/src/onboarding-welcome.ts +49 -0
- package/src/openai-realtime.ts +358 -0
- package/src/opencode-command-detection.test.ts +307 -0
- package/src/opencode-command-detection.ts +76 -0
- package/src/opencode-command.test.ts +70 -0
- package/src/opencode-command.ts +188 -0
- package/src/opencode-interrupt-plugin.test.ts +677 -0
- package/src/opencode-interrupt-plugin.ts +477 -0
- package/src/opencode.ts +1110 -0
- package/src/otto/branding.ts +23 -0
- package/src/otto/index.ts +22 -0
- package/src/parse-permission-rules.test.ts +127 -0
- package/src/patch-text-parser.ts +107 -0
- package/src/plugin-logger.ts +68 -0
- package/src/privacy-sanitizer.ts +142 -0
- package/src/queue-advanced-abort.e2e.test.ts +382 -0
- package/src/queue-advanced-action-buttons.e2e.test.ts +268 -0
- package/src/queue-advanced-e2e-setup.ts +873 -0
- package/src/queue-advanced-footer.e2e.test.ts +576 -0
- package/src/queue-advanced-model-switch.e2e.test.ts +383 -0
- package/src/queue-advanced-permissions-typing.e2e.test.ts +245 -0
- package/src/queue-advanced-question.e2e.test.ts +316 -0
- package/src/queue-advanced-typing-interrupt.e2e.test.ts +146 -0
- package/src/queue-advanced-typing.e2e.test.ts +199 -0
- package/src/queue-drain-after-interactive-ui.e2e.test.ts +151 -0
- package/src/queue-interrupt-drain.e2e.test.ts +166 -0
- package/src/queue-question-select-drain.e2e.test.ts +152 -0
- package/src/runtime-idle-sweeper.ts +76 -0
- package/src/runtime-lifecycle.e2e.test.ts +641 -0
- package/src/schema.sql +173 -0
- package/src/sentry.ts +26 -0
- package/src/session-handler/agent-utils.ts +97 -0
- package/src/session-handler/event-stream-fixtures/real-session-action-buttons.jsonl +45 -0
- package/src/session-handler/event-stream-fixtures/real-session-footer-suppressed-on-pre-idle-interrupt.jsonl +40 -0
- package/src/session-handler/event-stream-fixtures/real-session-permission-external-file.jsonl +23 -0
- package/src/session-handler/event-stream-fixtures/real-session-task-normal.jsonl +22 -0
- package/src/session-handler/event-stream-fixtures/real-session-task-three-parallel-sleeps.jsonl +277 -0
- package/src/session-handler/event-stream-fixtures/real-session-task-user-interruption.jsonl +46 -0
- package/src/session-handler/event-stream-fixtures/session-abort-after-idle-race.jsonl +21 -0
- package/src/session-handler/event-stream-fixtures/session-concurrent-messages-serialized.jsonl +56 -0
- package/src/session-handler/event-stream-fixtures/session-explicit-abort.jsonl +44 -0
- package/src/session-handler/event-stream-fixtures/session-normal-completion.jsonl +29 -0
- package/src/session-handler/event-stream-fixtures/session-tool-call-noisy-stream.jsonl +29 -0
- package/src/session-handler/event-stream-fixtures/session-two-completions-same-session.jsonl +50 -0
- package/src/session-handler/event-stream-fixtures/session-user-interruption.jsonl +59 -0
- package/src/session-handler/event-stream-fixtures/session-voice-queued-followup.jsonl +52 -0
- package/src/session-handler/event-stream-state.test.ts +645 -0
- package/src/session-handler/event-stream-state.ts +608 -0
- package/src/session-handler/model-utils.ts +183 -0
- package/src/session-handler/opencode-session-event-log.ts +130 -0
- package/src/session-handler/thread-runtime-state.ts +212 -0
- package/src/session-handler/thread-session-runtime.ts +4281 -0
- package/src/session-handler.ts +15 -0
- package/src/session-search.test.ts +50 -0
- package/src/session-search.ts +148 -0
- package/src/session-title-rename.test.ts +112 -0
- package/src/startup-service.ts +200 -0
- package/src/startup-time.e2e.test.ts +373 -0
- package/src/store.ts +122 -0
- package/src/system-message.test.ts +612 -0
- package/src/system-message.ts +723 -0
- package/src/task-runner.ts +421 -0
- package/src/task-schedule.test.ts +84 -0
- package/src/task-schedule.ts +311 -0
- package/src/test-utils.ts +435 -0
- package/src/thinking-utils.ts +61 -0
- package/src/thread-message-queue.e2e.test.ts +1219 -0
- package/src/tools.ts +430 -0
- package/src/undici.d.ts +12 -0
- package/src/undo-redo.e2e.test.ts +209 -0
- package/src/unnest-code-blocks.test.ts +713 -0
- package/src/unnest-code-blocks.ts +185 -0
- package/src/upgrade.ts +127 -0
- package/src/utils.ts +212 -0
- package/src/voice-attachment.ts +51 -0
- package/src/voice-handler.ts +908 -0
- package/src/voice-message.e2e.test.ts +1255 -0
- package/src/voice.test.ts +281 -0
- package/src/voice.ts +627 -0
- package/src/wait-session.ts +147 -0
- package/src/websockify.ts +101 -0
- package/src/worker-types.ts +64 -0
- package/src/worktree-lifecycle.e2e.test.ts +391 -0
- package/src/worktree-utils.ts +4 -0
- package/src/worktrees.test.ts +223 -0
- package/src/worktrees.ts +1294 -0
- package/src/xml.test.ts +38 -0
- package/src/xml.ts +121 -0
package/dist/voice.js
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
1
|
+
// Audio transcription service using AI SDK providers.
|
|
2
|
+
// Both providers use LanguageModelV3 (chat model) with audio file parts + tool calling,
|
|
3
|
+
// so we can pass full context (file tree, session info) for better word recognition.
|
|
4
|
+
// - OpenAI: gpt-4o-audio-preview via .chat() (Chat Completions API). MUST use .chat()
|
|
5
|
+
// because the default Responses API doesn't support audio file parts. The Chat
|
|
6
|
+
// Completions handler converts audio/mpeg file parts to input_audio format.
|
|
7
|
+
// - Gemini: gemini-2.5-flash natively accepts audio file parts in chat.
|
|
8
|
+
// Calls model.doGenerate() directly without the `ai` npm package.
|
|
9
|
+
// Uses errore for type-safe error handling.
|
|
10
|
+
import { createGoogleGenerativeAI } from '@ai-sdk/google';
|
|
11
|
+
import { createOpenAI } from '@ai-sdk/openai';
|
|
12
|
+
import { Readable } from 'node:stream';
|
|
13
|
+
import prism from 'prism-media';
|
|
14
|
+
import * as errore from 'errore';
|
|
15
|
+
import { createLogger, LogPrefix } from './logger.js';
|
|
16
|
+
import { ApiKeyMissingError, InvalidAudioFormatError, TranscriptionError, EmptyTranscriptionError, NoResponseContentError, NoToolResponseError, } from './errors.js';
|
|
17
|
+
// Module-scoped logger tagged with the VOICE prefix; used by the transcription helpers in this file.
const voiceLogger = createLogger(LogPrefix.VOICE);
|
|
18
|
+
// OpenAI input_audio only supports wav and mp3. Other formats (OGG Opus, etc)
// must be converted before sending.
const OPENAI_SUPPORTED_AUDIO_TYPES = new Set(['audio/mpeg', 'audio/mp3', 'audio/wav', 'audio/x-wav']);
// OGG container types: routed through the Opus-decoder → WAV pipeline.
const OGG_AUDIO_TYPES = new Set(['audio/ogg', 'audio/opus']);
// MP4/M4A container types: routed through the ffmpeg → WAV pipeline.
const M4A_AUDIO_TYPES = new Set(['audio/mp4', 'audio/m4a', 'audio/x-m4a']);
|
|
35
|
+
/**
 * Normalize a raw audio media type string: trim surrounding whitespace,
 * lowercase, and map the M4A aliases ('audio/m4a', 'audio/x-m4a') onto the
 * canonical 'audio/mp4'.
 *
 * @param {string} mediaType - Media type as reported by the upload source.
 * @returns {string} Normalized media type.
 */
export function normalizeAudioMediaType(mediaType) {
    const lowered = mediaType.trim().toLowerCase();
    const isM4aAlias = lowered === 'audio/x-m4a' || lowered === 'audio/m4a';
    return isM4aAlias ? 'audio/mp4' : lowered;
}
|
|
42
|
+
/**
 * Decide how an audio attachment must be prepared before being sent to
 * OpenAI's input_audio API.
 *
 * @param {string} mediaType - Normalized audio media type.
 * @returns {'none'|'convert-ogg-to-wav'|'convert-m4a-to-wav'|'unsupported'}
 *   'none' when OpenAI accepts the type directly, a conversion strategy for
 *   OGG/M4A inputs, or 'unsupported' for everything else.
 */
export function getOpenAIAudioConversionStrategy(mediaType) {
    const strategyBySet = [
        [OPENAI_SUPPORTED_AUDIO_TYPES, 'none'],
        [OGG_AUDIO_TYPES, 'convert-ogg-to-wav'],
        [M4A_AUDIO_TYPES, 'convert-m4a-to-wav'],
    ];
    for (const [types, strategy] of strategyBySet) {
        if (types.has(mediaType)) {
            return strategy;
        }
    }
    return 'unsupported';
}
|
|
54
|
+
/**
 * Convert OGG Opus audio to WAV using prism-media (already installed for Discord voice).
 * Pipeline: OGG buffer → OggDemuxer → Opus Decoder → PCM → WAV (with header).
 * No ffmpeg needed — uses @discordjs/opus native bindings.
 *
 * @param {Buffer|Uint8Array} input - Raw OGG Opus file contents.
 * @returns {Promise<Buffer|TranscriptionError>} WAV bytes on success. Never
 *   rejects: demux/decode failures resolve with a TranscriptionError value,
 *   matching the errore-style error-as-value convention used in this module.
 */
export function convertOggToWav(input) {
    return new Promise((resolve) => {
        const pcmChunks = [];
        const demuxer = new prism.opus.OggDemuxer();
        // Discord voice notes are 48 kHz Opus; decode to mono 16-bit PCM.
        const decoder = new prism.opus.Decoder({
            rate: 48000,
            channels: 1,
            frameSize: 960,
        });
        decoder.on('data', (chunk) => {
            pcmChunks.push(chunk);
        });
        decoder.on('end', () => {
            const pcmData = Buffer.concat(pcmChunks);
            // Consistency with convertM4aToWav: a stream that decodes to zero
            // PCM bytes is an error, not a valid header-only "WAV" file.
            if (pcmData.length === 0) {
                resolve(new TranscriptionError({
                    reason: 'Opus decode produced empty audio output',
                }));
                return;
            }
            const wavHeader = createWavHeader({
                dataLength: pcmData.length,
                sampleRate: 48000,
                numChannels: 1,
                bitsPerSample: 16,
            });
            resolve(Buffer.concat([wavHeader, pcmData]));
        });
        decoder.on('error', (err) => {
            resolve(new TranscriptionError({
                reason: `Opus decode failed: ${err.message}`,
                cause: err,
            }));
        });
        demuxer.on('error', (err) => {
            resolve(new TranscriptionError({
                reason: `OGG demux failed: ${err.message}`,
                cause: err,
            }));
        });
        Readable.from(input).pipe(demuxer).pipe(decoder);
    });
}
|
|
96
|
+
/**
 * Convert M4A/MP4 audio to WAV using prism-media FFmpeg wrapper.
 * This depends on an ffmpeg binary available in PATH.
 *
 * @param {Buffer|Uint8Array} input - Raw M4A/MP4 file contents.
 * @returns {Promise<Buffer|TranscriptionError>} WAV bytes on success; never
 *   rejects — failures resolve with a TranscriptionError value.
 */
export function convertM4aToWav(input) {
    return new Promise((resolve) => {
        // Decode the MP4-contained audio track to 48 kHz mono s16le PCM on stdout.
        const ffmpegArgs = [
            '-analyzeduration', '0',
            '-loglevel', '0',
            '-f', 'mp4',
            '-i', 'pipe:0',
            '-f', 's16le',
            '-acodec', 'pcm_s16le',
            '-ac', '1',
            '-ar', '48000',
            'pipe:1',
        ];
        const transcoder = new prism.FFmpeg({ args: ffmpegArgs });
        const pcmChunks = [];
        transcoder.on('data', (chunk) => pcmChunks.push(chunk));
        transcoder.on('end', () => {
            const pcmData = Buffer.concat(pcmChunks);
            if (pcmData.length === 0) {
                resolve(new TranscriptionError({
                    reason: 'FFmpeg conversion produced empty audio output',
                }));
                return;
            }
            const wavHeader = createWavHeader({
                dataLength: pcmData.length,
                sampleRate: 48000,
                numChannels: 1,
                bitsPerSample: 16,
            });
            resolve(Buffer.concat([wavHeader, pcmData]));
        });
        transcoder.on('error', (err) => {
            // A spawn/ENOENT-flavored ffmpeg error means the binary is absent,
            // which deserves an actionable install hint rather than a raw message.
            const lower = err.message.toLowerCase();
            const isMissingFfmpeg = lower.includes('ffmpeg') &&
                (lower.includes('not found') ||
                    lower.includes('enoent') ||
                    lower.includes('spawn'));
            resolve(new TranscriptionError(isMissingFfmpeg
                ? {
                    reason: 'M4A transcription with OpenAI requires ffmpeg to be installed and available in PATH',
                    cause: err,
                }
                : {
                    reason: `M4A decode failed: ${err.message}`,
                    cause: err,
                }));
        });
        Readable.from(input).pipe(transcoder);
    });
}
|
|
164
|
+
/**
 * Build a canonical 44-byte RIFF/WAVE header for raw PCM audio data.
 *
 * @param {object} params
 * @param {number} params.dataLength - Byte length of the PCM payload.
 * @param {number} params.sampleRate - Samples per second (e.g. 48000).
 * @param {number} params.numChannels - Channel count (1 = mono).
 * @param {number} params.bitsPerSample - Bits per sample (e.g. 16).
 * @returns {Buffer} 44-byte header to prepend to the PCM data.
 */
function createWavHeader({ dataLength, sampleRate, numChannels, bitsPerSample, }) {
    const byteRate = (sampleRate * numChannels * bitsPerSample) / 8;
    const blockAlign = (numChannels * bitsPerSample) / 8;
    const header = Buffer.alloc(44);
    // RIFF chunk descriptor.
    header.write('RIFF', 0);
    header.writeUInt32LE(36 + dataLength, 4); // total file size minus the 8-byte RIFF preamble
    header.write('WAVE', 8);
    // "fmt " sub-chunk: 16-byte PCM format description.
    header.write('fmt ', 12);
    header.writeUInt32LE(16, 16); // fmt chunk size
    header.writeUInt16LE(1, 20); // audio format 1 = uncompressed PCM
    header.writeUInt16LE(numChannels, 22);
    header.writeUInt32LE(sampleRate, 24);
    header.writeUInt32LE(byteRate, 28);
    header.writeUInt16LE(blockAlign, 32);
    header.writeUInt16LE(bitsPerSample, 34);
    // "data" sub-chunk header, immediately followed by the PCM payload.
    header.write('data', 36);
    header.writeUInt32LE(dataLength, 40);
    return header;
}
|
|
183
|
+
// Build the transcription tool schema dynamically so the agent field can
// use an enum constrained to the actual available agent names.
function buildTranscriptionTool({ agentNames, }) {
    const transcription = {
        type: 'string',
        description: 'The final transcription of the audio. MUST be non-empty. If audio is unclear, transcribe your best interpretation. If silent, too short to understand, or completely incomprehensible, use "[inaudible audio]".',
    };
    const queueMessage = {
        type: 'boolean',
        description: 'Set to true ONLY if the user explicitly says "queue this message", "queue this", or similar phrasing indicating they want this message queued instead of sent immediately. If not mentioned, omit or set to false.',
    };
    const properties = { transcription, queueMessage };
    // Only expose the agent field when there are agents to pick from, so the
    // enum never ends up empty.
    if (agentNames?.length) {
        properties.agent = {
            type: 'string',
            enum: agentNames,
            description: 'The agent name ONLY if the user explicitly says "use the X agent", "switch to X agent", "with the X agent", or similar phrasing. Remove the agent instruction from the transcription text. Omit if no agent is mentioned.',
        };
    }
    return {
        type: 'function',
        name: 'transcriptionResult',
        description: 'MANDATORY: You MUST call this tool to complete the task. This is the ONLY way to return results - text responses are ignored. Call this with your transcription, even if imperfect. An imperfect transcription is better than none.',
        inputSchema: {
            type: 'object',
            properties,
            required: ['transcription'],
        },
    };
}
|
|
214
|
+
/**
 * Extract transcription result from doGenerate content array.
 * Looks for a tool-call named 'transcriptionResult', falls back to text content.
 * Returns structured result with transcription text and queueMessage flag.
 *
 * Like the rest of this module, failures are RETURNED as Error values rather
 * than thrown:
 * - TranscriptionError for malformed tool input or a missing transcription
 * - EmptyTranscriptionError when the tool call carried no usable text
 * - NoResponseContentError when the model returned no content at all
 */
export function extractTranscription(content) {
    const toolCall = content.find((c) => c.type === 'tool-call' && c.toolName === 'transcriptionResult');
    if (toolCall) {
        // toolCall.input is a JSON string in LanguageModelV3.
        // Parse defensively: previously a malformed payload made JSON.parse
        // throw out of this function, violating the return-errors contract.
        let args = {};
        if (typeof toolCall.input === 'string') {
            try {
                args = JSON.parse(toolCall.input);
            }
            catch (err) {
                return new TranscriptionError({
                    reason: `Malformed transcriptionResult tool input: ${String(err)}`,
                    cause: err,
                });
            }
        }
        // JSON.parse can legally yield null or a primitive; treat those as
        // "no arguments" so the property reads below cannot throw.
        if (args === null || typeof args !== 'object') {
            args = {};
        }
        const transcription = (typeof args.transcription === 'string' ? args.transcription : '').trim();
        const queueMessage = args.queueMessage === true;
        const agent = typeof args.agent === 'string' ? args.agent : undefined;
        voiceLogger.log(`Transcription result received: "${transcription.slice(0, 100)}..."${queueMessage ? ' [QUEUE]' : ''}${agent ? ` [AGENT:${agent}]` : ''}`);
        if (!transcription) {
            return new EmptyTranscriptionError();
        }
        return { transcription, queueMessage, agent };
    }
    // Fall back to text content if no tool call
    const textPart = content.find((c) => c.type === 'text');
    if (textPart && textPart.type === 'text' && textPart.text.trim()) {
        voiceLogger.log(`No tool call but got text: "${textPart.text.trim().slice(0, 100)}..."`);
        return { transcription: textPart.text.trim(), queueMessage: false };
    }
    if (content.length === 0) {
        return new NoResponseContentError();
    }
    // Content existed (e.g. only reasoning parts) but nothing usable came back.
    return new TranscriptionError({
        reason: 'Model did not produce a transcription',
    });
}
|
|
251
|
+
// Perform one transcription round-trip: build the tool schema, send the
// prompt + audio to the model, and fold the response into a result or error.
async function runTranscriptionOnce({ model, prompt, audioBase64, mediaType, temperature, agentNames, }) {
    // A single user turn: instruction text followed by the raw audio part.
    const userMessage = {
        role: 'user',
        content: [
            { type: 'text', text: prompt },
            { type: 'file', data: audioBase64, mediaType },
        ],
    };
    const generateOptions = {
        prompt: [userMessage],
        temperature,
        maxOutputTokens: 2048,
        tools: [buildTranscriptionTool({ agentNames })],
        // Force the model to answer via the tool rather than free-form text.
        toolChoice: { type: 'tool', toolName: 'transcriptionResult' },
        providerOptions: {
            google: {
                thinkingConfig: { thinkingBudget: 1024 },
            },
        },
    };
    // doGenerate returns PromiseLike, wrap in Promise.resolve for errore compatibility
    const response = await errore.tryAsync({
        try: () => Promise.resolve(model.doGenerate(generateOptions)),
        catch: (e) => new TranscriptionError({
            reason: `API call failed: ${String(e)}`,
            cause: e,
        }),
    });
    return response instanceof TranscriptionError
        ? response
        : extractTranscription(response.content);
}
|
|
290
|
+
/**
 * Create a LanguageModelV3 for transcription.
 * Both providers use chat models that accept audio file parts, so we get full
 * context (prompt, session info, tool calling) for better word recognition.
 *
 * OpenAI: must use .chat() to get the Chat Completions API model, because the
 * default callable (Responses API) doesn't support audio file parts.
 * Gemini: language models natively accept audio in chat.
 */
export function createTranscriptionModel({ apiKey, provider, }) {
    // Infer the provider from the key prefix only when none was given; the
    // heuristic must stay lazy so an explicit provider never touches apiKey.
    let resolvedProvider = provider;
    if (!resolvedProvider) {
        resolvedProvider = apiKey.startsWith('sk-') ? 'openai' : 'gemini';
    }
    if (resolvedProvider === 'openai') {
        return createOpenAI({ apiKey }).chat('gpt-4o-audio-preview');
    }
    return createGoogleGenerativeAI({ apiKey })('gemini-2.5-flash');
}
|
|
308
|
+
/**
 * Transcribe spoken audio into a text message for the coding agent.
 *
 * Flow: resolve an API key (explicit param, then OPENAI_API_KEY, then
 * GEMINI_API_KEY) and a provider, normalize the audio into a Buffer, convert
 * OGG/Opus or M4A to WAV when OpenAI is the target provider, assemble the
 * transcription prompt (language hint, rules, agent list, file tree, session
 * context), and delegate the model call to runTranscriptionOnce.
 *
 * Returns either a { transcription, queueMessage, agent } result or an Error
 * value (ApiKeyMissingError, InvalidAudioFormatError, TranscriptionError, ...)
 * instead of throwing.
 */
export async function transcribeAudio({ audio, prompt, language, temperature, apiKey: apiKeyParam, model, provider, mediaType: mediaTypeParam, currentSessionContext, lastSessionContext, agents, }) {
    // Explicit key wins; otherwise fall back to whichever env key is set.
    const apiKey = apiKeyParam || process.env.OPENAI_API_KEY || process.env.GEMINI_API_KEY;
    if (!model && !apiKey) {
        return Promise.resolve(new ApiKeyMissingError({ service: 'OpenAI or Gemini' }));
    }
    // Provider precedence: explicit param > key-prefix heuristic > gemini.
    const resolvedProvider = (() => {
        if (provider) {
            return provider;
        }
        if (apiKey) {
            // OpenAI keys start with "sk-"; anything else is assumed Gemini.
            return apiKey.startsWith('sk-') ? 'openai' : 'gemini';
        }
        return 'gemini';
    })();
    const languageModel = model || createTranscriptionModel({ apiKey: apiKey, provider: resolvedProvider });
    // Convert audio to Buffer for potential format conversion
    const audioBuffer = (() => {
        if (typeof audio === 'string') {
            // Strings are treated as base64-encoded audio.
            return Buffer.from(audio, 'base64');
        }
        if (audio instanceof Buffer) {
            return audio;
        }
        if (audio instanceof ArrayBuffer) {
            return Buffer.from(new Uint8Array(audio));
        }
        // Assumes remaining inputs are Buffer.from-compatible (e.g. a typed
        // array) — TODO confirm against callers.
        return Buffer.from(audio);
    })();
    if (audioBuffer.length === 0) {
        return new InvalidAudioFormatError();
    }
    let mediaType = normalizeAudioMediaType(mediaTypeParam || 'audio/mpeg');
    let finalAudioBase64 = audioBuffer.toString('base64');
    // OpenAI input_audio supports only a subset of audio formats.
    // Convert based on MIME so OGG conversion runs only for real OGG/Opus inputs.
    if (resolvedProvider === 'openai') {
        const conversionStrategy = getOpenAIAudioConversionStrategy(mediaType);
        if (conversionStrategy === 'convert-ogg-to-wav') {
            voiceLogger.log(`Converting ${mediaType} to WAV for OpenAI compatibility`);
            const converted = await convertOggToWav(audioBuffer);
            if (converted instanceof Error) {
                // Conversion failures are returned to the caller unchanged.
                return converted;
            }
            finalAudioBase64 = converted.toString('base64');
            mediaType = 'audio/wav';
        }
        else if (conversionStrategy === 'convert-m4a-to-wav') {
            voiceLogger.log(`Converting ${mediaType} to WAV for OpenAI compatibility`);
            const converted = await convertM4aToWav(audioBuffer);
            if (converted instanceof Error) {
                return converted;
            }
            finalAudioBase64 = converted.toString('base64');
            mediaType = 'audio/wav';
        }
        else if (conversionStrategy === 'unsupported') {
            return new InvalidAudioFormatError();
        }
    }
    const languageHint = language ? `The audio is in ${language}.\n\n` : '';
    // build session context section
    const sessionContextParts = [];
    if (lastSessionContext) {
        sessionContextParts.push(`<last_session>
${lastSessionContext}
</last_session>`);
    }
    if (currentSessionContext) {
        sessionContextParts.push(`<current_session>
${currentSessionContext}
</current_session>`);
    }
    const sessionContextSection = sessionContextParts.length > 0
        ? `\n<session_context>
${sessionContextParts.join('\n\n')}
</session_context>`
        : '';
    // NOTE: the template below is runtime prompt text sent to the model; do
    // not reflow or "fix" its wording without re-validating transcriptions.
    const transcriptionPrompt = `${languageHint}Transcribe this audio for a coding agent (like Claude Code or OpenCode).

CRITICAL REQUIREMENT: You MUST call the "transcriptionResult" tool to complete this task.
- The transcriptionResult tool is the ONLY way to return results
- Text responses are completely ignored - only tool calls work
- You MUST call transcriptionResult even if you run out of tool calls
- Always call transcriptionResult with your best approximation of what was said
- DO NOT end without calling transcriptionResult

This is a software development environment. The speaker is giving instructions to an AI coding assistant. Expect:
- File paths, function names, CLI commands, package names, API endpoints

RULES:
- NEVER change the meaning or intent of the user's message. Your job is ONLY to transcribe, not to respond or answer.
- If the user asks a question, keep it as a question. Do NOT answer it. Do NOT rephrase it as a statement.
- Only fix grammar, punctuation, and markdown formatting. Preserve the original content faithfully.
- If audio is unclear, transcribe your best interpretation, even with strong accents. Always provide an approximation.
- If audio seems silent/empty, is too short to understand, or is completely incomprehensible, call transcriptionResult with "[inaudible audio]"
- The session context below is ONLY for understanding technical terms, file names, and function names. It may contain previous transcriptions — NEVER copy or reuse them. Always transcribe fresh from the current audio.

QUEUE DETECTION:
- If the user says "queue this message", "queue this", "add this to the queue", or similar phrasing indicating they want the message queued instead of sent immediately, set queueMessage to true.
- Remove the queue instruction from the transcription text itself — only include the actual message content.
- Example: "Queue this message. Fix the login bug in auth.ts" → transcription: "Fix the login bug in auth.ts", queueMessage: true
- If removing the queue phrase would leave empty content (user only said "queue this" with nothing else), keep the full spoken text as the transcription — never return an empty transcription.
- If no queue intent is detected, omit queueMessage or set it to false.
${agents && agents.length > 0 ? `
AGENT SELECTION:
- If the user explicitly says "use the X agent", "switch to X agent", "with the X agent", or similar phrasing naming a specific agent, set the agent field to that agent name.
- Remove the agent instruction from the transcription text itself — only include the actual message content.
- Example: "Use the plan agent. Refactor the auth module" → transcription: "Refactor the auth module", agent: "plan"
- If removing the agent phrase would leave empty content, keep the full spoken text as the transcription.
- Only set agent if the user explicitly names one. Do not infer an agent from the task content.
- If no agent is mentioned, omit the agent field entirely.

Available agents:
${agents.map((a) => { return `- ${a.name}${a.description ? `: ${a.description}` : ''}`; }).join('\n')}
` : ''}

Common corrections (apply without tool calls):
- "reacked" → "React", "jason" → "JSON", "get hub" → "GitHub", "no JS" → "Node.js", "dacker" → "Docker"

Project file structure:
<file_tree>
${prompt}
</file_tree>
${sessionContextSection}

REMEMBER: Call "transcriptionResult" tool with your transcription. This is mandatory.

Note: "critique" is a CLI tool for showing diffs in the browser.`;
    // Agent names feed the tool schema's enum; drop empty names defensively.
    const agentNames = agents
        ?.map((a) => { return a.name; })
        .filter((name) => { return name.length > 0; });
    return runTranscriptionOnce({
        model: languageModel,
        prompt: transcriptionPrompt,
        audioBase64: finalAudioBase64,
        mediaType,
        // Low default temperature keeps transcriptions close to the audio.
        temperature: temperature ?? 0.3,
        agentNames: agentNames && agentNames.length > 0 ? agentNames : undefined,
    });
}
|
|
@@ -0,0 +1,235 @@
|
|
|
1
|
+
// Tests for voice transcription using AI SDK provider (LanguageModelV3).
|
|
2
|
+
// Uses the example audio files at scripts/example-audio.{mp3,ogg}.
|
|
3
|
+
import { describe, test, expect } from 'vitest';
|
|
4
|
+
import fs from 'node:fs';
|
|
5
|
+
import path from 'node:path';
|
|
6
|
+
import { transcribeAudio, convertOggToWav, extractTranscription, normalizeAudioMediaType, getOpenAIAudioConversionStrategy, } from './voice.js';
|
|
7
|
+
import { getVoiceAttachmentMatchReason, isVoiceAttachment, } from './voice-attachment.js';
|
|
8
|
+
// Pure routing logic: MIME normalization and the OpenAI conversion decision.
// Inline snapshots pin the exact strategy strings used by transcribeAudio.
describe('audio media type routing', () => {
    test('normalizes m4a aliases to audio/mp4', () => {
        // Both common m4a aliases collapse to the canonical container type.
        expect(normalizeAudioMediaType('audio/x-m4a')).toMatchInlineSnapshot('"audio/mp4"');
        expect(normalizeAudioMediaType('audio/m4a')).toMatchInlineSnapshot('"audio/mp4"');
    });
    test('keeps non-m4a media types unchanged', () => {
        expect(normalizeAudioMediaType('audio/ogg')).toMatchInlineSnapshot('"audio/ogg"');
        expect(normalizeAudioMediaType('audio/wav')).toMatchInlineSnapshot('"audio/wav"');
    });
    test('converts ogg only when mime is actual ogg/opus', () => {
        // OGG/Opus and MP4/M4A each get their own WAV-conversion strategy;
        // formats OpenAI accepts natively (e.g. mpeg) need no conversion.
        expect(getOpenAIAudioConversionStrategy('audio/ogg')).toMatchInlineSnapshot('"convert-ogg-to-wav"');
        expect(getOpenAIAudioConversionStrategy('audio/opus')).toMatchInlineSnapshot('"convert-ogg-to-wav"');
        expect(getOpenAIAudioConversionStrategy('audio/mp4')).toMatchInlineSnapshot('"convert-m4a-to-wav"');
        expect(getOpenAIAudioConversionStrategy('audio/mpeg')).toMatchInlineSnapshot('"none"');
    });
});
|
|
24
|
+
// Attachment classification: a voice note can be recognized by explicit audio
// content type, by file extension, or by the presence of waveform metadata.
describe('voice attachment detection', () => {
    test('detects voice attachments by content type, extension, and waveform metadata', () => {
        // One snapshot covers all three positive match reasons plus a
        // negative case (plain text file → not a voice attachment).
        expect([
            getVoiceAttachmentMatchReason({
                name: 'voice-message.ogg',
                contentType: 'audio/ogg',
            }),
            getVoiceAttachmentMatchReason({
                name: 'voice-message.ogg',
                contentType: null,
            }),
            getVoiceAttachmentMatchReason({
                name: 'upload.bin',
                contentType: null,
                waveform: 'abc123',
            }),
            isVoiceAttachment({
                name: 'notes.txt',
                contentType: null,
            }),
        ]).toMatchInlineSnapshot(`
          [
            "contentType:audio/ogg",
            "extension:.ogg",
            "waveform",
            false,
          ]
        `);
    });
});
|
|
54
|
+
// Unit tests for extractTranscription: the happy tool-call path, the
// queueMessage flag, the plain-text fallback, and each returned error type.
describe('extractTranscription', () => {
    test('extracts transcription from tool call', () => {
        const result = extractTranscription([
            {
                type: 'tool-call',
                toolCallId: 'call_1',
                toolName: 'transcriptionResult',
                // input is a JSON string, matching LanguageModelV3 tool calls.
                input: JSON.stringify({ transcription: 'hello world' }),
            },
        ]);
        expect(result).toMatchInlineSnapshot(`
          {
            "agent": undefined,
            "queueMessage": false,
            "transcription": "hello world",
          }
        `);
    });
    test('extracts queueMessage: true from tool call', () => {
        const result = extractTranscription([
            {
                type: 'tool-call',
                toolCallId: 'call_1',
                toolName: 'transcriptionResult',
                input: JSON.stringify({
                    transcription: 'Fix the login bug in auth.ts',
                    queueMessage: true,
                }),
            },
        ]);
        expect(result).toMatchInlineSnapshot(`
          {
            "agent": undefined,
            "queueMessage": true,
            "transcription": "Fix the login bug in auth.ts",
          }
        `);
    });
    test('queueMessage defaults to false when omitted', () => {
        const result = extractTranscription([
            {
                type: 'tool-call',
                toolCallId: 'call_1',
                toolName: 'transcriptionResult',
                input: JSON.stringify({ transcription: 'regular message' }),
            },
        ]);
        expect(result).not.toBeInstanceOf(Error);
        expect(result.queueMessage).toBe(false);
    });
    test('falls back to text when no tool call', () => {
        // No agent key in this snapshot: the text fallback path does not set it.
        const result = extractTranscription([
            {
                type: 'text',
                text: 'fallback text response',
            },
        ]);
        expect(result).toMatchInlineSnapshot(`
          {
            "queueMessage": false,
            "transcription": "fallback text response",
          }
        `);
    });
    test('returns NoResponseContentError for empty content', () => {
        const result = extractTranscription([]);
        expect(result).toBeInstanceOf(Error);
        expect(result.message).toMatchInlineSnapshot(`"No response content from model"`);
    });
    test('returns EmptyTranscriptionError for empty transcription string', () => {
        // Whitespace-only transcriptions are trimmed and rejected as empty.
        const result = extractTranscription([
            {
                type: 'tool-call',
                toolCallId: 'call_1',
                toolName: 'transcriptionResult',
                input: JSON.stringify({ transcription: ' ' }),
            },
        ]);
        expect(result).toBeInstanceOf(Error);
        expect(result.message).toMatchInlineSnapshot(`"Model returned empty transcription"`);
    });
    test('returns TranscriptionError when content has no tool call or text', () => {
        // Reasoning-only content is non-empty but carries nothing usable.
        const result = extractTranscription([
            {
                type: 'reasoning',
                text: 'thinking about it',
            },
        ]);
        expect(result).toBeInstanceOf(Error);
        expect(result.message).toMatchInlineSnapshot(`"Transcription failed: Model did not produce a transcription"`);
    });
});
|
|
146
|
+
// End-to-end tests against the real provider APIs. Each test self-skips
// (returns early) when the relevant API key or fixture file is absent, so
// these are effectively opt-in integration tests.
describe('transcribeAudio with real API', () => {
    // Shared fixture: a small committed MP3 under scripts/.
    const audioPath = path.join(import.meta.dirname, '..', 'scripts', 'example-audio.mp3');
    test('transcribes with Gemini', { timeout: 30_000 }, async () => {
        const apiKey = process.env.GEMINI_API_KEY;
        if (!apiKey) {
            console.log('Skipping: GEMINI_API_KEY not set');
            return;
        }
        if (!fs.existsSync(audioPath)) {
            console.log('Skipping: example-audio.mp3 not found');
            return;
        }
        const audio = fs.readFileSync(audioPath);
        const result = await transcribeAudio({
            audio,
            prompt: 'test project',
            apiKey,
            provider: 'gemini',
        });
        // Only asserts a non-error, non-empty transcription: exact wording
        // from a live model is not deterministic.
        expect(result).not.toBeInstanceOf(Error);
        const { transcription } = result;
        expect(transcription.length).toBeGreaterThan(0);
        console.log('Gemini transcription:', result);
    });
    test('transcribes with OpenAI', { timeout: 30_000 }, async () => {
        const apiKey = process.env.OPENAI_API_KEY;
        if (!apiKey) {
            console.log('Skipping: OPENAI_API_KEY not set');
            return;
        }
        if (!fs.existsSync(audioPath)) {
            console.log('Skipping: example-audio.mp3 not found');
            return;
        }
        const audio = fs.readFileSync(audioPath);
        const result = await transcribeAudio({
            audio,
            prompt: 'test project',
            apiKey,
            provider: 'openai',
        });
        expect(result).not.toBeInstanceOf(Error);
        const { transcription } = result;
        expect(transcription.length).toBeGreaterThan(0);
        console.log('OpenAI transcription:', result);
    });
    test('transcribes OGG with OpenAI (converts to WAV)', { timeout: 30_000 }, async () => {
        // Exercises the OGG→WAV conversion path inside transcribeAudio by
        // passing an explicit audio/ogg media type to the OpenAI provider.
        const apiKey = process.env.OPENAI_API_KEY;
        const oggPath = path.join(import.meta.dirname, '..', 'scripts', 'example-audio.ogg');
        if (!apiKey) {
            console.log('Skipping: OPENAI_API_KEY not set');
            return;
        }
        if (!fs.existsSync(oggPath)) {
            console.log('Skipping: example-audio.ogg not found');
            return;
        }
        const audio = fs.readFileSync(oggPath);
        const result = await transcribeAudio({
            audio,
            prompt: 'test project',
            apiKey,
            provider: 'openai',
            mediaType: 'audio/ogg',
        });
        expect(result).not.toBeInstanceOf(Error);
        const { transcription } = result;
        expect(transcription.length).toBeGreaterThan(0);
        console.log('OpenAI OGG transcription:', result);
    });
});
|
|
217
|
+
// Local decode test (no API): convert the committed OGG fixture to WAV and
// verify the RIFF/WAVE header plus a non-trivial payload. Self-skips when
// the fixture file is missing.
describe('convertOggToWav', () => {
    test('converts OGG Opus to valid WAV', async () => {
        const oggPath = path.join(import.meta.dirname, '..', 'scripts', 'example-audio.ogg');
        if (!fs.existsSync(oggPath)) {
            console.log('Skipping: example-audio.ogg not found');
            return;
        }
        const ogg = fs.readFileSync(oggPath);
        const result = await convertOggToWav(ogg);
        // Success path returns a Buffer; failure would be an Error value.
        expect(result).toBeInstanceOf(Buffer);
        const wav = result;
        // WAV header starts with RIFF
        expect(wav.subarray(0, 4).toString()).toBe('RIFF');
        expect(wav.subarray(8, 12).toString()).toBe('WAVE');
        // Must be larger than just the header (44 bytes)
        expect(wav.length).toBeGreaterThan(44);
        console.log(`Converted OGG (${ogg.length} bytes) to WAV (${wav.length} bytes)`);
    });
});
|