@soederpop/luca 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CLAUDE.md +71 -0
- package/README.md +78 -0
- package/bun.lock +2928 -0
- package/bunfig.toml +3 -0
- package/commands/audit-docs.ts +740 -0
- package/commands/build-scaffolds.ts +154 -0
- package/commands/generate-api-docs.ts +114 -0
- package/commands/update-introspection.ts +67 -0
- package/docs/CLI.md +335 -0
- package/docs/README.md +88 -0
- package/docs/TABLE-OF-CONTENTS.md +157 -0
- package/docs/apis/clients/elevenlabs.md +84 -0
- package/docs/apis/clients/graph.md +56 -0
- package/docs/apis/clients/openai.md +69 -0
- package/docs/apis/clients/rest.md +41 -0
- package/docs/apis/clients/websocket.md +107 -0
- package/docs/apis/features/agi/assistant.md +471 -0
- package/docs/apis/features/agi/assistants-manager.md +154 -0
- package/docs/apis/features/agi/claude-code.md +602 -0
- package/docs/apis/features/agi/conversation-history.md +352 -0
- package/docs/apis/features/agi/conversation.md +333 -0
- package/docs/apis/features/agi/docs-reader.md +121 -0
- package/docs/apis/features/agi/openai-codex.md +318 -0
- package/docs/apis/features/agi/openapi.md +138 -0
- package/docs/apis/features/agi/semantic-search.md +387 -0
- package/docs/apis/features/agi/skills-library.md +216 -0
- package/docs/apis/features/node/container-link.md +133 -0
- package/docs/apis/features/node/content-db.md +313 -0
- package/docs/apis/features/node/disk-cache.md +379 -0
- package/docs/apis/features/node/dns.md +651 -0
- package/docs/apis/features/node/docker.md +705 -0
- package/docs/apis/features/node/downloader.md +81 -0
- package/docs/apis/features/node/esbuild.md +59 -0
- package/docs/apis/features/node/file-manager.md +182 -0
- package/docs/apis/features/node/fs.md +581 -0
- package/docs/apis/features/node/git.md +330 -0
- package/docs/apis/features/node/google-auth.md +174 -0
- package/docs/apis/features/node/google-calendar.md +187 -0
- package/docs/apis/features/node/google-docs.md +151 -0
- package/docs/apis/features/node/google-drive.md +225 -0
- package/docs/apis/features/node/google-sheets.md +179 -0
- package/docs/apis/features/node/grep.md +290 -0
- package/docs/apis/features/node/helpers.md +135 -0
- package/docs/apis/features/node/ink.md +334 -0
- package/docs/apis/features/node/ipc-socket.md +260 -0
- package/docs/apis/features/node/json-tree.md +86 -0
- package/docs/apis/features/node/launcher-app-command-listener.md +145 -0
- package/docs/apis/features/node/networking.md +281 -0
- package/docs/apis/features/node/nlp.md +133 -0
- package/docs/apis/features/node/opener.md +97 -0
- package/docs/apis/features/node/os.md +118 -0
- package/docs/apis/features/node/package-finder.md +402 -0
- package/docs/apis/features/node/postgres.md +212 -0
- package/docs/apis/features/node/proc.md +430 -0
- package/docs/apis/features/node/process-manager.md +210 -0
- package/docs/apis/features/node/python.md +278 -0
- package/docs/apis/features/node/repl.md +88 -0
- package/docs/apis/features/node/runpod.md +673 -0
- package/docs/apis/features/node/secure-shell.md +169 -0
- package/docs/apis/features/node/semantic-search.md +401 -0
- package/docs/apis/features/node/sqlite.md +211 -0
- package/docs/apis/features/node/telegram.md +254 -0
- package/docs/apis/features/node/tts.md +118 -0
- package/docs/apis/features/node/ui.md +703 -0
- package/docs/apis/features/node/vault.md +64 -0
- package/docs/apis/features/node/vm.md +84 -0
- package/docs/apis/features/node/window-manager.md +337 -0
- package/docs/apis/features/node/yaml-tree.md +85 -0
- package/docs/apis/features/node/yaml.md +176 -0
- package/docs/apis/features/web/asset-loader.md +47 -0
- package/docs/apis/features/web/container-link.md +133 -0
- package/docs/apis/features/web/esbuild.md +59 -0
- package/docs/apis/features/web/helpers.md +135 -0
- package/docs/apis/features/web/network.md +30 -0
- package/docs/apis/features/web/speech.md +55 -0
- package/docs/apis/features/web/vault.md +64 -0
- package/docs/apis/features/web/vm.md +84 -0
- package/docs/apis/features/web/voice.md +67 -0
- package/docs/apis/servers/express.md +127 -0
- package/docs/apis/servers/mcp.md +213 -0
- package/docs/apis/servers/websocket.md +99 -0
- package/docs/documentation-audit.md +134 -0
- package/docs/examples/content-db.md +77 -0
- package/docs/examples/disk-cache.md +83 -0
- package/docs/examples/docker.md +101 -0
- package/docs/examples/downloader.md +70 -0
- package/docs/examples/esbuild.md +80 -0
- package/docs/examples/file-manager.md +82 -0
- package/docs/examples/fs.md +83 -0
- package/docs/examples/git.md +85 -0
- package/docs/examples/google-auth.md +88 -0
- package/docs/examples/google-calendar.md +94 -0
- package/docs/examples/google-docs.md +82 -0
- package/docs/examples/google-drive.md +96 -0
- package/docs/examples/google-sheets.md +95 -0
- package/docs/examples/grep.md +85 -0
- package/docs/examples/ink-blocks.md +75 -0
- package/docs/examples/ink-renderer.md +41 -0
- package/docs/examples/ink.md +103 -0
- package/docs/examples/ipc-socket.md +103 -0
- package/docs/examples/json-tree.md +91 -0
- package/docs/examples/launcher-app-command-listener.md +120 -0
- package/docs/examples/networking.md +58 -0
- package/docs/examples/nlp.md +91 -0
- package/docs/examples/opener.md +78 -0
- package/docs/examples/os.md +72 -0
- package/docs/examples/package-finder.md +89 -0
- package/docs/examples/port-exposer.md +89 -0
- package/docs/examples/postgres.md +91 -0
- package/docs/examples/proc.md +81 -0
- package/docs/examples/process-manager.md +79 -0
- package/docs/examples/python.md +91 -0
- package/docs/examples/repl.md +93 -0
- package/docs/examples/runpod.md +119 -0
- package/docs/examples/secure-shell.md +92 -0
- package/docs/examples/sqlite.md +86 -0
- package/docs/examples/telegram.md +77 -0
- package/docs/examples/tts.md +86 -0
- package/docs/examples/ui.md +80 -0
- package/docs/examples/vault.md +70 -0
- package/docs/examples/vm.md +86 -0
- package/docs/examples/window-manager.md +125 -0
- package/docs/examples/yaml-tree.md +93 -0
- package/docs/examples/yaml.md +104 -0
- package/docs/ideas/class-registration-refactor-possibilities.md +197 -0
- package/docs/ideas/container-use-api.md +9 -0
- package/docs/ideas/easy-auth-for-express-servers-and-luca-serve.md +0 -0
- package/docs/ideas/feature-stacks.md +22 -0
- package/docs/ideas/luca-cli-self-sufficiency-demo.md +23 -0
- package/docs/ideas/mcp-design.md +9 -0
- package/docs/ideas/web-container-debugging-feature.md +13 -0
- package/docs/introspection-audit.md +49 -0
- package/docs/introspection.md +154 -0
- package/docs/mcp/readme.md +162 -0
- package/docs/models.ts +38 -0
- package/docs/philosophy.md +85 -0
- package/docs/principles.md +7 -0
- package/docs/prompts/audit-codebase-for-failures-to-use-the-container.md +34 -0
- package/docs/prompts/mcp-test-easy-command.md +27 -0
- package/docs/reports/assistant-bugs.md +38 -0
- package/docs/reports/attach-pattern-usage.md +18 -0
- package/docs/reports/code-audit-results.md +391 -0
- package/docs/reports/introspection-audit-tasks.md +378 -0
- package/docs/reports/luca-mcp-improvements.md +128 -0
- package/docs/scaffolds/client.md +140 -0
- package/docs/scaffolds/command.md +106 -0
- package/docs/scaffolds/endpoint.md +176 -0
- package/docs/scaffolds/feature.md +148 -0
- package/docs/scaffolds/server.md +187 -0
- package/docs/tasks/web-container-helper-discovery.md +71 -0
- package/docs/todos.md +1 -0
- package/docs/tutorials/01-getting-started.md +106 -0
- package/docs/tutorials/02-container.md +210 -0
- package/docs/tutorials/03-scripts.md +194 -0
- package/docs/tutorials/04-features-overview.md +196 -0
- package/docs/tutorials/05-state-and-events.md +171 -0
- package/docs/tutorials/06-servers.md +157 -0
- package/docs/tutorials/07-endpoints.md +198 -0
- package/docs/tutorials/08-commands.md +171 -0
- package/docs/tutorials/09-clients.md +162 -0
- package/docs/tutorials/10-creating-features.md +198 -0
- package/docs/tutorials/11-contentbase.md +191 -0
- package/docs/tutorials/12-assistants.md +215 -0
- package/docs/tutorials/13-introspection.md +147 -0
- package/docs/tutorials/14-type-system.md +174 -0
- package/docs/tutorials/15-project-patterns.md +222 -0
- package/docs/tutorials/16-google-features.md +534 -0
- package/docs/tutorials/17-tui-blocks.md +530 -0
- package/docs/tutorials/18-semantic-search.md +334 -0
- package/index.ts +1 -0
- package/luca.console.ts +9 -0
- package/main.py +6 -0
- package/package.json +154 -0
- package/pyproject.toml +7 -0
- package/scripts/animations/chrome-glitch.ts +55 -0
- package/scripts/animations/index.ts +16 -0
- package/scripts/animations/neon-pulse.ts +64 -0
- package/scripts/animations/types.ts +6 -0
- package/scripts/build-web.ts +28 -0
- package/scripts/examples/ask-luca-expert.ts +42 -0
- package/scripts/examples/assistant-questions.ts +12 -0
- package/scripts/examples/excalidraw-expert.ts +75 -0
- package/scripts/examples/expert-chat.ts +0 -0
- package/scripts/examples/file-manager.ts +14 -0
- package/scripts/examples/ideas.ts +12 -0
- package/scripts/examples/interactive-chat.ts +20 -0
- package/scripts/examples/openai-tool-calls.ts +113 -0
- package/scripts/examples/opening-a-web-browser.ts +5 -0
- package/scripts/examples/telegram-bot.ts +79 -0
- package/scripts/examples/telegram-ink-ui.ts +302 -0
- package/scripts/examples/using-assistant-with-mcp.ts +560 -0
- package/scripts/examples/using-claude-code.ts +10 -0
- package/scripts/examples/using-contentdb.ts +35 -0
- package/scripts/examples/using-conversations.ts +35 -0
- package/scripts/examples/using-disk-cache.ts +10 -0
- package/scripts/examples/using-docker-shell.ts +75 -0
- package/scripts/examples/using-elevenlabs.ts +25 -0
- package/scripts/examples/using-google-calendar.ts +57 -0
- package/scripts/examples/using-google-docs.ts +74 -0
- package/scripts/examples/using-google-drive.ts +74 -0
- package/scripts/examples/using-google-sheets.ts +89 -0
- package/scripts/examples/using-nlp.ts +55 -0
- package/scripts/examples/using-ollama.ts +10 -0
- package/scripts/examples/using-openai-codex.ts +23 -0
- package/scripts/examples/using-postgres.ts +55 -0
- package/scripts/examples/using-runpod.ts +32 -0
- package/scripts/examples/using-tts.ts +40 -0
- package/scripts/examples/vm-loading-esm-modules.ts +16 -0
- package/scripts/scaffold.ts +391 -0
- package/scripts/scratch.ts +15 -0
- package/scripts/test-command-listener.ts +123 -0
- package/scripts/test-window-manager-lifecycle.ts +86 -0
- package/scripts/test-window-manager.ts +43 -0
- package/scripts/update-introspection-data.ts +58 -0
- package/src/agi/README.md +14 -0
- package/src/agi/container.server.ts +114 -0
- package/src/agi/endpoints/ask.ts +60 -0
- package/src/agi/endpoints/conversations/[id].ts +45 -0
- package/src/agi/endpoints/conversations.ts +31 -0
- package/src/agi/endpoints/experts.ts +37 -0
- package/src/agi/features/assistant.ts +767 -0
- package/src/agi/features/assistants-manager.ts +260 -0
- package/src/agi/features/claude-code.ts +1111 -0
- package/src/agi/features/conversation-history.ts +497 -0
- package/src/agi/features/conversation.ts +799 -0
- package/src/agi/features/openai-codex.ts +631 -0
- package/src/agi/features/openapi.ts +438 -0
- package/src/agi/features/skills-library.ts +425 -0
- package/src/agi/index.ts +6 -0
- package/src/agi/lib/token-counter.ts +122 -0
- package/src/browser.ts +25 -0
- package/src/bus.ts +100 -0
- package/src/cli/cli.ts +70 -0
- package/src/client.ts +461 -0
- package/src/clients/civitai/index.ts +541 -0
- package/src/clients/client-template.ts +41 -0
- package/src/clients/comfyui/index.ts +597 -0
- package/src/clients/elevenlabs/index.ts +291 -0
- package/src/clients/openai/index.ts +451 -0
- package/src/clients/supabase/index.ts +366 -0
- package/src/command.ts +164 -0
- package/src/commands/chat.ts +182 -0
- package/src/commands/console.ts +192 -0
- package/src/commands/describe.ts +433 -0
- package/src/commands/eval.ts +116 -0
- package/src/commands/help.ts +214 -0
- package/src/commands/index.ts +14 -0
- package/src/commands/mcp.ts +64 -0
- package/src/commands/prompt.ts +807 -0
- package/src/commands/run.ts +257 -0
- package/src/commands/sandbox-mcp.ts +439 -0
- package/src/commands/scaffold.ts +79 -0
- package/src/commands/serve.ts +172 -0
- package/src/container.ts +781 -0
- package/src/endpoint.ts +340 -0
- package/src/feature.ts +75 -0
- package/src/hash-object.ts +97 -0
- package/src/helper.ts +543 -0
- package/src/introspection/generated.agi.ts +23388 -0
- package/src/introspection/generated.node.ts +18899 -0
- package/src/introspection/generated.web.ts +2021 -0
- package/src/introspection/index.ts +256 -0
- package/src/introspection/scan.ts +912 -0
- package/src/node/container.ts +354 -0
- package/src/node/feature.ts +13 -0
- package/src/node/features/container-link.ts +558 -0
- package/src/node/features/content-db.ts +475 -0
- package/src/node/features/disk-cache.ts +382 -0
- package/src/node/features/dns.ts +655 -0
- package/src/node/features/docker.ts +912 -0
- package/src/node/features/downloader.ts +92 -0
- package/src/node/features/esbuild.ts +68 -0
- package/src/node/features/file-manager.ts +357 -0
- package/src/node/features/fs.ts +534 -0
- package/src/node/features/git.ts +492 -0
- package/src/node/features/google-auth.ts +502 -0
- package/src/node/features/google-calendar.ts +300 -0
- package/src/node/features/google-docs.ts +404 -0
- package/src/node/features/google-drive.ts +339 -0
- package/src/node/features/google-sheets.ts +279 -0
- package/src/node/features/grep.ts +406 -0
- package/src/node/features/helpers.ts +374 -0
- package/src/node/features/ink.ts +490 -0
- package/src/node/features/ipc-socket.ts +459 -0
- package/src/node/features/json-tree.ts +188 -0
- package/src/node/features/launcher-app-command-listener.ts +388 -0
- package/src/node/features/networking.ts +925 -0
- package/src/node/features/nlp.ts +211 -0
- package/src/node/features/opener.ts +166 -0
- package/src/node/features/os.ts +157 -0
- package/src/node/features/package-finder.ts +539 -0
- package/src/node/features/port-exposer.ts +342 -0
- package/src/node/features/postgres.ts +273 -0
- package/src/node/features/proc.ts +502 -0
- package/src/node/features/process-manager.ts +542 -0
- package/src/node/features/python.ts +444 -0
- package/src/node/features/repl.ts +194 -0
- package/src/node/features/runpod.ts +802 -0
- package/src/node/features/secure-shell.ts +248 -0
- package/src/node/features/semantic-search.ts +924 -0
- package/src/node/features/sqlite.ts +289 -0
- package/src/node/features/telegram.ts +342 -0
- package/src/node/features/tts.ts +184 -0
- package/src/node/features/ui.ts +857 -0
- package/src/node/features/vault.ts +164 -0
- package/src/node/features/vm.ts +312 -0
- package/src/node/features/window-manager.ts +804 -0
- package/src/node/features/yaml-tree.ts +149 -0
- package/src/node/features/yaml.ts +132 -0
- package/src/node.ts +70 -0
- package/src/react/index.ts +175 -0
- package/src/registry.ts +199 -0
- package/src/scaffolds/generated.ts +1613 -0
- package/src/scaffolds/template.ts +37 -0
- package/src/schemas/base.ts +255 -0
- package/src/server.ts +135 -0
- package/src/servers/express.ts +209 -0
- package/src/servers/mcp.ts +805 -0
- package/src/servers/socket.ts +120 -0
- package/src/state.ts +101 -0
- package/src/web/clients/socket.ts +82 -0
- package/src/web/container.ts +74 -0
- package/src/web/extension.ts +30 -0
- package/src/web/feature.ts +12 -0
- package/src/web/features/asset-loader.ts +64 -0
- package/src/web/features/container-link.ts +385 -0
- package/src/web/features/esbuild.ts +79 -0
- package/src/web/features/helpers.ts +267 -0
- package/src/web/features/network.ts +61 -0
- package/src/web/features/speech.ts +87 -0
- package/src/web/features/vault.ts +189 -0
- package/src/web/features/vm.ts +78 -0
- package/src/web/features/voice-recognition.ts +129 -0
- package/src/web/shims/isomorphic-vm.ts +149 -0
- package/test/bus.test.ts +134 -0
- package/test/clients-servers.test.ts +216 -0
- package/test/container-link.test.ts +274 -0
- package/test/features.test.ts +160 -0
- package/test/integration.test.ts +787 -0
- package/test/node-container.test.ts +121 -0
- package/test/rate-limit.test.ts +272 -0
- package/test/semantic-search.test.ts +550 -0
- package/test/state.test.ts +121 -0
- package/test-integration/assistant.test.ts +138 -0
- package/test-integration/assistants-manager.test.ts +123 -0
- package/test-integration/claude-code.test.ts +98 -0
- package/test-integration/conversation-history.test.ts +205 -0
- package/test-integration/conversation.test.ts +137 -0
- package/test-integration/elevenlabs.test.ts +55 -0
- package/test-integration/google-services.test.ts +80 -0
- package/test-integration/helpers.ts +89 -0
- package/test-integration/openai-codex.test.ts +93 -0
- package/test-integration/runpod.test.ts +58 -0
- package/test-integration/server-endpoints.test.ts +97 -0
- package/test-integration/skills-library.test.ts +157 -0
- package/test-integration/telegram.test.ts +46 -0
- package/tsconfig.json +58 -0
- package/uv.lock +8 -0
|
@@ -0,0 +1,799 @@
|
|
|
1
|
+
import { z } from 'zod'
|
|
2
|
+
import { FeatureStateSchema, FeatureOptionsSchema } from '../../schemas/base.js'
|
|
3
|
+
import type { Container } from '@soederpop/luca/container';
|
|
4
|
+
import { type AvailableFeatures } from '@soederpop/luca/feature'
|
|
5
|
+
import { features, Feature } from '@soederpop/luca/feature'
|
|
6
|
+
import type { OpenAIClient } from '../../clients/openai';
|
|
7
|
+
import type OpenAI from 'openai';
|
|
8
|
+
import type { ConversationHistory } from './conversation-history';
|
|
9
|
+
import { countMessageTokens, getContextWindow } from '../lib/token-counter.js';
|
|
10
|
+
|
|
11
|
+
// Module augmentation: registers the 'conversation' key on the shared
// AvailableFeatures map so container.feature('conversation') resolves to
// the Conversation class with full typing.
declare module '@soederpop/luca/feature' {
  interface AvailableFeatures {
    conversation: typeof Conversation
  }
}
|
|
16
|
+
|
|
17
|
+
/** A single chat message, in OpenAI Chat Completions parameter form. */
export type Message = OpenAI.Chat.Completions.ChatCompletionMessageParam

/**
 * One piece of multimodal message content: plain text, an image URL,
 * base64-encoded audio, or an attached file.
 */
export type ContentPart =
  | { type: 'text'; text: string }
  | { type: 'image_url'; image_url: { url: string; detail?: 'low' | 'high' | 'auto' } }
  | { type: 'input_audio'; data: string; format: 'mp3' | 'wav' }
  | { type: 'input_file'; file_data: string; filename: string }
|
|
24
|
+
|
|
25
|
+
/** A tool the model may call during the conversation. */
export interface ConversationTool {
  /** Invoked with the arguments supplied by the model; the resolved value is fed back to it. */
  handler: (...args: any[]) => Promise<any>
  /** Natural-language description of the tool shown to the model. */
  description: string
  /** JSON-schema-style parameter definition for the tool. */
  parameters: Record<string, any>
}
|
|
30
|
+
|
|
31
|
+
/**
 * Configuration for a remote MCP server exposed as tools when using the
 * OpenAI Responses API.
 */
export interface ConversationMCPServer {
  /** Base URL of the MCP server. */
  url: string
  /** Extra HTTP headers to send with requests (e.g. auth tokens). */
  headers?: Record<string, string>
  /** Restrict which of the server's tools are exposed to the model. */
  allowedTools?: string[] | { tool_names?: string[] }
  /** Tool-approval policy: a global setting, or per-tool-name lists. */
  requireApproval?: 'always' | 'never' | {
    always?: { tool_names?: string[] }
    never?: { tool_names?: string[] }
  }
}
|
|
40
|
+
|
|
41
|
+
/** Constructor options accepted by the Conversation feature. */
export const ConversationOptionsSchema = FeatureOptionsSchema.extend({
  /** A unique identifier for the conversation */
  id: z.string().optional().describe('A unique identifier for the conversation'),
  /** A human-readable title for the conversation */
  title: z.string().optional().describe('A human-readable title for the conversation'),
  /** A unique identifier for threads, an arbitrary grouping mechanism */
  thread: z.string().optional().describe('A unique identifier for threads, an arbitrary grouping mechanism'),
  /** Any available OpenAI model */
  model: z.string().optional().describe('Any available OpenAI model'),
  /** Initial message history to seed the conversation */
  history: z.array(z.any()).optional().describe('Initial message history to seed the conversation'),
  /** Tools the model can call during conversation */
  tools: z.record(z.string(), z.any()).optional().describe('Tools the model can call during conversation'),
  /** Remote MCP servers to expose as tools when using the OpenAI Responses API */
  mcpServers: z.record(z.string(), z.any()).optional().describe('Remote MCP servers keyed by server label'),
  /** Which OpenAI API to use for completions */
  api: z.enum(['auto', 'responses', 'chat']).optional().describe('Completion API mode. auto uses Responses unless local=true'),
  /** Tags for categorizing and searching this conversation */
  tags: z.array(z.string()).optional().describe('Tags for categorizing and searching this conversation'),
  /** Arbitrary metadata to attach to this conversation */
  metadata: z.record(z.string(), z.any()).optional().describe('Arbitrary metadata to attach to this conversation'),

  /** Options forwarded to the OpenAI client */
  clientOptions: z.record(z.string(), z.any()).optional().describe('Options for the OpenAI client'), // the type of options for OpenAI client

  /** Use local ollama models instead of remote OpenAI models */
  local: z.boolean().optional().describe('Whether to use the local ollama models instead of the remote OpenAI models'),

  /** Maximum number of output tokens per completion */
  maxTokens: z.number().optional().describe('Maximum number of output tokens per completion'),

  /** Enable automatic compaction when estimated input tokens approach the context limit */
  autoCompact: z.boolean().optional().describe('Enable automatic compaction when input tokens approach the context limit'),
  /** Fraction of contextWindow at which auto-compact triggers (0.0–1.0, default 0.8) */
  compactThreshold: z.number().min(0).max(1).optional().describe('Fraction of context window at which auto-compact triggers (default 0.8)'),
  /** Override the inferred context window size for this model */
  contextWindow: z.number().optional().describe('Override the inferred context window size for this model'),
  /** Number of recent messages to preserve after compaction (default 4) */
  compactKeepRecent: z.number().optional().describe('Number of recent messages to preserve after compaction (default 4)'),
})
|
|
79
|
+
|
|
80
|
+
/** Runtime state tracked for each Conversation instance. */
export const ConversationStateSchema = FeatureStateSchema.extend({
  id: z.string().describe('Unique identifier for this conversation instance'),
  thread: z.string().describe('Thread identifier for grouping conversations'),
  model: z.string().describe('The OpenAI model being used'),
  messages: z.array(z.any()).describe('Full message history of the conversation'),
  streaming: z.boolean().describe('Whether a streaming response is currently in progress'),
  lastResponse: z.string().describe('The last assistant response text'),
  toolCalls: z.number().describe('Total number of tool calls made in this conversation'),
  api: z.enum(['responses', 'chat']).describe('Which completion API is active for this conversation'),
  lastResponseId: z.string().nullable().describe('Most recent OpenAI Responses API response ID for continuing conversation state'),
  tokenUsage: z.object({
    prompt: z.number().describe('Total prompt tokens consumed'),
    completion: z.number().describe('Total completion tokens consumed'),
    total: z.number().describe('Total tokens consumed'),
  }).describe('Cumulative token usage statistics'),
  estimatedInputTokens: z.number().describe('Estimated input token count for the current messages array'),
  compactionCount: z.number().describe('Number of times compact() has been called'),
  contextWindow: z.number().describe('The context window size for the current model'),
})
|
|
99
|
+
|
|
100
|
+
/** Options type inferred from ConversationOptionsSchema. */
export type ConversationOptions = z.infer<typeof ConversationOptionsSchema>
/** State type inferred from ConversationStateSchema. */
export type ConversationState = z.infer<typeof ConversationStateSchema>

/** Per-call overrides accepted alongside a single completion request. */
export type AskOptions = {
  /** Maximum number of output tokens for this single call. */
  maxTokens?: number
}
|
|
106
|
+
|
|
107
|
+
/**
|
|
108
|
+
* A self-contained conversation with OpenAI that supports streaming,
|
|
109
|
+
* tool calling, and message state management.
|
|
110
|
+
*
|
|
111
|
+
* @extends Feature
|
|
112
|
+
*
|
|
113
|
+
* @example
|
|
114
|
+
* ```typescript
|
|
115
|
+
* const conversation = container.feature('conversation', {
|
|
116
|
+
* model: 'gpt-4.1',
|
|
117
|
+
* tools: myToolMap,
|
|
118
|
+
* history: [{ role: 'system', content: 'You are a helpful assistant.' }]
|
|
119
|
+
* })
|
|
120
|
+
* const reply = await conversation.ask('What is the meaning of life?')
|
|
121
|
+
* ```
|
|
122
|
+
*/
|
|
123
|
+
export class Conversation extends Feature<ConversationState, ConversationOptions> {
|
|
124
|
+
static override stateSchema = ConversationStateSchema
|
|
125
|
+
static override optionsSchema = ConversationOptionsSchema
|
|
126
|
+
static override shortcut = 'features.conversation' as const
|
|
127
|
+
|
|
128
|
+
private _callMaxTokens: number | undefined = undefined
|
|
129
|
+
|
|
130
|
+
/** Resolved max tokens: per-call override > options-level > undefined (no limit). */
|
|
131
|
+
private get maxTokens(): number | undefined {
|
|
132
|
+
return this._callMaxTokens ?? this.options.maxTokens ?? undefined
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
private get _tools(): Record<string, ConversationTool> {
|
|
136
|
+
return this.options.tools || {}
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/**
 * Registers the Conversation class under the 'conversation' feature key so
 * containers can instantiate it, then returns the container for chaining.
 */
static attach(container: Container<AvailableFeatures, any>) {
  features.register('conversation', Conversation)
  return container
}
|
|
143
|
+
|
|
144
|
+
/** @returns Default state seeded from options: id, thread, model, initial history, and zero token usage. */
override get initialState(): ConversationState {
  const opts = this.options
  // Model is resolved once so the context-window lookup uses the same name.
  const modelName = opts.model || 'gpt-5'
  return {
    ...super.initialState,
    id: opts.id || this.uuid,
    thread: opts.thread || 'default',
    model: modelName,
    messages: opts.history || [],
    streaming: false,
    lastResponse: '',
    toolCalls: 0,
    api: this.apiMode,
    lastResponseId: null,
    tokenUsage: { prompt: 0, completion: 0, total: 0 },
    estimatedInputTokens: 0,
    compactionCount: 0,
    contextWindow: opts.contextWindow || getContextWindow(modelName),
  }
}
|
|
163
|
+
|
|
164
|
+
/** Returns the registered tools available for the model to call. */
|
|
165
|
+
get tools() : Record<string, any> {
|
|
166
|
+
return this.options.tools || {}
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
/** Returns configured remote MCP servers keyed by server label. */
|
|
170
|
+
get mcpServers(): Record<string, ConversationMCPServer> {
|
|
171
|
+
return (this.options.mcpServers || {}) as Record<string, ConversationMCPServer>
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
/** Returns the full message history of the conversation. */
|
|
175
|
+
get messages(): Message[] {
|
|
176
|
+
return this.state.get('messages') || []
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
/** The OpenAI model name used for completions (seeded in initialState). */
get model(): string {
  return this.state.get('model') as string
}
|
|
183
|
+
|
|
184
|
+
/** Returns the active completion API mode after resolving auto/local behavior. */
|
|
185
|
+
get apiMode(): 'responses' | 'chat' {
|
|
186
|
+
const mode = this.options.api || 'auto'
|
|
187
|
+
if (mode === 'chat' || mode === 'responses') return mode
|
|
188
|
+
return this.options.local ? 'chat' : 'responses'
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
/** Whether a streaming response is currently in progress. */
get isStreaming(): boolean {
  return Boolean(this.state.get('streaming'))
}
|
|
195
|
+
|
|
196
|
+
/** Context window size: explicit option override, else inferred from the model name. */
get contextWindow(): number {
  const override = this.options.contextWindow
  return override || getContextWindow(this.model)
}
|
|
200
|
+
|
|
201
|
+
/** True when estimated input tokens have reached the compact-threshold fraction of the window. */
get isNearContextLimit(): boolean {
  const fraction = this.options.compactThreshold ?? 0.8
  const limit = this.contextWindow * fraction
  // Note: estimateTokens() also refreshes state.estimatedInputTokens.
  return this.estimateTokens() >= limit
}
|
|
206
|
+
|
|
207
|
+
/**
 * Estimate the input token count for the current messages array using
 * the js-tiktoken tokenizer, record it in state, and return it.
 */
estimateTokens(): number {
  const estimated = countMessageTokens(this.messages, this.model)
  this.state.set('estimatedInputTokens', estimated)
  return estimated
}
|
|
216
|
+
|
|
217
|
+
/**
|
|
218
|
+
* Generate a summary of the conversation so far using the LLM.
|
|
219
|
+
* Read-only — does not modify messages.
|
|
220
|
+
*/
|
|
221
|
+
async summarize(): Promise<string> {
|
|
222
|
+
this.emit('summarizeStart')
|
|
223
|
+
|
|
224
|
+
const transcript = this.messages
|
|
225
|
+
.map(m => {
|
|
226
|
+
const role = m.role
|
|
227
|
+
const content = typeof m.content === 'string'
|
|
228
|
+
? m.content
|
|
229
|
+
: Array.isArray(m.content)
|
|
230
|
+
? (m.content as any[]).filter((p: any) => p.type === 'text').map((p: any) => p.text).join('\n')
|
|
231
|
+
: (m.content != null ? JSON.stringify(m.content) : '(no content)')
|
|
232
|
+
return `[${role}]: ${content || '(no text content)'}`
|
|
233
|
+
})
|
|
234
|
+
.join('\n\n')
|
|
235
|
+
|
|
236
|
+
const response = await this.openai.raw.chat.completions.create({
|
|
237
|
+
model: this.model,
|
|
238
|
+
messages: [
|
|
239
|
+
{
|
|
240
|
+
role: 'system',
|
|
241
|
+
content: 'You are a conversation summarizer. Produce a concise but comprehensive summary of the following conversation. Preserve all key facts, decisions, context, user preferences, and any important details needed to continue the conversation. Output only the summary.',
|
|
242
|
+
},
|
|
243
|
+
{ role: 'user', content: transcript },
|
|
244
|
+
],
|
|
245
|
+
stream: false,
|
|
246
|
+
})
|
|
247
|
+
|
|
248
|
+
const summary = (response as any).choices?.[0]?.message?.content || ''
|
|
249
|
+
this.emit('summarizeEnd', summary)
|
|
250
|
+
return summary
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
/**
 * Compact the conversation by summarizing old messages and replacing them
 * with a summary message. Keeps the system message (if any) and the most
 * recent N messages.
 *
 * @param options.keepRecent - trailing messages to preserve; falls back to
 *   options.compactKeepRecent, then 4.
 * @returns the generated summary, the net number of messages removed, and
 *   the new estimated input token count. Emits 'compactStart'/'compactEnd'.
 */
async compact(options?: { keepRecent?: number }): Promise<{ summary: string; removedCount: number; estimatedTokens: number }> {
  const keepRecent = options?.keepRecent ?? this.options.compactKeepRecent ?? 4
  const messages = this.messages

  // Nothing to compact: history already fits in the keep window
  // (+1 leaves room for a leading system message).
  if (messages.length <= keepRecent + 1) {
    return { summary: '', removedCount: 0, estimatedTokens: this.estimateTokens() }
  }

  this.emit('compactStart', { messageCount: messages.length, keepRecent })

  const summary = await this.summarize()

  // Preserve a leading system/developer message if one exists.
  const systemMessage = (messages[0]?.role === 'system' || messages[0]?.role === 'developer')
    ? messages[0]
    : null

  const recentMessages = messages.slice(-keepRecent)

  const newMessages: Message[] = []
  if (systemMessage) newMessages.push(systemMessage)

  // Inject the summary as a developer message so it reads as context, not dialogue.
  newMessages.push({
    role: 'developer',
    content: `[Conversation Summary — the following is a summary of the earlier conversation that has been compacted to save context space]\n\n${summary}`,
  } as Message)

  newMessages.push(...recentMessages)

  // Net count: the inserted summary message offsets one removal.
  const removedCount = messages.length - newMessages.length
  this.state.set('messages', newMessages)
  this.state.set('compactionCount', (this.state.get('compactionCount') || 0) + 1)

  // Responses API: clear continuation chain since message history changed
  if (this.apiMode === 'responses') {
    this.state.set('lastResponseId', null)
  }

  const estimatedTokens = this.estimateTokens()

  this.emit('compactEnd', { summary, removedCount, estimatedTokens, compactionCount: this.state.get('compactionCount') })

  return { summary, removedCount, estimatedTokens }
}
|
|
301
|
+
|
|
302
|
+
/**
|
|
303
|
+
* Get the OpenAI-formatted tools array from the registered tools.
|
|
304
|
+
*
|
|
305
|
+
* @returns {OpenAI.Chat.Completions.ChatCompletionTool[]} The tools formatted for OpenAI
|
|
306
|
+
*/
|
|
307
|
+
private get openaiTools(): OpenAI.Chat.Completions.ChatCompletionTool[] {
|
|
308
|
+
return Object.entries(this.tools).map(([name, tool]) => ({
|
|
309
|
+
type: 'function' as const,
|
|
310
|
+
function: {
|
|
311
|
+
name,
|
|
312
|
+
description: tool.description,
|
|
313
|
+
parameters: tool.parameters
|
|
314
|
+
}
|
|
315
|
+
}))
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
/**
|
|
319
|
+
* Get the OpenAI Responses-formatted tools array from local function tools
|
|
320
|
+
* plus configured remote MCP servers.
|
|
321
|
+
*/
|
|
322
|
+
private get responseTools(): OpenAI.Responses.Tool[] {
|
|
323
|
+
const functionTools = Object.entries(this.tools).map(([name, tool]) => ({
|
|
324
|
+
type: 'function' as const,
|
|
325
|
+
name,
|
|
326
|
+
description: tool.description,
|
|
327
|
+
parameters: { ...tool.parameters, additionalProperties: false },
|
|
328
|
+
strict: true,
|
|
329
|
+
}))
|
|
330
|
+
|
|
331
|
+
const mcpTools = Object.entries(this.mcpServers)
|
|
332
|
+
.filter(([, server]) => !!server?.url)
|
|
333
|
+
.map(([serverLabel, server]) => ({
|
|
334
|
+
type: 'mcp' as const,
|
|
335
|
+
server_label: serverLabel,
|
|
336
|
+
server_url: server.url,
|
|
337
|
+
...(server.headers ? { headers: server.headers } : {}),
|
|
338
|
+
...(server.allowedTools ? { allowed_tools: server.allowedTools } : {}),
|
|
339
|
+
...(server.requireApproval ? { require_approval: server.requireApproval } : {}),
|
|
340
|
+
}))
|
|
341
|
+
|
|
342
|
+
return [...functionTools, ...mcpTools]
|
|
343
|
+
}
|
|
344
|
+
|
|
345
|
+
/** Returns the first system/developer text message to use as Responses instructions. */
|
|
346
|
+
private get responsesInstructions(): string | undefined {
|
|
347
|
+
for (const message of this.messages) {
|
|
348
|
+
if ((message.role === 'system' || message.role === 'developer') && typeof message.content === 'string') {
|
|
349
|
+
return message.content
|
|
350
|
+
}
|
|
351
|
+
}
|
|
352
|
+
return undefined
|
|
353
|
+
}
|
|
354
|
+
|
|
355
|
+
/**
 * Send a message and get a streamed response. Automatically handles
 * tool calls by invoking the registered handlers and feeding results
 * back to the model until a final text response is produced.
 *
 * @param {string | ContentPart[]} content - The user message, either a string or array of content parts (text + images)
 * @param {AskOptions} [options] - Per-call options (e.g. a maxTokens override scoped to this request)
 * @returns {Promise<string>} The assistant's final text response
 *
 * @example
 * const reply = await conversation.ask("What's the weather in SF?")
 * // With image:
 * const reply = await conversation.ask([
 *   { type: 'text', text: 'What is in this diagram?' },
 *   { type: 'image_url', image_url: { url: 'data:image/png;base64,...' } }
 * ])
 */
async ask(content: string | ContentPart[], options?: AskOptions): Promise<string> {
  // Per-call max-token override; cleared in the finally below so it can
  // never leak into a subsequent ask() call.
  this._callMaxTokens = options?.maxTokens

  // Auto-compact before adding the new message
  if (this.options.autoCompact) {
    const threshold = this.options.compactThreshold ?? 0.8
    const estimated = this.estimateTokens()
    const limit = this.contextWindow * threshold
    if (estimated >= limit) {
      this.emit('autoCompactTriggered', { estimated, limit, contextWindow: this.contextWindow })
      await this.compact()
    }
  }

  const userMessage: Message = { role: 'user', content: content as any }
  this.pushMessage(userMessage)
  this.emit('userMessage', content)

  try {
    if (this.apiMode === 'responses') {
      // Responses API: send only the new user message and continue the
      // server-side thread via the stored lastResponseId when available.
      return await this.runResponsesLoop({
        turn: 1,
        accumulated: '',
        input: [this.toResponsesUserMessage(content)],
        previousResponseId: this.state.get('lastResponseId') || undefined,
      })
    }

    // Chat Completions API: the full local message history is resent each turn.
    return await this.runChatCompletionLoop({ turn: 1, accumulated: '' })
  } finally {
    this._callMaxTokens = undefined
  }
}
|
|
404
|
+
|
|
405
|
+
/** Convert user content into a Responses API input message item. */
private toResponsesUserMessage(content: string | ContentPart[]): OpenAI.Responses.ResponseInputItem.Message {
  if (typeof content === 'string') {
    return {
      type: 'message',
      role: 'user',
      content: [{ type: 'input_text', text: content }],
    }
  }

  const converted = content.map((part) => {
    switch (part.type) {
      case 'text':
        return { type: 'input_text' as const, text: part.text }
      case 'input_audio':
        return { type: 'input_audio' as const, data: part.data, format: part.format }
      case 'input_file':
        return { type: 'input_file' as const, file_data: part.file_data, filename: part.filename }
      default:
        // Any remaining part is treated as an image.
        return {
          type: 'input_image' as const,
          image_url: part.image_url.url,
          detail: part.image_url.detail || 'auto',
        }
    }
  }) as OpenAI.Responses.ResponseInputMessageContentList

  return {
    type: 'message',
    role: 'user',
    content: converted,
  }
}
|
|
439
|
+
|
|
440
|
+
/** Returns the OpenAI client instance from the container. */
get openai() {
  // Local mode routes every request to an Ollama-compatible endpoint,
  // overriding any caller-supplied baseURL.
  const baseURL = this.options.local
    ? "http://localhost:11434/v1"
    : (this.options.clientOptions?.baseURL ? this.options.clientOptions.baseURL : undefined)

  const fallbackModel = this.options.local ? "qwen2.5:7b" : "gpt-4o"

  return (this.container as any).client('openai', {
    defaultModel: this.options.model || fallbackModel,
    ...this.options.clientOptions,
    ...(baseURL ? { baseURL } : {}),
  }) as OpenAIClient
}
|
|
454
|
+
|
|
455
|
+
/** Returns the conversationHistory feature for persistence. */
get history(): ConversationHistory {
  const feature = this.container.feature('conversationHistory')
  return feature as ConversationHistory
}
|
|
459
|
+
|
|
460
|
+
/**
|
|
461
|
+
* Persist this conversation to disk via conversationHistory.
|
|
462
|
+
* Creates a new record if this conversation hasn't been saved before,
|
|
463
|
+
* or updates the existing one.
|
|
464
|
+
*
|
|
465
|
+
* @param opts - Optional overrides for title, tags, thread, or metadata
|
|
466
|
+
* @returns The saved conversation record
|
|
467
|
+
*/
|
|
468
|
+
async save(opts?: { title?: string; tags?: string[]; thread?: string; metadata?: Record<string, any> }) {
|
|
469
|
+
const id = this.state.get('id')!
|
|
470
|
+
const existing = await this.history.load(id)
|
|
471
|
+
|
|
472
|
+
if (existing) {
|
|
473
|
+
existing.messages = this.messages
|
|
474
|
+
existing.model = this.model
|
|
475
|
+
if (opts?.title) existing.title = opts.title
|
|
476
|
+
if (opts?.tags) existing.tags = opts.tags
|
|
477
|
+
if (opts?.thread) existing.thread = opts.thread
|
|
478
|
+
if (opts?.metadata) existing.metadata = { ...existing.metadata, ...opts.metadata }
|
|
479
|
+
await this.history.save(existing)
|
|
480
|
+
return existing
|
|
481
|
+
}
|
|
482
|
+
|
|
483
|
+
return this.history.create({
|
|
484
|
+
id,
|
|
485
|
+
title: opts?.title || this.options.title || 'Untitled',
|
|
486
|
+
model: this.model,
|
|
487
|
+
messages: this.messages,
|
|
488
|
+
tags: opts?.tags || this.options.tags || [],
|
|
489
|
+
thread: opts?.thread || this.options.thread || this.state.get('thread'),
|
|
490
|
+
metadata: opts?.metadata || this.options.metadata || {},
|
|
491
|
+
})
|
|
492
|
+
}
|
|
493
|
+
|
|
494
|
+
/**
 * Runs the streaming Responses API loop. Handles local function calls by
 * executing handlers and submitting `function_call_output` items until
 * the model produces a final text response.
 *
 * @param context - turn number, text accumulated across turns, the input
 *   items for this turn, and the previous response id for server-side chaining
 * @returns {Promise<string>} The final assistant text (accumulated across turns)
 */
private async runResponsesLoop(context: {
  turn: number
  accumulated: string
  input: OpenAI.Responses.ResponseInput
  previousResponseId?: string
}): Promise<string> {
  const { turn } = context
  let accumulated = context.accumulated
  let turnContent = ''
  let finalResponse: OpenAI.Responses.Response | undefined

  const toolsParam = this.responseTools.length > 0 ? this.responseTools : undefined

  this.state.set('streaming', true)
  this.emit('turnStart', { turn, isFollowUp: turn > 1 })

  try {
    const stream = await this.openai.raw.responses.create({
      model: this.model as OpenAI.Responses.ResponseCreateParams['model'],
      input: context.input,
      stream: true,
      previous_response_id: context.previousResponseId,
      ...(toolsParam ? { tools: toolsParam, tool_choice: 'auto', parallel_tool_calls: true } : {}),
      ...(this.responsesInstructions ? { instructions: this.responsesInstructions } : {}),
      ...(this.maxTokens ? { max_output_tokens: this.maxTokens } : {}),
    })

    for await (const event of stream) {
      this.emit('rawEvent', event)
      // Surface MCP lifecycle events: both `response.mcp_*` event types and
      // output items whose item type starts with `mcp_`.
      if ((event as any).type?.startsWith?.('response.mcp_')) {
        this.emit('mcpEvent', event)
      }
      if (((event as any).type === 'response.output_item.added' || (event as any).type === 'response.output_item.done')
        && (event as any).item?.type?.startsWith?.('mcp_')) {
        this.emit('mcpEvent', event)
      }

      if (event.type === 'response.output_text.delta') {
        const delta = event.delta || ''
        turnContent += delta
        accumulated += delta
        this.emit('chunk', delta)
        this.emit('preview', accumulated)
      }

      if (event.type === 'response.completed') {
        finalResponse = event.response
        this.emit('responseCompleted', event.response)
      }
    }
  } finally {
    // Always clear the streaming flag, even if the stream throws mid-flight.
    this.state.set('streaming', false)
  }

  if (!finalResponse) {
    throw new Error('Responses stream ended without a completed response')
  }

  this.state.set('lastResponseId', finalResponse.id)
  this.applyResponsesUsage(finalResponse.usage || undefined)

  const functionCalls = (finalResponse.output || []).filter((item) => item.type === 'function_call') as OpenAI.Responses.ResponseFunctionToolCall[]
  if (functionCalls.length > 0) {
    // Mirror the tool calls into chat-completions-shaped history so the
    // local message log stays consistent across both API modes.
    const assistantMessage: OpenAI.Chat.Completions.ChatCompletionAssistantMessageParam = {
      role: 'assistant',
      content: turnContent || null,
      tool_calls: functionCalls.map((call) => ({
        id: call.call_id,
        type: 'function',
        function: {
          name: call.name,
          arguments: call.arguments || '{}',
        }
      }))
    }
    this.pushMessage(assistantMessage)

    this.emit('toolCallsStart', functionCalls)

    const functionOutputs: OpenAI.Responses.ResponseInputItem.FunctionCallOutput[] = []
    for (const call of functionCalls) {
      const toolName = call.name
      const tool = this._tools[toolName]
      const callCount = (this.state.get('toolCalls') || 0) + 1
      this.state.set('toolCalls', callCount)

      let result: string
      if (!tool) {
        // Unknown tool: report the error back to the model as the output.
        result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
        this.emit('toolError', toolName, result)
      } else {
        try {
          const args = call.arguments ? JSON.parse(call.arguments) : {}
          this.emit('toolCall', toolName, args)
          const output = await tool.handler(args)
          result = typeof output === 'string' ? output : JSON.stringify(output)
          this.emit('toolResult', toolName, result)
        } catch (err: any) {
          // Handler (or argument parsing) failed: feed the error string back.
          result = JSON.stringify({ error: err.message || String(err) })
          this.emit('toolError', toolName, err)
        }
      }

      this.pushMessage({
        role: 'tool',
        tool_call_id: call.call_id,
        content: result,
      })

      functionOutputs.push({
        type: 'function_call_output',
        call_id: call.call_id,
        output: result,
      })
    }

    this.emit('toolCallsEnd')
    this.emit('turnEnd', { turn, hasToolCalls: true })

    // Recurse: submit the tool outputs as the next turn's input, chained
    // server-side via previous_response_id.
    return this.runResponsesLoop({
      turn: turn + 1,
      accumulated,
      input: functionOutputs,
      previousResponseId: finalResponse.id,
    })
  }

  const finalText = turnContent || finalResponse.output_text || ''
  const assistantMessage: Message = { role: 'assistant', content: finalText }
  this.pushMessage(assistantMessage)
  this.state.set('lastResponse', accumulated || finalText)

  this.emit('turnEnd', { turn, hasToolCalls: false })
  this.emit('response', accumulated || finalText)

  return accumulated || finalText
}
|
|
636
|
+
|
|
637
|
+
/** Apply Responses API usage stats to this conversation's token usage counters. */
private applyResponsesUsage(usage?: OpenAI.Responses.ResponseUsage) {
  if (!usage) return

  const current = this.state.get('tokenUsage')!
  const updated = {
    prompt: current.prompt + (usage.input_tokens || 0),
    completion: current.completion + (usage.output_tokens || 0),
    total: current.total + (usage.total_tokens || 0),
  }

  this.state.set('tokenUsage', updated)
}
|
|
647
|
+
|
|
648
|
+
/**
|
|
649
|
+
* Runs the streaming completion loop. If the model requests tool calls,
|
|
650
|
+
* executes them and loops again until a text response is produced.
|
|
651
|
+
*
|
|
652
|
+
* @returns {Promise<string>} The final assistant text response
|
|
653
|
+
*/
|
|
654
|
+
/**
|
|
655
|
+
* Runs the streaming completion loop. If the model requests tool calls,
|
|
656
|
+
* executes them and loops again until a text response is produced.
|
|
657
|
+
*
|
|
658
|
+
* @param context - Turn tracking: turn number and text accumulated across all turns
|
|
659
|
+
* @returns {Promise<string>} The final assistant text response (accumulated across all turns)
|
|
660
|
+
*/
|
|
661
|
+
private async runChatCompletionLoop(context: { turn: number; accumulated: string } = { turn: 1, accumulated: '' }): Promise<string> {
|
|
662
|
+
const { turn } = context
|
|
663
|
+
let accumulated = context.accumulated
|
|
664
|
+
|
|
665
|
+
const hasTools = Object.keys(this._tools || {}).length > 0
|
|
666
|
+
const toolsParam = hasTools ? this.openaiTools : undefined
|
|
667
|
+
|
|
668
|
+
this.state.set('streaming', true)
|
|
669
|
+
this.emit('turnStart', { turn, isFollowUp: turn > 1 })
|
|
670
|
+
|
|
671
|
+
let turnContent = ''
|
|
672
|
+
let toolCalls: Array<{ id: string; function: { name: string; arguments: string }; type: 'function' }> = []
|
|
673
|
+
|
|
674
|
+
try {
|
|
675
|
+
const stream = await this.openai.raw.chat.completions.create({
|
|
676
|
+
model: this.model,
|
|
677
|
+
messages: this.messages,
|
|
678
|
+
stream: true,
|
|
679
|
+
...(toolsParam ? { tools: toolsParam, tool_choice: 'auto' } : {}),
|
|
680
|
+
...(this.maxTokens ? { max_tokens: this.maxTokens } : {}),
|
|
681
|
+
})
|
|
682
|
+
|
|
683
|
+
for await (const chunk of stream) {
|
|
684
|
+
const delta = chunk.choices[0]?.delta
|
|
685
|
+
|
|
686
|
+
if (delta?.content) {
|
|
687
|
+
turnContent += delta.content
|
|
688
|
+
accumulated += delta.content
|
|
689
|
+
this.emit('chunk', delta.content)
|
|
690
|
+
this.emit('preview', accumulated)
|
|
691
|
+
}
|
|
692
|
+
|
|
693
|
+
if (delta?.tool_calls) {
|
|
694
|
+
for (const tc of delta.tool_calls) {
|
|
695
|
+
if (!toolCalls[tc.index]) {
|
|
696
|
+
toolCalls[tc.index] = {
|
|
697
|
+
id: tc.id || '',
|
|
698
|
+
type: 'function',
|
|
699
|
+
function: { name: '', arguments: '' }
|
|
700
|
+
}
|
|
701
|
+
}
|
|
702
|
+
if (tc.id) {
|
|
703
|
+
toolCalls[tc.index]!.id = tc.id
|
|
704
|
+
}
|
|
705
|
+
if (tc.function?.name) {
|
|
706
|
+
toolCalls[tc.index]!.function.name += tc.function.name
|
|
707
|
+
}
|
|
708
|
+
if (tc.function?.arguments) {
|
|
709
|
+
toolCalls[tc.index]!.function.arguments += tc.function.arguments
|
|
710
|
+
}
|
|
711
|
+
}
|
|
712
|
+
}
|
|
713
|
+
|
|
714
|
+
if (chunk.usage) {
|
|
715
|
+
const prev = this.state.get('tokenUsage')!
|
|
716
|
+
this.state.set('tokenUsage', {
|
|
717
|
+
prompt: prev.prompt + (chunk.usage.prompt_tokens || 0),
|
|
718
|
+
completion: prev.completion + (chunk.usage.completion_tokens || 0),
|
|
719
|
+
total: prev.total + (chunk.usage.total_tokens || 0)
|
|
720
|
+
})
|
|
721
|
+
}
|
|
722
|
+
}
|
|
723
|
+
} finally {
|
|
724
|
+
this.state.set('streaming', false)
|
|
725
|
+
}
|
|
726
|
+
|
|
727
|
+
// If the model produced tool calls, execute them and loop
|
|
728
|
+
if (toolCalls.length > 0) {
|
|
729
|
+
const assistantMessage: OpenAI.Chat.Completions.ChatCompletionAssistantMessageParam = {
|
|
730
|
+
role: 'assistant',
|
|
731
|
+
content: turnContent || null,
|
|
732
|
+
tool_calls: toolCalls
|
|
733
|
+
}
|
|
734
|
+
this.pushMessage(assistantMessage)
|
|
735
|
+
|
|
736
|
+
this.emit('toolCallsStart', toolCalls)
|
|
737
|
+
|
|
738
|
+
for (const tc of toolCalls) {
|
|
739
|
+
const toolName = tc.function.name
|
|
740
|
+
const tool = this._tools[toolName]
|
|
741
|
+
const callCount = (this.state.get('toolCalls') || 0) + 1
|
|
742
|
+
this.state.set('toolCalls', callCount)
|
|
743
|
+
|
|
744
|
+
let result: string
|
|
745
|
+
|
|
746
|
+
if (!tool) {
|
|
747
|
+
result = JSON.stringify({ error: `Unknown tool: ${toolName}` })
|
|
748
|
+
this.emit('toolError', toolName, result)
|
|
749
|
+
} else {
|
|
750
|
+
try {
|
|
751
|
+
const args = JSON.parse(tc.function.arguments)
|
|
752
|
+
this.emit('toolCall', toolName, args)
|
|
753
|
+
const output = await tool.handler(args)
|
|
754
|
+
result = typeof output === 'string' ? output : JSON.stringify(output)
|
|
755
|
+
this.emit('toolResult', toolName, result)
|
|
756
|
+
} catch (err: any) {
|
|
757
|
+
result = JSON.stringify({ error: err.message || String(err) })
|
|
758
|
+
this.emit('toolError', toolName, err)
|
|
759
|
+
}
|
|
760
|
+
}
|
|
761
|
+
|
|
762
|
+
const toolMessage: OpenAI.Chat.Completions.ChatCompletionToolMessageParam = {
|
|
763
|
+
role: 'tool',
|
|
764
|
+
tool_call_id: tc.id,
|
|
765
|
+
content: result
|
|
766
|
+
}
|
|
767
|
+
this.pushMessage(toolMessage)
|
|
768
|
+
}
|
|
769
|
+
|
|
770
|
+
this.emit('toolCallsEnd')
|
|
771
|
+
this.emit('turnEnd', { turn, hasToolCalls: true })
|
|
772
|
+
|
|
773
|
+
// Loop: let the model respond to tool results
|
|
774
|
+
return this.runChatCompletionLoop({ turn: turn + 1, accumulated })
|
|
775
|
+
}
|
|
776
|
+
|
|
777
|
+
// Final text response — use this turn's content for the message history,
|
|
778
|
+
// but accumulated for the response event and return value
|
|
779
|
+
const assistantMessage: Message = { role: 'assistant', content: turnContent }
|
|
780
|
+
this.pushMessage(assistantMessage)
|
|
781
|
+
this.state.set('lastResponse', accumulated)
|
|
782
|
+
|
|
783
|
+
this.emit('turnEnd', { turn, hasToolCalls: false })
|
|
784
|
+
this.emit('response', accumulated)
|
|
785
|
+
|
|
786
|
+
return accumulated
|
|
787
|
+
}
|
|
788
|
+
|
|
789
|
+
/**
 * Append a message to the conversation state.
 *
 * @param {Message} message - The message to append
 */
pushMessage(message: Message) {
  const updated = this.messages.concat(message)
  this.state.set('messages', updated)
}
|
|
797
|
+
}

// Register the Conversation class under the 'conversation' feature key and
// export the registered feature as the module default.
export default features.register('conversation', Conversation)
|