@andypai/orb 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +349 -0
- package/assets/orb-logo.svg +75 -0
- package/assets/orb-terminal-session.svg +72 -0
- package/assets/orb-wordmark.svg +77 -0
- package/package.json +76 -0
- package/prompts/anthropic.md +2 -0
- package/prompts/base.md +1 -0
- package/prompts/openai.md +7 -0
- package/prompts/voice.md +12 -0
- package/src/cli.ts +9 -0
- package/src/config.ts +270 -0
- package/src/index.ts +82 -0
- package/src/pipeline/adapters/anthropic.ts +111 -0
- package/src/pipeline/adapters/openai.ts +202 -0
- package/src/pipeline/adapters/types.ts +16 -0
- package/src/pipeline/adapters/utils.ts +131 -0
- package/src/pipeline/frames.ts +113 -0
- package/src/pipeline/observer.ts +36 -0
- package/src/pipeline/observers/metrics.ts +95 -0
- package/src/pipeline/pipeline.ts +43 -0
- package/src/pipeline/processor.ts +57 -0
- package/src/pipeline/processors/agent.ts +38 -0
- package/src/pipeline/processors/tts.ts +120 -0
- package/src/pipeline/task.ts +239 -0
- package/src/pipeline/transports/terminal-text.ts +24 -0
- package/src/pipeline/transports/types.ts +33 -0
- package/src/services/auth-utils.ts +149 -0
- package/src/services/global-config.ts +363 -0
- package/src/services/openai-auth.ts +18 -0
- package/src/services/prompts.ts +76 -0
- package/src/services/provider-defaults.ts +97 -0
- package/src/services/session.ts +204 -0
- package/src/services/streaming-tts.ts +483 -0
- package/src/services/tts.ts +309 -0
- package/src/setup.ts +234 -0
- package/src/types/index.ts +108 -0
- package/src/ui/App.tsx +142 -0
- package/src/ui/components/ActivityTimeline.tsx +60 -0
- package/src/ui/components/AsciiOrb.tsx +92 -0
- package/src/ui/components/ConversationRail.tsx +44 -0
- package/src/ui/components/Footer.tsx +61 -0
- package/src/ui/components/InputPrompt.tsx +88 -0
- package/src/ui/components/MicroOrb.tsx +25 -0
- package/src/ui/components/TTSErrorBanner.tsx +36 -0
- package/src/ui/components/TurnRow.tsx +71 -0
- package/src/ui/components/WelcomeSplash.tsx +78 -0
- package/src/ui/hooks/useAnimationFrame.ts +33 -0
- package/src/ui/hooks/useConversation.ts +195 -0
- package/src/ui/hooks/useKeyboardShortcuts.ts +57 -0
- package/src/ui/hooks/usePipeline.ts +83 -0
- package/src/ui/hooks/useTerminalSize.ts +37 -0
- package/src/ui/utils/markdown.ts +89 -0
- package/src/ui/utils/model-label.ts +20 -0
- package/src/ui/utils/text.ts +18 -0
- package/src/ui/utils/tool-format.ts +40 -0
package/prompts/base.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
You are a helpful coding assistant.
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
The current project is "{{projectName}}", sourced from {{projectPath}}.
|
|
2
|
+
Inside the provided tool sandbox, the project is mounted at `/workspace`.
|
|
3
|
+
Use the provided `bash`, `readFile`, and `writeFile` tools to explore or edit files.
|
|
4
|
+
Edits happen in a sandbox overlay; describe any changes you make.
|
|
5
|
+
Never claim to be Claude or Anthropic; you are an OpenAI model.
|
|
6
|
+
Prefer concise bash commands (`ls`, `rg`, `sed`, `awk`, `jq`) and keep outputs short.
|
|
7
|
+
If you need to modify files, do so via `writeFile` so changes are explicit.
|
package/prompts/voice.md
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
You are responding via voice.
|
|
2
|
+
|
|
3
|
+
Guidelines for voice responses:
|
|
4
|
+
|
|
5
|
+
- Keep responses concise: 2-4 sentences for simple questions, up to a paragraph for complex topics
|
|
6
|
+
- Use conversational, natural language that sounds good when spoken aloud
|
|
7
|
+
- Avoid code blocks, markdown formatting, bullet lists, and technical symbols
|
|
8
|
+
- When discussing code, describe it verbally rather than showing syntax
|
|
9
|
+
- End with a follow-up question or offer to elaborate if the topic warrants it
|
|
10
|
+
- If a question requires showing code, briefly explain what you would write and ask if they'd like details
|
|
11
|
+
|
|
12
|
+
Remember: your response will be read aloud, so optimize for listening, not reading.
|
package/src/cli.ts
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
import { run } from './index.js'
|
|
3
|
+
|
|
4
|
+
run(process.argv.slice(2)).catch((err) => {
|
|
5
|
+
// Commander throws with exitCode for --help, parse errors, etc.
|
|
6
|
+
if (err?.exitCode !== undefined) process.exit(err.exitCode)
|
|
7
|
+
console.error(err?.message ?? err)
|
|
8
|
+
process.exit(1)
|
|
9
|
+
})
|
package/src/config.ts
ADDED
|
@@ -0,0 +1,270 @@
|
|
|
1
|
+
import { Command } from 'commander'
|
|
2
|
+
import type { AnthropicModel, AppConfig, LlmModelId, LlmProvider, Voice } from './types'
|
|
3
|
+
import { ANTHROPIC_MODELS, DEFAULT_CONFIG, VOICES } from './types'
|
|
4
|
+
|
|
5
|
+
// Accepted --provider spellings, mapped to the canonical provider id.
const PROVIDER_ALIASES: Record<string, LlmProvider> = {
  anthropic: 'anthropic',
  claude: 'anthropic',
  openai: 'openai',
  gpt: 'openai',
}

// Short Anthropic model aliases (e.g. --model=haiku) expanded to full model IDs.
const ANTHROPIC_MODEL_ALIASES: Record<string, AnthropicModel> = {
  opus: 'claude-opus-4-6',
  haiku: 'claude-haiku-4-5-20251001',
  sonnet: 'claude-sonnet-4-6',
}

// Model used when a provider is selected without an explicit --model.
const DEFAULT_MODEL_BY_PROVIDER: Record<LlmProvider, LlmModelId> = {
  anthropic: 'claude-haiku-4-5-20251001',
  openai: 'gpt-5.4',
}
|
|
22
|
+
|
|
23
|
+
function normalizeProvider(value: string): LlmProvider | undefined {
|
|
24
|
+
return PROVIDER_ALIASES[value.trim().toLowerCase()]
|
|
25
|
+
}
|
|
26
|
+
|
|
27
|
+
function normalizeAnthropicModel(value: string): LlmModelId {
|
|
28
|
+
const normalized = value.trim()
|
|
29
|
+
const alias = ANTHROPIC_MODEL_ALIASES[normalized]
|
|
30
|
+
if (alias) return alias
|
|
31
|
+
if (ANTHROPIC_MODELS.includes(normalized as AnthropicModel)) return normalized
|
|
32
|
+
return normalized || DEFAULT_MODEL_BY_PROVIDER.anthropic
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
function normalizeModelForProvider(provider: LlmProvider, value: string): LlmModelId {
|
|
36
|
+
if (provider === 'anthropic') return normalizeAnthropicModel(value)
|
|
37
|
+
return value.trim() || DEFAULT_MODEL_BY_PROVIDER.openai
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
function isAnthropicModel(value: string): boolean {
|
|
41
|
+
return ANTHROPIC_MODELS.includes(value as AnthropicModel) || value in ANTHROPIC_MODEL_ALIASES
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
function resolveModelForConfig(provider: LlmProvider, modelId: string): LlmModelId {
|
|
45
|
+
const normalized = normalizeModelForProvider(provider, modelId)
|
|
46
|
+
if (provider === 'openai' && isAnthropicModel(normalized)) {
|
|
47
|
+
return DEFAULT_MODEL_BY_PROVIDER.openai
|
|
48
|
+
}
|
|
49
|
+
return normalized
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
type ModelOverride = { provider?: LlmProvider; id: string }
|
|
53
|
+
|
|
54
|
+
function parseModelArg(value: string): ModelOverride | undefined {
|
|
55
|
+
if (!value) return undefined
|
|
56
|
+
if (!value.includes(':')) return { id: value }
|
|
57
|
+
|
|
58
|
+
const [prefix, id] = value.split(':', 2)
|
|
59
|
+
const trimmedPrefix = prefix?.trim() ?? ''
|
|
60
|
+
const trimmedId = id?.trim() ?? ''
|
|
61
|
+
if (!trimmedPrefix || !trimmedId) return undefined
|
|
62
|
+
|
|
63
|
+
const provider = normalizeProvider(trimmedPrefix)
|
|
64
|
+
return provider ? { provider, id: trimmedId } : { id: value }
|
|
65
|
+
}
|
|
66
|
+
|
|
67
|
+
function positiveFloat(value: string): number {
|
|
68
|
+
const n = Number(value)
|
|
69
|
+
if (!Number.isFinite(n) || n <= 0) throw new Error(`Expected a positive number, got "${value}"`)
|
|
70
|
+
return n
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
function positiveInt(value: string): number {
|
|
74
|
+
const n = Number(value)
|
|
75
|
+
if (!Number.isInteger(n) || n <= 0) throw new Error(`Expected a positive integer, got "${value}"`)
|
|
76
|
+
return n
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
function nonNegativeInt(value: string): number {
|
|
80
|
+
const n = Number(value)
|
|
81
|
+
if (!Number.isInteger(n) || n < 0)
|
|
82
|
+
throw new Error(`Expected a non-negative integer, got "${value}"`)
|
|
83
|
+
return n
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
// Defaults injected into the CLI definition; option default values are read from here.
interface ProgramDefaults {
  config: AppConfig
}
|
|
89
|
+
|
|
90
|
+
// Extra help text appended after Commander's generated usage (via addHelpText).
// NOTE(review): indentation inside this literal is user-visible help output — verify alignment.
const HELP_EPILOGUE = `
Auto provider selection (when --provider and --model are omitted):
  1) Claude Agent SDK (Claude Code / Max or API key)
  2) OPENAI_API_KEY
  3) ANTHROPIC_API_KEY

Examples:
  orb                            # Current directory with defaults
  orb /path/to/project           # Specific project
  orb setup                      # Create ~/.orb/config.toml
  orb --voice=marius
  orb --provider=openai --model=gpt-5.4
  orb --model=openai:gpt-5.4

Controls:
  - Type your question and press Enter
  - Paste MacWhisper transcription with Cmd+V
  - Shift+Tab to cycle models
  - Ctrl+C to exit

TTS quick start:
  - Serve mode uses tts-gateway at http://localhost:8000 by default
  - Run "orb setup" for guided defaults and gateway instructions
  - Use --tts-mode=generate for local macOS fallback speech

Config:
  Persistent defaults live in ~/.orb/config.toml
  CLI flags override config values for one-off runs`
|
|
118
|
+
|
|
119
|
+
/**
 * Build the Commander program for `orb`.
 *
 * Option defaults are seeded from the resolved config so `--help` shows the
 * effective values. `exitOverride()` makes Commander throw (with `exitCode`)
 * instead of calling process.exit, so the caller controls termination.
 */
function createProgram({ config: defaults }: ProgramDefaults): Command {
  const program = new Command()
    .name('orb')
    .description('Voice-Driven Code Explorer')
    .argument('[projectPath]', 'Project directory path')
    .option('--provider <provider>', 'LLM provider: anthropic|claude, openai|gpt')
    .option('--llm-provider <provider>', 'LLM provider (alias for --provider)')
    .option('--model <model>', 'Model ID, alias (haiku, sonnet, opus), or provider:model')
    .option('--voice <voice>', `TTS voice: ${VOICES.join(', ')}`, defaults.ttsVoice)
    .option(
      '--tts-mode <mode>',
      'TTS mode: serve (tts-gateway HTTP server), generate (local macOS say), server',
      defaults.ttsMode,
    )
    .option('--tts-server-url <url>', 'Serve-mode tts-gateway URL (default: http://localhost:8000)')
    .option('--tts-speed <rate>', 'TTS speed multiplier', positiveFloat, defaults.ttsSpeed)
    .option('--new', 'Start fresh (ignore saved session)')
    .option('--skip-intro', 'Skip the welcome animation')
    .option('--tts', 'Enable text-to-speech (default: true)')
    .option('--no-tts', 'Disable text-to-speech')
    .option('--streaming-tts', 'Enable streaming TTS (default: true)')
    .option('--no-streaming-tts', 'Disable streaming (batch mode)')
    .addHelpText('after', HELP_EPILOGUE)
    .configureOutput({
      writeOut: (str) => process.stdout.write(str),
      writeErr: (str) => process.stderr.write(str),
    })
    .exitOverride()

  return program
}
|
|
150
|
+
|
|
151
|
+
// Shape of Commander's parsed option bag (flag names camelCased by Commander).
interface ParsedOpts {
  provider?: string
  llmProvider?: string
  model?: string
  voice: string
  ttsMode: string
  ttsServerUrl?: string
  ttsSpeed: number
  new?: boolean
  skipIntro?: boolean
  tts?: boolean
  streamingTts?: boolean
}

/** Result of CLI parsing: the merged config plus which settings were chosen explicitly. */
export interface ParseResult {
  config: AppConfig
  explicit: ExplicitFlags
}

/**
 * Tracks which settings were explicitly set (via CLI flag or config file),
 * so later auto-detection knows not to override user choices.
 */
export interface ExplicitFlags {
  provider: boolean
  model: boolean
  ttsBufferSentences: boolean
  ttsMinChunkLength: boolean
  ttsMaxWaitMs: boolean
  ttsGraceWindowMs: boolean
  ttsClauseBoundaries: boolean
}

// Optional seeds for parseCliArgs: base config and explicit flags from ~/.orb/config.toml.
interface ParseCliOptions {
  baseConfig?: AppConfig
  baseExplicit?: Partial<ExplicitFlags>
}
|
|
184
|
+
|
|
185
|
+
function isUserSet(program: Command, name: string): boolean {
|
|
186
|
+
return program.getOptionValueSource(name) === 'cli'
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
/**
 * Parse CLI arguments on top of a base config (defaults merged with the
 * global config file). CLI flags win over the base config only when the user
 * actually supplied them (checked via Commander's option-value source).
 * Returns the merged config and the set of explicitly-chosen settings.
 */
export function parseCliArgs(args: string[], options: ParseCliOptions = {}): ParseResult {
  const baseConfig = options.baseConfig ?? DEFAULT_CONFIG
  const baseExplicit = options.baseExplicit ?? {}
  const program = createProgram({ config: baseConfig })
  program.parse(args, { from: 'user' })

  const opts = program.opts<ParsedOpts>()
  const projectPath = program.args[0] ?? baseConfig.projectPath

  // Boolean flags fall back to the base config unless the user passed them on the CLI.
  const config: AppConfig = {
    ...baseConfig,
    projectPath,
    startFresh: opts.new ?? false,
    skipIntro: isUserSet(program, 'skipIntro') ? opts.skipIntro === true : baseConfig.skipIntro,
    ttsEnabled: isUserSet(program, 'tts') ? opts.tts !== false : baseConfig.ttsEnabled,
    ttsStreamingEnabled: isUserSet(program, 'streamingTts')
      ? opts.streamingTts !== false
      : baseConfig.ttsStreamingEnabled,
    ttsSpeed: opts.ttsSpeed,
  }

  // Voice validation — unknown voices are silently ignored, keeping the default.
  if (VOICES.includes(opts.voice as Voice)) {
    config.ttsVoice = opts.voice as Voice
  }

  // TTS mode (normalize "server" → "serve")
  const ttsMode = opts.ttsMode
  if (ttsMode === 'generate' || ttsMode === 'serve') {
    config.ttsMode = ttsMode
  } else if (ttsMode === 'server') {
    config.ttsMode = 'serve'
  }

  // TTS server URL — supplying a URL implies serve mode.
  if (opts.ttsServerUrl) {
    config.ttsServerUrl = opts.ttsServerUrl.trim()
    if (config.ttsMode === 'generate') config.ttsMode = 'serve'
  }

  // Clause boundaries (Commander handles --no- prefix)
  // Provider and model resolution
  const providerRaw = opts.provider ?? opts.llmProvider
  let providerOverride: LlmProvider | undefined
  if (providerRaw) {
    // Unrecognized provider strings normalize to undefined and are ignored.
    providerOverride = normalizeProvider(providerRaw)
  }

  let modelOverride: ModelOverride | undefined
  if (opts.model) {
    modelOverride = parseModelArg(opts.model.trim())
  }

  // A "provider:model" prefix in --model wins over --provider.
  if (modelOverride?.provider) {
    config.llmProvider = modelOverride.provider
  } else if (providerOverride) {
    config.llmProvider = providerOverride
  }

  // Model: explicit --model is resolved against the chosen provider; a bare
  // --provider switch picks that provider's default model.
  if (modelOverride) {
    config.llmModel = resolveModelForConfig(config.llmProvider, modelOverride.id)
  } else if (providerOverride) {
    config.llmModel = DEFAULT_MODEL_BY_PROVIDER[config.llmProvider]
  }

  // Record which knobs were explicitly set (CLI now, or config file earlier).
  const explicit: ExplicitFlags = {
    provider:
      baseExplicit.provider === true ||
      isUserSet(program, 'provider') ||
      isUserSet(program, 'llmProvider'),
    model: baseExplicit.model === true || isUserSet(program, 'model'),
    ttsBufferSentences: baseExplicit.ttsBufferSentences === true,
    ttsMinChunkLength: baseExplicit.ttsMinChunkLength === true,
    ttsMaxWaitMs: baseExplicit.ttsMaxWaitMs === true,
    ttsGraceWindowMs: baseExplicit.ttsGraceWindowMs === true,
    ttsClauseBoundaries: baseExplicit.ttsClauseBoundaries === true,
  }

  return { config, explicit }
}
|
|
269
|
+
|
|
270
|
+
export { DEFAULT_CONFIG, DEFAULT_MODEL_BY_PROVIDER }
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
import React from 'react'
|
|
2
|
+
import { render } from 'ink'
|
|
3
|
+
import { App } from './ui/App'
|
|
4
|
+
import { DEFAULT_CONFIG, DEFAULT_MODEL_BY_PROVIDER, parseCliArgs } from './config'
|
|
5
|
+
import type { ExplicitFlags } from './config'
|
|
6
|
+
import type { AppConfig } from './types'
|
|
7
|
+
import { applyGlobalConfig, loadGlobalConfig } from './services/global-config'
|
|
8
|
+
import { resolveSmartProvider } from './services/provider-defaults'
|
|
9
|
+
import { loadSession } from './services/session'
|
|
10
|
+
import { runSetupCommand } from './setup'
|
|
11
|
+
|
|
12
|
+
export { App } from './ui/App'
|
|
13
|
+
export { parseCliArgs, DEFAULT_CONFIG } from './config'
|
|
14
|
+
export type { AnthropicModel, AppConfig, LlmModelId, LlmProvider, Voice } from './types'
|
|
15
|
+
|
|
16
|
+
// Streaming-TTS tuning applied for the OpenAI provider when the user has not
// set these values explicitly (see applyOpenAiStreamingDefaults).
// NOTE(review): values look hand-tuned for OpenAI token cadence — confirm before changing.
const OPENAI_STREAMING_DEFAULTS = {
  ttsBufferSentences: 3,
  ttsMinChunkLength: 60,
  ttsMaxWaitMs: 600,
  ttsGraceWindowMs: 200,
  ttsClauseBoundaries: true,
}
|
|
23
|
+
|
|
24
|
+
function applyOpenAiStreamingDefaults(config: AppConfig, explicit: ExplicitFlags) {
|
|
25
|
+
if (config.llmProvider !== 'openai') return
|
|
26
|
+
if (!config.ttsEnabled || !config.ttsStreamingEnabled) return
|
|
27
|
+
|
|
28
|
+
if (!explicit.ttsBufferSentences) {
|
|
29
|
+
config.ttsBufferSentences = OPENAI_STREAMING_DEFAULTS.ttsBufferSentences
|
|
30
|
+
}
|
|
31
|
+
if (!explicit.ttsMinChunkLength) {
|
|
32
|
+
config.ttsMinChunkLength = OPENAI_STREAMING_DEFAULTS.ttsMinChunkLength
|
|
33
|
+
}
|
|
34
|
+
if (!explicit.ttsMaxWaitMs) {
|
|
35
|
+
config.ttsMaxWaitMs = OPENAI_STREAMING_DEFAULTS.ttsMaxWaitMs
|
|
36
|
+
}
|
|
37
|
+
if (!explicit.ttsGraceWindowMs) {
|
|
38
|
+
config.ttsGraceWindowMs = OPENAI_STREAMING_DEFAULTS.ttsGraceWindowMs
|
|
39
|
+
}
|
|
40
|
+
if (!explicit.ttsClauseBoundaries) {
|
|
41
|
+
config.ttsClauseBoundaries = OPENAI_STREAMING_DEFAULTS.ttsClauseBoundaries
|
|
42
|
+
}
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
/**
 * CLI entry point: dispatch the "setup" subcommand, merge global config with
 * CLI flags, auto-detect an LLM provider when none was chosen explicitly,
 * then render the Ink app.
 */
export async function run(args: string[]): Promise<void> {
  const command = args[0]
  if (command === 'setup') {
    await runSetupCommand(args.slice(1))
    return
  }

  // Surface config-file problems without aborting.
  const globalConfig = await loadGlobalConfig()
  for (const warning of globalConfig.warnings) {
    console.warn(`[orb] ${warning}`)
  }

  const baseConfig = applyGlobalConfig(DEFAULT_CONFIG, globalConfig.config)
  const { config, explicit } = parseCliArgs(args, {
    baseConfig,
    baseExplicit: globalConfig.explicit,
  })
  // No explicit provider/model anywhere: probe available credentials.
  if (!explicit.provider && !explicit.model) {
    const smartProvider = await resolveSmartProvider(config)
    if (!smartProvider) {
      console.error(
        'No available LLM credentials found. Set up Claude (Max/OAuth), OPENAI_API_KEY, or ANTHROPIC_API_KEY before starting.\n' +
          'Tip: Use --provider anthropic (with ANTHROPIC_API_KEY) or --provider openai (with OPENAI_API_KEY) to bypass auto-detection.',
      )
      process.exit(1)
    }
    config.llmProvider = smartProvider.provider
    if (!explicit.model) {
      config.llmModel = DEFAULT_MODEL_BY_PROVIDER[smartProvider.provider]
    }
  }
  applyOpenAiStreamingDefaults(config, explicit)
  // --new skips restoring the previously saved session for this project.
  const initialSession = config.startFresh ? null : await loadSession(config.projectPath)

  render(React.createElement(App, { config, initialSession }), {
    patchConsole: true,
  })
}
|
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import { query, type SDKMessage } from '@anthropic-ai/claude-agent-sdk'
|
|
2
|
+
import { buildProviderPrompt } from '../../services/prompts'
|
|
3
|
+
import type { Frame } from '../frames'
|
|
4
|
+
import { createFrame } from '../frames'
|
|
5
|
+
import type { AgentAdapter, AgentAdapterConfig } from './types'
|
|
6
|
+
import {
|
|
7
|
+
getContentBlocks,
|
|
8
|
+
isTextBlock,
|
|
9
|
+
isToolUseBlock,
|
|
10
|
+
isToolResultBlock,
|
|
11
|
+
extractToolResultText,
|
|
12
|
+
} from './utils'
|
|
13
|
+
|
|
14
|
+
/**
 * Adapter that streams a conversation turn through the Claude Agent SDK and
 * translates SDK messages into pipeline Frames: text deltas, tool-call
 * start/result events, and session bookkeeping for resume support.
 */
export function createAnthropicAdapter(config: AgentAdapterConfig): AgentAdapter {
  return {
    async *stream(prompt: string): AsyncIterable<Frame> {
      const { appConfig, session, abortController } = config
      // Only resume a saved session if it was created by this provider.
      let activeSessionId = session?.provider === 'anthropic' ? session.sessionId : undefined
      let accumulatedText = ''
      let toolIndex = 0
      // Maps SDK tool_use ids to stable per-turn display indices.
      const toolIdToIndex = new Map<string, number>()
      const systemPrompt = await buildProviderPrompt({
        provider: 'anthropic',
        projectPath: appConfig.projectPath,
        ttsEnabled: appConfig.ttsEnabled,
      })

      const response = query({
        prompt,
        options: {
          cwd: appConfig.projectPath,
          model: appConfig.llmModel,
          maxTurns: 10,
          resume: activeSessionId,
          permissionMode: 'default',
          abortController,
          systemPrompt,
        },
      })

      for await (const message of response) {
        const typed = message as SDKMessage

        // System init: capture the session id so follow-up turns can resume it.
        if (typed.type === 'system' && typed.subtype === 'init') {
          const newSessionId = (typed as { session_id?: string }).session_id
          if (newSessionId) {
            activeSessionId = newSessionId
            yield createFrame('agent-session', {
              session: { provider: 'anthropic', sessionId: newSessionId },
            })
          }
          continue
        }

        // Assistant message: emit text deltas and tool-call starts.
        if (typed.type === 'assistant') {
          const blocks = getContentBlocks(typed.message)
          for (const block of blocks) {
            if (isTextBlock(block)) {
              accumulatedText += block.text
              yield createFrame('agent-text-delta', {
                delta: block.text,
                accumulatedText,
              })
              continue
            }
            if (isToolUseBlock(block)) {
              // Reuse an existing index if the SDK repeats a tool id.
              const toolId = block.id ?? block.tool_use_id ?? `tool-${toolIndex}`
              const index = toolIdToIndex.get(toolId) ?? toolIndex++
              toolIdToIndex.set(toolId, index)
              yield createFrame('tool-call-start', {
                toolCall: {
                  id: toolId,
                  index,
                  name: block.name,
                  input: block.input ?? {},
                  status: 'running',
                },
              })
            }
          }
          continue
        }

        // User message: tool results arrive as user-role tool_result blocks.
        if (typed.type === 'user') {
          const blocks = getContentBlocks(typed.message)
          for (const block of blocks) {
            if (!isToolResultBlock(block)) continue
            const toolUseId = block.tool_use_id ?? block.id
            const index = toolUseId ? toolIdToIndex.get(toolUseId) : undefined
            // Results without a matching tool-call-start are dropped.
            if (index === undefined) continue
            const resultText = extractToolResultText(block.content)
            yield createFrame('tool-call-result', {
              toolIndex: index,
              result: resultText,
              status: block.is_error ? 'error' : 'complete',
            })
          }
        }

        // Final result: emit the complete text, preferring the SDK's result string.
        if (typed.type === 'result' && typed.subtype === 'success') {
          yield createFrame('agent-text-complete', {
            text: typed.result || accumulatedText,
            session: activeSessionId
              ? { provider: 'anthropic', sessionId: activeSessionId }
              : undefined,
          })
        }
      }
    },
  }
}
|