htmlgraph 0.22.0__py3-none-any.whl → 0.23.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- htmlgraph/__init__.py +1 -1
- htmlgraph/agent_detection.py +41 -2
- htmlgraph/analytics/cli.py +86 -20
- htmlgraph/cli.py +280 -87
- htmlgraph/collections/base.py +68 -4
- htmlgraph/git_events.py +61 -7
- htmlgraph/operations/README.md +62 -0
- htmlgraph/operations/__init__.py +61 -0
- htmlgraph/operations/analytics.py +338 -0
- htmlgraph/operations/events.py +243 -0
- htmlgraph/operations/hooks.py +349 -0
- htmlgraph/operations/server.py +302 -0
- htmlgraph/orchestration/__init__.py +39 -0
- htmlgraph/orchestration/headless_spawner.py +566 -0
- htmlgraph/orchestration/model_selection.py +323 -0
- htmlgraph/orchestrator-system-prompt-optimized.txt +92 -0
- htmlgraph/parser.py +56 -1
- htmlgraph/sdk.py +529 -7
- htmlgraph/server.py +153 -60
- {htmlgraph-0.22.0.dist-info → htmlgraph-0.23.1.dist-info}/METADATA +3 -1
- {htmlgraph-0.22.0.dist-info → htmlgraph-0.23.1.dist-info}/RECORD +29 -19
- /htmlgraph/{orchestration.py → orchestration/task_coordination.py} +0 -0
- {htmlgraph-0.22.0.data → htmlgraph-0.23.1.data}/data/htmlgraph/dashboard.html +0 -0
- {htmlgraph-0.22.0.data → htmlgraph-0.23.1.data}/data/htmlgraph/styles.css +0 -0
- {htmlgraph-0.22.0.data → htmlgraph-0.23.1.data}/data/htmlgraph/templates/AGENTS.md.template +0 -0
- {htmlgraph-0.22.0.data → htmlgraph-0.23.1.data}/data/htmlgraph/templates/CLAUDE.md.template +0 -0
- {htmlgraph-0.22.0.data → htmlgraph-0.23.1.data}/data/htmlgraph/templates/GEMINI.md.template +0 -0
- {htmlgraph-0.22.0.dist-info → htmlgraph-0.23.1.dist-info}/WHEEL +0 -0
- {htmlgraph-0.22.0.dist-info → htmlgraph-0.23.1.dist-info}/entry_points.txt +0 -0
htmlgraph/orchestration/headless_spawner.py (new file)
@@ -0,0 +1,566 @@
+"""Headless AI spawner for multi-AI orchestration."""
+
+import json
+import subprocess
+from dataclasses import dataclass
+
+
+@dataclass
+class AIResult:
+    """Result from AI CLI execution."""
+
+    success: bool
+    response: str
+    tokens_used: int | None
+    error: str | None
+    raw_output: dict | list | str | None
+
+
+class HeadlessSpawner:
+    """
+    Spawn AI agents in headless CLI mode.
+
+    Supports multiple AI CLIs:
+    - spawn_gemini(): Google Gemini (free tier)
+    - spawn_codex(): OpenAI Codex (ChatGPT Plus or higher)
+    - spawn_claude(): Claude Code (same login as the Task tool)
+    - spawn_copilot(): GitHub Copilot (GitHub subscription)
+
+    spawn_claude() vs Task() Tool:
+    --------------------------------
+    Both use the same Claude Code authentication and billing, but:
+
+    spawn_claude():
+    - Isolated execution (no context sharing)
+    - Fresh session each call
+    - Best for: independent tasks, external scripts, parallel processing
+    - Cache miss on each call (higher token usage)
+
+    Task():
+    - Shared conversation context
+    - Builds on previous work
+    - Best for: orchestration, related sequential work
+    - Cache hits in session (5x cheaper for related work)
+
+    Example - When to use spawn_claude():
+        # Independent tasks in external script
+        spawner = HeadlessSpawner()
+        for file in files:
+            result = spawner.spawn_claude(f"Analyze {file} independently")
+            save_result(file, result)
+
+    Example - When to use Task() instead:
+        # Related tasks in orchestration workflow
+        Task(prompt="Analyze all files and compare them")
+        # Better: shares context, uses caching
+    """
+
+    def __init__(self) -> None:
+        """Initialize spawner."""
+        pass
+
+    def spawn_gemini(
+        self,
+        prompt: str,
+        output_format: str = "json",
+        model: str | None = None,
+        include_directories: list[str] | None = None,
+        color: str = "auto",
+        timeout: int = 120,
+    ) -> AIResult:
+        """
+        Spawn Gemini in headless mode.
+
+        Args:
+            prompt: Task description for Gemini
+            output_format: "json" or "stream-json"
+            model: Model selection (e.g., "gemini-2.0-flash"). Default: None (CLI default)
+            include_directories: List of directories to include for context. Default: None
+            color: Color output control ("auto", "on", "off"). Default: "auto"
+            timeout: Max seconds to wait
+
+        Returns:
+            AIResult with response or error
+        """
+        try:
+            # Build command based on tested pattern from spike spk-4029eef3
+            cmd = ["gemini", "-p", prompt, "--output-format", output_format]
+
+            # Add model option if specified
+            if model:
+                cmd.extend(["-m", model])
+
+            # Add include directories if specified
+            if include_directories:
+                for directory in include_directories:
+                    cmd.extend(["--include-directories", directory])
+
+            # Add color option
+            cmd.extend(["--color", color])
+
+            # Execute with timeout and stderr redirection
+            # Note: capture_output cannot be combined with an explicit stderr argument
+            result = subprocess.run(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.DEVNULL,  # Redirect stderr to avoid polluting JSON
+                text=True,
+                timeout=timeout,
+            )
+
+            # Check for command execution errors
+            if result.returncode != 0:
+                return AIResult(
+                    success=False,
+                    response="",
+                    tokens_used=None,
+                    error=f"Gemini CLI failed with exit code {result.returncode}",
+                    raw_output=None,
+                )
+
+            # Parse JSON response
+            try:
+                output = json.loads(result.stdout)
+            except json.JSONDecodeError as e:
+                return AIResult(
+                    success=False,
+                    response="",
+                    tokens_used=None,
+                    error=f"Failed to parse JSON output: {e}",
+                    raw_output={"stdout": result.stdout},
+                )
+
+            # Extract response and token usage from parsed output
+            # Response is at top level in JSON output
+            response_text = output.get("response", "")
+
+            # Token usage is in stats.models (sum across all models)
+            tokens = None
+            stats = output.get("stats", {})
+            if stats and "models" in stats:
+                total_tokens = 0
+                for model_stats in stats["models"].values():
+                    model_tokens = model_stats.get("tokens", {}).get("total", 0)
+                    total_tokens += model_tokens
+                tokens = total_tokens if total_tokens > 0 else None
+
+            return AIResult(
+                success=True,
+                response=response_text,
+                tokens_used=tokens,
+                error=None,
+                raw_output=output,
+            )
+
+        except subprocess.TimeoutExpired:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error=f"Gemini CLI timed out after {timeout} seconds",
+                raw_output=None,
+            )
+        except FileNotFoundError:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error="Gemini CLI not found. Ensure 'gemini' is installed and in PATH.",
+                raw_output=None,
+            )
+        except Exception as e:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error=f"Unexpected error: {type(e).__name__}: {e}",
+                raw_output=None,
+            )
+
+    def spawn_codex(
+        self,
+        prompt: str,
+        approval: str = "never",
+        output_json: bool = True,
+        model: str | None = None,
+        sandbox: str | None = None,
+        full_auto: bool = False,
+        images: list[str] | None = None,
+        color: str = "auto",
+        output_last_message: str | None = None,
+        output_schema: str | None = None,
+        skip_git_check: bool = False,
+        working_directory: str | None = None,
+        use_oss: bool = False,
+        bypass_approvals: bool = False,
+        timeout: int = 120,
+    ) -> AIResult:
+        """
+        Spawn Codex in headless mode.
+
+        Args:
+            prompt: Task description for Codex
+            approval: Approval mode ("never", "always")
+            output_json: Use --json flag for JSONL output
+            model: Model selection (e.g., "gpt-4-turbo"). Default: None
+            sandbox: Sandbox mode ("read-only", "workspace-write", "danger-full-access"). Default: None
+            full_auto: Enable full auto mode (--full-auto). Default: False
+            images: List of image paths (--image). Default: None
+            color: Color output control ("auto", "on", "off"). Default: "auto"
+            output_last_message: Write last message to file (--output-last-message). Default: None
+            output_schema: JSON schema for validation (--output-schema). Default: None
+            skip_git_check: Skip git repo check (--skip-git-repo-check). Default: False
+            working_directory: Workspace directory (--cd). Default: None
+            use_oss: Use local Ollama provider (--oss). Default: False
+            bypass_approvals: Dangerously bypass approvals (--dangerously-bypass-approvals-and-sandbox). Default: False
+            timeout: Max seconds to wait
+
+        Returns:
+            AIResult with response or error
+        """
+        cmd = ["codex", "exec"]
+
+        if output_json:
+            cmd.append("--json")
+
+        # Add model if specified
+        if model:
+            cmd.extend(["--model", model])
+
+        # Add sandbox mode if specified
+        if sandbox:
+            cmd.extend(["--sandbox", sandbox])
+
+        # Add full auto flag
+        if full_auto:
+            cmd.append("--full-auto")
+
+        # Add images
+        if images:
+            for image in images:
+                cmd.extend(["--image", image])
+
+        # Add color option
+        cmd.extend(["--color", color])
+
+        # Add output last message file if specified
+        if output_last_message:
+            cmd.extend(["--output-last-message", output_last_message])
+
+        # Add output schema if specified
+        if output_schema:
+            cmd.extend(["--output-schema", output_schema])
+
+        # Add skip git check flag
+        if skip_git_check:
+            cmd.append("--skip-git-repo-check")
+
+        # Add working directory if specified
+        if working_directory:
+            cmd.extend(["--cd", working_directory])
+
+        # Add OSS flag
+        if use_oss:
+            cmd.append("--oss")
+
+        # Add bypass approvals flag
+        if bypass_approvals:
+            cmd.append("--dangerously-bypass-approvals-and-sandbox")
+
+        cmd.extend(["--approval", approval, prompt])
+
+        try:
+            result = subprocess.run(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.DEVNULL,
+                text=True,
+                timeout=timeout,
+            )
+
+            if not output_json:
+                # Plain text mode - return as-is
+                return AIResult(
+                    success=result.returncode == 0,
+                    response=result.stdout.strip(),
+                    tokens_used=None,
+                    error=None if result.returncode == 0 else "Command failed",
+                    raw_output=result.stdout,
+                )
+
+            # Parse JSONL output
+            events = []
+            for line in result.stdout.splitlines():
+                if line.strip():
+                    try:
+                        events.append(json.loads(line))
+                    except json.JSONDecodeError:
+                        continue
+
+            # Extract agent message
+            response = None
+            for event in events:
+                if event.get("type") == "item.completed":
+                    item = event.get("item", {})
+                    if item.get("type") == "agent_message":
+                        response = item.get("text")
+
+            # Extract token usage from turn.completed event
+            tokens = None
+            for event in events:
+                if event.get("type") == "turn.completed":
+                    usage = event.get("usage", {})
+                    # Sum all token types
+                    tokens = sum(usage.values())
+
+            return AIResult(
+                success=result.returncode == 0,
+                response=response or "",
+                tokens_used=tokens,
+                error=None if result.returncode == 0 else "Command failed",
+                raw_output=events,
+            )
+
+        except FileNotFoundError:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error="Codex CLI not found. Install from: https://github.com/openai/codex",
+                raw_output=None,
+            )
+        except subprocess.TimeoutExpired:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error=f"Timed out after {timeout} seconds",
+                raw_output=None,
+            )
+
+    def spawn_copilot(
+        self,
+        prompt: str,
+        allow_tools: list[str] | None = None,
+        allow_all_tools: bool = False,
+        deny_tools: list[str] | None = None,
+        timeout: int = 120,
+    ) -> AIResult:
+        """
+        Spawn GitHub Copilot in headless mode.
+
+        Args:
+            prompt: Task description for Copilot
+            allow_tools: List of tools to auto-approve (e.g., ["shell(git)", "write(*.py)"])
+            allow_all_tools: Auto-approve all tools (--allow-all-tools). Default: False
+            deny_tools: List of tools to deny (--deny-tool). Default: None
+            timeout: Max seconds to wait
+
+        Returns:
+            AIResult with response or error
+        """
+        cmd = ["copilot", "-p", prompt]
+
+        # Add allow all tools flag
+        if allow_all_tools:
+            cmd.append("--allow-all-tools")
+
+        # Add tool permissions
+        if allow_tools:
+            for tool in allow_tools:
+                cmd.extend(["--allow-tool", tool])
+
+        # Add denied tools
+        if deny_tools:
+            for tool in deny_tools:
+                cmd.extend(["--deny-tool", tool])
+
+        try:
+            result = subprocess.run(
+                cmd,
+                capture_output=True,
+                text=True,
+                timeout=timeout,
+            )
+
+            # Parse output: response is before stats block
+            lines = result.stdout.split("\n")
+
+            # Find where stats start (look for "Total usage est:" or "Usage by model")
+            stats_start = len(lines)
+            for i, line in enumerate(lines):
+                if "Total usage est" in line or "Usage by model" in line:
+                    stats_start = i
+                    break
+
+            # Response is everything before stats
+            response = "\n".join(lines[:stats_start]).strip()
+
+            # Try to extract token count from stats
+            tokens = None
+            for line in lines[stats_start:]:
+                # Look for token counts like "25.8k input, 5 output"
+                if "input" in line and "output" in line:
+                    # Simple extraction: just note we found stats
+                    # TODO: More sophisticated parsing if needed
+                    tokens = 0  # Placeholder
+                    break
+
+            return AIResult(
+                success=result.returncode == 0,
+                response=response,
+                tokens_used=tokens,
+                error=None if result.returncode == 0 else result.stderr,
+                raw_output=result.stdout,
+            )
+
+        except FileNotFoundError:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error="Copilot CLI not found. Install from: https://docs.github.com/en/copilot/using-github-copilot/using-github-copilot-in-the-command-line",
+                raw_output=None,
+            )
+        except subprocess.TimeoutExpired:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error=f"Timed out after {timeout} seconds",
+                raw_output=None,
+            )
+
+    def spawn_claude(
+        self,
+        prompt: str,
+        output_format: str = "json",
+        permission_mode: str = "bypassPermissions",
+        resume: str | None = None,
+        verbose: bool = False,
+        timeout: int = 300,
+    ) -> AIResult:
+        """
+        Spawn Claude in headless mode.
+
+        NOTE: Uses the same Claude Code authentication as the Task() tool, but provides
+        an isolated execution context. Each call creates a new session without shared
+        context. Best for independent tasks or external scripts.
+
+        For orchestration workflows with shared context, prefer the Task() tool, which
+        leverages prompt caching (5x cheaper for related work).
+
+        Args:
+            prompt: Task description for Claude
+            output_format: "text" or "json" (stream-json requires --verbose)
+            permission_mode: Permission handling mode:
+                - "bypassPermissions": Auto-approve all (default)
+                - "acceptEdits": Auto-approve edits only
+                - "dontAsk": Fail on permission prompts
+                - "default": Normal interactive prompts
+                - "plan": Plan mode (no execution)
+                - "delegate": Delegation mode
+            resume: Resume from previous session (--resume). Default: None
+            verbose: Enable verbose output (--verbose). Default: False
+            timeout: Max seconds (default: 300; Claude can be slow to initialize)
+
+        Returns:
+            AIResult with response or error
+
+        Example:
+            >>> spawner = HeadlessSpawner()
+            >>> result = spawner.spawn_claude("What is 2+2?")
+            >>> if result.success:
+            ...     print(result.response)  # "4"
+            ...     print(f"Cost: ${result.raw_output['total_cost_usd']}")
+        """
+        cmd = ["claude", "-p"]
+
+        if output_format != "text":
+            cmd.extend(["--output-format", output_format])
+
+        if permission_mode:
+            cmd.extend(["--permission-mode", permission_mode])
+
+        # Add resume flag if specified
+        if resume:
+            cmd.extend(["--resume", resume])
+
+        # Add verbose flag
+        if verbose:
+            cmd.append("--verbose")
+
+        cmd.append(prompt)
+
+        try:
+            result = subprocess.run(
+                cmd,
+                capture_output=True,
+                text=True,
+                timeout=timeout,
+            )
+
+            if output_format == "json":
+                # Parse JSON output
+                try:
+                    output = json.loads(result.stdout)
+                except json.JSONDecodeError as e:
+                    return AIResult(
+                        success=False,
+                        response="",
+                        tokens_used=None,
+                        error=f"Failed to parse JSON output: {e}",
+                        raw_output=result.stdout,
+                    )
+
+                # Extract result and metadata
+                usage = output.get("usage", {})
+                tokens = (
+                    usage.get("input_tokens", 0)
+                    + usage.get("cache_creation_input_tokens", 0)
+                    + usage.get("cache_read_input_tokens", 0)
+                    + usage.get("output_tokens", 0)
+                )
+
+                return AIResult(
+                    success=output.get("type") == "result"
+                    and not output.get("is_error"),
+                    response=output.get("result", ""),
+                    tokens_used=tokens,
+                    error=output.get("error") if output.get("is_error") else None,
+                    raw_output=output,
+                )
+            else:
+                # Plain text output
+                return AIResult(
+                    success=result.returncode == 0,
+                    response=result.stdout.strip(),
+                    tokens_used=None,
+                    error=None if result.returncode == 0 else result.stderr,
+                    raw_output=result.stdout,
+                )
+
+        except FileNotFoundError:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error="Claude CLI not found. Install Claude Code from: https://claude.com/claude-code",
+                raw_output=None,
+            )
+        except subprocess.TimeoutExpired:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error=f"Timed out after {timeout} seconds",
+                raw_output=None,
+            )
+        except Exception as e:
+            return AIResult(
+                success=False,
+                response="",
+                tokens_used=None,
+                error=f"Unexpected error: {str(e)}",
+                raw_output=None,
+            )
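
Usage sketch (editorial illustration, not part of the released file): the uniform AIResult shape above makes provider fallback straightforward. A minimal sketch, assuming the gemini and claude CLIs are installed and authenticated, and that the module is importable from the path recorded above:

from htmlgraph.orchestration.headless_spawner import AIResult, HeadlessSpawner

spawner = HeadlessSpawner()

def run_with_fallback(prompt: str) -> AIResult:
    """Try the free-tier Gemini CLI first; fall back to Claude Code on failure."""
    result = spawner.spawn_gemini(prompt, timeout=120)
    if result.success:
        return result
    # Gemini missing, timed out, or errored: reuse the same prompt with Claude
    return spawner.spawn_claude(prompt, timeout=300)

result = run_with_fallback("Summarize the module's public API in three bullets.")
if result.success:
    print(result.response)
    print(f"tokens used: {result.tokens_used}")
else:
    print(f"spawn failed: {result.error}")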
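Because each spawn_claude() call runs as an isolated session (per the class docstring), independent tasks can be fanned out in parallel from an external script. A sketch under the assumption that the claude CLI tolerates concurrent invocations; the file names are hypothetical:

from concurrent.futures import ThreadPoolExecutor, as_completed

from htmlgraph.orchestration.headless_spawner import HeadlessSpawner

spawner = HeadlessSpawner()
files = ["parser.py", "sdk.py", "server.py"]  # hypothetical targets

# Threads suffice here: each call blocks in subprocess.run() on its own CLI process.
with ThreadPoolExecutor(max_workers=3) as pool:
    futures = {pool.submit(spawner.spawn_claude, f"Review {name} for bugs"): name
               for name in files}
    for future in as_completed(futures):
        result = future.result()
        status = "ok" if result.success else f"error: {result.error}"
        print(f"{futures[future]}: {status}")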