@orchagent/cli 0.3.49 → 0.3.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,768 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Agent runner — standalone script for local and sandbox execution.
4
+
5
+ Implements a tool-use loop: the LLM receives the author's prompt as the system
6
+ message and the caller's input as the user message, then iterates with tools
7
+ until it calls submit_result or reaches max_turns.
8
+
9
+ Built-in tools: bash, read_file, write_file, list_files, submit_result
10
+ Custom tools: command wrappers defined by the agent author in custom_tools.json
11
+
12
+ Supports multiple LLM providers: anthropic, openai, gemini.
13
+ Set LLM_PROVIDER env var to select (default: anthropic).
14
+
15
+ When LOCAL_MODE=1 is set, adapts platform context for local execution
16
+ (no sandbox references, uses actual working directory).
17
+ """
18
+
19
+ import argparse
20
+ import json
21
+ import os
22
+ import re
23
+ import subprocess
24
+ import sys
25
+ import threading
26
+
27
+ # ---------------------------------------------------------------------------
28
+ # Tool definitions (canonical format — Anthropic-style)
29
+ # ---------------------------------------------------------------------------
30
+
31
# Canonical tool definitions, expressed in Anthropic-style input_schema format.
# This list covers only the always-available built-ins; submit_result is
# appended at runtime (build_submit_result_tool) and author-defined tools
# after that (build_custom_tools). Other providers convert from this format.
BUILTIN_TOOLS = [
    {
        "name": "bash",
        "description": "Run a shell command and return stdout + stderr. Use for installing packages, running tests, compiling code, and other system operations. Commands time out after 120 seconds.",
        "input_schema": {
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "The shell command to execute",
                }
            },
            "required": ["command"],
        },
    },
    {
        "name": "read_file",
        "description": "Read the contents of a file. Returns the full file content as a string.",
        "input_schema": {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": "Absolute or relative path to the file",
                }
            },
            "required": ["path"],
        },
    },
    {
        "name": "write_file",
        "description": "Write content to a file. Creates the file and any parent directories if they don't exist. Overwrites existing content.",
        "input_schema": {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": "Absolute or relative path to the file",
                },
                "content": {
                    "type": "string",
                    "description": "The content to write to the file",
                },
            },
            "required": ["path", "content"],
        },
    },
    {
        "name": "list_files",
        "description": "List files and directories at the given path.",
        "input_schema": {
            "type": "object",
            "properties": {
                "path": {
                    "type": "string",
                    "description": "Directory path to list (default: current directory)",
                    "default": ".",
                },
                "recursive": {
                    "type": "boolean",
                    "description": "If true, list files recursively",
                    "default": False,
                },
            },
        },
    },
]

# Seconds per command; mirrored in the bash tool description above.
BASH_TIMEOUT = 120
100
+
101
+
102
def error_exit(msg):
    """Report a fatal error as a JSON object on stdout and exit with code 1."""
    sys.stdout.write(json.dumps({"error": msg}) + "\n")
    sys.exit(1)
105
+
106
+
107
def build_platform_context(output_schema, custom_tools_config):
    """Build the platform-context preamble prepended to the author's prompt.

    Explains the execution environment, available tools, optional skills,
    and how to submit results, so the author's prompt.md can focus purely
    on domain expertise instead of sandbox mechanics.
    """
    local = os.environ.get("LOCAL_MODE") == "1"

    out = [
        "[PLATFORM CONTEXT — auto-injected by orchagent]",
        "",
        "## Environment",
    ]
    if local:
        out.append(f"You are running locally. Working directory: {os.getcwd()}")
    else:
        out.append("You are running inside an isolated sandbox. Working directory: /home/user")
        out.append("Uploaded files (if any): /tmp/uploads/")
    out += [
        "",
        "## Tools",
        "- **bash**: Run shell commands (120s timeout per command)",
        "- **read_file**: Read a file's contents",
        "- **write_file**: Create or overwrite a file (parent dirs created automatically)",
        "- **list_files**: List directory contents",
    ]

    if custom_tools_config:
        for tool in custom_tools_config:
            summary = tool.get("description", tool.get("command", ""))
            out.append(f"- **{tool['name']}**: {summary}")

    # Advertise reference skills when a manifest is present (best-effort).
    manifest = "/home/user/orchagent/skills/manifest.json"
    if local:
        manifest = os.path.join(os.getcwd(), "orchagent", "skills", "manifest.json")
    if os.path.exists(manifest):
        try:
            with open(manifest, "r") as fh:
                skills = json.load(fh)
            if skills:
                out += ["", "## Skills", "Reference material is available:"]
                for skill in skills:
                    out.append(f"- {skill.get('name', '')} — {skill.get('description', '')}")
        except Exception:
            pass  # a broken manifest must never abort the run

    out += ["", "## Submitting Results"]
    if output_schema:
        out.append("When done, call **submit_result** with output matching this schema:")
        out.append("```json")
        out.append(json.dumps(output_schema, indent=2))
        out.append("```")
    else:
        out.append("When done, call **submit_result** with a JSON object containing your result.")

    out += ["", "[END PLATFORM CONTEXT]", "", "---", ""]
    return "\n".join(out)
171
+
172
+
173
def build_submit_result_tool(output_schema):
    """Return the submit_result tool definition.

    Uses the agent's declared output schema when one exists; otherwise falls
    back to a generic single-string "result" schema.
    """
    fallback = {
        "type": "object",
        "properties": {
            "result": {
                "type": "string",
                "description": "The final result to return",
            }
        },
    }
    return {
        "name": "submit_result",
        "description": "Submit the final result. Call this when you have completed the task. The input must match the agent's output schema.",
        "input_schema": output_schema if output_schema else fallback,
    }
192
+
193
+
194
def build_custom_tools(custom_tools_config):
    """Translate author-defined custom tool configs into canonical tool defs."""
    defs = []
    for cfg in custom_tools_config:
        # Tools without an input_schema take no parameters: empty object schema.
        schema = cfg.get("input_schema") or {"type": "object", "properties": {}}
        defs.append({
            "name": cfg["name"],
            "description": cfg.get("description", "Run: " + cfg["command"]),
            "input_schema": schema,
        })
    return defs
209
+
210
+
211
+ # ---------------------------------------------------------------------------
212
+ # Structured event emission for real-time streaming
213
+ # ---------------------------------------------------------------------------
214
+
215
def emit_event(event_type, **kwargs):
    """Write a machine-parseable event line to stderr for the gateway."""
    record = {"type": event_type, **kwargs}
    sys.stderr.write("@@ORCHAGENT_EVENT:" + json.dumps(record) + "\n")
    sys.stderr.flush()
219
+
220
+ def _brief_args(tool_name, args):
221
+ """Short safe summary of tool args for streaming display."""
222
+ if tool_name == "bash":
223
+ cmd = args.get("command", "")
224
+ return cmd[:120] + ("..." if len(cmd) > 120 else "")
225
+ if tool_name == "read_file":
226
+ return args.get("path", "")[:100]
227
+ if tool_name == "write_file":
228
+ return "%s (%d chars)" % (args.get("path", "")[:80], len(args.get("content", "")))
229
+ if tool_name == "list_files":
230
+ return args.get("path", ".")
231
+ if tool_name == "submit_result":
232
+ return ""
233
+ try:
234
+ s = json.dumps(args)
235
+ return s[:100] + ("..." if len(s) > 100 else "")
236
+ except Exception:
237
+ return "..."
238
+
239
+
240
+ # ---------------------------------------------------------------------------
241
+ # Verbose logging for local mode
242
+ # ---------------------------------------------------------------------------
243
+
244
# Set True by main() when --verbose is passed; gates verbose_log output.
_VERBOSE = False

def verbose_log(tool_name, tool_input):
    """Print a human-readable one-line summary of a tool call when --verbose."""
    if not _VERBOSE:
        return
    if tool_name == "bash":
        cmd = tool_input.get("command", "")
        if len(cmd) > 100:
            cmd = cmd[:100] + "..."
        line = "  - bash: %s" % cmd
    elif tool_name in ("read_file", "write_file"):
        line = "  - %s: %s" % (tool_name, tool_input.get("path", ""))
    elif tool_name == "list_files":
        line = "  - list_files: %s" % tool_input.get("path", ".")
    elif tool_name == "submit_result":
        line = "  - submit_result"
    else:
        line = "  - %s" % tool_name
    print(line, file=sys.stderr, flush=True)
264
+
265
+
266
+ # ---------------------------------------------------------------------------
267
+ # Tool execution
268
+ # ---------------------------------------------------------------------------
269
+
270
def execute_bash(command):
    """Run *command* under bash, capturing stdout/stderr with a timeout.

    Failures (timeout, non-zero exit, launch errors) are reported inline in
    the returned text rather than raised, so the LLM always gets a result.
    """
    try:
        proc = subprocess.run(
            ["bash", "-c", command],
            capture_output=True,
            text=True,
            timeout=BASH_TIMEOUT,
        )
    except subprocess.TimeoutExpired:
        return "[ERROR] Command timed out after %d seconds" % BASH_TIMEOUT
    except Exception as exc:
        return "[ERROR] %s" % exc

    pieces = []
    if proc.stdout:
        pieces.append(proc.stdout)
    if proc.stderr:
        pieces.append("STDERR:\n" + proc.stderr)
    text = "\n".join(pieces)
    if proc.returncode != 0:
        text += "\n[exit code: %d]" % proc.returncode
    return text or "(no output)"
291
+
292
+
293
def execute_read_file(path):
    """Return the full text of the file at *path*, or an inline [ERROR] string."""
    try:
        with open(path, "r") as fh:
            return fh.read()
    except FileNotFoundError:
        return "[ERROR] File not found: " + path
    except Exception as exc:
        return "[ERROR] %s" % exc
302
+
303
+
304
def execute_write_file(path, content):
    """Write *content* to *path*, creating missing parent directories.

    Returns a success message or an inline [ERROR] string; never raises.
    NOTE: the "bytes" figure is len(content), i.e. characters, not encoded bytes.
    """
    try:
        directory = os.path.dirname(path)
        if directory:
            os.makedirs(directory, exist_ok=True)
        with open(path, "w") as fh:
            fh.write(content)
    except Exception as exc:
        return "[ERROR] %s" % exc
    return "Successfully wrote %d bytes to %s" % (len(content), path)
315
+
316
+
317
def execute_list_files(path=".", recursive=False):
    """List directory contents as newline-separated names.

    Non-recursive mode suffixes directories with "/"; recursive mode returns
    sorted relative file paths and skips hidden files and directories.
    """
    try:
        if recursive:
            found = []
            for root, dirs, files in os.walk(path):
                # Prune hidden directories so os.walk never descends into them.
                dirs[:] = [d for d in dirs if not d.startswith(".")]
                found.extend(
                    os.path.relpath(os.path.join(root, name), path)
                    for name in files
                    if not name.startswith(".")
                )
            return "\n".join(sorted(found)) or "(empty directory)"
        listing = []
        for name in sorted(os.listdir(path)):
            is_dir = os.path.isdir(os.path.join(path, name))
            listing.append(name + "/" if is_dir else name)
        return "\n".join(listing) or "(empty directory)"
    except FileNotFoundError:
        return "[ERROR] Directory not found: " + path
    except Exception as exc:
        return "[ERROR] %s" % exc
340
+
341
+
342
def execute_custom_tool(command_template, params):
    """Run a custom tool by expanding {{param}} placeholders in its command.

    Params are also dumped to /tmp/__tool_input.json for tools that prefer
    structured input. NOTE(review): values are only single-quote-escaped, so a
    template that doesn't wrap its placeholders in single quotes is open to
    shell injection — confirm templates follow that convention.
    """
    with open("/tmp/__tool_input.json", "w") as fh:
        json.dump(params, fh)

    expanded = command_template
    for name, value in params.items():
        escaped = str(value).replace("'", "'\\''")
        expanded = expanded.replace("{{" + name + "}}", escaped)
    # Drop any placeholders the caller did not supply a value for.
    expanded = re.sub(r"\{\{\w+\}\}", "", expanded)
    return execute_bash(expanded)
353
+
354
+
355
def dispatch_tool(tool_name, tool_input, custom_tools_config):
    """Route a tool call to its implementation.

    Returns (result_text, is_submit); is_submit is True only when the call
    was submit_result, whose payload is itself the final result.
    """
    if tool_name == "submit_result":
        return json.dumps(tool_input), True
    if tool_name == "bash":
        result = execute_bash(tool_input.get("command", ""))
    elif tool_name == "read_file":
        result = execute_read_file(tool_input.get("path", ""))
    elif tool_name == "write_file":
        result = execute_write_file(
            tool_input.get("path", ""),
            tool_input.get("content", ""),
        )
    elif tool_name == "list_files":
        result = execute_list_files(
            tool_input.get("path", "."),
            tool_input.get("recursive", False),
        )
    else:
        # Fall back to author-defined custom tools.
        matched = next((ct for ct in custom_tools_config if ct["name"] == tool_name), None)
        if matched is None:
            return "[ERROR] Unknown tool: " + tool_name, False
        result = execute_custom_tool(matched["command"], tool_input)
    return result, False
381
+
382
+
383
+ # ---------------------------------------------------------------------------
384
+ # Provider abstraction
385
+ # ---------------------------------------------------------------------------
386
+
387
class AnthropicProvider:
    """Provider adapter for the Anthropic Messages API.

    The runner's canonical tool format IS the Anthropic format, so this
    adapter is mostly pass-through.
    """

    name = "anthropic"

    def import_sdk(self):
        """Create the client from ANTHROPIC_API_KEY (exits if unset)."""
        import anthropic
        api_key = os.environ.get("ANTHROPIC_API_KEY")
        if not api_key:
            error_exit("ANTHROPIC_API_KEY not set")
        self.client = anthropic.Anthropic(api_key=api_key)
        self.model = os.environ.get("LLM_MODEL", "claude-sonnet-4-5-20250929")

    def convert_tools(self, tools):
        # No conversion needed: canonical format is Anthropic's.
        return tools

    def call(self, system, messages, tools):
        return self.client.messages.create(
            model=self.model,
            max_tokens=16384,
            system=system,
            tools=tools,
            messages=messages,
        )

    def has_tool_use(self, r):
        for block in r.content:
            if block.type == "tool_use":
                return True
        return False

    def extract_text(self, r):
        return "\n".join(block.text for block in r.content if block.type == "text")

    def extract_tool_calls(self, r):
        """Yield (id, name, input) for each tool_use block in the response."""
        for block in r.content:
            if block.type == "tool_use":
                yield block.id, block.name, block.input

    def append_turn(self, messages, response, tool_results):
        """Append the assistant turn and a user turn carrying tool results."""
        messages.append({"role": "assistant", "content": response.content})
        result_blocks = []
        for call_id, _name, text, is_err in tool_results:
            entry = {"type": "tool_result", "tool_use_id": call_id, "content": text}
            if is_err:
                entry["is_error"] = True
            result_blocks.append(entry)
        messages.append({"role": "user", "content": result_blocks})
426
+
427
+
428
class OpenAIProvider:
    """Provider adapter for the OpenAI chat-completions function-calling API."""

    name = "openai"

    def import_sdk(self):
        """Create the client from OPENAI_API_KEY (exits if unset)."""
        import openai
        api_key = os.environ.get("OPENAI_API_KEY")
        if not api_key:
            error_exit("OPENAI_API_KEY not set")
        self.client = openai.OpenAI(api_key=api_key)
        self.model = os.environ.get("LLM_MODEL", "gpt-4o")

    def convert_tools(self, tools):
        """Wrap canonical tool defs in OpenAI's {"type": "function"} envelope."""
        return [
            {
                "type": "function",
                "function": {
                    "name": tool["name"],
                    "description": tool.get("description", ""),
                    "parameters": tool.get("input_schema", {"type": "object", "properties": {}}),
                },
            }
            for tool in tools
        ]

    def call(self, system, messages, tools):
        # OpenAI carries the system prompt as the first chat message.
        return self.client.chat.completions.create(
            model=self.model,
            max_tokens=16384,
            tools=tools,
            messages=[{"role": "system", "content": system}] + messages,
        )

    def has_tool_use(self, r):
        return bool(r.choices[0].message.tool_calls)

    def extract_text(self, r):
        return r.choices[0].message.content or ""

    def extract_tool_calls(self, r):
        """Yield (id, name, parsed-args) for each tool call in the response."""
        for call in r.choices[0].message.tool_calls:
            yield call.id, call.function.name, json.loads(call.function.arguments)

    def append_turn(self, messages, response, tool_results):
        """Append the assistant message, then one "tool" message per result."""
        msg = response.choices[0].message
        assistant = {"role": "assistant", "content": msg.content or ""}
        if msg.tool_calls:
            assistant["tool_calls"] = [
                {
                    "id": call.id,
                    "type": "function",
                    "function": {
                        "name": call.function.name,
                        "arguments": call.function.arguments,
                    },
                }
                for call in msg.tool_calls
            ]
        messages.append(assistant)
        for call_id, _name, text, _is_err in tool_results:
            messages.append({
                "role": "tool",
                "tool_call_id": call_id,
                "content": text,
            })
490
+
491
+
492
class GeminiProvider:
    """Provider adapter for Google Gemini via the google-genai SDK.

    Converts canonical (Anthropic-style) tools into Gemini function
    declarations, and maintains conversation history as genai Content
    objects rather than plain dicts.
    """

    name = "gemini"

    def _sanitize_schema(self, schema):
        """Recursively strip keys Gemini doesn't support.

        Removes "$schema", "additionalProperties", "examples", "default" and
        "title"; degrades an object schema with no properties to STRING, since
        a property-less object declaration is replaced here rather than passed
        through. Non-dict inputs are returned unchanged.
        """
        if not isinstance(schema, dict):
            return schema
        # Work on a shallow copy so canonical tool definitions stay untouched.
        schema = dict(schema)
        for key in ("$schema", "additionalProperties", "examples", "default", "title"):
            schema.pop(key, None)
        schema_type = (schema.get("type") or "").lower()
        if schema_type == "object":
            props = schema.get("properties")
            if not props or not isinstance(props, dict) or len(props) == 0:
                # Empty object schema: fall back to a bare string parameter.
                schema["type"] = "STRING"
                schema.pop("properties", None)
                schema.pop("required", None)
            else:
                sanitized = {}
                for k, v in props.items():
                    cleaned = self._sanitize_schema(v)
                    if cleaned is not None:
                        sanitized[k] = cleaned
                schema["properties"] = sanitized
        elif schema_type == "array":
            items = schema.get("items")
            if isinstance(items, dict):
                schema["items"] = self._sanitize_schema(items)
        return schema

    def import_sdk(self):
        """Create the genai client from GEMINI_API_KEY (exits if unset)."""
        from google import genai
        key = os.environ.get("GEMINI_API_KEY")
        if not key:
            error_exit("GEMINI_API_KEY not set")
        self.client = genai.Client(api_key=key)
        self.model = os.environ.get("LLM_MODEL", "gemini-2.5-pro")
        # Grab google.genai.types without a module-level import, since the
        # SDK is only required when this provider is actually selected.
        self._genai_types = __import__("google.genai", fromlist=["types"]).types

    def convert_tools(self, tools):
        """Convert canonical tools to a single Tool of function declarations."""
        types = self._genai_types
        declarations = []
        for t in tools:
            schema = t.get("input_schema", {"type": "object", "properties": {}})
            sanitized = self._sanitize_schema(schema)
            declarations.append(types.FunctionDeclaration(
                name=t["name"],
                description=t.get("description", ""),
                parameters=sanitized,
            ))
        return [types.Tool(function_declarations=declarations)]

    def call(self, system, messages, tools):
        """Send the conversation, converting dict messages to genai Content.

        Accepts a mixed history: genai Content objects (appended by
        append_turn) pass through; dicts are converted by role/content shape.
        """
        types = self._genai_types
        contents = []
        for msg in messages:
            role = msg["role"] if isinstance(msg, dict) else getattr(msg, "role", "user")
            # Already a genai Content object (has .parts): pass through as-is.
            if hasattr(msg, "parts"):
                contents.append(msg)
                continue
            # Gemini only has "user" and "model" roles.
            gemini_role = "user" if role == "user" else "model"
            content = msg.get("content", "") if isinstance(msg, dict) else ""
            if isinstance(content, str):
                contents.append(types.Content(
                    role=gemini_role,
                    parts=[types.Part.from_text(text=content)],
                ))
            elif isinstance(content, list):
                parts = []
                for item in content:
                    if isinstance(item, dict) and "function_response" in item:
                        fr = item["function_response"]
                        parts.append(types.Part.from_function_response(
                            name=fr["name"],
                            response=fr["response"],
                        ))
                    else:
                        # Anything else is stringified into a text part.
                        parts.append(types.Part.from_text(text=str(item)))
                contents.append(types.Content(role=gemini_role, parts=parts))
        config = types.GenerateContentConfig(
            system_instruction=system,
            tools=tools,
            max_output_tokens=16384,
        )
        return self.client.models.generate_content(
            model=self.model, contents=contents, config=config)

    def has_tool_use(self, r):
        """True if any part of the first candidate carries a function_call."""
        if not r.candidates or not r.candidates[0].content:
            return False
        return any(p.function_call for p in r.candidates[0].content.parts)

    def extract_text(self, r):
        """Concatenate the text parts of the first candidate (may be empty)."""
        if not r.candidates or not r.candidates[0].content:
            return ""
        parts = []
        for p in r.candidates[0].content.parts:
            if p.text:
                parts.append(p.text)
        return "\n".join(parts)

    def extract_tool_calls(self, r):
        """Yield (id, name, args) per function call.

        Gemini function calls have no call ids, so the part index (as a
        string) stands in as the id.
        """
        for i, p in enumerate(r.candidates[0].content.parts):
            if p.function_call:
                yield str(i), p.function_call.name, dict(p.function_call.args)

    def append_turn(self, messages, response, tool_results):
        """Append the model turn, then a user turn of function responses."""
        types = self._genai_types
        # Rebuild the model's response as a fresh Content object.
        model_parts = []
        for p in response.candidates[0].content.parts:
            if p.function_call:
                model_parts.append(types.Part.from_function_call(
                    name=p.function_call.name,
                    args=dict(p.function_call.args),
                ))
            elif p.text:
                model_parts.append(types.Part.from_text(text=p.text))
        messages.append(types.Content(role="model", parts=model_parts))
        # Function responses travel back as a user message.
        fr_parts = []
        for call_id, name, text, is_err in tool_results:
            # Prefer parsed JSON output; fall back to wrapping the raw text.
            try:
                resp_data = json.loads(text)
            except (json.JSONDecodeError, TypeError):
                resp_data = {"output": text}
            if is_err:
                resp_data = {"error": text}
            # Gemini requires response to be a dict.
            if not isinstance(resp_data, dict):
                resp_data = {"output": resp_data}
            fr_parts.append(types.Part.from_function_response(
                name=name, response=resp_data,
            ))
        messages.append(types.Content(role="user", parts=fr_parts))
630
+
631
+
632
# Registry mapping LLM_PROVIDER env-var values to provider adapter classes.
PROVIDERS = {
    "anthropic": AnthropicProvider,
    "openai": OpenAIProvider,
    "gemini": GeminiProvider,
}
637
+
638
+
639
class Heartbeat:
    """Context manager printing "." to stderr every *interval* seconds.

    Keeps the E2B connection alive while a long LLM call is in flight; the
    background thread is a daemon and stops promptly on exit.
    """

    def __init__(self, interval=15):
        self.interval = interval
        self._stop = threading.Event()
        self._thread = None

    def _run(self):
        # Event.wait doubles as the sleep; it returns True once stop is set.
        while not self._stop.wait(self.interval):
            print(".", end="", file=sys.stderr, flush=True)

    def __enter__(self):
        self._thread = threading.Thread(target=self._run, daemon=True)
        self._thread.start()
        return self

    def __exit__(self, *exc_info):
        self._stop.set()
        if self._thread:
            self._thread.join(timeout=2)
658
+
659
+
660
+ # ---------------------------------------------------------------------------
661
+ # Main agent loop
662
+ # ---------------------------------------------------------------------------
663
+
664
def main():
    """Run the agent tool-use loop.

    Reads prompt.md and input.json (plus optional output_schema.json and
    custom_tools.json) from the working directory, selects an LLM provider
    from LLM_PROVIDER, then iterates turns: call the model, execute its tool
    calls, feed results back — until submit_result is called, the model stops
    calling tools, or --max-turns is exhausted. The final result is printed
    as JSON on stdout; all events and logs go to stderr.
    """
    global _VERBOSE

    parser = argparse.ArgumentParser()
    parser.add_argument("--max-turns", type=int, default=25)
    parser.add_argument("--verbose", action="store_true", help="Log tool calls to stderr")
    args = parser.parse_args()

    _VERBOSE = args.verbose

    with open("prompt.md", "r") as f:
        author_prompt = f.read()

    with open("input.json", "r") as f:
        input_data = json.load(f)

    output_schema = None
    if os.path.exists("output_schema.json"):
        with open("output_schema.json", "r") as f:
            output_schema = json.load(f)

    custom_tools_config = []
    if os.path.exists("custom_tools.json"):
        with open("custom_tools.json", "r") as f:
            custom_tools_config = json.load(f)

    # Prepend platform context so authors don't need to explain sandbox mechanics.
    system_prompt = build_platform_context(output_schema, custom_tools_config) + author_prompt

    # Canonical tool list: built-ins + submit_result + author-defined tools.
    canonical_tools = list(BUILTIN_TOOLS)
    canonical_tools.append(build_submit_result_tool(output_schema))
    canonical_tools.extend(build_custom_tools(custom_tools_config))

    # Select and initialize the provider adapter.
    provider_name = os.environ.get("LLM_PROVIDER", "anthropic")
    if provider_name not in PROVIDERS:
        error_exit("Unsupported LLM_PROVIDER: %s. Supported: %s" % (provider_name, ", ".join(PROVIDERS)))

    provider = PROVIDERS[provider_name]()
    try:
        provider.import_sdk()
    except ImportError as e:
        error_exit("Failed to import SDK for %s: %s" % (provider_name, e))
    except Exception as e:
        error_exit("Failed to initialize %s provider: %s" % (provider_name, e))

    tools = provider.convert_tools(canonical_tools)

    messages = [{"role": "user", "content": json.dumps(input_data, indent=2)}]

    for turn in range(args.max_turns):
        emit_event("turn_start", turn=turn + 1, max_turns=args.max_turns)
        if _VERBOSE:
            print("[agent] Turn %d/%d" % (turn + 1, args.max_turns), file=sys.stderr, flush=True)

        # Heartbeat keeps the sandbox connection alive during the slow LLM call.
        with Heartbeat(interval=15):
            try:
                response = provider.call(system_prompt, messages, tools)
            except Exception as e:
                emit_event("error", message=str(e)[:200])
                error_exit("LLM API error (%s): %s" % (provider_name, e))

        if not provider.has_tool_use(response):
            # Model stopped without calling submit_result: treat its final
            # text as the result (parsed JSON if possible, wrapped otherwise).
            emit_event("done")
            final_text = provider.extract_text(response)
            try:
                result = json.loads(final_text)
                print(json.dumps(result))
            except json.JSONDecodeError:
                print(json.dumps({"result": final_text}))
            sys.exit(0)

        tool_results = []
        for call_id, name, input_args in provider.extract_tool_calls(response):
            verbose_log(name, input_args)
            emit_event("tool_call", turn=turn + 1, tool=name, args_brief=_brief_args(name, input_args))
            result_text, is_submit = dispatch_tool(name, input_args, custom_tools_config)
            is_error = result_text.startswith("[ERROR]")
            emit_event("tool_result", turn=turn + 1, tool=name, status="error" if is_error else "ok")

            if is_submit:
                emit_event("done")
                try:
                    result = json.loads(result_text)
                except json.JSONDecodeError:
                    result = {"result": result_text}
                print(json.dumps(result))
                sys.exit(0)

            tool_results.append((call_id, name, result_text, is_error))

        provider.append_turn(messages, response, tool_results)
        # FIX: previously both branches of `if _VERBOSE` printed this identical
        # line, differing only in flush; always flush so turn progress streams
        # promptly through the sandbox, consistent with the rest of the file.
        print(
            "[agent] Turn %d/%d completed (%d tool calls)" % (turn + 1, args.max_turns, len(tool_results)),
            file=sys.stderr,
            flush=True,
        )

    emit_event("error", message="max turns reached")
    error_exit("Agent reached maximum turns (%d) without submitting a result" % args.max_turns)
765
+
766
+
767
+ if __name__ == "__main__":
768
+ main()