hanzo 0.3.11__py3-none-any.whl → 0.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of hanzo has been flagged as potentially problematic.

hanzo/dev.py ADDED
@@ -0,0 +1,1970 @@
1
+ """Hanzo Dev - System 2 Thinking Meta-AI for Managing Claude Code Runtime.
2
+
3
+ This module provides a sophisticated orchestration layer that:
4
+ 1. Acts as a System 2 thinking agent (deliberative, analytical)
5
+ 2. Manages Claude Code runtime lifecycle
6
+ 3. Provides persistence and recovery mechanisms
7
+ 4. Includes health checks and auto-restart capabilities
8
+ 5. Integrates with REPL for interactive control
9
+ """
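For orientation, a minimal sketch of how the orchestrator defined below can be driven programmatically. This example is editorial (not part of the packaged file); it assumes the module is importable as hanzo.dev, uses the HanzoDevOrchestrator and AgentContext names introduced later in this file, and assumes a Claude Code binary path is supplied (otherwise the constructor searches for one and raises if none is found).

import asyncio

from hanzo.dev import AgentContext, HanzoDevOrchestrator


async def main():
    # think() performs the deliberative analysis and does not require the Claude runtime.
    orch = HanzoDevOrchestrator(
        workspace_dir="~/.hanzo/dev",
        claude_code_path="/usr/local/bin/claude",
    )
    plan = await orch.think("Tests are slow and the build is stuck", {"error_count": 1})
    print(plan.decision, plan.confidence)

    # execute_task() starts and monitors the runtime, retrying up to max_attempts.
    ctx = AgentContext(
        task="Run the test suite and summarize failures",
        goal="Identify failing tests",
        constraints=["No destructive commands"],
        success_criteria=["A failure summary is produced"],
    )
    await orch.execute_task(ctx)
    orch.shutdown()


asyncio.run(main())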
10
+
11
+ import os
12
+ import sys
13
+ import json
14
+ import time
15
+ import signal
16
+ import asyncio
17
+ import logging
18
+ import subprocess
19
+ from enum import Enum
20
+ from typing import Any, Dict, List, Union, Callable, Optional
21
+ from pathlib import Path
22
+ from datetime import datetime
23
+ from dataclasses import asdict, dataclass
24
+
25
+ from rich.live import Live
26
+ from rich.panel import Panel
27
+ from rich.table import Table
28
+ from rich.layout import Layout
29
+ from rich.console import Console
30
+ from rich.progress import Progress, TextColumn, SpinnerColumn
31
+
32
+ # Setup logging first
33
+ logger = logging.getLogger(__name__)
34
+ console = Console()
35
+
36
+ # Import hanzo-network for agent orchestration
37
+ try:
38
+ from hanzo_network import (
39
+ LOCAL_COMPUTE_AVAILABLE,
40
+ Agent,
41
+ Router,
42
+ Network,
43
+ ModelConfig,
44
+ NetworkState,
45
+ ModelProvider,
46
+ DistributedNetwork,
47
+ create_agent,
48
+ create_router,
49
+ create_network,
50
+ create_routing_agent,
51
+ create_distributed_network,
52
+ )
53
+
54
+ NETWORK_AVAILABLE = True
55
+ except ImportError:
56
+ NETWORK_AVAILABLE = False
57
+ logger.warning("hanzo-network not available, using basic orchestration")
58
+
59
+ # Provide fallback implementations
60
+ class Agent:
61
+ """Fallback Agent class when hanzo-network is not available."""
62
+
63
+ def __init__(self, name: str, model: str = "gpt-4", **kwargs):
64
+ self.name = name
65
+ self.model = model
66
+ self.config = kwargs
67
+
68
+ class Network:
69
+ """Fallback Network class."""
70
+
71
+ def __init__(self):
72
+ self.agents = []
73
+
74
+ class Router:
75
+ """Fallback Router class."""
76
+
77
+ def __init__(self):
78
+ pass
79
+
80
+ class NetworkState:
81
+ """Fallback NetworkState class."""
82
+
83
+ pass
84
+
85
+ class ModelConfig:
86
+ """Fallback ModelConfig class."""
87
+
88
+ def __init__(self, **kwargs):
89
+ # Accept all kwargs and store as attributes
90
+ for key, value in kwargs.items():
91
+ setattr(self, key, value)
92
+
93
+ class ModelProvider:
94
+ """Fallback ModelProvider class."""
95
+
96
+ OPENAI = "openai"
97
+ ANTHROPIC = "anthropic"
98
+ LOCAL = "local"
99
+
100
+ LOCAL_COMPUTE_AVAILABLE = False
101
+
102
+
103
+ class AgentState(Enum):
104
+ """State of an AI agent."""
105
+
106
+ IDLE = "idle"
107
+ THINKING = "thinking" # System 2 deliberation
108
+ EXECUTING = "executing"
109
+ STUCK = "stuck"
110
+ CRASHED = "crashed"
111
+ RECOVERING = "recovering"
112
+
113
+
114
+ class RuntimeState(Enum):
115
+ """State of Claude Code runtime."""
116
+
117
+ NOT_STARTED = "not_started"
118
+ STARTING = "starting"
119
+ RUNNING = "running"
120
+ RESPONDING = "responding"
121
+ NOT_RESPONDING = "not_responding"
122
+ CRASHED = "crashed"
123
+ RESTARTING = "restarting"
124
+
125
+
126
+ @dataclass
127
+ class AgentContext:
128
+ """Context for agent decision making."""
129
+
130
+ task: str
131
+ goal: str
132
+ constraints: List[str]
133
+ success_criteria: List[str]
134
+ max_attempts: int = 3
135
+ timeout_seconds: int = 300
136
+ checkpoint_interval: int = 60
137
+
138
+
139
+ @dataclass
140
+ class RuntimeHealth:
141
+ """Health status of Claude Code runtime."""
142
+
143
+ state: RuntimeState
144
+ last_response: datetime
145
+ response_time_ms: float
146
+ memory_usage_mb: float
147
+ cpu_percent: float
148
+ error_count: int
149
+ restart_count: int
150
+
151
+
152
+ @dataclass
153
+ class ThinkingResult:
154
+ """Result of System 2 thinking process."""
155
+
156
+ decision: str
157
+ reasoning: List[str]
158
+ confidence: float
159
+ alternatives: List[str]
160
+ risks: List[str]
161
+ next_steps: List[str]
162
+
163
+
164
+ class HanzoDevOrchestrator:
165
+ """Main orchestrator for Hanzo Dev System 2 thinking."""
166
+
167
+ def __init__(
168
+ self,
169
+ workspace_dir: str = "~/.hanzo/dev",
170
+ claude_code_path: Optional[str] = None,
171
+ ):
172
+ """Initialize the orchestrator.
173
+
174
+ Args:
175
+ workspace_dir: Directory for persistence and checkpoints
176
+ claude_code_path: Path to Claude Code executable
177
+ """
178
+ self.workspace_dir = Path(workspace_dir).expanduser()
179
+ self.workspace_dir.mkdir(parents=True, exist_ok=True)
180
+
181
+ self.claude_code_path = claude_code_path or self._find_claude_code()
182
+ self.state_file = self.workspace_dir / "orchestrator_state.json"
183
+ self.checkpoint_dir = self.workspace_dir / "checkpoints"
184
+ self.checkpoint_dir.mkdir(exist_ok=True)
185
+
186
+ self.agent_state = AgentState.IDLE
187
+ self.runtime_health = RuntimeHealth(
188
+ state=RuntimeState.NOT_STARTED,
189
+ last_response=datetime.now(),
190
+ response_time_ms=0,
191
+ memory_usage_mb=0,
192
+ cpu_percent=0,
193
+ error_count=0,
194
+ restart_count=0,
195
+ )
196
+
197
+ self.current_context: Optional[AgentContext] = None
198
+ self.claude_process: Optional[subprocess.Popen] = None
199
+ self.thinking_history: List[ThinkingResult] = []
200
+ self._shutdown = False
201
+
202
+ def _find_claude_code(self) -> str:
203
+ """Find Claude Code executable."""
204
+ # Check common locations
205
+ possible_paths = [
206
+ "/usr/local/bin/claude",
207
+ "/opt/claude/claude",
208
+ "~/.local/bin/claude",
209
+ "claude", # Rely on PATH
210
+ ]
211
+
212
+ for path in possible_paths:
213
+ expanded = Path(path).expanduser()
214
+ if expanded.exists() or (
215
+ path == "claude" and os.system(f"which {path} >/dev/null 2>&1") == 0
216
+ ):
217
+ return str(expanded) if expanded.exists() else path
218
+
219
+ raise RuntimeError("Claude Code not found. Please specify path.")
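The bare "claude" case above shells out to which via os.system. As a point of comparison, here is a sketch of the same lookup using the standard library's shutil.which, which searches PATH without spawning a shell and returns None when the executable is absent (illustrative only, not part of the diff):

import shutil
from pathlib import Path


def find_claude_code() -> str:
    # Same candidate locations as _find_claude_code above.
    for candidate in ("/usr/local/bin/claude", "/opt/claude/claude", "~/.local/bin/claude"):
        expanded = Path(candidate).expanduser()
        if expanded.exists():
            return str(expanded)
    found = shutil.which("claude")  # cross-platform PATH lookup
    if found:
        return found
    raise RuntimeError("Claude Code not found. Please specify path.")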
220
+
221
+ async def think(self, problem: str, context: Dict[str, Any]) -> ThinkingResult:
222
+ """System 2 thinking process - deliberative and analytical.
223
+
224
+ This implements slow, deliberate thinking:
225
+ 1. Analyze the problem thoroughly
226
+ 2. Consider multiple approaches
227
+ 3. Evaluate risks and trade-offs
228
+ 4. Make a reasoned decision
229
+ """
230
+ self.agent_state = AgentState.THINKING
231
+ console.print("[yellow]🤔 Engaging System 2 thinking...[/yellow]")
232
+
233
+ # Simulate deep thinking process
234
+ reasoning = []
235
+ alternatives = []
236
+ risks = []
237
+
238
+ # Step 1: Problem decomposition
239
+ reasoning.append(f"Decomposing problem: {problem}")
240
+ sub_problems = self._decompose_problem(problem)
241
+ reasoning.append(f"Identified {len(sub_problems)} sub-problems")
242
+
243
+ # Step 2: Generate alternatives
244
+ for sub in sub_problems:
245
+ alt = f"Approach for '{sub}': {self._generate_approach(sub, context)}"
246
+ alternatives.append(alt)
247
+
248
+ # Step 3: Risk assessment
249
+ risks = self._assess_risks(problem, alternatives, context)
250
+
251
+ # Step 4: Decision synthesis
252
+ decision = self._synthesize_decision(problem, alternatives, risks, context)
253
+ confidence = self._calculate_confidence(decision, risks)
254
+
255
+ # Step 5: Plan next steps
256
+ next_steps = self._plan_next_steps(decision, context)
257
+
258
+ result = ThinkingResult(
259
+ decision=decision,
260
+ reasoning=reasoning,
261
+ confidence=confidence,
262
+ alternatives=alternatives,
263
+ risks=risks,
264
+ next_steps=next_steps,
265
+ )
266
+
267
+ self.thinking_history.append(result)
268
+ self.agent_state = AgentState.IDLE
269
+
270
+ return result
271
+
272
+ def _decompose_problem(self, problem: str) -> List[str]:
273
+ """Decompose a problem into sub-problems."""
274
+ # Simple heuristic decomposition
275
+ sub_problems = []
276
+
277
+ # Check for common patterns
278
+ if "and" in problem.lower():
279
+ parts = problem.split(" and ")
280
+ sub_problems.extend(parts)
281
+
282
+ if "then" in problem.lower():
283
+ parts = problem.split(" then ")
284
+ sub_problems.extend(parts)
285
+
286
+ if not sub_problems:
287
+ sub_problems = [problem]
288
+
289
+ return sub_problems
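Worked examples of the heuristic above; note that a problem containing both connectives contributes parts from both splits, including overlapping fragments:

# _decompose_problem("fix the build and add tests")
#   -> ["fix the build", "add tests"]
# _decompose_problem("profile first then optimize")
#   -> ["profile first", "optimize"]
# _decompose_problem("fix the build and then add tests")
#   -> ["fix the build", "then add tests", "fix the build and", "add tests"]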
290
+
291
+ def _generate_approach(self, sub_problem: str, context: Dict[str, Any]) -> str:
292
+ """Generate an approach for a sub-problem."""
293
+ # Heuristic approach generation
294
+ if "stuck" in sub_problem.lower():
295
+ return "Analyze error logs, restart with verbose mode, try alternative approach"
296
+ elif "slow" in sub_problem.lower():
297
+ return "Profile performance, optimize bottlenecks, consider caching"
298
+ elif "error" in sub_problem.lower():
299
+ return "Examine stack trace, validate inputs, add error handling"
300
+ else:
301
+ return "Execute standard workflow with monitoring"
302
+
303
+ def _assess_risks(
304
+ self, problem: str, alternatives: List[str], context: Dict[str, Any]
305
+ ) -> List[str]:
306
+ """Assess risks of different approaches."""
307
+ risks = []
308
+
309
+ if "restart" in str(alternatives).lower():
310
+ risks.append("Restarting may lose current state")
311
+
312
+ if "force" in str(alternatives).lower():
313
+ risks.append("Forcing operations may cause data corruption")
314
+
315
+ if context.get("error_count", 0) > 5:
316
+ risks.append("High error rate indicates systemic issue")
317
+
318
+ return risks
319
+
320
+ def _synthesize_decision(
321
+ self,
322
+ problem: str,
323
+ alternatives: List[str],
324
+ risks: List[str],
325
+ context: Dict[str, Any],
326
+ ) -> str:
327
+ """Synthesize a decision from analysis."""
328
+ if len(risks) > 2:
329
+ return (
330
+ "Proceed cautiously with incremental approach and rollback capability"
331
+ )
332
+ elif alternatives:
333
+ return f"Execute primary approach: {alternatives[0]}"
334
+ else:
335
+ return "Gather more information before proceeding"
336
+
337
+ def _calculate_confidence(self, decision: str, risks: List[str]) -> float:
338
+ """Calculate confidence in decision."""
339
+ base_confidence = 0.8
340
+ risk_penalty = len(risks) * 0.1
341
+ return max(0.2, min(1.0, base_confidence - risk_penalty))
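Plugging a few values into the formula above (0.8 base, 0.1 penalty per risk, clamped to [0.2, 1.0]):

# len(risks) == 0 -> 0.8   # base confidence
# len(risks) == 3 -> 0.5   # each identified risk costs 0.1
# len(risks) >= 6 -> 0.2   # clamped at the floor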
342
+
343
+ def _plan_next_steps(self, decision: str, context: Dict[str, Any]) -> List[str]:
344
+ """Plan concrete next steps."""
345
+ steps = []
346
+
347
+ if "cautiously" in decision.lower():
348
+ steps.append("Create checkpoint before proceeding")
349
+ steps.append("Enable verbose logging")
350
+
351
+ steps.append("Execute decision with monitoring")
352
+ steps.append("Validate results against success criteria")
353
+ steps.append("Report outcome and update state")
354
+
355
+ return steps
356
+
357
+ async def start_claude_runtime(self, resume: bool = False) -> bool:
358
+ """Start or resume Claude Code runtime.
359
+
360
+ Args:
361
+ resume: Whether to resume from checkpoint
362
+ """
363
+ if self.claude_process and self.claude_process.poll() is None:
364
+ console.print("[yellow]Claude Code already running[/yellow]")
365
+ return True
366
+
367
+ self.runtime_health.state = RuntimeState.STARTING
368
+ console.print("[cyan]Starting Claude Code runtime...[/cyan]")
369
+
370
+ try:
371
+ # Load checkpoint if resuming
372
+ checkpoint_file = None
373
+ if resume:
374
+ checkpoint_file = self._get_latest_checkpoint()
375
+ if checkpoint_file:
376
+ console.print(
377
+ f"[green]Resuming from checkpoint: {checkpoint_file.name}[/green]"
378
+ )
379
+
380
+ # Prepare command
381
+ cmd = [self.claude_code_path]
382
+ if checkpoint_file:
383
+ cmd.extend(["--resume", str(checkpoint_file)])
384
+
385
+ # Start process with proper signal handling
386
+ self.claude_process = subprocess.Popen(
387
+ cmd,
388
+ stdout=subprocess.PIPE,
389
+ stderr=subprocess.PIPE,
390
+ stdin=subprocess.PIPE,
391
+ text=True,
392
+ preexec_fn=os.setsid if hasattr(os, "setsid") else None,
393
+ )
394
+
395
+ # Wait for startup
396
+ await asyncio.sleep(2)
397
+
398
+ if self.claude_process.poll() is None:
399
+ self.runtime_health.state = RuntimeState.RUNNING
400
+ self.runtime_health.last_response = datetime.now()
401
+ console.print("[green]✓ Claude Code runtime started[/green]")
402
+ return True
403
+ else:
404
+ self.runtime_health.state = RuntimeState.CRASHED
405
+ console.print("[red]✗ Claude Code failed to start[/red]")
406
+ return False
407
+
408
+ except Exception as e:
409
+ console.print(f"[red]Error starting Claude Code: {e}[/red]")
410
+ self.runtime_health.state = RuntimeState.CRASHED
411
+ return False
412
+
413
+ def _get_latest_checkpoint(self) -> Optional[Path]:
414
+ """Get the latest checkpoint file."""
415
+ checkpoints = list(self.checkpoint_dir.glob("checkpoint_*.json"))
416
+ if checkpoints:
417
+ return max(checkpoints, key=lambda p: p.stat().st_mtime)
418
+ return None
419
+
420
+ async def health_check(self) -> bool:
421
+ """Check health of Claude Code runtime."""
422
+ if not self.claude_process:
423
+ self.runtime_health.state = RuntimeState.NOT_STARTED
424
+ return False
425
+
426
+ # Check if process is alive
427
+ if self.claude_process.poll() is not None:
428
+ self.runtime_health.state = RuntimeState.CRASHED
429
+ self.runtime_health.error_count += 1
430
+ return False
431
+
432
+ # Try to communicate
433
+ try:
434
+ # Send a health check command (this is a placeholder)
435
+ # In reality, you'd have a proper API or IPC mechanism
436
+ start_time = time.time()
437
+
438
+ # Simulate health check
439
+ await asyncio.sleep(0.1)
440
+
441
+ response_time = (time.time() - start_time) * 1000
442
+ self.runtime_health.response_time_ms = response_time
443
+ self.runtime_health.last_response = datetime.now()
444
+
445
+ if response_time > 5000:
446
+ self.runtime_health.state = RuntimeState.NOT_RESPONDING
447
+ return False
448
+ else:
449
+ self.runtime_health.state = RuntimeState.RUNNING
450
+ return True
451
+
452
+ except Exception as e:
453
+ logger.error(f"Health check failed: {e}")
454
+ self.runtime_health.state = RuntimeState.NOT_RESPONDING
455
+ self.runtime_health.error_count += 1
456
+ return False
457
+
458
+ async def restart_if_needed(self) -> bool:
459
+ """Restart Claude Code if it's stuck or crashed."""
460
+ if self.runtime_health.state in [
461
+ RuntimeState.CRASHED,
462
+ RuntimeState.NOT_RESPONDING,
463
+ ]:
464
+ console.print("[yellow]Claude Code needs restart...[/yellow]")
465
+
466
+ # Kill existing process
467
+ if self.claude_process:
468
+ try:
469
+ if hasattr(os, "killpg"):
470
+ os.killpg(os.getpgid(self.claude_process.pid), signal.SIGTERM)
471
+ else:
472
+ self.claude_process.terminate()
473
+ await asyncio.sleep(2)
474
+ if self.claude_process.poll() is None:
475
+ self.claude_process.kill()
476
+ except Exception:
477
+ pass
478
+
479
+ self.runtime_health.restart_count += 1
480
+ self.runtime_health.state = RuntimeState.RESTARTING
481
+
482
+ # Start with resume
483
+ return await self.start_claude_runtime(resume=True)
484
+
485
+ return True
486
+
487
+ async def create_checkpoint(self, name: Optional[str] = None) -> Path:
488
+ """Create a checkpoint of current state."""
489
+ checkpoint_name = name or f"checkpoint_{int(time.time())}"
490
+ checkpoint_file = self.checkpoint_dir / f"{checkpoint_name}.json"
491
+
492
+ checkpoint_data = {
493
+ "timestamp": datetime.now().isoformat(),
494
+ "agent_state": self.agent_state.value,
495
+ "runtime_health": asdict(self.runtime_health),
496
+ "current_context": (
497
+ asdict(self.current_context) if self.current_context else None
498
+ ),
499
+ "thinking_history": [
500
+ asdict(t) for t in self.thinking_history[-10:]
501
+ ], # Last 10
502
+ }
503
+
504
+ with open(checkpoint_file, "w") as f:
505
+ json.dump(checkpoint_data, f, indent=2, default=str)
506
+
507
+ console.print(f"[green]✓ Checkpoint saved: {checkpoint_file.name}[/green]")
508
+ return checkpoint_file
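For reference, a checkpoint written by this method has roughly the shape sketched below. The top-level field names come from checkpoint_data above; the file name and values are illustrative, and nested enums/datetimes are stringified by json.dump via default=str:

# ~/.hanzo/dev/checkpoints/checkpoint_1718000000.json (abridged)
# {
#   "timestamp": "2024-06-10T12:00:00.000000",
#   "agent_state": "idle",
#   "runtime_health": { ...RuntimeHealth fields... },
#   "current_context": null,
#   "thinking_history": []
# }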
509
+
510
+ async def restore_checkpoint(self, checkpoint_file: Path) -> bool:
511
+ """Restore from a checkpoint."""
512
+ try:
513
+ with open(checkpoint_file, "r") as f:
514
+ data = json.load(f)
515
+
516
+ self.agent_state = AgentState(data["agent_state"])
517
+ # Restore other state as needed
518
+
519
+ console.print(
520
+ f"[green]✓ Restored from checkpoint: {checkpoint_file.name}[/green]"
521
+ )
522
+ return True
523
+ except Exception as e:
524
+ console.print(f"[red]Failed to restore checkpoint: {e}[/red]")
525
+ return False
526
+
527
+ async def monitor_loop(self):
528
+ """Main monitoring loop."""
529
+ console.print("[cyan]Starting monitoring loop...[/cyan]")
530
+
531
+ while not self._shutdown:
532
+ try:
533
+ # Health check
534
+ healthy = await self.health_check()
535
+
536
+ if not healthy:
537
+ console.print(
538
+ f"[yellow]Health check failed. State: {self.runtime_health.state.value}[/yellow]"
539
+ )
540
+
541
+ # Use System 2 thinking to decide what to do
542
+ thinking_result = await self.think(
543
+ f"Claude Code is {self.runtime_health.state.value}",
544
+ {"health": asdict(self.runtime_health)},
545
+ )
546
+
547
+ console.print(f"[cyan]Decision: {thinking_result.decision}[/cyan]")
548
+ console.print(
549
+ f"[cyan]Confidence: {thinking_result.confidence:.2f}[/cyan]"
550
+ )
551
+
552
+ # Execute decision
553
+ if thinking_result.confidence > 0.6:
554
+ await self.restart_if_needed()
555
+
556
+ # Create periodic checkpoints
557
+ if int(time.time()) % 300 == 0: # Every 5 minutes
558
+ await self.create_checkpoint()
559
+
560
+ await asyncio.sleep(10) # Check every 10 seconds
561
+
562
+ except Exception as e:
563
+ logger.error(f"Monitor loop error: {e}")
564
+ await asyncio.sleep(10)
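One caveat on the checkpoint cadence above: because the loop only wakes every 10 seconds, int(time.time()) % 300 == 0 is true only when a wakeup happens to land on an exact multiple of 300 seconds, so the "every 5 minutes" checkpoint is best-effort rather than guaranteed. A simplified, elapsed-time variant of the method is sketched below as an illustration (not part of the diff; it omits the System 2 decision step and reuses the module's time, asyncio, and logger imports):

    async def monitor_loop(self):
        """Variant that checkpoints on elapsed time instead of a modulo test."""
        last_checkpoint = time.monotonic()
        while not self._shutdown:
            try:
                healthy = await self.health_check()
                if not healthy:
                    await self.restart_if_needed()
                if time.monotonic() - last_checkpoint >= 300:  # every 5 minutes
                    await self.create_checkpoint()
                    last_checkpoint = time.monotonic()
                await asyncio.sleep(10)
            except Exception as e:
                logger.error(f"Monitor loop error: {e}")
                await asyncio.sleep(10)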
565
+
566
+ async def execute_task(self, context: AgentContext) -> bool:
567
+ """Execute a task with System 2 oversight.
568
+
569
+ Args:
570
+ context: The task context
571
+ """
572
+ self.current_context = context
573
+ self.agent_state = AgentState.EXECUTING
574
+
575
+ console.print(f"[cyan]Executing task: {context.task}[/cyan]")
576
+ console.print(f"[cyan]Goal: {context.goal}[/cyan]")
577
+
578
+ attempts = 0
579
+ while attempts < context.max_attempts:
580
+ attempts += 1
581
+ console.print(f"[yellow]Attempt {attempts}/{context.max_attempts}[/yellow]")
582
+
583
+ try:
584
+ # Start Claude if needed
585
+ if self.runtime_health.state != RuntimeState.RUNNING:
586
+ await self.start_claude_runtime(resume=attempts > 1)
587
+
588
+ # Execute the task (placeholder - would send to Claude)
589
+ # In reality, this would involve IPC with Claude Code
590
+ start_time = time.time()
591
+
592
+ # Simulate task execution
593
+ await asyncio.sleep(2)
594
+
595
+ # Check success criteria
596
+ success = self._evaluate_success(context)
597
+
598
+ if success:
599
+ console.print("[green]✓ Task completed successfully[/green]")
600
+ self.agent_state = AgentState.IDLE
601
+ return True
602
+ else:
603
+ console.print("[yellow]Task not yet complete[/yellow]")
604
+
605
+ # Use System 2 thinking to decide next action
606
+ thinking_result = await self.think(
607
+ f"Task '{context.task}' incomplete after attempt {attempts}",
608
+ {"context": asdict(context), "attempts": attempts},
609
+ )
610
+
611
+ if thinking_result.confidence < 0.4:
612
+ console.print("[red]Low confidence, aborting task[/red]")
613
+ break
614
+
615
+ except asyncio.TimeoutError:
616
+ console.print("[red]Task timed out[/red]")
617
+ self.agent_state = AgentState.STUCK
618
+
619
+ except Exception as e:
620
+ console.print(f"[red]Task error: {e}[/red]")
621
+ self.runtime_health.error_count += 1
622
+
623
+ self.agent_state = AgentState.IDLE
624
+ return False
625
+
626
+ def _evaluate_success(self, context: AgentContext) -> bool:
627
+ """Evaluate if success criteria are met."""
628
+ # Placeholder - would check actual results
629
+ # In reality, this would analyze Claude's output
630
+ return False # For now, always require thinking
631
+
632
+ def shutdown(self):
633
+ """Shutdown the orchestrator."""
634
+ self._shutdown = True
635
+
636
+ if self.claude_process:
637
+ try:
638
+ self.claude_process.terminate()
639
+ self.claude_process.wait(timeout=5)
640
+ except Exception:
641
+ self.claude_process.kill()
642
+
643
+ console.print("[green]✓ Orchestrator shutdown complete[/green]")
644
+
645
+
646
+ class HanzoDevREPL:
647
+ """REPL interface for driving Hanzo Dev orchestrator."""
648
+
649
+ def __init__(self, orchestrator: HanzoDevOrchestrator):
650
+ self.orchestrator = orchestrator
651
+ self.commands = {
652
+ "start": self.cmd_start,
653
+ "stop": self.cmd_stop,
654
+ "restart": self.cmd_restart,
655
+ "status": self.cmd_status,
656
+ "think": self.cmd_think,
657
+ "execute": self.cmd_execute,
658
+ "checkpoint": self.cmd_checkpoint,
659
+ "restore": self.cmd_restore,
660
+ "monitor": self.cmd_monitor,
661
+ "help": self.cmd_help,
662
+ "exit": self.cmd_exit,
663
+ }
664
+
665
+ async def run(self):
666
+ """Run the REPL."""
667
+ console.print("[bold cyan]Hanzo Dev - AI Chat[/bold cyan]")
668
+ console.print("Chat naturally or use /commands")
669
+ console.print("Type /help for available commands\n")
670
+
671
+ while True:
672
+ try:
673
+ user_input = await asyncio.get_event_loop().run_in_executor(
674
+ None, input, "> "
675
+ )
676
+
677
+ if not user_input:
678
+ continue
679
+
680
+ # Check for special commands
681
+ if user_input.startswith("/"):
682
+ # Handle slash commands like Claude Desktop
683
+ parts = user_input[1:].strip().split(maxsplit=1)
684
+ cmd = parts[0].lower()
685
+ args = parts[1] if len(parts) > 1 else ""
686
+
687
+ if cmd in self.commands:
688
+ await self.commands[cmd](args)
689
+ else:
690
+ console.print(f"[yellow]Unknown command: /{cmd}[/yellow]")
691
+ console.print("Type /help for available commands")
692
+
693
+ elif user_input.startswith("#"):
694
+ # Handle memory/context commands
695
+ await self.handle_memory_command(user_input[1:].strip())
696
+
697
+ else:
698
+ # Natural chat - send directly to AI agents
699
+ await self.chat_with_agents(user_input)
700
+
701
+ except KeyboardInterrupt:
702
+ console.print("\n[yellow]Use /exit to quit[/yellow]")
703
+ except Exception as e:
704
+ console.print(f"[red]Error: {e}[/red]")
705
+
706
+ async def cmd_start(self, args: str):
707
+ """Start Claude Code runtime."""
708
+ resume = "--resume" in args
709
+ success = await self.orchestrator.start_claude_runtime(resume=resume)
710
+ if success:
711
+ console.print("[green]Runtime started successfully[/green]")
712
+ else:
713
+ console.print("[red]Failed to start runtime[/red]")
714
+
715
+ async def cmd_stop(self, args: str):
716
+ """Stop Claude Code runtime."""
717
+ if self.orchestrator.claude_process:
718
+ self.orchestrator.claude_process.terminate()
719
+ console.print("[yellow]Runtime stopped[/yellow]")
720
+ else:
721
+ console.print("[yellow]Runtime not running[/yellow]")
722
+
723
+ async def cmd_restart(self, args: str):
724
+ """Restart Claude Code runtime."""
725
+ await self.cmd_stop("")
726
+ await asyncio.sleep(1)
727
+ await self.cmd_start("--resume")
728
+
729
+ async def cmd_status(self, args: str):
730
+ """Show current status."""
731
+ table = Table(title="Hanzo Dev Status")
732
+ table.add_column("Property", style="cyan")
733
+ table.add_column("Value", style="white")
734
+
735
+ table.add_row("Agent State", self.orchestrator.agent_state.value)
736
+ table.add_row("Runtime State", self.orchestrator.runtime_health.state.value)
737
+ table.add_row(
738
+ "Last Response", str(self.orchestrator.runtime_health.last_response)
739
+ )
740
+ table.add_row(
741
+ "Response Time",
742
+ f"{self.orchestrator.runtime_health.response_time_ms:.2f}ms",
743
+ )
744
+ table.add_row("Error Count", str(self.orchestrator.runtime_health.error_count))
745
+ table.add_row(
746
+ "Restart Count", str(self.orchestrator.runtime_health.restart_count)
747
+ )
748
+
749
+ console.print(table)
750
+
751
+ async def cmd_think(self, args: str):
752
+ """Trigger System 2 thinking."""
753
+ if not args:
754
+ console.print("[red]Usage: think <problem>[/red]")
755
+ return
756
+
757
+ result = await self.orchestrator.think(args, {})
758
+
759
+ console.print(f"\n[bold cyan]Thinking Result:[/bold cyan]")
760
+ console.print(f"Decision: {result.decision}")
761
+ console.print(f"Confidence: {result.confidence:.2f}")
762
+ console.print(f"Reasoning: {', '.join(result.reasoning)}")
763
+ console.print(f"Risks: {', '.join(result.risks)}")
764
+ console.print(f"Next Steps: {', '.join(result.next_steps)}")
765
+
766
+ async def cmd_execute(self, args: str):
767
+ """Execute a task."""
768
+ if not args:
769
+ console.print("[red]Usage: execute <task>[/red]")
770
+ return
771
+
772
+ context = AgentContext(
773
+ task=args,
774
+ goal="Complete the specified task",
775
+ constraints=["Stay within resource limits", "Maintain data integrity"],
776
+ success_criteria=["Task output is valid", "No errors occurred"],
777
+ )
778
+
779
+ success = await self.orchestrator.execute_task(context)
780
+ if success:
781
+ console.print("[green]Task executed successfully[/green]")
782
+ else:
783
+ console.print("[red]Task execution failed[/red]")
784
+
785
+ async def cmd_checkpoint(self, args: str):
786
+ """Create a checkpoint."""
787
+ checkpoint = await self.orchestrator.create_checkpoint(args if args else None)
788
+ console.print(f"[green]Checkpoint created: {checkpoint.name}[/green]")
789
+
790
+ async def cmd_restore(self, args: str):
791
+ """Restore from checkpoint."""
792
+ if not args:
793
+ # Show available checkpoints
794
+ checkpoints = list(
795
+ self.orchestrator.checkpoint_dir.glob("checkpoint_*.json")
796
+ )
797
+ if checkpoints:
798
+ console.print("[cyan]Available checkpoints:[/cyan]")
799
+ for cp in checkpoints:
800
+ console.print(f" - {cp.name}")
801
+ else:
802
+ console.print("[yellow]No checkpoints available[/yellow]")
803
+ return
804
+
805
+ checkpoint_file = self.orchestrator.checkpoint_dir / args
806
+ if checkpoint_file.exists():
807
+ success = await self.orchestrator.restore_checkpoint(checkpoint_file)
808
+ if success:
809
+ console.print("[green]Checkpoint restored[/green]")
810
+ else:
811
+ console.print(f"[red]Checkpoint not found: {args}[/red]")
812
+
813
+ async def cmd_monitor(self, args: str):
814
+ """Start monitoring loop."""
815
+ console.print("[cyan]Starting monitor mode (Ctrl+C to stop)...[/cyan]")
816
+ try:
817
+ await self.orchestrator.monitor_loop()
818
+ except KeyboardInterrupt:
819
+ console.print("\n[yellow]Monitor stopped[/yellow]")
820
+
821
+ async def cmd_help(self, args: str):
822
+ """Show help."""
823
+ help_text = """
824
+ [bold cyan]Hanzo Dev - AI Chat Interface[/bold cyan]
825
+
826
+ [bold]Just chat naturally! Type anything and press Enter.[/bold]
827
+
828
+ Examples:
829
+ > Write a Python REST API
830
+ > Help me debug this error
831
+ > Explain how async/await works
832
+
833
+ [bold]Slash Commands:[/bold]
834
+ /help - Show this help
835
+ /status - Show agent status
836
+ /think <problem> - Trigger deep thinking
837
+ /execute <task> - Execute specific task
838
+ /checkpoint - Save current state
839
+ /restore - Restore from checkpoint
840
+ /monitor - Start monitoring
841
+ /exit - Exit chat
842
+
843
+ [bold]Memory Commands (like Claude Desktop):[/bold]
844
+ #remember <text> - Store in memory
845
+ #forget <text> - Remove from memory
846
+ #memory - Show memory
847
+ #context - Show context
848
+ """
849
+ console.print(help_text)
850
+
851
+ async def cmd_exit(self, args: str):
852
+ """Exit the REPL."""
853
+ self.orchestrator.shutdown()
854
+ console.print("[green]Goodbye![/green]")
855
+ sys.exit(0)
856
+
857
+ async def chat_with_agents(self, message: str):
858
+ """Send message to AI agents for natural chat."""
859
+ try:
860
+ # Show thinking indicator
861
+ console.print("[dim]Thinking...[/dim]")
862
+
863
+ # Check if we have a network orchestrator with actual AI
864
+ if hasattr(self.orchestrator, 'execute_with_network'):
865
+ # Use the network orchestrator (GPT-4, GPT-5, etc.)
866
+ result = await self.orchestrator.execute_with_network(
867
+ task=message,
868
+ context={"mode": "chat", "interactive": True}
869
+ )
870
+
871
+ if result.get("output"):
872
+ console.print(f"[cyan]AI:[/cyan] {result['output']}")
873
+ elif result.get("error"):
874
+ console.print(f"[red]Error:[/red] {result['error']}")
875
+ else:
876
+ console.print("[yellow]No response from agent[/yellow]")
877
+
878
+ elif hasattr(self.orchestrator, 'execute_with_critique'):
879
+ # Use multi-Claude orchestrator
880
+ result = await self.orchestrator.execute_with_critique(message)
881
+
882
+ if result.get("output"):
883
+ console.print(f"[cyan]AI:[/cyan] {result['output']}")
884
+ else:
885
+ console.print("[yellow]No response from agent[/yellow]")
886
+
887
+ else:
888
+ # Fallback to direct API call if available
889
+ await self._direct_api_chat(message)
890
+
891
+ except Exception as e:
892
+ console.print(f"[red]Error connecting to AI: {e}[/red]")
893
+ console.print("[yellow]Make sure you have API keys configured:[/yellow]")
894
+ console.print(" • OPENAI_API_KEY for GPT models")
895
+ console.print(" • ANTHROPIC_API_KEY for Claude")
896
+ console.print(" • Or use --orchestrator local:llama3.2 for local models")
897
+
898
+ async def _direct_api_chat(self, message: str):
899
+ """Direct API chat fallback when network orchestrator isn't available."""
900
+ import os
901
+
902
+ # Try OpenAI first
903
+ if os.getenv("OPENAI_API_KEY"):
904
+ try:
905
+ from openai import AsyncOpenAI
906
+
907
+ client = AsyncOpenAI()
908
+ response = await client.chat.completions.create(
909
+ model=self.orchestrator.orchestrator_model or "gpt-4",
910
+ messages=[
911
+ {"role": "system", "content": "You are a helpful AI coding assistant."},
912
+ {"role": "user", "content": message}
913
+ ],
914
+ temperature=0.7,
915
+ max_tokens=2000
916
+ )
917
+
918
+ if response.choices:
919
+ console.print(f"[cyan]AI:[/cyan] {response.choices[0].message.content}")
920
+ return
921
+
922
+ except Exception as e:
923
+ console.print(f"[yellow]OpenAI error: {e}[/yellow]")
924
+
925
+ # Try Anthropic
926
+ if os.getenv("ANTHROPIC_API_KEY"):
927
+ try:
928
+ from anthropic import AsyncAnthropic
929
+
930
+ client = AsyncAnthropic()
931
+ response = await client.messages.create(
932
+ model="claude-3-5-sonnet-20241022",
933
+ messages=[{"role": "user", "content": message}],
934
+ max_tokens=2000
935
+ )
936
+
937
+ if response.content:
938
+ console.print(f"[cyan]AI:[/cyan] {response.content[0].text}")
939
+ return
940
+
941
+ except Exception as e:
942
+ console.print(f"[yellow]Anthropic error: {e}[/yellow]")
943
+
944
+ # No API keys available
945
+ console.print("[red]No AI API keys configured![/red]")
946
+ console.print("Set one of these environment variables:")
947
+ console.print(" • export OPENAI_API_KEY=sk-...")
948
+ console.print(" • export ANTHROPIC_API_KEY=sk-ant-...")
949
+ console.print("Or use local models with: hanzo dev --orchestrator local:llama3.2")
950
+
951
+ async def handle_memory_command(self, command: str):
952
+ """Handle memory/context commands starting with #."""
953
+ parts = command.split(maxsplit=1)
954
+ cmd = parts[0].lower() if parts else ""
955
+ args = parts[1] if len(parts) > 1 else ""
956
+
957
+ if cmd == "remember":
958
+ if args:
959
+ console.print(f"[green]✓ Remembered: {args}[/green]")
960
+ else:
961
+ console.print("[yellow]Usage: #remember <text>[/yellow]")
962
+ elif cmd == "forget":
963
+ if args:
964
+ console.print(f"[yellow]✓ Forgot: {args}[/yellow]")
965
+ else:
966
+ console.print("[yellow]Usage: #forget <text>[/yellow]")
967
+ elif cmd == "memory":
968
+ console.print("[cyan]Current Memory:[/cyan]")
969
+ console.print(" • Working on Hanzo Python SDK")
970
+ console.print(" • Using GPT-4 orchestrator")
971
+ elif cmd == "context":
972
+ console.print("[cyan]Current Context:[/cyan]")
973
+ console.print(f" • Directory: {os.getcwd()}")
974
+ console.print(f" • Model: {self.orchestrator.orchestrator_model}")
975
+ else:
976
+ console.print(f"[yellow]Unknown: #{cmd}[/yellow]")
977
+ console.print("Try: #memory, #remember, #forget, #context")
978
+
979
+
980
+ async def run_dev_orchestrator(**kwargs):
981
+ """Run the Hanzo Dev orchestrator with multi-agent networking.
982
+
983
+ This is the main entry point from the CLI that sets up:
984
+ 1. Configurable orchestrator (GPT-5, GPT-4, Claude, Codex, etc.)
985
+ 2. Multiple worker agents (Claude instances for implementation)
986
+ 3. Critic agents for System 2 thinking
987
+ 4. MCP tool networking between instances
988
+ 5. Code quality guardrails
989
+ 6. Router-based or direct model access
990
+ """
991
+ workspace = kwargs.get("workspace", "~/.hanzo/dev")
992
+ orchestrator_model = kwargs.get("orchestrator_model", "gpt-5")
993
+ orchestrator_config = kwargs.get("orchestrator_config", None) # New config object
994
+ claude_path = kwargs.get("claude_path")
995
+ monitor = kwargs.get("monitor", False)
996
+ repl = kwargs.get("repl", True)
997
+ instances = kwargs.get("instances", 2)
998
+ mcp_tools = kwargs.get("mcp_tools", True)
999
+ network_mode = kwargs.get("network_mode", True)
1000
+ guardrails = kwargs.get("guardrails", True)
1001
+ use_network = kwargs.get("use_network", True) # Use hanzo-network if available
1002
+ use_hanzo_net = kwargs.get("use_hanzo_net", False) # Use hanzo/net for local AI
1003
+ hanzo_net_port = kwargs.get("hanzo_net_port", 52415)
1004
+ console_obj = kwargs.get("console", console)
1005
+
1006
+ console_obj.print(f"[bold cyan]Hanzo Dev - AI Coding OS[/bold cyan]")
1007
+
1008
+ # Check if we should use network mode
1009
+ # For now, disable network mode since hanzo-network isn't available
1010
+ if False and use_network and NETWORK_AVAILABLE:
1011
+ console_obj.print(
1012
+ f"[cyan]Mode: Network Orchestration with hanzo-network[/cyan]"
1013
+ )
1014
+ console_obj.print(f"Orchestrator: {orchestrator_model}")
1015
+ console_obj.print(f"Workers: {instances} agents")
1016
+ console_obj.print(f"Critics: {max(1, instances // 2)} agents")
1017
+ console_obj.print(f"MCP Tools: {'Enabled' if mcp_tools else 'Disabled'}")
1018
+ console_obj.print(f"Guardrails: {'Enabled' if guardrails else 'Disabled'}\n")
1019
+
1020
+ # Create network orchestrator with configurable LLM
1021
+ orchestrator = NetworkOrchestrator(
1022
+ workspace_dir=workspace,
1023
+ orchestrator_model=orchestrator_model,
1024
+ num_workers=instances,
1025
+ enable_mcp=mcp_tools,
1026
+ enable_networking=network_mode,
1027
+ enable_guardrails=guardrails,
1028
+ use_hanzo_net=use_hanzo_net,
1029
+ hanzo_net_port=hanzo_net_port,
1030
+ console=console_obj,
1031
+ )
1032
+
1033
+ # Initialize the network
1034
+ success = await orchestrator.initialize()
1035
+ if not success:
1036
+ console_obj.print("[red]Failed to initialize network[/red]")
1037
+ return
1038
+ else:
1039
+ # Fallback to multi-Claude mode
1040
+ console_obj.print(f"[cyan]Mode: Multi-Claude Orchestration (legacy)[/cyan]")
1041
+ console_obj.print(
1042
+ f"Instances: {instances} (1 primary + {instances-1} critic{'s' if instances > 2 else ''})"
1043
+ )
1044
+ console_obj.print(f"MCP Tools: {'Enabled' if mcp_tools else 'Disabled'}")
1045
+ console_obj.print(f"Networking: {'Enabled' if network_mode else 'Disabled'}")
1046
+ console_obj.print(f"Guardrails: {'Enabled' if guardrails else 'Disabled'}\n")
1047
+
1048
+ orchestrator = MultiClaudeOrchestrator(
1049
+ workspace_dir=workspace,
1050
+ claude_path=claude_path,
1051
+ num_instances=instances,
1052
+ enable_mcp=mcp_tools,
1053
+ enable_networking=network_mode,
1054
+ enable_guardrails=guardrails,
1055
+ console=console_obj,
1056
+ orchestrator_model=orchestrator_model,
1057
+ )
1058
+
1059
+ # Initialize instances
1060
+ await orchestrator.initialize()
1061
+
1062
+ if monitor:
1063
+ # Start monitoring mode
1064
+ await orchestrator.monitor_loop()
1065
+ elif repl:
1066
+ # Start REPL interface
1067
+ repl_interface = HanzoDevREPL(orchestrator)
1068
+ await repl_interface.run()
1069
+ else:
1070
+ # Run once
1071
+ await asyncio.sleep(10)
1072
+ orchestrator.shutdown()
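A minimal example of invoking this entry point directly. The keyword names mirror the kwargs.get(...) lookups at the top of the function; the model string is illustrative, and a Claude Code binary is assumed to be discoverable (otherwise pass claude_path). Normally the hanzo CLI supplies these values:

import asyncio

from hanzo.dev import run_dev_orchestrator

asyncio.run(run_dev_orchestrator(
    workspace="~/.hanzo/dev",
    orchestrator_model="claude-3-5-sonnet-20241022",
    instances=2,
    mcp_tools=True,
    guardrails=True,
    repl=False,      # skip the REPL...
    monitor=False,   # ...and the monitor loop; initialize, run once, shut down
))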
1073
+
1074
+
1075
+ class NetworkOrchestrator(HanzoDevOrchestrator):
1076
+ """Advanced orchestrator using hanzo-network with configurable LLM (GPT-5, Claude, local, etc.)."""
1077
+
1078
+ def __init__(
1079
+ self,
1080
+ workspace_dir: str,
1081
+ orchestrator_model: str = "gpt-5",
1082
+ num_workers: int = 2,
1083
+ enable_mcp: bool = True,
1084
+ enable_networking: bool = True,
1085
+ enable_guardrails: bool = True,
1086
+ use_hanzo_net: bool = False,
1087
+ hanzo_net_port: int = 52415,
1088
+ console: Console = console,
1089
+ ):
1090
+ """Initialize network orchestrator with configurable LLM.
1091
+
1092
+ Args:
1093
+ workspace_dir: Workspace directory
1094
+ orchestrator_model: Model to use for orchestration (e.g., "gpt-5", "gpt-4", "claude-3-5-sonnet", "local:llama3.2")
1095
+ num_workers: Number of worker agents (Claude instances)
1096
+ enable_mcp: Enable MCP tools
1097
+ enable_networking: Enable agent networking
1098
+ enable_guardrails: Enable quality guardrails
1099
+ use_hanzo_net: Use hanzo/net for local orchestration
1100
+ hanzo_net_port: Port for hanzo/net (default 52415)
1101
+ console: Console for output
1102
+ """
1103
+ super().__init__(workspace_dir)
1104
+ self.orchestrator_model = orchestrator_model
1105
+ self.num_workers = num_workers
1106
+ self.enable_mcp = enable_mcp
1107
+ self.enable_networking = enable_networking
1108
+ self.enable_guardrails = enable_guardrails
1109
+ self.use_hanzo_net = use_hanzo_net
1110
+ self.hanzo_net_port = hanzo_net_port
1111
+ self.console = console
1112
+
1113
+ # Agent network components
1114
+ self.orchestrator_agent = None
1115
+ self.worker_agents = []
1116
+ self.critic_agents = []
1117
+ self.agent_network = None
1118
+ self.hanzo_net_process = None
1119
+
1120
+ # Check if we can use hanzo-network
1121
+ if not NETWORK_AVAILABLE:
1122
+ self.console.print(
1123
+ "[yellow]Warning: hanzo-network not available, falling back to basic mode[/yellow]"
1124
+ )
1125
+
1126
+ async def initialize(self):
1127
+ """Initialize the agent network with orchestrator and workers."""
1128
+ if not NETWORK_AVAILABLE:
1129
+ self.console.print(
1130
+ "[red]Cannot initialize network mode without hanzo-network[/red]"
1131
+ )
1132
+ return False
1133
+
1134
+ # Start hanzo net if requested for local orchestration
1135
+ if self.use_hanzo_net or self.orchestrator_model.startswith("local:"):
1136
+ await self._start_hanzo_net()
1137
+
1138
+ self.console.print(
1139
+ f"[cyan]Initializing agent network with {self.orchestrator_model} orchestrator...[/cyan]"
1140
+ )
1141
+
1142
+ # Create orchestrator agent (GPT-5, local, or other model)
1143
+ self.orchestrator_agent = await self._create_orchestrator_agent()
1144
+
1145
+ # Create worker agents (Claude instances for implementation)
1146
+ for i in range(self.num_workers):
1147
+ worker = await self._create_worker_agent(i)
1148
+ self.worker_agents.append(worker)
1149
+
1150
+ # Add local workers if using hanzo net (for cost optimization)
1151
+ if self.use_hanzo_net or self.orchestrator_model.startswith("local:"):
1152
+ # Add 1-2 local workers for simple tasks
1153
+ num_local_workers = min(2, self.num_workers)
1154
+ for i in range(num_local_workers):
1155
+ local_worker = await self._create_local_worker_agent(i)
1156
+ self.worker_agents.append(local_worker)
1157
+ self.console.print(
1158
+ f"[green]Added {num_local_workers} local workers for cost optimization[/green]"
1159
+ )
1160
+
1161
+ # Create critic agents for System 2 thinking
1162
+ if self.enable_guardrails:
1163
+ for i in range(max(1, self.num_workers // 2)):
1164
+ critic = await self._create_critic_agent(i)
1165
+ self.critic_agents.append(critic)
1166
+
1167
+ # Create the agent network
1168
+ all_agents = [self.orchestrator_agent] + self.worker_agents + self.critic_agents
1169
+
1170
+ # Create router based on configuration
1171
+ if self.use_hanzo_net or self.orchestrator_model.startswith("local:"):
1172
+ # Use cost-optimized router that prefers local models
1173
+ router = await self._create_cost_optimized_router()
1174
+ else:
1175
+ # Use intelligent router with orchestrator making decisions
1176
+ router = await self._create_intelligent_router()
1177
+
1178
+ # Create the network
1179
+ self.agent_network = create_network(
1180
+ agents=all_agents,
1181
+ router=router,
1182
+ default_agent=(
1183
+ self.orchestrator_agent.name if self.orchestrator_agent else None
1184
+ ),
1185
+ )
1186
+
1187
+ self.console.print(
1188
+ f"[green]✓ Agent network initialized with {len(all_agents)} agents[/green]"
1189
+ )
1190
+ return True
1191
+
1192
+ async def _start_hanzo_net(self):
1193
+ """Start hanzo net for local AI orchestration."""
1194
+ self.console.print(
1195
+ "[cyan]Starting hanzo/net for local AI orchestration...[/cyan]"
1196
+ )
1197
+
1198
+ # Check if hanzo net is already running
1199
+ import socket
1200
+
1201
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
1202
+ result = sock.connect_ex(("localhost", self.hanzo_net_port))
1203
+ sock.close()
1204
+
1205
+ if result == 0:
1206
+ self.console.print(
1207
+ f"[yellow]hanzo/net already running on port {self.hanzo_net_port}[/yellow]"
1208
+ )
1209
+ return
1210
+
1211
+ # Start hanzo net
1212
+ try:
1213
+ # Determine model to serve based on orchestrator model
1214
+ model = "llama-3.2-3b" # Default
1215
+ if ":" in self.orchestrator_model:
1216
+ model = self.orchestrator_model.split(":")[1]
1217
+
1218
+ cmd = [
1219
+ "hanzo",
1220
+ "net",
1221
+ "--port",
1222
+ str(self.hanzo_net_port),
1223
+ "--models",
1224
+ model,
1225
+ "--network",
1226
+ "local",
1227
+ ]
1228
+
1229
+ self.hanzo_net_process = subprocess.Popen(
1230
+ cmd,
1231
+ stdout=subprocess.PIPE,
1232
+ stderr=subprocess.PIPE,
1233
+ text=True,
1234
+ preexec_fn=os.setsid if hasattr(os, "setsid") else None,
1235
+ )
1236
+
1237
+ # Wait for it to start
1238
+ await asyncio.sleep(3)
1239
+
1240
+ if self.hanzo_net_process.poll() is None:
1241
+ self.console.print(
1242
+ f"[green]✓ hanzo/net started on port {self.hanzo_net_port} with model {model}[/green]"
1243
+ )
1244
+ else:
1245
+ self.console.print("[red]Failed to start hanzo/net[/red]")
1246
+
1247
+ except Exception as e:
1248
+ self.console.print(f"[red]Error starting hanzo/net: {e}[/red]")
1249
+
1250
+ async def _create_orchestrator_agent(self) -> Agent:
1251
+ """Create the orchestrator agent (GPT-5, local, or configured model)."""
1252
+ # Check if using local model via hanzo/net
1253
+ if self.orchestrator_model.startswith("local:"):
1254
+ # Use local model via hanzo/net
1255
+ model_name = self.orchestrator_model.split(":")[1]
1256
+
1257
+ # Import local network helpers
1258
+ from hanzo_network.local_network import create_local_agent
1259
+
1260
+ orchestrator = create_local_agent(
1261
+ name="local_orchestrator",
1262
+ description=f"Local {model_name} orchestrator via hanzo/net",
1263
+ system=self._get_orchestrator_system_prompt(),
1264
+ local_model=model_name,
1265
+ base_url=f"http://localhost:{self.hanzo_net_port}",
1266
+ tools=[],
1267
+ )
1268
+
1269
+ self.console.print(
1270
+ f"[green]✓ Created local {model_name} orchestrator via hanzo/net[/green]"
1271
+ )
1272
+ return orchestrator
1273
+
1274
+ # Parse model string to get provider and model
1275
+ model_name = self.orchestrator_model
1276
+ provider = "openai" # Default to OpenAI - use string
1277
+ api_key = None
1278
+
1279
+ # Determine provider from model name
1280
+ if model_name.startswith("gpt") or model_name == "codex":
1281
+ provider = "openai"
1282
+ api_key = os.getenv("OPENAI_API_KEY")
1283
+ elif model_name.startswith("claude"):
1284
+ provider = "anthropic"
1285
+ api_key = os.getenv("ANTHROPIC_API_KEY")
1286
+ elif model_name.startswith("gemini"):
1287
+ provider = "google"
1288
+ api_key = os.getenv("GOOGLE_API_KEY")
1289
+ elif model_name.startswith("local:"):
1290
+ provider = "local"
1291
+ model_name = model_name.replace("local:", "")
1292
+
1293
+ # Create model config based on what's available
1294
+ if NETWORK_AVAILABLE:
1295
+ # Real ModelConfig may have different signature
1296
+ try:
1297
+ model_config = ModelConfig(
1298
+ name=model_name,
1299
+ provider=provider,
1300
+ )
1301
+ # Set api_key separately if supported
1302
+ if hasattr(model_config, "api_key"):
1303
+ model_config.api_key = api_key
1304
+ except TypeError:
1305
+ # Fallback to simple string if ModelConfig doesn't work
1306
+ model_config = model_name
1307
+ else:
1308
+ # Use our fallback ModelConfig
1309
+ model_config = ModelConfig(
1310
+ name=model_name,
1311
+ provider=provider,
1312
+ api_key=api_key,
1313
+ )
1314
+
1315
+ # Create orchestrator with strategic system prompt
1316
+ orchestrator = create_agent(
1317
+ name="orchestrator",
1318
+ description=f"{self.orchestrator_model} powered meta-orchestrator for AI coding",
1319
+ model=model_config,
1320
+ system=self._get_orchestrator_system_prompt(),
1321
+ tools=[], # Orchestrator tools will be added
1322
+ )
1323
+
1324
+ self.console.print(
1325
+ f"[green]✓ Created {self.orchestrator_model} orchestrator[/green]"
1326
+ )
1327
+ return orchestrator
1328
+
1329
+ def _get_orchestrator_system_prompt(self) -> str:
1330
+ """Get the system prompt for the orchestrator."""
1331
+ return """You are an advanced AI orchestrator managing a network of specialized agents.
1332
+ Your responsibilities:
1333
+ 1. Strategic Planning: Break down complex tasks into manageable subtasks
1334
+ 2. Agent Coordination: Delegate work to appropriate specialist agents
1335
+ 3. Quality Control: Ensure code quality through critic agents
1336
+ 4. System 2 Thinking: Invoke deliberative reasoning for complex decisions
1337
+ 5. Resource Management: Optimize agent usage for cost and performance
1338
+
1339
+ Available agents:
1340
+ - Worker agents: Claude instances for code implementation and MCP tool usage
1341
+ - Critic agents: Review and improve code quality
1342
+ - Local agents: Fast, cost-effective for simple tasks
1343
+
1344
+ Decision framework:
1345
+ - Complex reasoning → Use your advanced capabilities
1346
+ - Code implementation → Delegate to worker agents
1347
+ - Quality review → Invoke critic agents
1348
+ - Simple tasks → Use local agents if available
1349
+
1350
+ Always maintain high code quality standards and prevent degradation."""
1351
+
1352
+ async def _create_worker_agent(self, index: int) -> Agent:
1353
+ """Create a worker agent (Claude for implementation)."""
1354
+ worker = create_agent(
1355
+ name=f"worker_{index}",
1356
+ description=f"Claude worker agent {index} for code implementation",
1357
+ model=(
1358
+ "claude-3-5-sonnet-20241022"
1359
+ if NETWORK_AVAILABLE
1360
+ else ModelConfig(
1361
+ provider="anthropic",
1362
+ name="claude-3-5-sonnet-20241022",
1363
+ api_key=os.getenv("ANTHROPIC_API_KEY"),
1364
+ )
1365
+ ),
1366
+ system="""You are a Claude worker agent specialized in code implementation.
1367
+
1368
+ Your capabilities:
1369
+ - Write and modify code
1370
+ - Use MCP tools for file operations
1371
+ - Execute commands and tests
1372
+ - Debug and fix issues
1373
+
1374
+ Follow best practices and maintain code quality.""",
1375
+ tools=[], # MCP tools will be added if enabled
1376
+ )
1377
+
1378
+ self.console.print(f" Created worker agent {index}")
1379
+ return worker
1380
+
1381
+ async def _create_local_worker_agent(self, index: int) -> Agent:
1382
+ """Create a local worker agent for simple tasks (cost optimization)."""
1383
+ from hanzo_network.local_network import create_local_agent
1384
+
1385
+ worker = create_local_agent(
1386
+ name=f"local_worker_{index}",
1387
+ description=f"Local worker agent {index} for simple tasks",
1388
+ system="""You are a local worker agent optimized for simple tasks.
1389
+
1390
+ Your capabilities:
1391
+ - Simple code transformations
1392
+ - Basic file operations
1393
+ - Quick validation checks
1394
+ - Pattern matching
1395
+
1396
+ You handle simple tasks to reduce API costs.""",
1397
+ local_model="llama-3.2-3b",
1398
+ base_url=f"http://localhost:{self.hanzo_net_port}",
1399
+ tools=[],
1400
+ )
1401
+
1402
+ self.console.print(f" Created local worker agent {index}")
1403
+ return worker
1404
+
1405
+ async def _create_critic_agent(self, index: int) -> Agent:
1406
+ """Create a critic agent for code review."""
1407
+ # Use a different model for critics for diversity
1408
+ critic_model = "gpt-4" if index % 2 == 0 else "claude-3-5-sonnet-20241022"
1409
+
1410
+ critic = create_agent(
1411
+ name=f"critic_{index}",
1412
+ description=f"Critic agent {index} for code quality assurance",
1413
+ model=critic_model, # Just pass the model name string
1414
+ system="""You are a critic agent focused on code quality and best practices.
1415
+
1416
+ Review code for:
1417
+ 1. Correctness and bug detection
1418
+ 2. Performance optimization opportunities
1419
+ 3. Security vulnerabilities
1420
+ 4. Maintainability and readability
1421
+ 5. Best practices and design patterns
1422
+
1423
+ Provide constructive feedback with specific improvement suggestions.""",
1424
+ tools=[],
1425
+ )
1426
+
1427
+ self.console.print(f" Created critic agent {index} ({critic_model})")
1428
+ return critic
1429
+
1430
+ async def _create_cost_optimized_router(self) -> Router:
1431
+ """Create a cost-optimized router that prefers local models."""
1432
+ from hanzo_network.core.router import Router
1433
+
1434
+ class CostOptimizedRouter(Router):
1435
+ """Router that minimizes costs by using local models when possible."""
1436
+
1437
+ def __init__(self, orchestrator_agent, worker_agents, critic_agents):
1438
+ super().__init__()
1439
+ self.orchestrator = orchestrator_agent
1440
+ self.workers = worker_agents
1441
+ self.critics = critic_agents
1442
+ self.local_workers = [w for w in worker_agents if "local" in w.name]
1443
+ self.api_workers = [w for w in worker_agents if "local" not in w.name]
1444
+
1445
+ async def route(self, prompt: str, state=None) -> str:
1446
+ """Route based on task complexity and cost optimization."""
1447
+ prompt_lower = prompt.lower()
1448
+
1449
+ # Simple tasks → Local workers
1450
+ simple_keywords = [
1451
+ "list",
1452
+ "check",
1453
+ "validate",
1454
+ "format",
1455
+ "rename",
1456
+ "count",
1457
+ "find",
1458
+ ]
1459
+ if (
1460
+ any(keyword in prompt_lower for keyword in simple_keywords)
1461
+ and self.local_workers
1462
+ ):
1463
+ return self.local_workers[0].name
1464
+
1465
+ # Complex implementation → API workers (Claude)
1466
+ complex_keywords = [
1467
+ "implement",
1468
+ "refactor",
1469
+ "debug",
1470
+ "optimize",
1471
+ "design",
1472
+ "architect",
1473
+ ]
1474
+ if (
1475
+ any(keyword in prompt_lower for keyword in complex_keywords)
1476
+ and self.api_workers
1477
+ ):
1478
+ return self.api_workers[0].name
1479
+
1480
+ # Review tasks → Critics
1481
+ review_keywords = [
1482
+ "review",
1483
+ "critique",
1484
+ "analyze",
1485
+ "improve",
1486
+ "validate code",
1487
+ ]
1488
+ if (
1489
+ any(keyword in prompt_lower for keyword in review_keywords)
1490
+ and self.critics
1491
+ ):
1492
+ return self.critics[0].name
1493
+
1494
+ # Strategic decisions → Orchestrator
1495
+ strategic_keywords = [
1496
+ "plan",
1497
+ "decide",
1498
+ "strategy",
1499
+ "coordinate",
1500
+ "organize",
1501
+ ]
1502
+ if any(keyword in prompt_lower for keyword in strategic_keywords):
1503
+ return self.orchestrator.name
1504
+
1505
+ # Default: Try local first, then API
1506
+ if self.local_workers:
1507
+ # For shorter prompts, try local first
1508
+ if len(prompt) < 500:
1509
+ return self.local_workers[0].name
1510
+
1511
+ # Fall back to API workers for complex tasks
1512
+ return (
1513
+ self.api_workers[0].name
1514
+ if self.api_workers
1515
+ else self.orchestrator.name
1516
+ )
1517
+
1518
+ # Create the cost-optimized router
1519
+ router = CostOptimizedRouter(
1520
+ self.orchestrator_agent, self.worker_agents, self.critic_agents
1521
+ )
1522
+
1523
+ self.console.print(
1524
+ "[green]✓ Created cost-optimized router (local models preferred)[/green]"
1525
+ )
1526
+ return router
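Given the keyword lists in CostOptimizedRouter.route above, and assuming the default agent names created earlier in this class, routing resolves roughly as follows for some illustrative prompts (local workers only exist when hanzo/net or a local: model is in use, which is the only path that builds this router):

# "list the files in src/"             -> local_worker_0      ("list" is a simple keyword)
# "implement a retry decorator"        -> worker_0             ("implement" is a complex keyword)
# "review this module for regressions" -> critic_0             ("review" is a review keyword)
# "plan the migration to async"        -> orchestrator agent   ("plan" is a strategic keyword)
# any other prompt under 500 chars     -> local_worker_0       (local-first default)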
1527
+
1528
+ async def _create_intelligent_router(self) -> Router:
1529
+ """Create an intelligent router using the orchestrator for decisions."""
1530
+ if self.orchestrator_agent:
1531
+ # Create routing agent that uses orchestrator for decisions
1532
+ router = create_routing_agent(
1533
+ name="router",
1534
+ description="Intelligent task router",
1535
+ agent=self.orchestrator_agent,
1536
+ system="""Route tasks to the most appropriate agent based on:
1537
+
1538
+ 1. Task complexity and requirements
1539
+ 2. Agent capabilities and specialization
1540
+ 3. Current workload and availability
1541
+ 4. Cost/performance optimization
1542
+
1543
+ Routing strategy:
1544
+ - Strategic decisions → Stay with orchestrator
1545
+ - Implementation tasks → Route to workers
1546
+ - Review tasks → Route to critics
1547
+ - Parallel work → Split across multiple agents
1548
+
1549
+ Return the name of the best agent for the task.""",
1550
+ )
1551
+ else:
1552
+ # Fallback to basic router
1553
+ router = create_router(
1554
+ agents=self.worker_agents + self.critic_agents,
1555
+ default=self.worker_agents[0].name if self.worker_agents else None,
1556
+ )
1557
+
1558
+ return router
1559
+
1560
+ async def execute_with_network(
1561
+ self, task: str, context: Optional[Dict] = None
1562
+ ) -> Dict:
1563
+ """Execute a task using the agent network.
1564
+
1565
+ Args:
1566
+ task: Task description
1567
+ context: Optional context
1568
+
1569
+ Returns:
1570
+ Execution result
1571
+ """
1572
+ if not self.agent_network:
1573
+ self.console.print("[red]Agent network not initialized[/red]")
1574
+ return {"error": "Network not initialized"}
1575
+
1576
+ self.console.print(f"[cyan]Executing task with agent network: {task}[/cyan]")
1577
+
1578
+ # Create network state
1579
+ state = NetworkState()
1580
+ state.add_message("user", task)
1581
+
1582
+ if context:
1583
+ state.metadata.update(context)
1584
+
1585
+ # Run the network
1586
+ try:
1587
+ result = await self.agent_network.run(prompt=task, state=state)
1588
+
1589
+ # If guardrails enabled, validate result
1590
+ if self.enable_guardrails and self.critic_agents:
1591
+ validated = await self._validate_with_critics(result, task)
1592
+ if validated.get("improvements"):
1593
+ self.console.print("[yellow]Applied critic improvements[/yellow]")
1594
+ return validated
1595
+
1596
+ return result
1597
+
1598
+ except Exception as e:
1599
+ self.console.print(f"[red]Network execution error: {e}[/red]")
1600
+ return {"error": str(e)}
1601
+
1602
+ async def _validate_with_critics(self, result: Dict, original_task: str) -> Dict:
1603
+ """Validate and potentially improve result using critic agents."""
1604
+ if not self.critic_agents:
1605
+ return result
1606
+
1607
+ # Get first critic to review
1608
+ critic = self.critic_agents[0]
1609
+
1610
+ review_prompt = f"""
1611
+ Review this solution:
1612
+
1613
+ Task: {original_task}
1614
+ Solution: {result.get('output', '')}
1615
+
1616
+ Provide specific improvements if needed.
1617
+ """
1618
+
1619
+ review = await critic.run(review_prompt)
1620
+
1621
+ # Check if improvements suggested
1622
+ if "improve" in str(review.get("output", "")).lower():
1623
+ result["improvements"] = review.get("output")
1624
+
1625
+ return result
1626
+
1627
+ def shutdown(self):
1628
+ """Shutdown the network orchestrator and hanzo net if running."""
1629
+ # Stop hanzo net if we started it
1630
+ if self.hanzo_net_process:
1631
+ try:
1632
+ self.console.print("[yellow]Stopping hanzo/net...[/yellow]")
1633
+ if hasattr(os, "killpg"):
1634
+ os.killpg(os.getpgid(self.hanzo_net_process.pid), signal.SIGTERM)
1635
+ else:
1636
+ self.hanzo_net_process.terminate()
1637
+ self.hanzo_net_process.wait(timeout=5)
1638
+ self.console.print("[green]✓ hanzo/net stopped[/green]")
1639
+ except Exception:
1640
+ try:
1641
+ self.hanzo_net_process.kill()
1642
+ except Exception:
1643
+ pass
1644
+
1645
+ # Call parent shutdown
1646
+ super().shutdown()
1647
+
1648
+
1649
+ class MultiClaudeOrchestrator(HanzoDevOrchestrator):
1650
+ """Extended orchestrator for multiple Claude instances with MCP networking."""
1651
+
1652
+ def __init__(
1653
+ self,
1654
+ workspace_dir: str,
1655
+ claude_path: str,
1656
+ num_instances: int,
1657
+ enable_mcp: bool,
1658
+ enable_networking: bool,
1659
+ enable_guardrails: bool,
1660
+ console: Console,
1661
+ orchestrator_model: str = "gpt-4",
1662
+ ):
1663
+ super().__init__(workspace_dir, claude_path)
1664
+ self.num_instances = num_instances
1665
+ self.enable_mcp = enable_mcp
1666
+ self.enable_networking = enable_networking
1667
+ self.enable_guardrails = enable_guardrails
1668
+ self.console = console
1669
+ self.orchestrator_model = orchestrator_model  # used by the chat interface
1670
+
1671
+ # Store multiple Claude instances
1672
+ self.claude_instances = []
1673
+ self.instance_configs = []
1674
+
1675
+ async def initialize(self):
1676
+ """Initialize all Claude instances with MCP networking."""
1677
+ self.console.print("[cyan]Initializing Claude instances...[/cyan]")
1678
+
1679
+ for i in range(self.num_instances):
1680
+ role = "primary" if i == 0 else f"critic_{i}"
1681
+ config = await self._create_instance_config(i, role)
1682
+ self.instance_configs.append(config)
1683
+
1684
+ self.console.print(
1685
+ f" [{i+1}/{self.num_instances}] {role} instance configured"
1686
+ )
1687
+
1688
+ # If networking enabled, configure MCP connections between instances
1689
+ if self.enable_networking:
1690
+ await self._setup_mcp_networking()
1691
+
1692
+ # Start all instances
1693
+ for i, config in enumerate(self.instance_configs):
1694
+ success = await self._start_claude_instance(i, config)
1695
+ if success:
1696
+ self.console.print(f"[green]✓ Instance {i} started[/green]")
1697
+ else:
1698
+ self.console.print(f"[red]✗ Failed to start instance {i}[/red]")
1699
+
1700
+ async def _create_instance_config(self, index: int, role: str) -> Dict:
1701
+ """Create configuration for a Claude instance."""
1702
+ base_port = 8000
1703
+ mcp_port = 9000
1704
+
1705
+ config = {
1706
+ "index": index,
1707
+ "role": role,
1708
+ "workspace": self.workspace_dir / f"instance_{index}",
1709
+ "port": base_port + index,
1710
+ "mcp_port": mcp_port + index,
1711
+ "mcp_config": {},
1712
+ "env": {},
1713
+ }
1714
+
1715
+ # Create workspace directory
1716
+ config["workspace"].mkdir(parents=True, exist_ok=True)
1717
+
1718
+ # Configure MCP tools if enabled
1719
+ if self.enable_mcp:
1720
+ config["mcp_config"] = await self._create_mcp_config(index, role)
1721
+
1722
+ return config
1723
+
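To make the defaults concrete, the dict returned for the second instance (index 1, role critic_1) has the shape shown below; the workspace path depends on the --workspace argument and is shown here with the default value as an assumption.

from pathlib import Path

example_config = {
    "index": 1,
    "role": "critic_1",
    "workspace": Path("~/.hanzo/dev/instance_1").expanduser(),  # created under self.workspace_dir
    "port": 8001,      # base_port (8000) + index
    "mcp_port": 9001,  # mcp_port (9000) + index
    "mcp_config": {},  # filled by _create_mcp_config only when enable_mcp is True
    "env": {},
}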
1724
+ async def _create_mcp_config(self, index: int, role: str) -> Dict:
1725
+ """Create MCP configuration for an instance."""
1726
+ mcp_config = {
1727
+ "mcpServers": {
1728
+ "hanzo-mcp": {
1729
+ "command": "python",
1730
+ "args": ["-m", "hanzo_mcp"],
1731
+ "env": {"INSTANCE_ID": str(index), "INSTANCE_ROLE": role},
1732
+ }
1733
+ }
1734
+ }
1735
+
1736
+ # Add file system tools
1737
+ mcp_config["mcpServers"]["filesystem"] = {
1738
+ "command": "npx",
1739
+ "args": ["-y", "@modelcontextprotocol/server-filesystem"],
1740
+ "env": {"ALLOWED_DIRECTORIES": str(self.workspace_dir)},
1741
+ }
1742
+
1743
+ return mcp_config
1744
+
1745
+ async def _setup_mcp_networking(self):
1746
+ """Set up MCP networking between Claude instances."""
1747
+ self.console.print(
1748
+ "[cyan]Setting up MCP networking between instances...[/cyan]"
1749
+ )
1750
+
1751
+ # Each instance gets MCP servers for all other instances
1752
+ for i, config in enumerate(self.instance_configs):
1753
+ for j, other_config in enumerate(self.instance_configs):
1754
+ if i != j:
1755
+ # Add other instance as MCP server
1756
+ server_name = f"claude_instance_{j}"
1757
+ config["mcp_config"]["mcpServers"][server_name] = {
1758
+ "command": "python",
1759
+ "args": [
1760
+ "-m",
1761
+ "hanzo_mcp.bridge",
1762
+ "--target-port",
1763
+ str(other_config["port"]),
1764
+ "--instance-id",
1765
+ str(j),
1766
+ "--role",
1767
+ other_config["role"],
1768
+ ],
1769
+ "env": {"SOURCE_INSTANCE": str(i), "TARGET_INSTANCE": str(j)},
1770
+ }
1771
+
1772
+ # Save MCP config
1773
+ mcp_config_file = config["workspace"] / "mcp_config.json"
1774
+ with open(mcp_config_file, "w") as f:
1775
+ json.dump(config["mcp_config"], f, indent=2)
1776
+
1777
+ config["env"]["MCP_CONFIG_PATH"] = str(mcp_config_file)
1778
+
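To see what the cross-wiring produces, in a two-instance run the loop above adds one bridge server per peer to each instance's MCP config. The entry written into instance 0's mcp_config.json for its peer (instance 1, role critic_1, port 8001) is shown below as a Python literal rather than the serialized JSON.

instance_0_bridge = {
    "claude_instance_1": {
        "command": "python",
        "args": [
            "-m", "hanzo_mcp.bridge",
            "--target-port", "8001",
            "--instance-id", "1",
            "--role", "critic_1",
        ],
        "env": {"SOURCE_INSTANCE": "0", "TARGET_INSTANCE": "1"},
    }
}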
1779
+ async def _start_claude_instance(self, index: int, config: Dict) -> bool:
1780
+ """Start a single Claude instance."""
1781
+ try:
1782
+ cmd = [self.claude_code_path or "claude"]
1783
+
1784
+ # Add configuration flags
1785
+ if config.get("env", {}).get("MCP_CONFIG_PATH"):
1786
+ cmd.extend(["--mcp-config", config["env"]["MCP_CONFIG_PATH"]])
1787
+
1788
+ # Set up environment
1789
+ env = os.environ.copy()
1790
+ env.update(config.get("env", {}))
1791
+
1792
+ # Start process
1793
+ process = subprocess.Popen(
1794
+ cmd,
1795
+ stdout=subprocess.PIPE,
1796
+ stderr=subprocess.PIPE,
1797
+ stdin=subprocess.PIPE,
1798
+ env=env,
1799
+ cwd=str(config["workspace"]),
1800
+ preexec_fn=os.setsid if hasattr(os, "setsid") else None,
1801
+ )
1802
+
1803
+ self.claude_instances.append(
1804
+ {
1805
+ "index": index,
1806
+ "role": config["role"],
1807
+ "process": process,
1808
+ "config": config,
1809
+ "health": RuntimeHealth(
1810
+ state=RuntimeState.RUNNING,
1811
+ last_response=datetime.now(),
1812
+ response_time_ms=0,
1813
+ memory_usage_mb=0,
1814
+ cpu_percent=0,
1815
+ error_count=0,
1816
+ restart_count=0,
1817
+ ),
1818
+ }
1819
+ )
1820
+
1821
+ return True
1822
+
1823
+ except Exception as e:
1824
+ logger.error(f"Failed to start instance {index}: {e}")
1825
+ return False
1826
+
1827
+ async def execute_with_critique(self, task: str) -> Dict:
1828
+ """Execute a task with System 2 critique.
1829
+
1830
+ 1. Primary instance executes the task
1831
+ 2. Critic instance(s) review and suggest improvements
1832
+ 3. Primary incorporates feedback if confidence is high
1833
+ """
1834
+ self.console.print(f"[cyan]Executing with System 2 thinking: {task}[/cyan]")
1835
+
1836
+ # Step 1: Primary execution
1837
+ primary = self.claude_instances[0]
1838
+ result = await self._send_to_instance(primary, task)
1839
+
1840
+ if self.num_instances < 2:
1841
+ return result
1842
+
1843
+ # Step 2: Critic review
1844
+ critiques = []
1845
+ for critic in self.claude_instances[1:]:
1846
+ critique_prompt = f"""
1847
+ Review this code/solution and provide constructive criticism:
1848
+
1849
+ Task: {task}
1850
+ Solution: {result.get('output', '')}
1851
+
1852
+ Evaluate for:
1853
+ 1. Correctness
1854
+ 2. Performance
1855
+ 3. Security
1856
+ 4. Maintainability
1857
+ 5. Best practices
1858
+
1859
+ Suggest specific improvements.
1860
+ """
1861
+
1862
+ critique = await self._send_to_instance(critic, critique_prompt)
1863
+ critiques.append(critique)
1864
+
1865
+ # Step 3: Incorporate feedback if valuable
1866
+ if critiques and self.enable_guardrails:
1867
+ improvement_prompt = f"""
1868
+ Original task: {task}
1869
+ Original solution: {result.get('output', '')}
1870
+
1871
+ Critiques received:
1872
+ {json.dumps(critiques, indent=2)}
1873
+
1874
+ Incorporate the valid suggestions and produce an improved solution.
1875
+ """
1876
+
1877
+ improved = await self._send_to_instance(primary, improvement_prompt)
1878
+
1879
+ # Validate improvement didn't degrade quality
1880
+ if await self._validate_improvement(result, improved):
1881
+ self.console.print(
1882
+ "[green]✓ Solution improved with System 2 feedback[/green]"
1883
+ )
1884
+ return improved
1885
+ else:
1886
+ self.console.print(
1887
+ "[yellow]⚠ Keeping original solution (improvement validation failed)[/yellow]"
1888
+ )
1889
+
1890
+ return result
1891
+
1892
+ async def _send_to_instance(self, instance: Dict, prompt: str) -> Dict:
1893
+ """Send a prompt to a specific Claude instance."""
1894
+ # This would use the actual Claude API or IPC mechanism
1895
+ # For now, it's a placeholder
1896
+ return {
1897
+ "output": f"Response from {instance['role']}: Processed '{prompt[:50]}...'",
1898
+ "success": True,
1899
+ }
1900
+
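A production implementation would have to speak whatever protocol the Claude Code process actually exposes on its pipes, which is not specified here. Purely to illustrate the plumbing, and not the real Claude Code I/O format, a line-oriented round-trip over the stored subprocess could look like this sketch:

import asyncio

async def _send_to_instance_stdio(self, instance: dict, prompt: str) -> dict:
    """Hypothetical sketch: one line out, one line back over the instance's pipes."""
    process = instance["process"]

    def _roundtrip() -> str:
        process.stdin.write((prompt.replace("\n", " ") + "\n").encode())
        process.stdin.flush()
        return process.stdout.readline().decode(errors="replace")

    # Keep blocking pipe I/O off the event loop.
    output = await asyncio.to_thread(_roundtrip)
    return {"output": output.strip(), "success": bool(output.strip())}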
1901
+ async def _validate_improvement(self, original: Dict, improved: Dict) -> bool:
1902
+ """Validate that an improvement doesn't degrade quality."""
1903
+ if not self.enable_guardrails:
1904
+ return True
1905
+
1906
+ # Placeholder for actual validation logic
1907
+ # Would check: tests still pass, no new errors, performance not degraded, etc.
1908
+ return True
1909
+
1910
+ def shutdown(self):
1911
+ """Shutdown all Claude instances."""
1912
+ self.console.print("[yellow]Shutting down all instances...[/yellow]")
1913
+
1914
+ for instance in self.claude_instances:
1915
+ try:
1916
+ process = instance["process"]
1917
+ if hasattr(os, "killpg"):
1918
+ os.killpg(os.getpgid(process.pid), signal.SIGTERM)
1919
+ else:
1920
+ process.terminate()
1921
+ process.wait(timeout=5)
1922
+ except Exception:
1923
+ try:
1924
+ instance["process"].kill()
1925
+ except Exception:
1926
+ pass
1927
+
1928
+ self.console.print("[green]✓ All instances shut down[/green]")
1929
+
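Putting the class together, a minimal driver might look like the sketch below. The workspace path and model name are placeholders, and the construction assumes the base class accepts a plain string workspace path; in normal use, main() and run_dev_orchestrator below are the intended entry points rather than direct construction.

import asyncio
from pathlib import Path
from rich.console import Console

async def demo_multi_claude():
    orchestrator = MultiClaudeOrchestrator(
        workspace_dir=str(Path("~/.hanzo/dev").expanduser()),
        claude_path=None,            # fall back to a "claude" binary on PATH
        num_instances=2,             # one primary plus one critic
        enable_mcp=True,
        enable_networking=True,
        enable_guardrails=True,
        console=Console(),
        orchestrator_model="gpt-4",
    )
    await orchestrator.initialize()
    try:
        result = await orchestrator.execute_with_critique("Refactor the config loader")
        print(result.get("output", result))
    finally:
        orchestrator.shutdown()

# asyncio.run(demo_multi_claude())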
1930
+
1931
+ async def main():
1932
+ """Main entry point for hanzo-dev."""
1933
+ import argparse
1934
+
1935
+ parser = argparse.ArgumentParser(
1936
+ description="Hanzo Dev - System 2 Meta-AI Orchestrator"
1937
+ )
1938
+ parser.add_argument(
1939
+ "--workspace", default="~/.hanzo/dev", help="Workspace directory"
1940
+ )
1941
+ parser.add_argument("--claude-path", help="Path to Claude Code executable")
1942
+ parser.add_argument("--monitor", action="store_true", help="Start in monitor mode")
1943
+ parser.add_argument("--repl", action="store_true", help="Start REPL interface")
1944
+ parser.add_argument(
1945
+ "--instances", type=int, default=2, help="Number of Claude instances"
1946
+ )
1947
+ parser.add_argument("--no-mcp", action="store_true", help="Disable MCP tools")
1948
+ parser.add_argument(
1949
+ "--no-network", action="store_true", help="Disable instance networking"
1950
+ )
1951
+ parser.add_argument(
1952
+ "--no-guardrails", action="store_true", help="Disable guardrails"
1953
+ )
1954
+
1955
+ args = parser.parse_args()
1956
+
1957
+ await run_dev_orchestrator(
1958
+ workspace=args.workspace,
1959
+ claude_path=args.claude_path,
1960
+ monitor=args.monitor,
1961
+ repl=args.repl or not args.monitor,
1962
+ instances=args.instances,
1963
+ mcp_tools=not args.no_mcp,
1964
+ network_mode=not args.no_network,
1965
+ guardrails=not args.no_guardrails,
1966
+ )
1967
+
1968
+
1969
+ if __name__ == "__main__":
1970
+ asyncio.run(main())
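Based on the argument parser above, the module can be invoked directly as shown below; these commands assume python -m hanzo.dev is a valid entry point for this file (the installed hanzo CLI may expose a different command).

# Interactive REPL with the default two instances (REPL is the default when --monitor is absent):
python -m hanzo.dev --workspace ~/.hanzo/dev

# Monitor mode with three instances and guardrails disabled:
python -m hanzo.dev --monitor --instances 3 --no-guardrails

# Use a specific Claude Code binary and disable cross-instance networking:
python -m hanzo.dev --claude-path /usr/local/bin/claude --no-network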