hanzo-mcp 0.8.1__py3-none-any.whl → 0.8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hanzo-mcp might be problematic; see the registry's security advisory page for more details.

hanzo_mcp/__init__.py CHANGED
@@ -1,11 +1,24 @@
1
1
  """Hanzo AI - Implementation of Hanzo capabilities using MCP."""
2
2
 
3
# Polyfill: guarantee typing.override exists on Python < 3.12.
import typing as _typing

if not hasattr(_typing, "override"):  # pragma: no cover

    def override(obj):  # type: ignore
        """No-op stand-in for typing.override (added in Python 3.12)."""
        return obj

    _typing.override = override  # type: ignore[attr-defined]
3
14
  # Configure FastMCP logging globally for stdio transport
4
15
  import os
5
16
  import warnings
6
17
 
7
18
  # Suppress litellm deprecation warnings about event loop
8
- warnings.filterwarnings("ignore", message="There is no current event loop", category=DeprecationWarning)
19
+ warnings.filterwarnings(
20
+ "ignore", message="There is no current event loop", category=DeprecationWarning
21
+ )
9
22
 
10
23
  if os.environ.get("HANZO_MCP_TRANSPORT") == "stdio":
11
24
  try:
hanzo_mcp/bridge.py ADDED
@@ -0,0 +1,484 @@
1
+ """MCP Bridge for inter-Claude communication.
2
+
3
+ This module provides MCP server functionality that allows Claude instances
4
+ to communicate with each other, enabling peer-to-peer agent networks.
5
+ """
6
+
7
+ import os
8
+ import sys
9
+ import json
10
+ import asyncio
11
+ import logging
12
+ import argparse
13
+ from typing import Any, Dict, List, Optional
14
+ from dataclasses import asdict, dataclass
15
+
16
+ import mcp.server.fastmcp as mcp
17
+ from mcp import tool
18
+ from mcp.types import INTERNAL_ERROR, Tool, TextContent
19
+ from mcp.server.stdio import stdio_server
20
+ from mcp.server.models import InitializationOptions
21
+ from mcp.server.fastmcp import FastMCP
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
@dataclass
class BridgeConfig:
    """Configuration for MCP bridge."""

    # TCP port the target Claude instance listens on.
    target_port: int
    # Numeric ID of the instance this bridge connects to.
    instance_id: int
    # Role label of the target instance, e.g. "primary" or "critic_1".
    role: str
    # ID of the instance originating requests (None if unknown).
    source_instance: Optional[int] = None
    # ID of the instance receiving requests (None if unknown).
    target_instance: Optional[int] = None
35
+
36
+
37
class ClaudeBridge(FastMCP):
    """MCP Bridge server for Claude-to-Claude communication."""

    def __init__(self, config: BridgeConfig):
        """Initialize the bridge.

        Args:
            config: Bridge configuration
        """
        # Set server name based on target instance
        super().__init__(f"claude_instance_{config.instance_id}")

        self.config = config
        # Chronological record of every message/response that crossed the bridge.
        self.conversation_history: List[Dict[str, Any]] = []
        # Key -> metadata dict, populated by the share_context_with_claude tool.
        self.shared_context: Dict[str, Any] = {}

        # Register tools
        self._register_tools()

    def _register_tools(self):
        """Register MCP tools for inter-Claude communication."""
        # The tools are defined as closures so they can capture `self`.
        # NOTE(review): each function name becomes the MCP tool name and each
        # docstring is surfaced to clients as the tool description.

        @self.tool()
        async def chat_with_claude(message: str, context: Optional[str] = None) -> str:
            """Chat with another Claude instance.

            Args:
                message: Message to send to the other Claude
                context: Optional context to provide

            Returns:
                Response from the other Claude instance
            """
            logger.info(f"Bridge {self.config.instance_id}: Received chat request")

            # Record in conversation history
            self.conversation_history.append(
                {
                    "from": self.config.source_instance,
                    "to": self.config.target_instance,
                    "message": message,
                    "context": context,
                }
            )

            # Simulate response (in production, this would make actual API call)
            response = await self._forward_to_claude(message, context)

            # Record the reply as well, with from/to reversed.
            self.conversation_history.append(
                {
                    "from": self.config.target_instance,
                    "to": self.config.source_instance,
                    "response": response,
                }
            )

            return response

        @self.tool()
        async def ask_claude_to_review(
            code: str, description: str, focus_areas: Optional[List[str]] = None
        ) -> Dict[str, Any]:
            """Ask another Claude to review code.

            Args:
                code: Code to review
                description: Description of what the code does
                focus_areas: Specific areas to focus on (e.g., ["security", "performance"])

            Returns:
                Review feedback from the other Claude
            """
            logger.info(f"Bridge {self.config.instance_id}: Code review request")

            review_prompt = self._build_review_prompt(code, description, focus_areas)
            review = await self._forward_to_claude(review_prompt)

            return {
                "reviewer": f"claude_{self.config.instance_id}",
                "role": self.config.role,
                "feedback": review,
                "focus_areas": focus_areas or ["general"],
            }

        @self.tool()
        async def delegate_to_claude(
            task: str, requirements: List[str], constraints: Optional[List[str]] = None
        ) -> Dict[str, Any]:
            """Delegate a task to another Claude instance.

            Args:
                task: Task description
                requirements: List of requirements
                constraints: Optional constraints

            Returns:
                Task completion result from the other Claude
            """
            logger.info(f"Bridge {self.config.instance_id}: Task delegation")

            delegation_prompt = self._build_delegation_prompt(
                task, requirements, constraints
            )
            result = await self._forward_to_claude(delegation_prompt)

            # "completed" is asserted unconditionally; the simulated forward
            # never fails, so there is no error path to report here.
            return {
                "delegated_to": f"claude_{self.config.instance_id}",
                "role": self.config.role,
                "task": task,
                "result": result,
                "status": "completed",
            }

        @self.tool()
        async def get_claude_opinion(
            question: str,
            options: Optional[List[str]] = None,
            criteria: Optional[List[str]] = None,
        ) -> Dict[str, Any]:
            """Get another Claude's opinion on a decision.

            Args:
                question: The question or decision to get opinion on
                options: Optional list of options to choose from
                criteria: Optional evaluation criteria

            Returns:
                Opinion and reasoning from the other Claude
            """
            logger.info(f"Bridge {self.config.instance_id}: Opinion request")

            opinion_prompt = self._build_opinion_prompt(question, options, criteria)
            opinion = await self._forward_to_claude(opinion_prompt)

            return {
                "advisor": f"claude_{self.config.instance_id}",
                "role": self.config.role,
                "question": question,
                "opinion": opinion,
                "options_considered": options,
                "criteria_used": criteria,
            }

        @self.tool()
        async def share_context_with_claude(
            key: str, value: Any, description: Optional[str] = None
        ) -> bool:
            """Share context with another Claude instance.

            Args:
                key: Context key
                value: Context value
                description: Optional description of the context

            Returns:
                Success status
            """
            logger.info(f"Bridge {self.config.instance_id}: Sharing context '{key}'")

            # Re-sharing an existing key silently overwrites the previous entry.
            self.shared_context[key] = {
                "value": value,
                "description": description,
                "shared_by": self.config.source_instance,
                "shared_with": self.config.target_instance,
            }

            return True

        @self.tool()
        async def get_shared_context(key: Optional[str] = None) -> Dict[str, Any]:
            """Get shared context from Claude network.

            Args:
                key: Optional specific key to retrieve

            Returns:
                Shared context data
            """
            # A missing key yields {} rather than raising.
            if key:
                return self.shared_context.get(key, {})
            return self.shared_context

        @self.tool()
        async def brainstorm_with_claude(
            topic: str, num_ideas: int = 5, constraints: Optional[List[str]] = None
        ) -> List[str]:
            """Brainstorm ideas with another Claude.

            Args:
                topic: Topic to brainstorm about
                num_ideas: Number of ideas to generate
                constraints: Optional constraints

            Returns:
                List of brainstormed ideas
            """
            logger.info(f"Bridge {self.config.instance_id}: Brainstorming request")

            brainstorm_prompt = f"""
            Please brainstorm {num_ideas} ideas about: {topic}

            {"Constraints: " + ", ".join(constraints) if constraints else ""}

            Provide creative and practical ideas.
            """

            response = await self._forward_to_claude(brainstorm_prompt)

            # Parse response into list (simplified): one idea per non-blank line,
            # truncated to the requested count.
            ideas = response.split("\n")
            ideas = [idea.strip() for idea in ideas if idea.strip()]

            return ideas[:num_ideas]

        @self.tool()
        async def get_claude_status() -> Dict[str, Any]:
            """Get status of the connected Claude instance.

            Returns:
                Status information
            """
            # "available" is hard-coded; no liveness probe is performed.
            return {
                "instance_id": self.config.instance_id,
                "role": self.config.role,
                "status": "available",
                "conversation_count": len(self.conversation_history),
                "shared_context_keys": list(self.shared_context.keys()),
            }

    def _build_review_prompt(
        self, code: str, description: str, focus_areas: Optional[List[str]]
    ) -> str:
        """Build a code review prompt."""
        prompt = f"""
        Please review the following code:

        Description: {description}

        Code:
        ```
        {code}
        ```
        """

        if focus_areas:
            prompt += f"\n\nPlease focus particularly on: {', '.join(focus_areas)}"

        prompt += """

        Provide constructive feedback on:
        1. Potential bugs or issues
        2. Code quality and best practices
        3. Performance considerations
        4. Security concerns
        5. Suggestions for improvement
        """

        return prompt

    def _build_delegation_prompt(
        self, task: str, requirements: List[str], constraints: Optional[List[str]]
    ) -> str:
        """Build a task delegation prompt."""
        # chr(10) is "\n"; used because backslashes are not allowed inside
        # f-string expressions before Python 3.12.
        prompt = f"""
        Please complete the following task:

        Task: {task}

        Requirements:
        {chr(10).join(f"- {req}" for req in requirements)}
        """

        if constraints:
            prompt += f"""

        Constraints:
        {chr(10).join(f"- {con}" for con in constraints)}
        """

        prompt += """

        Provide a complete solution that meets all requirements.
        """

        return prompt

    def _build_opinion_prompt(
        self, question: str, options: Optional[List[str]], criteria: Optional[List[str]]
    ) -> str:
        """Build an opinion request prompt."""
        prompt = f"""
        I need your opinion on the following:

        Question: {question}
        """

        if options:
            # Options are numbered from 1 for readability.
            prompt += f"""

        Options to consider:
        {chr(10).join(f"{i+1}. {opt}" for i, opt in enumerate(options))}
        """

        if criteria:
            prompt += f"""

        Please evaluate based on these criteria:
        {chr(10).join(f"- {crit}" for crit in criteria)}
        """

        prompt += """

        Provide your recommendation with clear reasoning.
        """

        return prompt

    async def _forward_to_claude(
        self, prompt: str, context: Optional[str] = None
    ) -> str:
        """Forward a request to the target Claude instance.

        In production, this would make an actual API call to the Claude instance.
        For now, it returns a simulated response.
        """
        # Add context if provided
        full_prompt = prompt
        if context:
            full_prompt = f"Context: {context}\n\n{prompt}"

        # Log the forwarding
        logger.info(
            f"Forwarding from instance {self.config.source_instance} to {self.config.target_instance}"
        )
        logger.debug(f"Prompt: {full_prompt[:200]}...")

        # In production, this would:
        # 1. Connect to the target Claude instance API
        # 2. Send the prompt
        # 3. Receive and return the response

        # Simulated response based on role: any role named "critic*" gets the
        # canned critique; every other role gets the canned completion text.
        if self.config.role.startswith("critic"):
            return f"""
            As {self.config.role}, I've analyzed your request:

            Strengths:
            - The approach is logical and well-structured
            - Good attention to requirements

            Areas for improvement:
            - Consider edge cases more thoroughly
            - Add more comprehensive error handling
            - Optimize for performance in high-load scenarios

            Recommendation: Proceed with suggested improvements.
            """
        else:
            return f"""
            Response from {self.config.role} (instance {self.config.instance_id}):

            I've processed your request: "{prompt[:100]}..."

            The task has been completed successfully with the following approach:
            1. Analyzed the requirements
            2. Implemented the solution
            3. Validated the results

            The solution meets all specified criteria.
            """
407
+
408
+
409
async def run_bridge_server(config: BridgeConfig):
    """Run the MCP bridge server.

    Args:
        config: Bridge configuration
    """
    # Configure logging (process-wide; affects the root logger)
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    logger.info(f"Starting MCP Bridge for Claude instance {config.instance_id}")
    logger.info(f"Role: {config.role}")
    logger.info(f"Target port: {config.target_port}")

    # Create and run the bridge
    bridge = ClaudeBridge(config)

    # Run the stdio server: the bridge speaks MCP over this process's
    # stdin/stdout until the client disconnects.
    async with stdio_server() as (read_stream, write_stream):
        await bridge.run(
            read_stream=read_stream,
            write_stream=write_stream,
            initialization_options=InitializationOptions(
                server_name=bridge.name,
                server_version="1.0.0",
                capabilities=bridge.get_capabilities(),
            ),
        )
439
+
440
+
441
def main():
    """Main entry point for the bridge."""
    parser = argparse.ArgumentParser(
        description="MCP Bridge for Claude-to-Claude communication"
    )
    # All three flags are mandatory; declare them table-style to avoid repetition.
    for flag, value_type, help_text in (
        ("--target-port", int, "Port of the target Claude instance"),
        ("--instance-id", int, "ID of the target Claude instance"),
        ("--role", str, "Role of the target instance (primary, critic_1, etc.)"),
    ):
        parser.add_argument(flag, type=value_type, required=True, help=help_text)

    args = parser.parse_args()

    # Source/target instance IDs come from the environment when provided;
    # the target defaults to the --instance-id argument.
    source_instance = int(os.environ.get("SOURCE_INSTANCE", "0"))
    target_instance = int(os.environ.get("TARGET_INSTANCE", args.instance_id))

    config = BridgeConfig(
        target_port=args.target_port,
        instance_id=args.instance_id,
        role=args.role,
        source_instance=source_instance,
        target_instance=target_instance,
    )

    # Run the bridge until the MCP client disconnects.
    asyncio.run(run_bridge_server(config))
481
+
482
+
483
+ if __name__ == "__main__":
484
+ main()
@@ -0,0 +1,192 @@
1
+ """Compute node detection and management for distributed processing."""
2
+
3
+ import os
4
+ import platform
5
+ import subprocess
6
+ from typing import Any, Dict, List
7
+
8
+
9
class ComputeNodeDetector:
    """Detect available compute nodes (GPUs, WebGPU, CPUs) for distributed work."""

    @staticmethod
    def detect_local_gpus() -> List[Dict[str, Any]]:
        """Detect local GPU devices.

        Probes NVIDIA GPUs via ``nvidia-smi`` and, on macOS, Metal GPUs via
        ``system_profiler``. Both probes are best-effort with short timeouts.

        Returns:
            One dict per GPU with "type", "name", "memory" and "id" keys.
        """
        gpus: List[Dict[str, Any]] = []

        # Try NVIDIA GPUs: one "name, memory" line per device in CSV mode.
        try:
            result = subprocess.run(
                [
                    "nvidia-smi",
                    "--query-gpu=name,memory.total",
                    "--format=csv,noheader",
                ],
                capture_output=True,
                text=True,
                timeout=2,
            )
            if result.returncode == 0:
                for line in result.stdout.strip().split("\n"):
                    if not line:
                        continue
                    # rpartition instead of a bare split-unpack: a GPU name
                    # could itself contain ", ", and a malformed line must
                    # not raise ValueError and abort the whole scan.
                    name, sep, memory = line.rpartition(", ")
                    if not sep:
                        continue  # malformed line; skip it
                    gpus.append(
                        {
                            "type": "cuda",
                            "name": name,
                            "memory": memory,
                            "id": f"cuda:{len(gpus)}",
                        }
                    )
        except (OSError, subprocess.SubprocessError):
            # nvidia-smi missing, not executable, or timed out.
            pass

        # Try Metal GPUs (macOS)
        if platform.system() == "Darwin":
            try:
                result = subprocess.run(
                    ["system_profiler", "SPDisplaysDataType"],
                    capture_output=True,
                    text=True,
                    timeout=2,
                )
                if result.returncode == 0 and "Metal" in result.stdout:
                    for line in result.stdout.split("\n"):
                        if "Chipset Model:" in line:
                            # maxsplit=1 keeps any ":" inside the model name.
                            gpu_name = line.split(":", 1)[1].strip()
                            gpus.append(
                                {
                                    "type": "metal",
                                    "name": gpu_name,
                                    # Apple GPUs use unified (shared) memory.
                                    "memory": "Shared",
                                    "id": f"metal:{len(gpus)}",
                                }
                            )
            except (OSError, subprocess.SubprocessError):
                pass

        return gpus

    @staticmethod
    def detect_webgpu_nodes() -> List[Dict[str, Any]]:
        """Detect connected WebGPU nodes (from browsers).

        Checks whether a WebGPU bridge server is listening on the port given
        by ``HANZO_WEBGPU_PORT`` (default 8765). Best-effort: any failure —
        including a non-numeric port value — means "no WebGPU node".

        Returns:
            A single-entry node list if the port is open, else an empty list.
        """
        webgpu_nodes: List[Dict[str, Any]] = []

        webgpu_port = os.environ.get("HANZO_WEBGPU_PORT", "8765")
        try:
            import socket

            # Context manager guarantees the socket is closed even if
            # int(webgpu_port) or connect_ex raises (the original leaked it).
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                sock.settimeout(0.5)  # don't hang on a filtered port
                if sock.connect_ex(("localhost", int(webgpu_port))) == 0:
                    webgpu_nodes.append(
                        {
                            "type": "webgpu",
                            "name": "Chrome WebGPU",
                            "memory": "Browser",
                            "id": "webgpu:0",
                        }
                    )
        except Exception:
            pass

        return webgpu_nodes

    @staticmethod
    def detect_cpu_nodes() -> List[Dict[str, Any]]:
        """Detect CPU compute nodes.

        Returns:
            A single node describing the local CPU (name, core count).
        """
        import multiprocessing

        return [
            {
                "type": "cpu",
                # platform.processor() may be empty on some systems.
                "name": platform.processor() or "CPU",
                "cores": multiprocessing.cpu_count(),
                "id": "cpu:0",
            }
        ]

    @classmethod
    def get_all_nodes(cls) -> List[Dict[str, Any]]:
        """Get all available compute nodes.

        GPUs and WebGPU nodes are preferred; the CPU is reported only as a
        fallback when no accelerator is found.
        """
        nodes: List[Dict[str, Any]] = []
        nodes.extend(cls.detect_local_gpus())
        nodes.extend(cls.detect_webgpu_nodes())

        if not nodes:
            nodes.extend(cls.detect_cpu_nodes())

        return nodes

    @classmethod
    def get_node_count(cls) -> int:
        """Get total number of available compute nodes."""
        return len(cls.get_all_nodes())

    @classmethod
    def get_node_summary(cls) -> str:
        """Get a summary string of available nodes.

        Returns:
            E.g. "2 nodes available (1 CUDA GPU, 1 CPU)".
        """
        nodes = cls.get_all_nodes()
        if not nodes:
            return "No compute nodes available"

        count = len(nodes)
        node_word = "node" if count == 1 else "nodes"

        # Count nodes per type; dict insertion order preserves first-seen order.
        types: Dict[str, int] = {}
        for node in nodes:
            types[node["type"]] = types.get(node["type"], 0) + 1

        parts = []
        for node_type, type_count in types.items():
            if node_type == "cuda":
                parts.append(f"{type_count} CUDA GPU{'s' if type_count > 1 else ''}")
            elif node_type == "metal":
                parts.append(f"{type_count} Metal GPU{'s' if type_count > 1 else ''}")
            elif node_type == "webgpu":
                parts.append(f"{type_count} WebGPU")
            elif node_type == "cpu":
                parts.append(f"{type_count} CPU")

        type_str = ", ".join(parts)
        return f"{count} {node_word} available ({type_str})"
171
+
172
+
173
def print_node_status():
    """Print current node status.

    Writes a human-readable listing of every detected compute node to stdout;
    the line format varies by node type (GPUs show memory, CPUs show cores).
    """
    detector = ComputeNodeDetector()
    nodes = detector.get_all_nodes()

    print(f"\n🖥️ Compute Nodes: {len(nodes)}")
    for node in nodes:
        if node["type"] in ["cuda", "metal"]:
            print(f" • {node['id']}: {node['name']} ({node['memory']})")
        elif node["type"] == "webgpu":
            print(f" • {node['id']}: {node['name']}")
        elif node["type"] == "cpu":
            print(f" • {node['id']}: {node['name']} ({node['cores']} cores)")
    print()
187
+
188
+
189
+ if __name__ == "__main__":
190
+ # Test the detector
191
+ print_node_status()
192
+ print(ComputeNodeDetector.get_node_summary())