hanzo-mcp 0.8.2__py3-none-any.whl → 0.8.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hanzo-mcp might be problematic; see the registry's advisory page for this release for more details.

@@ -8,33 +8,39 @@ from typing import Any, Dict, List
8
8
 
9
9
  class ComputeNodeDetector:
10
10
  """Detect available compute nodes (GPUs, WebGPU, CPUs) for distributed work."""
11
-
11
+
12
12
  @staticmethod
13
13
  def detect_local_gpus() -> List[Dict[str, Any]]:
14
14
  """Detect local GPU devices."""
15
15
  gpus = []
16
-
16
+
17
17
  # Try NVIDIA GPUs
18
18
  try:
19
19
  result = subprocess.run(
20
- ["nvidia-smi", "--query-gpu=name,memory.total", "--format=csv,noheader"],
20
+ [
21
+ "nvidia-smi",
22
+ "--query-gpu=name,memory.total",
23
+ "--format=csv,noheader",
24
+ ],
21
25
  capture_output=True,
22
26
  text=True,
23
- timeout=2
27
+ timeout=2,
24
28
  )
25
29
  if result.returncode == 0:
26
- for line in result.stdout.strip().split('\n'):
30
+ for line in result.stdout.strip().split("\n"):
27
31
  if line:
28
- name, memory = line.split(', ')
29
- gpus.append({
30
- "type": "cuda",
31
- "name": name,
32
- "memory": memory,
33
- "id": f"cuda:{len(gpus)}"
34
- })
32
+ name, memory = line.split(", ")
33
+ gpus.append(
34
+ {
35
+ "type": "cuda",
36
+ "name": name,
37
+ "memory": memory,
38
+ "id": f"cuda:{len(gpus)}",
39
+ }
40
+ )
35
41
  except (FileNotFoundError, subprocess.TimeoutExpired):
36
42
  pass
37
-
43
+
38
44
  # Try Metal GPUs (macOS)
39
45
  if platform.system() == "Darwin":
40
46
  try:
@@ -43,96 +49,103 @@ class ComputeNodeDetector:
43
49
  ["system_profiler", "SPDisplaysDataType"],
44
50
  capture_output=True,
45
51
  text=True,
46
- timeout=2
52
+ timeout=2,
47
53
  )
48
54
  if result.returncode == 0 and "Metal" in result.stdout:
49
55
  # Parse GPU info from system_profiler
50
- lines = result.stdout.split('\n')
56
+ lines = result.stdout.split("\n")
51
57
  for i, line in enumerate(lines):
52
- if 'Chipset Model:' in line:
53
- gpu_name = line.split(':')[1].strip()
54
- gpus.append({
55
- "type": "metal",
56
- "name": gpu_name,
57
- "memory": "Shared",
58
- "id": f"metal:{len(gpus)}"
59
- })
58
+ if "Chipset Model:" in line:
59
+ gpu_name = line.split(":")[1].strip()
60
+ gpus.append(
61
+ {
62
+ "type": "metal",
63
+ "name": gpu_name,
64
+ "memory": "Shared",
65
+ "id": f"metal:{len(gpus)}",
66
+ }
67
+ )
60
68
  except (FileNotFoundError, subprocess.TimeoutExpired):
61
69
  pass
62
-
70
+
63
71
  return gpus
64
-
72
+
65
73
  @staticmethod
66
74
  def detect_webgpu_nodes() -> List[Dict[str, Any]]:
67
75
  """Detect connected WebGPU nodes (from browsers)."""
68
76
  webgpu_nodes = []
69
-
77
+
70
78
  # Check for WebGPU connections (would need actual WebSocket/server to track)
71
79
  # For now, check if a WebGPU server is running
72
80
  webgpu_port = os.environ.get("HANZO_WEBGPU_PORT", "8765")
73
81
  try:
74
82
  import socket
83
+
75
84
  sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
76
- result = sock.connect_ex(('localhost', int(webgpu_port)))
85
+ result = sock.connect_ex(("localhost", int(webgpu_port)))
77
86
  sock.close()
78
87
  if result == 0:
79
- webgpu_nodes.append({
80
- "type": "webgpu",
81
- "name": "Chrome WebGPU",
82
- "memory": "Browser",
83
- "id": "webgpu:0"
84
- })
88
+ webgpu_nodes.append(
89
+ {
90
+ "type": "webgpu",
91
+ "name": "Chrome WebGPU",
92
+ "memory": "Browser",
93
+ "id": "webgpu:0",
94
+ }
95
+ )
85
96
  except Exception:
86
97
  pass
87
-
98
+
88
99
  return webgpu_nodes
89
-
100
+
90
101
  @staticmethod
91
102
  def detect_cpu_nodes() -> List[Dict[str, Any]]:
92
103
  """Detect CPU compute nodes."""
93
104
  import multiprocessing
94
-
95
- return [{
96
- "type": "cpu",
97
- "name": f"{platform.processor() or 'CPU'}",
98
- "cores": multiprocessing.cpu_count(),
99
- "id": "cpu:0"
100
- }]
101
-
105
+
106
+ return [
107
+ {
108
+ "type": "cpu",
109
+ "name": f"{platform.processor() or 'CPU'}",
110
+ "cores": multiprocessing.cpu_count(),
111
+ "id": "cpu:0",
112
+ }
113
+ ]
114
+
102
115
  @classmethod
103
116
  def get_all_nodes(cls) -> List[Dict[str, Any]]:
104
117
  """Get all available compute nodes."""
105
118
  nodes = []
106
-
119
+
107
120
  # Detect GPUs
108
121
  gpus = cls.detect_local_gpus()
109
122
  nodes.extend(gpus)
110
-
123
+
111
124
  # Detect WebGPU connections
112
125
  webgpu = cls.detect_webgpu_nodes()
113
126
  nodes.extend(webgpu)
114
-
127
+
115
128
  # If no GPUs/WebGPU, add CPU as compute node
116
129
  if not nodes:
117
130
  nodes.extend(cls.detect_cpu_nodes())
118
-
131
+
119
132
  return nodes
120
-
133
+
121
134
  @classmethod
122
135
  def get_node_count(cls) -> int:
123
136
  """Get total number of available compute nodes."""
124
137
  return len(cls.get_all_nodes())
125
-
138
+
126
139
  @classmethod
127
140
  def get_node_summary(cls) -> str:
128
141
  """Get a summary string of available nodes."""
129
142
  nodes = cls.get_all_nodes()
130
143
  if not nodes:
131
144
  return "No compute nodes available"
132
-
145
+
133
146
  count = len(nodes)
134
147
  node_word = "node" if count == 1 else "nodes"
135
-
148
+
136
149
  # Group by type
137
150
  types = {}
138
151
  for node in nodes:
@@ -140,7 +153,7 @@ class ComputeNodeDetector:
140
153
  if node_type not in types:
141
154
  types[node_type] = 0
142
155
  types[node_type] += 1
143
-
156
+
144
157
  # Build summary
145
158
  parts = []
146
159
  for node_type, type_count in types.items():
@@ -152,7 +165,7 @@ class ComputeNodeDetector:
152
165
  parts.append(f"{type_count} WebGPU")
153
166
  elif node_type == "cpu":
154
167
  parts.append(f"{type_count} CPU")
155
-
168
+
156
169
  type_str = ", ".join(parts)
157
170
  return f"{count} {node_word} available ({type_str})"
158
171
 
@@ -161,7 +174,7 @@ def print_node_status():
161
174
  """Print current node status."""
162
175
  detector = ComputeNodeDetector()
163
176
  nodes = detector.get_all_nodes()
164
-
177
+
165
178
  print(f"\n🖥️ Compute Nodes: {len(nodes)}")
166
179
  for node in nodes:
167
180
  if node["type"] in ["cuda", "metal"]:
@@ -176,4 +189,4 @@ def print_node_status():
176
189
  if __name__ == "__main__":
177
190
  # Test the detector
178
191
  print_node_status()
179
- print(ComputeNodeDetector.get_node_summary())
192
+ print(ComputeNodeDetector.get_node_summary())
@@ -514,6 +514,17 @@ def _merge_config(
514
514
 
515
515
  merged = deep_merge(base_dict, config_dict)
516
516
 
517
+ # Backwards/forwards compatibility: support a structured "tools" section
518
+ # where each tool can define { enabled: bool, ...options } and map it to
519
+ # the existing enabled_tools/disabled_tools layout.
520
+ tools_cfg = merged.get("tools", {})
521
+ if isinstance(tools_cfg, dict):
522
+ enabled_tools = dict(merged.get("enabled_tools", {}))
523
+ for tool_name, tool_data in tools_cfg.items():
524
+ if isinstance(tool_data, dict) and "enabled" in tool_data:
525
+ enabled_tools[tool_name] = bool(tool_data.get("enabled"))
526
+ merged["enabled_tools"] = enabled_tools
527
+
517
528
  # Reconstruct the settings object
518
529
  mcp_servers = {}
519
530
  for name, server_data in merged.get("mcp_servers", {}).items():