zwarm-2.3.5-py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as they appear in the public registry.
- zwarm/__init__.py +38 -0
- zwarm/adapters/__init__.py +21 -0
- zwarm/adapters/base.py +109 -0
- zwarm/adapters/claude_code.py +357 -0
- zwarm/adapters/codex_mcp.py +1262 -0
- zwarm/adapters/registry.py +69 -0
- zwarm/adapters/test_codex_mcp.py +274 -0
- zwarm/adapters/test_registry.py +68 -0
- zwarm/cli/__init__.py +0 -0
- zwarm/cli/main.py +2503 -0
- zwarm/core/__init__.py +0 -0
- zwarm/core/compact.py +329 -0
- zwarm/core/config.py +344 -0
- zwarm/core/environment.py +173 -0
- zwarm/core/models.py +315 -0
- zwarm/core/state.py +355 -0
- zwarm/core/test_compact.py +312 -0
- zwarm/core/test_config.py +160 -0
- zwarm/core/test_models.py +265 -0
- zwarm/orchestrator.py +683 -0
- zwarm/prompts/__init__.py +10 -0
- zwarm/prompts/orchestrator.py +230 -0
- zwarm/sessions/__init__.py +26 -0
- zwarm/sessions/manager.py +792 -0
- zwarm/test_orchestrator_watchers.py +23 -0
- zwarm/tools/__init__.py +17 -0
- zwarm/tools/delegation.py +784 -0
- zwarm/watchers/__init__.py +31 -0
- zwarm/watchers/base.py +131 -0
- zwarm/watchers/builtin.py +518 -0
- zwarm/watchers/llm_watcher.py +319 -0
- zwarm/watchers/manager.py +181 -0
- zwarm/watchers/registry.py +57 -0
- zwarm/watchers/test_watchers.py +237 -0
- zwarm-2.3.5.dist-info/METADATA +309 -0
- zwarm-2.3.5.dist-info/RECORD +38 -0
- zwarm-2.3.5.dist-info/WHEEL +4 -0
- zwarm-2.3.5.dist-info/entry_points.txt +2 -0
zwarm/core/__init__.py
ADDED
File without changes
zwarm/core/compact.py
ADDED
@@ -0,0 +1,329 @@
"""
Message compaction for context window management.

Safely prunes old messages while preserving:
- System prompt and initial user task
- Tool call/response pairs (never orphaned)
- Recent conversation context
"""

from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import Any

logger = logging.getLogger(__name__)


def _get_attr(obj: Any, key: str, default: Any = None) -> Any:
    """Get attribute from dict or object (handles both Pydantic models and dicts)."""
    if isinstance(obj, dict):
        return obj.get(key, default)
    return getattr(obj, key, default)


@dataclass
class CompactionResult:
    """Result of a compaction operation."""

    messages: list[dict[str, Any]]
    removed_count: int
    original_count: int
    preserved_reason: str | None = None

    @property
    def was_compacted(self) -> bool:
        return self.removed_count > 0


def estimate_tokens(messages: list[Any]) -> int:
    """
    Rough token estimate for messages.

    Uses ~4 chars per token as a simple heuristic.
    This is intentionally conservative.
    Handles both dict messages and Pydantic model messages.
    """
    total_chars = 0
    for msg in messages:
        content = _get_attr(msg, "content", "")
        if isinstance(content, str):
            total_chars += len(content)
        elif isinstance(content, list):
            # Anthropic-style content blocks
            for block in content:
                if isinstance(block, dict):
                    total_chars += len(str(block.get("text", "")))
                    total_chars += len(str(block.get("input", "")))
                elif isinstance(block, str):
                    total_chars += len(block)
                else:
                    # Pydantic model block
                    total_chars += len(str(_get_attr(block, "text", "")))
                    total_chars += len(str(_get_attr(block, "input", "")))

        # Tool calls add tokens too
        tool_calls = _get_attr(msg, "tool_calls", []) or []
        for tc in tool_calls:
            func = _get_attr(tc, "function", {}) or {}
            args = _get_attr(func, "arguments", "") if isinstance(func, dict) else getattr(func, "arguments", "")
            total_chars += len(str(args))

    return total_chars // 4


def find_tool_groups(messages: list[Any]) -> list[tuple[int, int]]:
    """
    Find message index ranges that form tool call groups.

    A tool call group is:
    - An assistant message with tool_calls
    - All following tool/user response messages until the next assistant message

    This handles both OpenAI format (role="tool") and Anthropic format
    (role="user" with tool_result content).
    Also handles Pydantic model messages.

    Returns list of (start_idx, end_idx) tuples (inclusive).
    """
    groups = []
    i = 0

    while i < len(messages):
        msg = messages[i]

        # Check for tool calls in assistant message
        has_tool_calls = False

        # OpenAI format: tool_calls field
        if _get_attr(msg, "role") == "assistant" and _get_attr(msg, "tool_calls"):
            has_tool_calls = True

        # Anthropic format: content blocks with type="tool_use"
        if _get_attr(msg, "role") == "assistant":
            content = _get_attr(msg, "content", [])
            if isinstance(content, list):
                for block in content:
                    block_type = _get_attr(block, "type", None)
                    if block_type == "tool_use":
                        has_tool_calls = True
                        break

        if has_tool_calls:
            start = i
            j = i + 1

            # Find all following tool responses
            while j < len(messages):
                next_msg = messages[j]
                role = _get_attr(next_msg, "role", "")

                # OpenAI format: tool role
                if role == "tool":
                    j += 1
                    continue

                # Anthropic format: user message with tool_result
                if role == "user":
                    content = _get_attr(next_msg, "content", [])
                    if isinstance(content, list):
                        has_tool_result = any(
                            _get_attr(b, "type", None) == "tool_result"
                            for b in content
                        )
                        if has_tool_result:
                            j += 1
                            continue

                # Not a tool response, stop here
                break

            groups.append((start, j - 1))
            i = j
        else:
            i += 1

    return groups


def compact_messages(
    messages: list[Any],
    keep_first_n: int = 2,
    keep_last_n: int = 10,
    max_tokens: int | None = None,
    target_token_pct: float = 0.7,
) -> CompactionResult:
    """
    Compact message history by removing old messages (LRU-style).

    Preserves:
    - First N messages (system prompt, user task)
    - Last N messages (recent context)
    - Tool call/response pairs are NEVER split

    Args:
        messages: The message list to compact
        keep_first_n: Number of messages to always keep at the start
        keep_last_n: Number of messages to always keep at the end
        max_tokens: If set, compact when estimated tokens exceed this
        target_token_pct: Target percentage of max_tokens after compaction

    Returns:
        CompactionResult with the compacted messages and stats
    """
    original_count = len(messages)

    # Nothing to compact if we have few messages
    if len(messages) <= keep_first_n + keep_last_n:
        return CompactionResult(
            messages=messages,
            removed_count=0,
            original_count=original_count,
            preserved_reason="Too few messages to compact",
        )

    # Check if compaction is needed based on tokens
    if max_tokens:
        current_tokens = estimate_tokens(messages)
        if current_tokens < max_tokens:
            return CompactionResult(
                messages=messages,
                removed_count=0,
                original_count=original_count,
                preserved_reason=f"Under token limit ({current_tokens}/{max_tokens})",
            )

    # Find tool call groups (these must stay together)
    tool_groups = find_tool_groups(messages)

    # Build a set of "protected" indices (in tool groups)
    protected_indices: set[int] = set()
    for start, end in tool_groups:
        for idx in range(start, end + 1):
            protected_indices.add(idx)

    # Determine which messages are in the "middle" (candidates for removal)
    # Middle = not in first N, not in last N
    middle_start = keep_first_n
    middle_end = len(messages) - keep_last_n

    if middle_start >= middle_end:
        return CompactionResult(
            messages=messages,
            removed_count=0,
            original_count=original_count,
            preserved_reason="No middle messages to remove",
        )

    # Find removable message ranges in the middle
    # We remove from the oldest (lowest index) first
    removable_ranges: list[tuple[int, int]] = []
    i = middle_start

    while i < middle_end:
        # Check if this index is in a tool group
        in_group = False
        for start, end in tool_groups:
            if start <= i <= end:
                # This message is part of a tool group
                # Check if the ENTIRE group is in the middle
                if start >= middle_start and end < middle_end:
                    # Entire group is removable as a unit
                    removable_ranges.append((start, end))
                    i = end + 1
                    in_group = True
                    break
                else:
                    # Group spans protected region, skip it entirely
                    i = end + 1
                    in_group = True
                    break

        if not in_group:
            # Single message, can be removed individually
            removable_ranges.append((i, i))
            i += 1

    # Deduplicate and sort ranges
    removable_ranges = sorted(set(removable_ranges), key=lambda x: x[0])

    if not removable_ranges:
        return CompactionResult(
            messages=messages,
            removed_count=0,
            original_count=original_count,
            preserved_reason="All middle messages are in protected tool groups",
        )

    # Determine how many to remove
    # Start by removing the oldest half of removable ranges
    if max_tokens:
        # Token-based: remove until under target
        target_tokens = int(max_tokens * target_token_pct)
        indices_to_remove: set[int] = set()

        for start, end in removable_ranges:
            for idx in range(start, end + 1):
                indices_to_remove.add(idx)

            # Check if we've removed enough
            remaining = [m for i, m in enumerate(messages) if i not in indices_to_remove]
            if estimate_tokens(remaining) <= target_tokens:
                break
    else:
        # Count-based: remove oldest half of middle
        total_removable = sum(end - start + 1 for start, end in removable_ranges)
        target_remove = total_removable // 2

        indices_to_remove = set()
        removed = 0

        for start, end in removable_ranges:
            if removed >= target_remove:
                break
            for idx in range(start, end + 1):
                indices_to_remove.add(idx)
                removed += 1

    # Build new message list
    new_messages = [m for i, m in enumerate(messages) if i not in indices_to_remove]

    # Add a compaction marker so the model knows history was truncated
    if indices_to_remove and len(new_messages) > keep_first_n:
        # Insert marker after the preserved first messages
        marker = {
            "role": "system",
            "content": (
                f"[Context compacted: {len(indices_to_remove)} older messages removed "
                f"to manage context window. Conversation continues below.]"
            ),
        }
        new_messages.insert(keep_first_n, marker)

    logger.info(
        f"Compacted messages: {original_count} -> {len(new_messages)} "
        f"(removed {len(indices_to_remove)})"
    )

    return CompactionResult(
        messages=new_messages,
        removed_count=len(indices_to_remove),
        original_count=original_count,
    )


def should_compact(
    messages: list[Any],
    max_tokens: int,
    threshold_pct: float = 0.85,
) -> bool:
    """
    Check if messages should be compacted.

    Returns True if estimated tokens exceed threshold percentage of max.
    Handles both dict messages and Pydantic model messages.
    """
    current = estimate_tokens(messages)
    threshold = int(max_tokens * threshold_pct)
    return current >= threshold
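For orientation, a minimal usage sketch (not from the package): should_compact is the cheap gate and compact_messages performs the rewrite. The message list and MAX_TOKENS value below are illustrative assumptions; only the function signatures come from the file above.

# Illustrative only: drives the compaction API above with dict-style
# OpenAI-format messages.
from zwarm.core.compact import compact_messages, should_compact

messages = [
    {"role": "system", "content": "You are an orchestrator."},
    {"role": "user", "content": "Refactor the config module."},
    # ... many intermediate turns, including tool call/response pairs ...
    {"role": "assistant", "content": "Done with step 12."},
]

MAX_TOKENS = 100_000  # assumed budget; mirrors CompactionConfig.max_tokens

if should_compact(messages, max_tokens=MAX_TOKENS, threshold_pct=0.85):
    result = compact_messages(
        messages,
        keep_first_n=2,
        keep_last_n=10,
        max_tokens=MAX_TOKENS,
        target_token_pct=0.7,
    )
    if result.was_compacted:
        # Compacted history now carries a "[Context compacted: ...]" marker.
        messages = result.messages
    else:
        # e.g. "Under token limit (...)" or "All middle messages are in
        # protected tool groups"
        print(result.preserved_reason)

Note that with the defaults shown, should_compact fires at 85% of the budget while compact_messages only rewrites once the estimate exceeds max_tokens itself, so a True gate can still return an uncompacted result with a preserved_reason.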
zwarm/core/config.py
ADDED
@@ -0,0 +1,344 @@
"""
Configuration system for zwarm.

Supports:
- config.toml for user settings (weave project, defaults)
- .env for environment variables
- Composable YAML configs with inheritance (extends:)
- CLI overrides via --set key=value
"""

from __future__ import annotations

import os
import tomllib
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any

import yaml
from dotenv import load_dotenv


@dataclass
class WeaveConfig:
    """Weave integration settings."""

    project: str | None = None
    enabled: bool = True


@dataclass
class ExecutorConfig:
    """Configuration for an executor (coding agent)."""

    adapter: str = "codex_mcp"  # codex_mcp | codex_exec | claude_code
    model: str | None = None
    sandbox: str = "workspace-write"  # read-only | workspace-write | danger-full-access
    timeout: int = 3600
    reasoning_effort: str | None = "high"  # low | medium | high (default to high for compatibility)


@dataclass
class CompactionConfig:
    """Configuration for context window compaction."""

    enabled: bool = True
    max_tokens: int = 100000  # Trigger compaction when estimated tokens exceed this
    threshold_pct: float = 0.85  # Compact when at this % of max_tokens
    target_pct: float = 0.7  # Target this % after compaction
    keep_first_n: int = 2  # Always keep first N messages (system + task)
    keep_last_n: int = 10  # Always keep last N messages (recent context)


@dataclass
class OrchestratorConfig:
    """Configuration for the orchestrator."""

    lm: str = "gpt-5-mini"
    prompt: str | None = None  # path to prompt yaml
    tools: list[str] = field(default_factory=lambda: ["delegate", "converse", "check_session", "end_session", "bash"])
    max_steps: int = 50
    parallel_delegations: int = 4
    sync_first: bool = True  # prefer sync mode by default
    compaction: CompactionConfig = field(default_factory=CompactionConfig)

    # Directory restrictions for agent delegations
    # None = only working_dir allowed (most restrictive, default)
    # ["*"] = any directory allowed (dangerous)
    # ["/path/a", "/path/b"] = only these directories allowed
    allowed_dirs: list[str] | None = None


@dataclass
class WatcherConfigItem:
    """Configuration for a single watcher."""

    name: str
    enabled: bool = True
    config: dict[str, Any] = field(default_factory=dict)


@dataclass
class WatchersConfig:
    """Configuration for watchers."""

    enabled: bool = True
    watchers: list[WatcherConfigItem] = field(default_factory=lambda: [
        WatcherConfigItem(name="progress"),
        WatcherConfigItem(name="budget"),
        WatcherConfigItem(name="delegation_reminder"),
    ])
    # Role for watcher nudge messages: "user" | "assistant" | "system"
    # "user" (default) - Appears as if user sent the message, strong nudge
    # "assistant" - Appears as previous assistant thought, softer nudge
    # "system" - Appears as system instruction, authoritative
    message_role: str = "user"


@dataclass
class ZwarmConfig:
    """Root configuration for zwarm."""

    weave: WeaveConfig = field(default_factory=WeaveConfig)
    executor: ExecutorConfig = field(default_factory=ExecutorConfig)
    orchestrator: OrchestratorConfig = field(default_factory=OrchestratorConfig)
    watchers: WatchersConfig = field(default_factory=WatchersConfig)
    state_dir: str = ".zwarm"

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> ZwarmConfig:
        """Create config from dictionary."""
        weave_data = data.get("weave", {})
        executor_data = data.get("executor", {})
        orchestrator_data = data.get("orchestrator", {})
        watchers_data = data.get("watchers", {})

        # Parse compaction config from orchestrator
        compaction_data = orchestrator_data.pop("compaction", {}) if orchestrator_data else {}
        compaction_config = CompactionConfig(**compaction_data) if compaction_data else CompactionConfig()

        # Parse watchers config - handle both list shorthand and dict format
        if isinstance(watchers_data, list):
            # Shorthand: watchers: [progress, budget, scope]
            watchers_config = WatchersConfig(
                enabled=True,
                watchers=[
                    WatcherConfigItem(name=w) if isinstance(w, str) else WatcherConfigItem(**w)
                    for w in watchers_data
                ],
            )
        else:
            # Full format: watchers: {enabled: true, watchers: [...], message_role: "user"}
            watchers_config = WatchersConfig(
                enabled=watchers_data.get("enabled", True),
                watchers=[
                    WatcherConfigItem(name=w) if isinstance(w, str) else WatcherConfigItem(**w)
                    for w in watchers_data.get("watchers", [])
                ] or WatchersConfig().watchers,
                message_role=watchers_data.get("message_role", "user"),
            )

        # Build orchestrator config with nested compaction
        if orchestrator_data:
            orchestrator_config = OrchestratorConfig(**orchestrator_data, compaction=compaction_config)
        else:
            orchestrator_config = OrchestratorConfig(compaction=compaction_config)

        return cls(
            weave=WeaveConfig(**weave_data) if weave_data else WeaveConfig(),
            executor=ExecutorConfig(**executor_data) if executor_data else ExecutorConfig(),
            orchestrator=orchestrator_config,
            watchers=watchers_config,
            state_dir=data.get("state_dir", ".zwarm"),
        )

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary."""
        return {
            "weave": {
                "project": self.weave.project,
                "enabled": self.weave.enabled,
            },
            "executor": {
                "adapter": self.executor.adapter,
                "model": self.executor.model,
                "sandbox": self.executor.sandbox,
                "timeout": self.executor.timeout,
                "reasoning_effort": self.executor.reasoning_effort,
            },
            "orchestrator": {
                "lm": self.orchestrator.lm,
                "prompt": self.orchestrator.prompt,
                "tools": self.orchestrator.tools,
                "max_steps": self.orchestrator.max_steps,
                "parallel_delegations": self.orchestrator.parallel_delegations,
                "sync_first": self.orchestrator.sync_first,
                "compaction": {
                    "enabled": self.orchestrator.compaction.enabled,
                    "max_tokens": self.orchestrator.compaction.max_tokens,
                    "threshold_pct": self.orchestrator.compaction.threshold_pct,
                    "target_pct": self.orchestrator.compaction.target_pct,
                    "keep_first_n": self.orchestrator.compaction.keep_first_n,
                    "keep_last_n": self.orchestrator.compaction.keep_last_n,
                },
            },
            "watchers": {
                "enabled": self.watchers.enabled,
                "watchers": [
                    {"name": w.name, "enabled": w.enabled, "config": w.config}
                    for w in self.watchers.watchers
                ],
                "message_role": self.watchers.message_role,
            },
            "state_dir": self.state_dir,
        }


def load_env(path: Path | None = None) -> None:
    """Load .env file if it exists."""
    if path is None:
        path = Path.cwd() / ".env"
    if path.exists():
        load_dotenv(path)


def load_toml_config(path: Path | None = None) -> dict[str, Any]:
    """
    Load config.toml file.

    Search order:
    1. Explicit path (if provided)
    2. .zwarm/config.toml (new standard location)
    3. config.toml (legacy location for backwards compat)
    """
    if path is None:
        # Try new location first
        new_path = Path.cwd() / ".zwarm" / "config.toml"
        legacy_path = Path.cwd() / "config.toml"
        if new_path.exists():
            path = new_path
        elif legacy_path.exists():
            path = legacy_path
        else:
            return {}
    if not path.exists():
        return {}
    with open(path, "rb") as f:
        return tomllib.load(f)


def load_yaml_config(path: Path) -> dict[str, Any]:
    """
    Load YAML config with inheritance support.

    Supports 'extends: path/to/base.yaml' for composition.
    """
    if not path.exists():
        raise FileNotFoundError(f"Config not found: {path}")

    with open(path) as f:
        data = yaml.safe_load(f) or {}

    # Handle inheritance
    extends = data.pop("extends", None)
    if extends:
        base_path = (path.parent / extends).resolve()
        base_data = load_yaml_config(base_path)
        # Deep merge: data overrides base
        data = deep_merge(base_data, data)

    return data


def deep_merge(base: dict, override: dict) -> dict:
    """Deep merge two dicts, with override taking precedence."""
    result = base.copy()
    for key, value in override.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
            result[key] = deep_merge(result[key], value)
        else:
            result[key] = value
    return result


def apply_overrides(config: dict[str, Any], overrides: list[str]) -> dict[str, Any]:
    """
    Apply CLI overrides in format 'key.path=value'.

    Example: 'orchestrator.lm=claude-sonnet' sets config['orchestrator']['lm'] = 'claude-sonnet'
    """
    result = config.copy()
    for override in overrides:
        if "=" not in override:
            continue
        key_path, value = override.split("=", 1)
        keys = key_path.split(".")

        # Parse value (try int, float, bool, then string)
        parsed_value: Any = value
        if value.lower() == "true":
            parsed_value = True
        elif value.lower() == "false":
            parsed_value = False
        else:
            try:
                parsed_value = int(value)
            except ValueError:
                try:
                    parsed_value = float(value)
                except ValueError:
                    pass  # Keep as string

        # Navigate and set
        target = result
        for key in keys[:-1]:
            if key not in target:
                target[key] = {}
            target = target[key]
        target[keys[-1]] = parsed_value

    return result


def load_config(
    config_path: Path | None = None,
    toml_path: Path | None = None,
    env_path: Path | None = None,
    overrides: list[str] | None = None,
) -> ZwarmConfig:
    """
    Load configuration with full precedence chain:
    1. Defaults (in dataclasses)
    2. config.toml (user settings)
    3. YAML config file (if provided)
    4. CLI overrides (--set key=value)
    5. Environment variables (for secrets)
    """
    # Load .env first (for secrets)
    load_env(env_path)

    # Start with defaults
    config_dict: dict[str, Any] = {}

    # Layer in config.toml
    toml_config = load_toml_config(toml_path)
    if toml_config:
        config_dict = deep_merge(config_dict, toml_config)

    # Layer in YAML config
    if config_path and config_path.exists():
        yaml_config = load_yaml_config(config_path)
        config_dict = deep_merge(config_dict, yaml_config)

    # Apply CLI overrides
    if overrides:
        config_dict = apply_overrides(config_dict, overrides)

    # Apply environment variables for weave
    if os.getenv("WEAVE_PROJECT"):
        if "weave" not in config_dict:
            config_dict["weave"] = {}
        config_dict["weave"]["project"] = os.getenv("WEAVE_PROJECT")

    return ZwarmConfig.from_dict(config_dict)
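As a usage sketch (not part of the package), the precedence chain in load_config can be exercised like this. The YAML path and override values are hypothetical; if the file does not exist it is skipped, so the asserted values come from the --set style overrides alone.

# Illustrative only: layer defaults -> config.toml -> YAML -> CLI overrides.
from pathlib import Path

from zwarm.core.config import load_config

cfg = load_config(
    config_path=Path("configs/run.yaml"),  # hypothetical file; may use extends:
    overrides=[
        "orchestrator.lm=claude-sonnet",
        "orchestrator.compaction.max_tokens=80000",
    ],
)

assert cfg.orchestrator.lm == "claude-sonnet"
assert cfg.orchestrator.compaction.max_tokens == 80000  # parsed to int by apply_overrides

round_tripped = cfg.to_dict()  # plain dict, suitable for dumping back to YAML

Note how the dotted override path reaches the nested CompactionConfig: apply_overrides builds the nested dict, and ZwarmConfig.from_dict pops "compaction" out of the orchestrator section before constructing the dataclasses.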