autoforge-ai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/check-code.md +32 -0
- package/.claude/commands/checkpoint.md +40 -0
- package/.claude/commands/create-spec.md +613 -0
- package/.claude/commands/expand-project.md +234 -0
- package/.claude/commands/gsd-to-autoforge-spec.md +10 -0
- package/.claude/commands/review-pr.md +75 -0
- package/.claude/templates/app_spec.template.txt +331 -0
- package/.claude/templates/coding_prompt.template.md +265 -0
- package/.claude/templates/initializer_prompt.template.md +354 -0
- package/.claude/templates/testing_prompt.template.md +146 -0
- package/.env.example +64 -0
- package/LICENSE.md +676 -0
- package/README.md +423 -0
- package/agent.py +444 -0
- package/api/__init__.py +10 -0
- package/api/database.py +536 -0
- package/api/dependency_resolver.py +449 -0
- package/api/migration.py +156 -0
- package/auth.py +83 -0
- package/autoforge_paths.py +315 -0
- package/autonomous_agent_demo.py +293 -0
- package/bin/autoforge.js +3 -0
- package/client.py +607 -0
- package/env_constants.py +27 -0
- package/examples/OPTIMIZE_CONFIG.md +230 -0
- package/examples/README.md +531 -0
- package/examples/org_config.yaml +172 -0
- package/examples/project_allowed_commands.yaml +139 -0
- package/lib/cli.js +791 -0
- package/mcp_server/__init__.py +1 -0
- package/mcp_server/feature_mcp.py +988 -0
- package/package.json +53 -0
- package/parallel_orchestrator.py +1800 -0
- package/progress.py +247 -0
- package/prompts.py +427 -0
- package/pyproject.toml +17 -0
- package/rate_limit_utils.py +132 -0
- package/registry.py +614 -0
- package/requirements-prod.txt +14 -0
- package/security.py +959 -0
- package/server/__init__.py +17 -0
- package/server/main.py +261 -0
- package/server/routers/__init__.py +32 -0
- package/server/routers/agent.py +177 -0
- package/server/routers/assistant_chat.py +327 -0
- package/server/routers/devserver.py +309 -0
- package/server/routers/expand_project.py +239 -0
- package/server/routers/features.py +746 -0
- package/server/routers/filesystem.py +514 -0
- package/server/routers/projects.py +524 -0
- package/server/routers/schedules.py +356 -0
- package/server/routers/settings.py +127 -0
- package/server/routers/spec_creation.py +357 -0
- package/server/routers/terminal.py +453 -0
- package/server/schemas.py +593 -0
- package/server/services/__init__.py +36 -0
- package/server/services/assistant_chat_session.py +496 -0
- package/server/services/assistant_database.py +304 -0
- package/server/services/chat_constants.py +57 -0
- package/server/services/dev_server_manager.py +557 -0
- package/server/services/expand_chat_session.py +399 -0
- package/server/services/process_manager.py +657 -0
- package/server/services/project_config.py +475 -0
- package/server/services/scheduler_service.py +683 -0
- package/server/services/spec_chat_session.py +502 -0
- package/server/services/terminal_manager.py +756 -0
- package/server/utils/__init__.py +1 -0
- package/server/utils/process_utils.py +134 -0
- package/server/utils/project_helpers.py +32 -0
- package/server/utils/validation.py +54 -0
- package/server/websocket.py +903 -0
- package/start.py +456 -0
- package/ui/dist/assets/index-8W_wmZzz.js +168 -0
- package/ui/dist/assets/index-B47Ubhox.css +1 -0
- package/ui/dist/assets/vendor-flow-CVNK-_lx.js +7 -0
- package/ui/dist/assets/vendor-query-BUABzP5o.js +1 -0
- package/ui/dist/assets/vendor-radix-DTNNCg2d.js +45 -0
- package/ui/dist/assets/vendor-react-qkC6yhPU.js +1 -0
- package/ui/dist/assets/vendor-utils-COeKbHgx.js +2 -0
- package/ui/dist/assets/vendor-xterm-DP_gxef0.js +16 -0
- package/ui/dist/index.html +23 -0
- package/ui/dist/ollama.png +0 -0
- package/ui/dist/vite.svg +6 -0
- package/ui/package.json +57 -0
|
@@ -0,0 +1,449 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Dependency Resolver
|
|
3
|
+
===================
|
|
4
|
+
|
|
5
|
+
Provides dependency resolution using Kahn's algorithm for topological sorting.
|
|
6
|
+
Includes cycle detection, validation, and helper functions for dependency management.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import heapq
|
|
10
|
+
from collections import deque
|
|
11
|
+
from typing import TypedDict
|
|
12
|
+
|
|
13
|
+
# Security: cap fan-in per feature so a malicious/buggy caller cannot DoS the
# resolver with enormous dependency lists (enforced in validate_dependencies).
MAX_DEPENDENCIES_PER_FEATURE = 20
# Cap recursion depth during cycle detection; beyond this the check fails safe
# by assuming a cycle (see would_create_circular_dependency).
MAX_DEPENDENCY_DEPTH = 50  # Prevent stack overflow in cycle detection
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class DependencyResult(TypedDict):
    """Result from dependency resolution (see resolve_dependencies).

    Keys:
        ordered_features: Features in a dependency-respecting execution order;
            features that are part of a cycle are appended at the end.
        circular_dependencies: Detected cycles, each a list of feature IDs.
        blocked_features: feature_id -> IDs of its dependencies whose
            ``passes`` flag is not truthy.
        missing_dependencies: feature_id -> referenced dependency IDs that do
            not exist in the input feature list.
    """

    ordered_features: list[dict]
    circular_dependencies: list[list[int]]
    blocked_features: dict[int, list[int]]  # feature_id -> [blocking_ids]
    missing_dependencies: dict[int, list[int]]  # feature_id -> [missing_ids]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def resolve_dependencies(features: list[dict]) -> DependencyResult:
    """Topological sort using Kahn's algorithm with priority-aware ordering.

    Returns ordered features respecting dependencies, plus metadata about
    cycles, blocked features, and missing dependencies.

    Args:
        features: List of feature dicts with id, priority, passes, and
            dependencies fields.

    Returns:
        DependencyResult with ordered_features, circular_dependencies,
        blocked_features, and missing_dependencies.
    """
    feature_map = {f["id"]: f for f in features}
    in_degree = {f["id"]: 0 for f in features}
    adjacency: dict[int, list[int]] = {f["id"]: [] for f in features}
    blocked: dict[int, list[int]] = {}
    missing: dict[int, list[int]] = {}

    # Build graph: edge dep_id -> feature_id means "feature waits on dep".
    for feature in features:
        deps = feature.get("dependencies") or []
        for dep_id in deps:
            if dep_id not in feature_map:
                missing.setdefault(feature["id"], []).append(dep_id)
            else:
                adjacency[dep_id].append(feature["id"])
                in_degree[feature["id"]] += 1
                # Track features blocked by a dependency that is not passing.
                dep = feature_map[dep_id]
                if not dep.get("passes"):
                    blocked.setdefault(feature["id"], []).append(dep_id)

    # Kahn's algorithm with priority-aware selection using a heap.
    # Heap entries are (priority, id, feature_dict); the unique id keeps the
    # ordering total and deterministic even when priorities tie, and prevents
    # the heap from ever comparing the dicts themselves.
    heap = [
        (f.get("priority", 999), f["id"], f)
        for f in features
        if in_degree[f["id"]] == 0
    ]
    heapq.heapify(heap)
    ordered: list[dict] = []

    while heap:
        _, _, current = heapq.heappop(heap)
        ordered.append(current)
        for dependent_id in adjacency[current["id"]]:
            in_degree[dependent_id] -= 1
            if in_degree[dependent_id] == 0:
                dep_feature = feature_map[dependent_id]
                heapq.heappush(
                    heap,
                    (dep_feature.get("priority", 999), dependent_id, dep_feature)
                )

    # Detect cycles: any feature not emitted is part of (or downstream of) a
    # cycle.  Compare by id rather than `f not in ordered`: dict-equality
    # membership is O(n) per feature (O(n^2) total) and would also wrongly
    # drop a feature whose dict happens to compare equal to another's.
    cycles: list[list[int]] = []
    if len(ordered) < len(features):
        ordered_ids = {f["id"] for f in ordered}
        remaining = [f for f in features if f["id"] not in ordered_ids]
        cycles = _detect_cycles(remaining, feature_map)
        ordered.extend(remaining)  # Append cyclic features at the end.

    return {
        "ordered_features": ordered,
        "circular_dependencies": cycles,
        "blocked_features": blocked,
        "missing_dependencies": missing,
    }
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def are_dependencies_satisfied(
    feature: dict,
    all_features: list[dict],
    passing_ids: set[int] | None = None,
) -> bool:
    """Return True when every dependency of *feature* has passes=True.

    Args:
        feature: Feature dict to check.
        all_features: List of all feature dicts.
        passing_ids: Optional pre-computed set of passing feature IDs.
            Supply this when calling in a loop to avoid O(n^2) work;
            when None it is derived from all_features.

    Returns:
        True if the feature has no dependencies or all are satisfied.
    """
    required = feature.get("dependencies") or []
    if not required:
        return True
    satisfied = (
        passing_ids
        if passing_ids is not None
        else {f["id"] for f in all_features if f.get("passes")}
    )
    return all(dep in satisfied for dep in required)
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def get_blocking_dependencies(
    feature: dict,
    all_features: list[dict],
    passing_ids: set[int] | None = None,
) -> list[int]:
    """Return the IDs of this feature's dependencies that are not passing.

    Args:
        feature: Feature dict to check.
        all_features: List of all feature dicts.
        passing_ids: Optional pre-computed set of passing feature IDs.
            Supply this when calling in a loop to avoid O(n^2) work;
            when None it is derived from all_features.

    Returns:
        Feature IDs that are blocking this feature (empty when none).
    """
    if passing_ids is None:
        passing_ids = {f["id"] for f in all_features if f.get("passes")}
    blocking: list[int] = []
    for dep_id in feature.get("dependencies") or []:
        if dep_id not in passing_ids:
            blocking.append(dep_id)
    return blocking
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def would_create_circular_dependency(
    features: list[dict], source_id: int, target_id: int
) -> bool:
    """Check whether making *source_id* depend on *target_id* creates a cycle.

    Runs a depth-limited DFS from the target through the existing dependency
    edges, looking for a path back to the source.

    Args:
        features: List of all feature dicts.
        source_id: The feature that would gain the dependency.
        target_id: The feature that would become a dependency.

    Returns:
        True if adding the dependency would create a cycle.
    """
    if source_id == target_id:
        return True  # A feature depending on itself is the trivial cycle.

    lookup = {f["id"]: f for f in features}
    if not lookup.get(source_id):
        return False  # Unknown source cannot be reached, so no cycle.
    if not lookup.get(target_id):
        return False  # Unknown target has no outgoing edges to follow.

    seen: set[int] = set()

    def reaches_source(node_id: int, depth: int = 0) -> bool:
        # Security: bounded depth; excessively deep chains are treated as
        # cyclic (fail-safe) rather than risking a stack overflow.
        if depth > MAX_DEPENDENCY_DEPTH:
            return True
        if node_id == source_id:
            return True
        if node_id in seen:
            return False
        seen.add(node_id)

        node = lookup.get(node_id)
        if not node:
            return False

        return any(
            reaches_source(dep_id, depth + 1)
            for dep_id in (node.get("dependencies") or [])
        )

    return reaches_source(target_id)
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def validate_dependencies(
    feature_id: int, dependency_ids: list[int], all_feature_ids: set[int]
) -> tuple[bool, str]:
    """Validate a proposed dependency list for a feature.

    Args:
        feature_id: ID of the feature being validated.
        dependency_ids: List of proposed dependency IDs.
        all_feature_ids: Set of all valid feature IDs.

    Returns:
        Tuple of (is_valid, error_message); error_message is "" when valid.
    """
    # Security: reject oversized lists before doing any other work.
    if len(dependency_ids) > MAX_DEPENDENCIES_PER_FEATURE:
        return False, f"Maximum {MAX_DEPENDENCIES_PER_FEATURE} dependencies allowed"

    # A feature may not depend on itself.
    if feature_id in dependency_ids:
        return False, "A feature cannot depend on itself"

    # Every referenced dependency must exist.
    unknown = [d for d in dependency_ids if d not in all_feature_ids]
    if unknown:
        return False, f"Dependencies not found: {unknown}"

    # Repeated entries are rejected.
    if len(set(dependency_ids)) != len(dependency_ids):
        return False, "Duplicate dependencies not allowed"

    return True, ""
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
def _detect_cycles(features: list[dict], feature_map: dict) -> list[list[int]]:
    """Detect cycles using DFS with recursion-stack tracking.

    Finds at most one cycle per DFS tree rooted at each unvisited feature.

    Args:
        features: List of features to check for cycles.
        feature_map: Map of feature_id -> feature dict.

    Returns:
        List of cycles, where each cycle is a list of feature IDs.
    """
    cycles: list[list[int]] = []
    visited: set[int] = set()
    rec_stack: set[int] = set()
    path: list[int] = []

    def dfs(fid: int) -> bool:
        visited.add(fid)
        rec_stack.add(fid)
        path.append(fid)

        try:
            feature = feature_map.get(fid)
            if feature:
                for dep_id in feature.get("dependencies") or []:
                    if dep_id not in visited:
                        if dfs(dep_id):
                            return True
                    elif dep_id in rec_stack:
                        # Back-edge: everything from dep_id onward is the cycle.
                        cycle_start = path.index(dep_id)
                        cycles.append(path[cycle_start:])
                        return True
            return False
        finally:
            # Bug fix: the original only unwound path/rec_stack on the
            # no-cycle return path, so after a cycle was found, stale
            # entries leaked into subsequent top-level DFS runs and could
            # report phantom cycles through already-finished nodes.
            path.pop()
            rec_stack.remove(fid)

    for f in features:
        if f["id"] not in visited:
            dfs(f["id"])

    return cycles
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
def compute_scheduling_scores(features: list[dict]) -> dict[int, float]:
    """Compute scheduling scores for all features.

    Higher scores mean higher priority for scheduling. The algorithm considers:
    1. Unblocking potential - Features that unblock more downstream work score higher
    2. Depth in graph - Features with no dependencies (roots) are "shovel-ready"
    3. User priority - Existing priority field as tiebreaker

    Score formula: (1000 * unblock) + (100 * depth_score) + (10 * priority_factor)

    Args:
        features: List of feature dicts with id, priority, dependencies fields

    Returns:
        Dict mapping feature_id -> score (higher = schedule first)
    """
    if not features:
        return {}

    # Build adjacency lists
    children: dict[int, list[int]] = {f["id"]: [] for f in features}  # who depends on me
    parents: dict[int, list[int]] = {f["id"]: [] for f in features}  # who I depend on

    for f in features:
        for dep_id in (f.get("dependencies") or []):
            if dep_id in children:  # Only valid deps; dangling IDs are ignored here
                children[dep_id].append(f["id"])
                parents[f["id"]].append(dep_id)

    # Calculate depths via BFS from roots
    # Use visited set to prevent infinite loops from circular dependencies
    # Use deque for O(1) popleft instead of list.pop(0) which is O(n)
    depths: dict[int, int] = {}
    visited: set[int] = set()
    roots = [f["id"] for f in features if not parents[f["id"]]]
    bfs_queue: deque[tuple[int, int]] = deque((root, 0) for root in roots)
    while bfs_queue:
        node_id, depth = bfs_queue.popleft()
        if node_id in visited:
            continue  # Skip already visited nodes (handles cycles and diamonds)
        visited.add(node_id)
        depths[node_id] = depth  # First (shortest) BFS arrival wins
        for child_id in children[node_id]:
            if child_id not in visited:
                bfs_queue.append((child_id, depth + 1))

    # Nodes never reached from a root (e.g. members of a pure cycle) get
    # depth 0 so every feature receives a score.
    for f in features:
        if f["id"] not in depths:
            depths[f["id"]] = 0

    # Calculate transitive downstream counts (reverse topo order)
    # NOTE(review): a node reachable via multiple paths is counted once per
    # path (diamonds inflate ancestors' counts) -- presumably acceptable for
    # a relative heuristic, but confirm before relying on exact counts.
    downstream: dict[int, int] = {f["id"]: 0 for f in features}
    # Process in reverse depth order (leaves first); sorted() is stable, so
    # equal-depth nodes keep BFS insertion order.
    for fid in sorted(depths.keys(), key=lambda x: -depths[x]):
        for parent_id in parents[fid]:
            downstream[parent_id] += 1 + downstream[fid]

    # Normalize and compute scores
    max_depth = max(depths.values()) if depths else 0
    max_downstream = max(downstream.values()) if downstream else 0

    scores: dict[int, float] = {}
    for f in features:
        fid = f["id"]

        # Unblocking score: 0-1, higher = unblocks more
        unblock = downstream[fid] / max_downstream if max_downstream > 0 else 0

        # Depth score: 0-1, higher = closer to root (no deps)
        depth_score = 1 - (depths[fid] / max_depth) if max_depth > 0 else 1

        # Priority factor: 0-1, lower priority number = higher factor;
        # priorities >= 10 all clamp to factor 0.
        priority = f.get("priority", 999)
        priority_factor = (10 - min(priority, 10)) / 10

        scores[fid] = (1000 * unblock) + (100 * depth_score) + (10 * priority_factor)

    return scores
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def get_ready_features(features: list[dict], limit: int = 10) -> list[dict]:
    """Get features that are ready to be worked on.

    A feature is ready when it is not passing, not in progress, and every
    one of its dependencies is already passing.

    Args:
        features: List of all feature dicts.
        limit: Maximum number of features to return.

    Returns:
        Up to *limit* ready features, best-scheduled first.
    """
    done_ids = {f["id"] for f in features if f.get("passes")}

    candidates = [
        f
        for f in features
        if not f.get("passes")
        and not f.get("in_progress")
        and all(d in done_ids for d in (f.get("dependencies") or []))
    ]

    # Order by scheduling score (higher first), then priority, then id.
    scores = compute_scheduling_scores(features)

    def sort_key(f: dict) -> tuple:
        return (-scores.get(f["id"], 0), f.get("priority", 999), f["id"])

    return sorted(candidates, key=sort_key)[:limit]
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
def get_blocked_features(features: list[dict]) -> list[dict]:
    """Get features that are blocked by unmet dependencies.

    Args:
        features: List of all feature dicts.

    Returns:
        Copies of the blocked features, each with a 'blocked_by' field
        listing the dependency IDs that are not yet passing.
    """
    done_ids = {f["id"] for f in features if f.get("passes")}

    result: list[dict] = []
    for feat in features:
        if feat.get("passes"):
            continue  # A passing feature is never blocked.
        unmet = [
            dep for dep in (feat.get("dependencies") or []) if dep not in done_ids
        ]
        if unmet:
            # Shallow-copy so the caller's feature dict is not mutated.
            entry = dict(feat)
            entry["blocked_by"] = unmet
            result.append(entry)
    return result
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
def build_graph_data(features: list[dict]) -> dict:
    """Build graph data structure for visualization.

    Args:
        features: List of all feature dicts (each must carry 'id', 'name',
            and 'category').

    Returns:
        Dict with 'nodes' and 'edges' for graph visualization; each edge
        points from a dependency to the feature that depends on it.
    """
    done_ids = {f["id"] for f in features if f.get("passes")}

    nodes: list[dict] = []
    edges: list[dict] = []

    for feat in features:
        deps = feat.get("dependencies") or []
        unmet = [d for d in deps if d not in done_ids]

        # Status precedence: done > blocked > in_progress > pending.
        if feat.get("passes"):
            status = "done"
        elif unmet:
            status = "blocked"
        elif feat.get("in_progress"):
            status = "in_progress"
        else:
            status = "pending"

        nodes.append(
            {
                "id": feat["id"],
                "name": feat["name"],
                "category": feat["category"],
                "status": status,
                "priority": feat.get("priority", 999),
                "dependencies": deps,
            }
        )

        edges.extend({"source": d, "target": feat["id"]} for d in deps)

    return {"nodes": nodes, "edges": edges}
|
package/api/migration.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
"""
|
|
2
|
+
JSON to SQLite Migration
|
|
3
|
+
========================
|
|
4
|
+
|
|
5
|
+
Automatically migrates existing feature_list.json files to SQLite database.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import shutil
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Optional
|
|
13
|
+
|
|
14
|
+
from sqlalchemy.orm import Session, sessionmaker
|
|
15
|
+
|
|
16
|
+
from api.database import Feature
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def migrate_json_to_sqlite(
    project_dir: Path,
    session_maker: sessionmaker,
) -> bool:
    """
    Detect existing feature_list.json, import to SQLite, rename to backup.

    This function:
    1. Checks if feature_list.json exists
    2. Checks if database already has data (skips if so)
    3. Imports all features from JSON
    4. Renames JSON file to feature_list.json.backup.<timestamp>

    Args:
        project_dir: Directory containing the project
        session_maker: SQLAlchemy session maker

    Returns:
        True if migration was performed, False if skipped
    """
    json_file = project_dir / "feature_list.json"

    if not json_file.exists():
        return False  # No JSON file to migrate

    # Check if database already has data; a short-lived session is used for
    # the count and closed before any import work starts.
    session: Session = session_maker()
    try:
        existing_count = session.query(Feature).count()
        if existing_count > 0:
            print(
                f"Database already has {existing_count} features, skipping migration"
            )
            return False
    finally:
        session.close()

    # Load JSON data; parse/read errors are reported and abort the migration
    # without touching the database or the JSON file.
    try:
        with open(json_file, "r", encoding="utf-8") as f:
            features_data = json.load(f)
    except json.JSONDecodeError as e:
        print(f"Error parsing feature_list.json: {e}")
        return False
    except IOError as e:
        print(f"Error reading feature_list.json: {e}")
        return False

    if not isinstance(features_data, list):
        print("Error: feature_list.json must contain a JSON array")
        return False

    # Import features into database using a fresh session.
    session = session_maker()
    try:
        imported_count = 0
        for i, feature_dict in enumerate(features_data):
            # Handle both old format (no id/priority/name) and new format by
            # falling back to positional defaults (1-based index).
            feature = Feature(
                id=feature_dict.get("id", i + 1),
                priority=feature_dict.get("priority", i + 1),
                category=feature_dict.get("category", "uncategorized"),
                name=feature_dict.get("name", f"Feature {i + 1}"),
                description=feature_dict.get("description", ""),
                steps=feature_dict.get("steps", []),
                passes=feature_dict.get("passes", False),
                in_progress=feature_dict.get("in_progress", False),
                dependencies=feature_dict.get("dependencies"),
            )
            session.add(feature)
            imported_count += 1

        # Single commit: either all features land or none do (rollback below).
        session.commit()

        # Verify import by re-querying; the reported count comes from the
        # database, not from imported_count.
        final_count = session.query(Feature).count()
        print(f"Migrated {final_count} features from JSON to SQLite")

    except Exception as e:
        # NOTE(review): deliberately broad boundary -- a failed migration is
        # reported via the return value rather than crashing the caller.
        session.rollback()
        print(f"Error during migration: {e}")
        return False
    finally:
        session.close()

    # Rename JSON file to backup so the migration never runs twice.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    backup_file = project_dir / f"feature_list.json.backup.{timestamp}"

    try:
        shutil.move(json_file, backup_file)
        print(f"Original JSON backed up to: {backup_file.name}")
    except IOError as e:
        print(f"Warning: Could not backup JSON file: {e}")
        # Continue anyway - the data is in the database

    return True
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def export_to_json(
    project_dir: Path,
    session_maker: sessionmaker,
    output_file: Optional[Path] = None,
) -> Path:
    """
    Export features from database back to JSON format.

    Useful for debugging or if you need to revert to the old format.

    Args:
        project_dir: Directory containing the project
        session_maker: SQLAlchemy session maker
        output_file: Output file path (default: feature_list_export.json
            inside project_dir)

    Returns:
        Path to the exported file
    """
    if output_file is None:
        output_file = project_dir / "feature_list_export.json"

    session: Session = session_maker()
    try:
        # Stable export order: priority first, id as tiebreaker -- mirrors
        # the ordering conventions used elsewhere in the resolver.
        features = (
            session.query(Feature)
            .order_by(Feature.priority.asc(), Feature.id.asc())
            .all()
        )

        features_data = [f.to_dict() for f in features]

        with open(output_file, "w", encoding="utf-8") as f:
            json.dump(features_data, f, indent=2)

        print(f"Exported {len(features_data)} features to {output_file}")
        return output_file

    finally:
        session.close()
|
package/auth.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Authentication Error Detection
|
|
3
|
+
==============================
|
|
4
|
+
|
|
5
|
+
Shared utilities for detecting Claude CLI authentication errors.
|
|
6
|
+
Used by both CLI (start.py) and server (process_manager.py) to provide
|
|
7
|
+
consistent error detection and messaging.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
|
|
12
|
+
# Patterns that indicate authentication errors from Claude CLI.
# is_auth_error() lowercases its input before matching, so every pattern is
# written in lowercase; \s+ tolerates arbitrary whitespace between words.
AUTH_ERROR_PATTERNS = [
    r"not\s+logged\s+in",
    r"not\s+authenticated",
    r"authentication\s+(failed|required|error)",
    r"login\s+required",
    r"please\s+(run\s+)?['\"]?claude\s+login",
    r"unauthorized",
    r"invalid\s+(token|credential|api.?key)",
    r"expired\s+(token|session|credential)",
    r"could\s+not\s+authenticate",
    r"sign\s+in\s+(to|required)",
]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def is_auth_error(text: str) -> bool:
    """
    Check if text contains Claude CLI authentication error messages.

    Lowercases the input and tests it against every pattern in
    AUTH_ERROR_PATTERNS (case-insensitive by construction).

    Args:
        text: Output text to check

    Returns:
        True if any auth error pattern matches, False otherwise
    """
    if not text:
        return False
    lowered = text.lower()
    return any(re.search(pattern, lowered) for pattern in AUTH_ERROR_PATTERNS)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# CLI-style help message (for terminal output)
|
|
49
|
+
AUTH_ERROR_HELP_CLI = """
|
|
50
|
+
==================================================
|
|
51
|
+
Authentication Error Detected
|
|
52
|
+
==================================================
|
|
53
|
+
|
|
54
|
+
Claude CLI requires authentication to work.
|
|
55
|
+
|
|
56
|
+
To fix this, run:
|
|
57
|
+
claude login
|
|
58
|
+
|
|
59
|
+
This will open a browser window to sign in.
|
|
60
|
+
After logging in, try running this command again.
|
|
61
|
+
==================================================
|
|
62
|
+
"""
|
|
63
|
+
|
|
64
|
+
# Server-style help message (for WebSocket streaming)
|
|
65
|
+
AUTH_ERROR_HELP_SERVER = """
|
|
66
|
+
================================================================================
|
|
67
|
+
AUTHENTICATION ERROR DETECTED
|
|
68
|
+
================================================================================
|
|
69
|
+
|
|
70
|
+
Claude CLI requires authentication to work.
|
|
71
|
+
|
|
72
|
+
To fix this, run:
|
|
73
|
+
claude login
|
|
74
|
+
|
|
75
|
+
This will open a browser window to sign in.
|
|
76
|
+
After logging in, try starting the agent again.
|
|
77
|
+
================================================================================
|
|
78
|
+
"""
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def print_auth_error_help() -> None:
    """Print helpful message when authentication error is detected (CLI version).

    Writes AUTH_ERROR_HELP_CLI (which directs the user to run
    ``claude login``) to stdout.
    """
    print(AUTH_ERROR_HELP_CLI)
|