autoforge-ai 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84) hide show
  1. package/.claude/commands/check-code.md +32 -0
  2. package/.claude/commands/checkpoint.md +40 -0
  3. package/.claude/commands/create-spec.md +613 -0
  4. package/.claude/commands/expand-project.md +234 -0
  5. package/.claude/commands/gsd-to-autoforge-spec.md +10 -0
  6. package/.claude/commands/review-pr.md +75 -0
  7. package/.claude/templates/app_spec.template.txt +331 -0
  8. package/.claude/templates/coding_prompt.template.md +265 -0
  9. package/.claude/templates/initializer_prompt.template.md +354 -0
  10. package/.claude/templates/testing_prompt.template.md +146 -0
  11. package/.env.example +64 -0
  12. package/LICENSE.md +676 -0
  13. package/README.md +423 -0
  14. package/agent.py +444 -0
  15. package/api/__init__.py +10 -0
  16. package/api/database.py +536 -0
  17. package/api/dependency_resolver.py +449 -0
  18. package/api/migration.py +156 -0
  19. package/auth.py +83 -0
  20. package/autoforge_paths.py +315 -0
  21. package/autonomous_agent_demo.py +293 -0
  22. package/bin/autoforge.js +3 -0
  23. package/client.py +607 -0
  24. package/env_constants.py +27 -0
  25. package/examples/OPTIMIZE_CONFIG.md +230 -0
  26. package/examples/README.md +531 -0
  27. package/examples/org_config.yaml +172 -0
  28. package/examples/project_allowed_commands.yaml +139 -0
  29. package/lib/cli.js +791 -0
  30. package/mcp_server/__init__.py +1 -0
  31. package/mcp_server/feature_mcp.py +988 -0
  32. package/package.json +53 -0
  33. package/parallel_orchestrator.py +1800 -0
  34. package/progress.py +247 -0
  35. package/prompts.py +427 -0
  36. package/pyproject.toml +17 -0
  37. package/rate_limit_utils.py +132 -0
  38. package/registry.py +614 -0
  39. package/requirements-prod.txt +14 -0
  40. package/security.py +959 -0
  41. package/server/__init__.py +17 -0
  42. package/server/main.py +261 -0
  43. package/server/routers/__init__.py +32 -0
  44. package/server/routers/agent.py +177 -0
  45. package/server/routers/assistant_chat.py +327 -0
  46. package/server/routers/devserver.py +309 -0
  47. package/server/routers/expand_project.py +239 -0
  48. package/server/routers/features.py +746 -0
  49. package/server/routers/filesystem.py +514 -0
  50. package/server/routers/projects.py +524 -0
  51. package/server/routers/schedules.py +356 -0
  52. package/server/routers/settings.py +127 -0
  53. package/server/routers/spec_creation.py +357 -0
  54. package/server/routers/terminal.py +453 -0
  55. package/server/schemas.py +593 -0
  56. package/server/services/__init__.py +36 -0
  57. package/server/services/assistant_chat_session.py +496 -0
  58. package/server/services/assistant_database.py +304 -0
  59. package/server/services/chat_constants.py +57 -0
  60. package/server/services/dev_server_manager.py +557 -0
  61. package/server/services/expand_chat_session.py +399 -0
  62. package/server/services/process_manager.py +657 -0
  63. package/server/services/project_config.py +475 -0
  64. package/server/services/scheduler_service.py +683 -0
  65. package/server/services/spec_chat_session.py +502 -0
  66. package/server/services/terminal_manager.py +756 -0
  67. package/server/utils/__init__.py +1 -0
  68. package/server/utils/process_utils.py +134 -0
  69. package/server/utils/project_helpers.py +32 -0
  70. package/server/utils/validation.py +54 -0
  71. package/server/websocket.py +903 -0
  72. package/start.py +456 -0
  73. package/ui/dist/assets/index-8W_wmZzz.js +168 -0
  74. package/ui/dist/assets/index-B47Ubhox.css +1 -0
  75. package/ui/dist/assets/vendor-flow-CVNK-_lx.js +7 -0
  76. package/ui/dist/assets/vendor-query-BUABzP5o.js +1 -0
  77. package/ui/dist/assets/vendor-radix-DTNNCg2d.js +45 -0
  78. package/ui/dist/assets/vendor-react-qkC6yhPU.js +1 -0
  79. package/ui/dist/assets/vendor-utils-COeKbHgx.js +2 -0
  80. package/ui/dist/assets/vendor-xterm-DP_gxef0.js +16 -0
  81. package/ui/dist/index.html +23 -0
  82. package/ui/dist/ollama.png +0 -0
  83. package/ui/dist/vite.svg +6 -0
  84. package/ui/package.json +57 -0
@@ -0,0 +1,988 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ MCP Server for Feature Management
4
+ ==================================
5
+
6
+ Provides tools to manage features in the autonomous coding system.
7
+
8
+ Tools:
9
+ - feature_get_stats: Get progress statistics
10
+ - feature_get_by_id: Get a specific feature by ID
11
+ - feature_get_summary: Get minimal feature info (id, name, status, deps)
12
+ - feature_mark_passing: Mark a feature as passing
13
+ - feature_mark_failing: Mark a feature as failing (regression detected)
14
+ - feature_skip: Skip a feature (move to end of queue)
15
+ - feature_mark_in_progress: Mark a feature as in-progress
16
+ - feature_claim_and_get: Atomically claim and get feature details
17
+ - feature_clear_in_progress: Clear in-progress status
18
+ - feature_create_bulk: Create multiple features at once
19
+ - feature_create: Create a single feature
20
+ - feature_add_dependency: Add a dependency between features
21
+ - feature_remove_dependency: Remove a dependency
22
+ - feature_get_ready: Get features ready to implement
23
+ - feature_get_blocked: Get features blocked by dependencies (with limit)
24
+ - feature_get_graph: Get the dependency graph
25
+
26
+ Note: Feature selection (which feature to work on) is handled by the
27
+ orchestrator, not by agents. Agents receive pre-assigned feature IDs.
28
+ """
29
+
30
+ import json
31
+ import os
32
+ import sys
33
+ from contextlib import asynccontextmanager
34
+ from pathlib import Path
35
+ from typing import Annotated
36
+
37
+ from mcp.server.fastmcp import FastMCP
38
+ from pydantic import BaseModel, Field
39
+ from sqlalchemy import text
40
+
41
+ # Add parent directory to path so we can import from api module
42
+ sys.path.insert(0, str(Path(__file__).parent.parent))
43
+
44
+ from api.database import Feature, atomic_transaction, create_database
45
+ from api.dependency_resolver import (
46
+ MAX_DEPENDENCIES_PER_FEATURE,
47
+ compute_scheduling_scores,
48
+ would_create_circular_dependency,
49
+ )
50
+ from api.migration import migrate_json_to_sqlite
51
+
52
# Configuration from environment
# Root of the managed project; the SQLite database and any legacy JSON state
# live under this path. Defaults to the current working directory.
PROJECT_DIR = Path(os.environ.get("PROJECT_DIR", ".")).resolve()
54
+
55
+
56
+ # Pydantic models for input validation
57
class MarkPassingInput(BaseModel):
    """Input for marking a feature as passing."""
    # NOTE(review): the visible tools validate via Annotated[...] parameters,
    # not this model — confirm it is still referenced before removing.
    feature_id: int = Field(..., description="The ID of the feature to mark as passing", ge=1)
60
+
61
+
62
class SkipFeatureInput(BaseModel):
    """Input for skipping a feature."""
    # NOTE(review): apparently unused by the tools in this file (they use
    # Annotated parameters) — verify before deleting.
    feature_id: int = Field(..., description="The ID of the feature to skip", ge=1)
65
+
66
+
67
class MarkInProgressInput(BaseModel):
    """Input for marking a feature as in-progress."""
    # NOTE(review): apparently unused by the tools in this file — verify.
    feature_id: int = Field(..., description="The ID of the feature to mark as in-progress", ge=1)
70
+
71
+
72
class ClearInProgressInput(BaseModel):
    """Input for clearing in-progress status."""
    # NOTE(review): apparently unused by the tools in this file — verify.
    feature_id: int = Field(..., description="The ID of the feature to clear in-progress status", ge=1)
75
+
76
+
77
class RegressionInput(BaseModel):
    """Input for getting regression features."""
    # Caps how many already-passing features a regression sweep may pull.
    # NOTE(review): no tool in this chunk consumes this model — verify usage.
    limit: int = Field(default=3, ge=1, le=10, description="Maximum number of passing features to return")
80
+
81
+
82
class FeatureCreateItem(BaseModel):
    """Schema for creating a single feature.

    NOTE(review): feature_create_bulk accepts plain dicts and validates by
    hand rather than through this model — confirm it is still needed.
    """
    category: str = Field(..., min_length=1, max_length=100, description="Feature category")
    name: str = Field(..., min_length=1, max_length=255, description="Feature name")
    description: str = Field(..., min_length=1, description="Detailed description")
    steps: list[str] = Field(..., min_length=1, description="Implementation/test steps")
88
+
89
+
90
class BulkCreateInput(BaseModel):
    """Input for bulk creating features."""
    # NOTE(review): feature_create_bulk takes list[dict] directly; this
    # wrapper appears unused — verify before removing.
    features: list[FeatureCreateItem] = Field(..., min_length=1, description="List of features to create")
93
+
94
+
95
# Global database session maker (initialized on startup)
# Both are populated by server_lifespan(); they stay None until the MCP
# server has started, so get_session() guards against early use.
_session_maker = None
_engine = None

# NOTE: The old threading.Lock() was removed because it only worked per-process,
# not cross-process. In parallel mode, multiple MCP servers run in separate
# processes, so the lock was useless. We now use atomic SQL operations instead.
102
+
103
+
104
@asynccontextmanager
async def server_lifespan(server: FastMCP):
    """Initialize database on startup, cleanup on shutdown.

    Args:
        server: The FastMCP instance this lifespan is attached to (unused).

    Yields:
        None. The engine and session maker are published through the module
        globals ``_engine`` and ``_session_maker`` rather than the context.
    """
    global _session_maker, _engine

    # Create project directory if it doesn't exist
    PROJECT_DIR.mkdir(parents=True, exist_ok=True)

    # Initialize database
    _engine, _session_maker = create_database(PROJECT_DIR)

    # Run migration if needed (converts legacy JSON to SQLite)
    migrate_json_to_sqlite(PROJECT_DIR, _session_maker)

    yield

    # Cleanup
    if _engine:
        _engine.dispose()
123
+
124
+
125
# Initialize the MCP server
# The tool functions below self-register through the @mcp.tool() decorator.
mcp = FastMCP("features", lifespan=server_lifespan)
127
+
128
+
129
def get_session():
    """Return a fresh SQLAlchemy session from the module-level maker.

    Raises:
        RuntimeError: if called before server_lifespan() initialized the DB.
    """
    if _session_maker is None:
        raise RuntimeError("Database not initialized")
    return _session_maker()
134
+
135
+
136
@mcp.tool()
def feature_get_stats() -> str:
    """Get statistics about feature completion progress.

    Returns the number of passing features, in-progress features, total features,
    and completion percentage. Use this to track overall progress of the implementation.

    Returns:
        JSON with: passing (int), in_progress (int), total (int), percentage (float)
    """
    from sqlalchemy import case, func

    session = get_session()
    try:
        # One aggregate round-trip instead of three COUNT queries.
        # `== True` is intentional: it builds a SQL expression, not a
        # Python boolean comparison.
        totals = session.query(
            func.count(Feature.id).label('total'),
            func.sum(case((Feature.passes == True, 1), else_=0)).label('passing'),
            func.sum(case((Feature.in_progress == True, 1), else_=0)).label('in_progress'),
        ).first()

        total_count = totals.total or 0
        passing_count = int(totals.passing or 0)
        active_count = int(totals.in_progress or 0)
        pct = 0.0 if total_count == 0 else round((passing_count / total_count) * 100, 1)

        return json.dumps({
            "passing": passing_count,
            "in_progress": active_count,
            "total": total_count,
            "percentage": pct
        })
    finally:
        session.close()
170
+
171
+
172
@mcp.tool()
def feature_get_by_id(
    feature_id: Annotated[int, Field(description="The ID of the feature to retrieve", ge=1)]
) -> str:
    """Get a specific feature by its ID.

    Returns the full details of a feature including its name, description,
    verification steps, and current status.

    Args:
        feature_id: The ID of the feature to retrieve

    Returns:
        JSON with feature details, or error if not found.
    """
    session = get_session()
    try:
        record = session.query(Feature).filter_by(id=feature_id).first()
        if record is None:
            return json.dumps({"error": f"Feature with ID {feature_id} not found"})
        return json.dumps(record.to_dict())
    finally:
        session.close()
197
+
198
+
199
@mcp.tool()
def feature_get_summary(
    feature_id: Annotated[int, Field(description="The ID of the feature", ge=1)]
) -> str:
    """Get minimal feature info: id, name, status, and dependencies only.

    Use this instead of feature_get_by_id when you only need status info,
    not the full description and steps. This reduces response size significantly.

    Args:
        feature_id: The ID of the feature to retrieve

    Returns:
        JSON with: id, name, passes, in_progress, dependencies
    """
    session = get_session()
    try:
        record = session.query(Feature).filter_by(id=feature_id).first()
        if record is None:
            return json.dumps({"error": f"Feature with ID {feature_id} not found"})
        summary = {
            "id": record.id,
            "name": record.name,
            "passes": record.passes,
            "in_progress": record.in_progress,
            "dependencies": record.dependencies or []
        }
        return json.dumps(summary)
    finally:
        session.close()
228
+
229
+
230
@mcp.tool()
def feature_mark_passing(
    feature_id: Annotated[int, Field(description="The ID of the feature to mark as passing", ge=1)]
) -> str:
    """Mark a feature as passing after successful implementation.

    Updates the feature's passes field to true and clears the in_progress flag.
    Use this after you have implemented the feature and verified it works correctly.

    Args:
        feature_id: The ID of the feature to mark as passing

    Returns:
        JSON with success confirmation: {success, feature_id, name}
    """
    session = get_session()
    try:
        # Guarded UPDATE: the `passes = 0` predicate means only one writer
        # can flip the flag, even with multiple MCP server processes.
        updated = session.execute(text("""
            UPDATE features
            SET passes = 1, in_progress = 0
            WHERE id = :id AND passes = 0
        """), {"id": feature_id})
        session.commit()

        if updated.rowcount == 0:
            # Nothing matched -- figure out whether the ID is bad or the
            # feature was already done.
            row = session.query(Feature).filter_by(id=feature_id).first()
            if row is None:
                return json.dumps({"error": f"Feature with ID {feature_id} not found"})
            if row.passes:
                return json.dumps({"error": f"Feature with ID {feature_id} is already passing"})
            return json.dumps({"error": "Failed to mark feature passing for unknown reason"})

        # Re-read only to report the feature's name back to the caller.
        row = session.query(Feature).filter_by(id=feature_id).first()
        return json.dumps({"success": True, "feature_id": feature_id, "name": row.name})
    except Exception as e:
        session.rollback()
        return json.dumps({"error": f"Failed to mark feature passing: {str(e)}"})
    finally:
        session.close()
272
+
273
+
274
@mcp.tool()
def feature_mark_failing(
    feature_id: Annotated[int, Field(description="The ID of the feature to mark as failing", ge=1)]
) -> str:
    """Mark a feature as failing after finding a regression.

    Updates the feature's passes field to false and clears the in_progress flag.
    Use this when a testing agent discovers that a previously-passing feature
    no longer works correctly (regression detected).

    After marking as failing, you should:
    1. Investigate the root cause
    2. Fix the regression
    3. Verify the fix
    4. Call feature_mark_passing once fixed

    Args:
        feature_id: The ID of the feature to mark as failing

    Returns:
        JSON with the updated feature details, or error if not found.
    """
    session = get_session()
    try:
        # Bail out early when the ID is unknown.
        target = session.query(Feature).filter_by(id=feature_id).first()
        if target is None:
            return json.dumps({"error": f"Feature with ID {feature_id} not found"})

        # Raw UPDATE keeps this safe under parallel writers.
        session.execute(text("""
            UPDATE features
            SET passes = 0, in_progress = 0
            WHERE id = :id
        """), {"id": feature_id})
        session.commit()

        # Re-read so the ORM object reflects the raw UPDATE above.
        session.refresh(target)

        return json.dumps({
            "message": f"Feature #{feature_id} marked as failing - regression detected",
            "feature": target.to_dict()
        })
    except Exception as e:
        session.rollback()
        return json.dumps({"error": f"Failed to mark feature failing: {str(e)}"})
    finally:
        session.close()
323
+
324
+
325
@mcp.tool()
def feature_skip(
    feature_id: Annotated[int, Field(description="The ID of the feature to skip", ge=1)]
) -> str:
    """Skip a feature by moving it to the end of the priority queue.

    Use this when a feature cannot be implemented yet due to:
    - Dependencies on other features that aren't implemented yet
    - External blockers (missing assets, unclear requirements)
    - Technical prerequisites that need to be addressed first

    The feature's priority is set to max_priority + 1, so it will be
    worked on after all other pending features. Also clears the in_progress
    flag so the feature returns to "pending" status.

    Args:
        feature_id: The ID of the feature to skip

    Returns:
        JSON with skip details: id, name, old_priority, new_priority, message
    """
    session = get_session()
    try:
        target = session.query(Feature).filter_by(id=feature_id).first()
        if target is None:
            return json.dumps({"error": f"Feature with ID {feature_id} not found"})
        if target.passes:
            return json.dumps({"error": "Cannot skip a feature that is already passing"})

        previous_priority = target.priority
        feature_name = target.name

        # A single statement both computes MAX(priority)+1 and writes it,
        # so two concurrent skips cannot land on the same priority value.
        session.execute(text("""
            UPDATE features
            SET priority = (SELECT COALESCE(MAX(priority), 0) + 1 FROM features),
                in_progress = 0
            WHERE id = :id
        """), {"id": feature_id})
        session.commit()

        # Pick up the freshly assigned priority.
        session.refresh(target)

        return json.dumps({
            "id": feature_id,
            "name": feature_name,
            "old_priority": previous_priority,
            "new_priority": target.priority,
            "message": f"Feature '{feature_name}' moved to end of queue"
        })
    except Exception as e:
        session.rollback()
        return json.dumps({"error": f"Failed to skip feature: {str(e)}"})
    finally:
        session.close()
385
+
386
+
387
@mcp.tool()
def feature_mark_in_progress(
    feature_id: Annotated[int, Field(description="The ID of the feature to mark as in-progress", ge=1)]
) -> str:
    """Mark a feature as in-progress.

    This prevents other agent sessions from working on the same feature.
    Call this after getting your assigned feature details with feature_get_by_id.

    Args:
        feature_id: The ID of the feature to mark as in-progress

    Returns:
        JSON with the updated feature details, or error if not found or already in-progress.
    """
    session = get_session()
    try:
        # Guarded UPDATE: claims the feature only when it is neither done
        # nor already claimed -- atomic across processes.
        claim = session.execute(text("""
            UPDATE features
            SET in_progress = 1
            WHERE id = :id AND passes = 0 AND in_progress = 0
        """), {"id": feature_id})
        session.commit()

        if claim.rowcount == 0:
            # Work out which precondition failed, in guard order.
            row = session.query(Feature).filter_by(id=feature_id).first()
            if row is None:
                return json.dumps({"error": f"Feature with ID {feature_id} not found"})
            if row.passes:
                return json.dumps({"error": f"Feature with ID {feature_id} is already passing"})
            if row.in_progress:
                return json.dumps({"error": f"Feature with ID {feature_id} is already in-progress"})
            return json.dumps({"error": "Failed to mark feature in-progress for unknown reason"})

        # Return the claimed feature's full record.
        claimed = session.query(Feature).filter_by(id=feature_id).first()
        return json.dumps(claimed.to_dict())
    except Exception as e:
        session.rollback()
        return json.dumps({"error": f"Failed to mark feature in-progress: {str(e)}"})
    finally:
        session.close()
431
+
432
+
433
@mcp.tool()
def feature_claim_and_get(
    feature_id: Annotated[int, Field(description="The ID of the feature to claim", ge=1)]
) -> str:
    """Atomically claim a feature (mark in-progress) and return its full details.

    Combines feature_mark_in_progress + feature_get_by_id into a single operation.
    If already in-progress, still returns the feature details (idempotent).

    Args:
        feature_id: The ID of the feature to claim and retrieve

    Returns:
        JSON with feature details including claimed status, or error if not found.
    """
    session = get_session()
    try:
        # First check if feature exists
        feature = session.query(Feature).filter(Feature.id == feature_id).first()
        if feature is None:
            return json.dumps({"error": f"Feature with ID {feature_id} not found"})

        if feature.passes:
            return json.dumps({"error": f"Feature with ID {feature_id} is already passing"})

        # Try atomic claim: only succeeds if not already claimed
        result = session.execute(text("""
            UPDATE features
            SET in_progress = 1
            WHERE id = :id AND passes = 0 AND in_progress = 0
        """), {"id": feature_id})
        session.commit()

        # rowcount == 0 means another agent holds the claim (or the state
        # changed underneath us).
        already_claimed = result.rowcount == 0

        # Single refresh covers both paths (the original refreshed twice in
        # the already-claimed path): it shows post-UPDATE state when we won
        # the claim, and current state when we did not.
        session.refresh(feature)
        if already_claimed and not feature.in_progress:
            # Not in progress and not claimable: some other failure condition.
            return json.dumps({"error": f"Failed to claim feature {feature_id} for unknown reason"})

        result_dict = feature.to_dict()
        result_dict["already_claimed"] = already_claimed
        return json.dumps(result_dict)
    except Exception as e:
        session.rollback()
        return json.dumps({"error": f"Failed to claim feature: {str(e)}"})
    finally:
        session.close()
484
+
485
+
486
@mcp.tool()
def feature_clear_in_progress(
    feature_id: Annotated[int, Field(description="The ID of the feature to clear in-progress status", ge=1)]
) -> str:
    """Clear in-progress status from a feature.

    Use this when abandoning a feature or manually unsticking a stuck feature.
    The feature will return to the pending queue.

    Args:
        feature_id: The ID of the feature to clear in-progress status

    Returns:
        JSON with the updated feature details, or error if not found.
    """
    session = get_session()
    try:
        target = session.query(Feature).filter_by(id=feature_id).first()
        if target is None:
            return json.dumps({"error": f"Feature with ID {feature_id} not found"})

        # Unconditional reset: idempotent, so safe to repeat in parallel mode.
        session.execute(text("""
            UPDATE features
            SET in_progress = 0
            WHERE id = :id
        """), {"id": feature_id})
        session.commit()

        # Sync the ORM object with the raw UPDATE before serializing.
        session.refresh(target)
        return json.dumps(target.to_dict())
    except Exception as e:
        session.rollback()
        return json.dumps({"error": f"Failed to clear in-progress status: {str(e)}"})
    finally:
        session.close()
523
+
524
+
525
@mcp.tool()
def feature_create_bulk(
    features: Annotated[list[dict], Field(description="List of features to create, each with category, name, description, and steps")]
) -> str:
    """Create multiple features in a single operation.

    Features are assigned sequential priorities based on their order.
    All features start with passes=false.

    This is typically used by the initializer agent to set up the initial
    feature list from the app specification.

    Args:
        features: List of features to create, each with:
            - category (str): Feature category
            - name (str): Feature name
            - description (str): Detailed description
            - steps (list[str]): Implementation/test steps
            - depends_on_indices (list[int], optional): Array indices (0-based) of
              features in THIS batch that this feature depends on. Use this instead
              of 'dependencies' since IDs aren't known until after creation.
              Example: [0, 2] means this feature depends on features at index 0 and 2.

    Returns:
        JSON with: created (int) - number of features created, with_dependencies (int)
    """
    # NOTE(review): validation is done by hand on raw dicts here; the
    # FeatureCreateItem/BulkCreateInput pydantic models defined above appear
    # unused by this tool — confirm before removing them.
    try:
        # Use atomic transaction for bulk inserts to prevent priority conflicts
        with atomic_transaction(_session_maker) as session:
            # Get the starting priority atomically within the transaction
            result = session.execute(text("""
                SELECT COALESCE(MAX(priority), 0) FROM features
            """)).fetchone()
            start_priority = (result[0] or 0) + 1

            # First pass: validate all features and their index-based dependencies.
            # All validation returns happen before any session.add(), so an
            # invalid batch persists nothing.
            for i, feature_data in enumerate(features):
                # Validate required fields
                if not all(key in feature_data for key in ["category", "name", "description", "steps"]):
                    return json.dumps({
                        "error": f"Feature at index {i} missing required fields (category, name, description, steps)"
                    })

                # Validate depends_on_indices
                indices = feature_data.get("depends_on_indices", [])
                if indices:
                    # Check max dependencies
                    if len(indices) > MAX_DEPENDENCIES_PER_FEATURE:
                        return json.dumps({
                            "error": f"Feature at index {i} has {len(indices)} dependencies, max is {MAX_DEPENDENCIES_PER_FEATURE}"
                        })
                    # Check for duplicates
                    if len(indices) != len(set(indices)):
                        return json.dumps({
                            "error": f"Feature at index {i} has duplicate dependencies"
                        })
                    # Check for forward references (can only depend on earlier features).
                    # This rule also makes cycles within the batch impossible.
                    for idx in indices:
                        if not isinstance(idx, int) or idx < 0:
                            return json.dumps({
                                "error": f"Feature at index {i} has invalid dependency index: {idx}"
                            })
                        if idx >= i:
                            return json.dumps({
                                "error": f"Feature at index {i} cannot depend on feature at index {idx} (forward reference not allowed)"
                            })

            # Second pass: create all features with reserved priorities
            created_features: list[Feature] = []
            for i, feature_data in enumerate(features):
                db_feature = Feature(
                    priority=start_priority + i,
                    category=feature_data["category"],
                    name=feature_data["name"],
                    description=feature_data["description"],
                    steps=feature_data["steps"],
                    passes=False,
                    in_progress=False,
                )
                session.add(db_feature)
                created_features.append(db_feature)

            # Flush to get IDs assigned
            session.flush()

            # Third pass: resolve index-based dependencies to actual IDs
            deps_count = 0
            for i, feature_data in enumerate(features):
                indices = feature_data.get("depends_on_indices", [])
                if indices:
                    # Convert indices to actual feature IDs
                    dep_ids = [created_features[idx].id for idx in indices]
                    created_features[i].dependencies = sorted(dep_ids)  # type: ignore[assignment] # SQLAlchemy JSON Column accepts list at runtime
                    deps_count += 1

            # Commit happens automatically on context manager exit
            return json.dumps({
                "created": len(created_features),
                "with_dependencies": deps_count
            })
    except Exception as e:
        return json.dumps({"error": str(e)})
627
+
628
+
629
@mcp.tool()
def feature_create(
    category: Annotated[str, Field(min_length=1, max_length=100, description="Feature category (e.g., 'Authentication', 'API', 'UI')")],
    name: Annotated[str, Field(min_length=1, max_length=255, description="Feature name")],
    description: Annotated[str, Field(min_length=1, description="Detailed description of the feature")],
    steps: Annotated[list[str], Field(min_length=1, description="List of implementation/verification steps")]
) -> str:
    """Create a single feature in the project backlog.

    Use this when the user asks to add a new feature, capability, or test case.
    The feature will be added with the next available priority number.

    Args:
        category: Feature category for grouping (e.g., 'Authentication', 'API', 'UI')
        name: Descriptive name for the feature
        description: Detailed description of what this feature should do
        steps: List of steps to implement or verify the feature

    Returns:
        JSON with the created feature details including its ID
    """
    try:
        # MAX(priority)+1 and the INSERT run inside one atomic transaction,
        # so concurrent creators cannot collide on the same priority.
        with atomic_transaction(_session_maker) as session:
            next_priority = session.execute(text("""
                SELECT COALESCE(MAX(priority), 0) + 1 FROM features
            """)).fetchone()[0]

            new_feature = Feature(
                priority=next_priority,
                category=category,
                name=name,
                description=description,
                steps=steps,
                passes=False,
                in_progress=False,
            )
            session.add(new_feature)
            session.flush()  # assigns the autoincrement ID

            # Serialize before the context manager commits on exit.
            payload = new_feature.to_dict()

        return json.dumps({
            "success": True,
            "message": f"Created feature: {name}",
            "feature": payload
        })
    except Exception as e:
        return json.dumps({"error": str(e)})
681
+
682
+
683
@mcp.tool()
def feature_add_dependency(
    feature_id: Annotated[int, Field(ge=1, description="Feature to add dependency to")],
    dependency_id: Annotated[int, Field(ge=1, description="ID of the dependency feature")]
) -> str:
    """Add a dependency relationship between features.

    The dependency_id feature must be completed before feature_id can be started.
    Validates: self-reference, existence, circular dependencies, max limit.

    Args:
        feature_id: The ID of the feature that will depend on another feature
        dependency_id: The ID of the feature that must be completed first

    Returns:
        JSON with success status and updated dependencies list, or error message
    """
    try:
        # Security: Self-reference check (can do before transaction)
        if feature_id == dependency_id:
            return json.dumps({"error": "A feature cannot depend on itself"})

        # Use atomic transaction for consistent cycle detection
        with atomic_transaction(_session_maker) as session:
            feature = session.query(Feature).filter(Feature.id == feature_id).first()
            dependency = session.query(Feature).filter(Feature.id == dependency_id).first()

            if not feature:
                return json.dumps({"error": f"Feature {feature_id} not found"})
            if not dependency:
                return json.dumps({"error": f"Dependency feature {dependency_id} not found"})

            # dependencies is a JSON list column; None means "no deps yet".
            current_deps = feature.dependencies or []

            # Security: Max dependencies limit
            if len(current_deps) >= MAX_DEPENDENCIES_PER_FEATURE:
                return json.dumps({"error": f"Maximum {MAX_DEPENDENCIES_PER_FEATURE} dependencies allowed per feature"})

            # Check if already exists
            if dependency_id in current_deps:
                return json.dumps({"error": "Dependency already exists"})

            # Security: Circular dependency check
            # Within IMMEDIATE transaction, snapshot is protected by write lock
            # (full-table snapshot; cycle check runs on consistent data).
            all_features = [f.to_dict() for f in session.query(Feature).all()]
            if would_create_circular_dependency(all_features, feature_id, dependency_id):
                return json.dumps({"error": "Cannot add: would create circular dependency"})

            # Add dependency atomically; list is kept sorted for stable output.
            new_deps = sorted(current_deps + [dependency_id])
            feature.dependencies = new_deps
            # Commit happens automatically on context manager exit

            return json.dumps({
                "success": True,
                "feature_id": feature_id,
                "dependencies": new_deps
            })
    except Exception as e:
        return json.dumps({"error": f"Failed to add dependency: {str(e)}"})
743
+
744
+
745
@mcp.tool()
def feature_remove_dependency(
    feature_id: Annotated[int, Field(ge=1, description="Feature to remove dependency from")],
    dependency_id: Annotated[int, Field(ge=1, description="ID of dependency to remove")]
) -> str:
    """Remove a dependency from a feature.

    Args:
        feature_id: The ID of the feature to remove a dependency from
        dependency_id: The ID of the dependency to remove

    Returns:
        JSON with success status and updated dependencies list, or error message
    """
    try:
        # Perform the read-modify-write inside one atomic transaction so a
        # concurrent writer cannot slip in between the read and the update.
        with atomic_transaction(_session_maker) as session:
            target = session.query(Feature).filter(Feature.id == feature_id).first()
            if target is None:
                return json.dumps({"error": f"Feature {feature_id} not found"})

            existing = target.dependencies or []
            if dependency_id not in existing:
                return json.dumps({"error": "Dependency does not exist"})

            # Filter out the requested ID; store None rather than an empty
            # list when no dependencies remain (matches the add-side storage).
            remaining = [dep for dep in existing if dep != dependency_id]
            target.dependencies = remaining if remaining else None
            # Context manager commits on successful exit.

        return json.dumps({
            "success": True,
            "feature_id": feature_id,
            "dependencies": remaining
        })
    except Exception as e:
        return json.dumps({"error": f"Failed to remove dependency: {str(e)}"})
782
+
783
+
784
@mcp.tool()
def feature_get_ready(
    limit: Annotated[int, Field(default=10, ge=1, le=50, description="Max features to return")] = 10
) -> str:
    """Get all features ready to start (dependencies satisfied, not in progress).

    Useful for parallel execution - returns multiple features that can run simultaneously.
    A feature is ready if it is not passing, not in progress, and all dependencies are passing.

    Args:
        limit: Maximum number of features to return (1-50, default 10)

    Returns:
        JSON with: features (list), count (int), total_ready (int)
    """
    session = get_session()
    try:
        features = session.query(Feature).all()
        done_ids = {feat.id for feat in features if feat.passes}
        feature_dicts = [feat.to_dict() for feat in features]

        # Ready = not finished, not claimed, and every dependency already passing.
        ready = [
            feat.to_dict()
            for feat in features
            if not feat.passes
            and not feat.in_progress
            and all(dep in done_ids for dep in (feat.dependencies or []))
        ]

        # Order by scheduling score descending, then priority, then id.
        scores = compute_scheduling_scores(feature_dicts)
        ready.sort(key=lambda item: (-scores.get(item["id"], 0), item["priority"], item["id"]))

        page = ready[:limit]
        return json.dumps({
            "features": page,
            "count": len(page),
            "total_ready": len(ready)
        })
    finally:
        session.close()
824
+
825
+
826
@mcp.tool()
def feature_get_blocked(
    limit: Annotated[int, Field(default=20, ge=1, le=100, description="Max features to return")] = 20
) -> str:
    """Get features that are blocked by unmet dependencies.

    Returns features that have dependencies which are not yet passing.
    Each feature includes a 'blocked_by' field listing the blocking feature IDs.

    Args:
        limit: Maximum number of features to return (1-100, default 20)

    Returns:
        JSON with: features (list with blocked_by field), count (int), total_blocked (int)
    """
    session = get_session()
    try:
        features = session.query(Feature).all()
        done_ids = {feat.id for feat in features if feat.passes}

        blocked = []
        for feat in features:
            if feat.passes:
                continue
            # Dependencies that have not passed yet are the blockers.
            blockers = [dep for dep in (feat.dependencies or []) if dep not in done_ids]
            if not blockers:
                continue
            entry = feat.to_dict()
            entry["blocked_by"] = blockers
            blocked.append(entry)

        page = blocked[:limit]
        return json.dumps({
            "features": page,
            "count": len(page),
            "total_blocked": len(blocked)
        })
    finally:
        session.close()
865
+
866
+
867
@mcp.tool()
def feature_get_graph() -> str:
    """Get dependency graph data for visualization.

    Returns nodes (features) and edges (dependencies) for rendering a graph.
    Each node includes status: 'pending', 'in_progress', 'done', or 'blocked'.

    Returns:
        JSON with: nodes (list), edges (list of {source, target})
    """
    session = get_session()
    try:
        features = session.query(Feature).all()
        done_ids = {feat.id for feat in features if feat.passes}

        nodes = []
        edges = []
        for feat in features:
            deps = feat.dependencies or []
            has_unmet_deps = any(dep not in done_ids for dep in deps)

            # Status precedence: done > blocked > in_progress > pending.
            if feat.passes:
                status = "done"
            elif has_unmet_deps:
                status = "blocked"
            elif feat.in_progress:
                status = "in_progress"
            else:
                status = "pending"

            nodes.append({
                "id": feat.id,
                "name": feat.name,
                "category": feat.category,
                "status": status,
                "priority": feat.priority,
                "dependencies": deps
            })
            # One directed edge per dependency: prerequisite -> dependent.
            edges.extend({"source": dep, "target": feat.id} for dep in deps)

        return json.dumps({"nodes": nodes, "edges": edges})
    finally:
        session.close()
916
+
917
+
918
@mcp.tool()
def feature_set_dependencies(
    feature_id: Annotated[int, Field(ge=1, description="Feature to set dependencies for")],
    dependency_ids: Annotated[list[int], Field(description="List of dependency feature IDs")]
) -> str:
    """Set all dependencies for a feature at once, replacing any existing dependencies.

    Validates: self-reference, existence of all dependencies, circular dependencies, max limit.

    Args:
        feature_id: The ID of the feature to set dependencies for
        dependency_ids: List of feature IDs that must be completed first

    Returns:
        JSON with success status and updated dependencies list, or error message
    """
    try:
        # Security: Self-reference check (can do before transaction)
        if feature_id in dependency_ids:
            return json.dumps({"error": "A feature cannot depend on itself"})

        # Security: Max dependencies limit
        if len(dependency_ids) > MAX_DEPENDENCIES_PER_FEATURE:
            return json.dumps({"error": f"Maximum {MAX_DEPENDENCIES_PER_FEATURE} dependencies allowed"})

        # Check for duplicates
        if len(dependency_ids) != len(set(dependency_ids)):
            return json.dumps({"error": "Duplicate dependencies not allowed"})

        # Use atomic transaction for consistent cycle detection
        with atomic_transaction(_session_maker) as session:
            feature = session.query(Feature).filter(Feature.id == feature_id).first()
            if not feature:
                return json.dumps({"error": f"Feature {feature_id} not found"})

            # Fetch the full feature table ONCE and reuse the snapshot for both
            # the existence check and the cycle check (previously this issued
            # the same query twice inside the transaction).
            # Within IMMEDIATE transaction, snapshot is protected by write lock.
            all_features = [f.to_dict() for f in session.query(Feature).all()]

            # Validate all dependencies exist
            all_feature_ids = {f["id"] for f in all_features}
            missing = [d for d in dependency_ids if d not in all_feature_ids]
            if missing:
                return json.dumps({"error": f"Dependencies not found: {missing}"})

            # Check for circular dependencies against a hypothetical snapshot
            # where this feature already carries the proposed dependency list.
            test_features = [
                {**f, "dependencies": dependency_ids} if f["id"] == feature_id else f
                for f in all_features
            ]
            for dep_id in dependency_ids:
                if would_create_circular_dependency(test_features, feature_id, dep_id):
                    return json.dumps({"error": f"Cannot add dependency {dep_id}: would create circular dependency"})

            # Set dependencies atomically; store None when the list is empty
            # (matches how removals empty the column elsewhere in this module).
            sorted_deps = sorted(dependency_ids) if dependency_ids else None
            feature.dependencies = sorted_deps
            # Commit happens automatically on context manager exit

        return json.dumps({
            "success": True,
            "feature_id": feature_id,
            "dependencies": sorted_deps or []
        })
    except Exception as e:
        return json.dumps({"error": f"Failed to set dependencies: {str(e)}"})
985
+
986
+
987
# Entry point: start the MCP server when this module is executed directly
# (importing the module for its tool definitions does not start the server).
if __name__ == "__main__":
    mcp.run()