autoforge-ai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/check-code.md +32 -0
- package/.claude/commands/checkpoint.md +40 -0
- package/.claude/commands/create-spec.md +613 -0
- package/.claude/commands/expand-project.md +234 -0
- package/.claude/commands/gsd-to-autoforge-spec.md +10 -0
- package/.claude/commands/review-pr.md +75 -0
- package/.claude/templates/app_spec.template.txt +331 -0
- package/.claude/templates/coding_prompt.template.md +265 -0
- package/.claude/templates/initializer_prompt.template.md +354 -0
- package/.claude/templates/testing_prompt.template.md +146 -0
- package/.env.example +64 -0
- package/LICENSE.md +676 -0
- package/README.md +423 -0
- package/agent.py +444 -0
- package/api/__init__.py +10 -0
- package/api/database.py +536 -0
- package/api/dependency_resolver.py +449 -0
- package/api/migration.py +156 -0
- package/auth.py +83 -0
- package/autoforge_paths.py +315 -0
- package/autonomous_agent_demo.py +293 -0
- package/bin/autoforge.js +3 -0
- package/client.py +607 -0
- package/env_constants.py +27 -0
- package/examples/OPTIMIZE_CONFIG.md +230 -0
- package/examples/README.md +531 -0
- package/examples/org_config.yaml +172 -0
- package/examples/project_allowed_commands.yaml +139 -0
- package/lib/cli.js +791 -0
- package/mcp_server/__init__.py +1 -0
- package/mcp_server/feature_mcp.py +988 -0
- package/package.json +53 -0
- package/parallel_orchestrator.py +1800 -0
- package/progress.py +247 -0
- package/prompts.py +427 -0
- package/pyproject.toml +17 -0
- package/rate_limit_utils.py +132 -0
- package/registry.py +614 -0
- package/requirements-prod.txt +14 -0
- package/security.py +959 -0
- package/server/__init__.py +17 -0
- package/server/main.py +261 -0
- package/server/routers/__init__.py +32 -0
- package/server/routers/agent.py +177 -0
- package/server/routers/assistant_chat.py +327 -0
- package/server/routers/devserver.py +309 -0
- package/server/routers/expand_project.py +239 -0
- package/server/routers/features.py +746 -0
- package/server/routers/filesystem.py +514 -0
- package/server/routers/projects.py +524 -0
- package/server/routers/schedules.py +356 -0
- package/server/routers/settings.py +127 -0
- package/server/routers/spec_creation.py +357 -0
- package/server/routers/terminal.py +453 -0
- package/server/schemas.py +593 -0
- package/server/services/__init__.py +36 -0
- package/server/services/assistant_chat_session.py +496 -0
- package/server/services/assistant_database.py +304 -0
- package/server/services/chat_constants.py +57 -0
- package/server/services/dev_server_manager.py +557 -0
- package/server/services/expand_chat_session.py +399 -0
- package/server/services/process_manager.py +657 -0
- package/server/services/project_config.py +475 -0
- package/server/services/scheduler_service.py +683 -0
- package/server/services/spec_chat_session.py +502 -0
- package/server/services/terminal_manager.py +756 -0
- package/server/utils/__init__.py +1 -0
- package/server/utils/process_utils.py +134 -0
- package/server/utils/project_helpers.py +32 -0
- package/server/utils/validation.py +54 -0
- package/server/websocket.py +903 -0
- package/start.py +456 -0
- package/ui/dist/assets/index-8W_wmZzz.js +168 -0
- package/ui/dist/assets/index-B47Ubhox.css +1 -0
- package/ui/dist/assets/vendor-flow-CVNK-_lx.js +7 -0
- package/ui/dist/assets/vendor-query-BUABzP5o.js +1 -0
- package/ui/dist/assets/vendor-radix-DTNNCg2d.js +45 -0
- package/ui/dist/assets/vendor-react-qkC6yhPU.js +1 -0
- package/ui/dist/assets/vendor-utils-COeKbHgx.js +2 -0
- package/ui/dist/assets/vendor-xterm-DP_gxef0.js +16 -0
- package/ui/dist/index.html +23 -0
- package/ui/dist/ollama.png +0 -0
- package/ui/dist/vite.svg +6 -0
- package/ui/package.json +57 -0
package/progress.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Progress Tracking Utilities
|
|
3
|
+
===========================
|
|
4
|
+
|
|
5
|
+
Functions for tracking and displaying progress of the autonomous coding agent.
|
|
6
|
+
Uses direct SQLite access for database queries.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import os
|
|
11
|
+
import sqlite3
|
|
12
|
+
import urllib.request
|
|
13
|
+
from contextlib import closing
|
|
14
|
+
from datetime import datetime, timezone
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
WEBHOOK_URL = os.environ.get("PROGRESS_N8N_WEBHOOK_URL")
|
|
18
|
+
PROGRESS_CACHE_FILE = ".progress_cache"
|
|
19
|
+
|
|
20
|
+
# SQLite connection settings for parallel mode safety
|
|
21
|
+
SQLITE_TIMEOUT = 30 # seconds to wait for locks
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _get_connection(db_file: Path) -> sqlite3.Connection:
|
|
25
|
+
"""Get a SQLite connection with proper timeout settings for parallel mode."""
|
|
26
|
+
return sqlite3.connect(db_file, timeout=SQLITE_TIMEOUT)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def has_features(project_dir: Path) -> bool:
    """
    Return True when the project already has features defined.

    Used to decide whether the initializer agent still needs to run.
    The database is read directly (not through the API) because the API
    server may not be running yet when this check happens.

    True when either:
      - feature_list.json exists in the project root (legacy format), or
      - the features database exists and holds at least one row.

    False means no features exist and the initializer must run.
    """
    # The legacy JSON format wins immediately if present.
    if (project_dir / "feature_list.json").exists():
        return True

    # Otherwise consult the SQLite database.
    from autoforge_paths import get_features_db_path

    db_file = get_features_db_path(project_dir)
    if not db_file.exists():
        return False

    try:
        with closing(_get_connection(db_file)) as conn:
            (count,) = conn.execute("SELECT COUNT(*) FROM features").fetchone()
    except Exception:
        # Database exists but is unreadable or lacks a features table.
        return False
    return count > 0
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def count_passing_tests(project_dir: Path) -> tuple[int, int, int]:
    """
    Count passing, in_progress, and total tests via direct database access.

    Reads the features database directly (no API dependency). Any database
    error is swallowed and reported as zero counts so progress display never
    crashes the agent loop.

    Args:
        project_dir: Directory containing the project

    Returns:
        (passing_count, in_progress_count, total_count); (0, 0, 0) when the
        database is missing or unreadable.
    """
    from autoforge_paths import get_features_db_path
    db_file = get_features_db_path(project_dir)
    if not db_file.exists():
        return 0, 0, 0

    try:
        with closing(_get_connection(db_file)) as conn:
            cursor = conn.cursor()
            # Single aggregate query instead of 3 separate COUNT queries
            # Handle case where in_progress column doesn't exist yet (legacy DBs)
            try:
                cursor.execute("""
                    SELECT
                        COUNT(*) as total,
                        SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing,
                        SUM(CASE WHEN in_progress = 1 THEN 1 ELSE 0 END) as in_progress
                    FROM features
                """)
                row = cursor.fetchone()
                # SUM over zero rows yields NULL (None in Python); "or 0" normalizes.
                total = row[0] or 0
                passing = row[1] or 0
                in_progress = row[2] or 0
            except sqlite3.OperationalError:
                # Fallback for databases without in_progress column
                cursor.execute("""
                    SELECT
                        COUNT(*) as total,
                        SUM(CASE WHEN passes = 1 THEN 1 ELSE 0 END) as passing
                    FROM features
                """)
                row = cursor.fetchone()
                total = row[0] or 0
                passing = row[1] or 0
                # Legacy schema cannot track in-progress work; report none.
                in_progress = 0
            return passing, in_progress, total
    except Exception as e:
        # Deliberate best-effort: progress reporting must never crash callers.
        print(f"[Database error in count_passing_tests: {e}]")
        return 0, 0, 0
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def get_all_passing_features(project_dir: Path) -> list[dict]:
    """Return every passing feature, ordered by priority.

    Used to build webhook notification payloads.

    Args:
        project_dir: Directory containing the project

    Returns:
        A list of {"id", "category", "name"} dicts; empty when the database
        is missing or unreadable.
    """
    from autoforge_paths import get_features_db_path

    db_file = get_features_db_path(project_dir)
    if not db_file.exists():
        return []

    query = "SELECT id, category, name FROM features WHERE passes = 1 ORDER BY priority ASC"
    try:
        with closing(_get_connection(db_file)) as conn:
            rows = conn.cursor().execute(query).fetchall()
    except Exception:
        return []
    return [{"id": fid, "category": cat, "name": name} for fid, cat, name in rows]
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
def send_progress_webhook(passing: int, total: int, project_dir: Path) -> None:
    """Send webhook notification when progress increases.

    Compares the current passing count against a JSON cache file
    ({"count": int, "passing_ids": [int, ...]}) and, when the count grew,
    POSTs a payload (wrapped in a one-element array, as n8n expects) to
    WEBHOOK_URL. The cache is updated afterwards so the same progress is
    not reported twice. No-op when WEBHOOK_URL is unset; webhook failures
    are printed but never raised.

    Args:
        passing: Current number of passing tests.
        total: Total number of tests.
        project_dir: Directory containing the project.
    """
    if not WEBHOOK_URL:
        return  # Webhook not configured

    from autoforge_paths import get_progress_cache_path
    cache_file = get_progress_cache_path(project_dir)
    previous = 0
    previous_passing_ids = set()

    # Read previous progress and passing feature IDs
    if cache_file.exists():
        try:
            cache_data = json.loads(cache_file.read_text())
            previous = cache_data.get("count", 0)
            previous_passing_ids = set(cache_data.get("passing_ids", []))
        except Exception:
            # Corrupt cache: treat as first run so progress is re-reported.
            previous = 0

    # Only notify if progress increased
    if passing > previous:
        # Find which features are now passing via API
        completed_tests = []
        current_passing_ids = []

        # Detect transition from old cache format (had count but no passing_ids)
        # In this case, we can't reliably identify which specific tests are new
        is_old_cache_format = len(previous_passing_ids) == 0 and previous > 0

        # Get all passing features via direct database access
        all_passing = get_all_passing_features(project_dir)
        for feature in all_passing:
            feature_id = feature.get("id")
            current_passing_ids.append(feature_id)
            # Only identify individual new tests if we have previous IDs to compare
            if not is_old_cache_format and feature_id not in previous_passing_ids:
                # This feature is newly passing
                name = feature.get("name", f"Feature #{feature_id}")
                category = feature.get("category", "")
                if category:
                    completed_tests.append(f"{category} {name}")
                else:
                    completed_tests.append(name)

        payload = {
            "event": "test_progress",
            "passing": passing,
            "total": total,
            "percentage": round((passing / total) * 100, 1) if total > 0 else 0,
            "previous_passing": previous,
            "tests_completed_this_session": passing - previous,
            "completed_tests": completed_tests,
            "project": project_dir.name,
            # ISO-8601 UTC with a trailing "Z" instead of "+00:00".
            "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z"),
        }

        try:
            req = urllib.request.Request(
                WEBHOOK_URL,
                data=json.dumps([payload]).encode("utf-8"),  # n8n expects array
                headers={"Content-Type": "application/json"},
            )
            urllib.request.urlopen(req, timeout=5)
        except Exception as e:
            # Best-effort delivery; NOTE(review): the cache below is still
            # updated on failure, so a failed notification is not retried.
            print(f"[Webhook notification failed: {e}]")

        # Update cache with count and passing IDs
        cache_file.write_text(
            json.dumps({"count": passing, "passing_ids": current_passing_ids})
        )
    else:
        # Update cache even if no change (for initial state)
        if not cache_file.exists():
            all_passing = get_all_passing_features(project_dir)
            current_passing_ids = [f.get("id") for f in all_passing]
            cache_file.write_text(
                json.dumps({"count": passing, "passing_ids": current_passing_ids})
            )
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def print_session_header(session_num: int, is_initializer: bool) -> None:
    """Print a decorated banner announcing the session number and agent type."""
    bar = "=" * 70
    label = "INITIALIZER" if is_initializer else "CODING AGENT"
    print("\n" + bar)
    print(f" SESSION {session_num}: {label}")
    print(bar)
    print()
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def print_progress_summary(project_dir: Path) -> None:
    """Print the current test-progress line and fire the progress webhook.

    Shows "passing/total (pct%)" plus an optional in-progress count; when no
    features exist yet, prints a placeholder and sends nothing.
    """
    passing, in_progress, total = count_passing_tests(project_dir)

    if total == 0:
        print("\nProgress: No features in database yet")
        return

    pct = (passing / total) * 100
    parts = [f"{passing}/{total} tests passing ({pct:.1f}%)"]
    if in_progress > 0:
        parts.append(f"{in_progress} in progress")
    print(f"\nProgress: {', '.join(parts)}")
    send_progress_webhook(passing, total, project_dir)
|
package/prompts.py
ADDED
|
@@ -0,0 +1,427 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Prompt Loading Utilities
|
|
3
|
+
========================
|
|
4
|
+
|
|
5
|
+
Functions for loading prompt templates with project-specific support.
|
|
6
|
+
|
|
7
|
+
Fallback chain:
|
|
8
|
+
1. Project-specific: {project_dir}/prompts/{name}.md
|
|
9
|
+
2. Base template: .claude/templates/{name}.template.md
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import re
|
|
13
|
+
import shutil
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
|
|
16
|
+
# Base templates location (generic templates)
|
|
17
|
+
TEMPLATES_DIR = Path(__file__).parent / ".claude" / "templates"
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def get_project_prompts_dir(project_dir: Path) -> Path:
|
|
21
|
+
"""Get the prompts directory for a specific project."""
|
|
22
|
+
from autoforge_paths import get_prompts_dir
|
|
23
|
+
return get_prompts_dir(project_dir)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def load_prompt(name: str, project_dir: Path | None = None) -> str:
|
|
27
|
+
"""
|
|
28
|
+
Load a prompt template with fallback chain.
|
|
29
|
+
|
|
30
|
+
Fallback order:
|
|
31
|
+
1. Project-specific: {project_dir}/prompts/{name}.md
|
|
32
|
+
2. Base template: .claude/templates/{name}.template.md
|
|
33
|
+
|
|
34
|
+
Args:
|
|
35
|
+
name: The prompt name (without extension), e.g., "initializer_prompt"
|
|
36
|
+
project_dir: Optional project directory for project-specific prompts
|
|
37
|
+
|
|
38
|
+
Returns:
|
|
39
|
+
The prompt content as a string
|
|
40
|
+
|
|
41
|
+
Raises:
|
|
42
|
+
FileNotFoundError: If prompt not found in any location
|
|
43
|
+
"""
|
|
44
|
+
# 1. Try project-specific first
|
|
45
|
+
if project_dir:
|
|
46
|
+
project_prompts = get_project_prompts_dir(project_dir)
|
|
47
|
+
project_path = project_prompts / f"{name}.md"
|
|
48
|
+
if project_path.exists():
|
|
49
|
+
try:
|
|
50
|
+
return project_path.read_text(encoding="utf-8")
|
|
51
|
+
except (OSError, PermissionError) as e:
|
|
52
|
+
print(f"Warning: Could not read {project_path}: {e}")
|
|
53
|
+
|
|
54
|
+
# 2. Try base template
|
|
55
|
+
template_path = TEMPLATES_DIR / f"{name}.template.md"
|
|
56
|
+
if template_path.exists():
|
|
57
|
+
try:
|
|
58
|
+
return template_path.read_text(encoding="utf-8")
|
|
59
|
+
except (OSError, PermissionError) as e:
|
|
60
|
+
print(f"Warning: Could not read {template_path}: {e}")
|
|
61
|
+
|
|
62
|
+
raise FileNotFoundError(
|
|
63
|
+
f"Prompt '{name}' not found in:\n"
|
|
64
|
+
f" - Project: {project_dir / 'prompts' if project_dir else 'N/A'}\n"
|
|
65
|
+
f" - Templates: {TEMPLATES_DIR}"
|
|
66
|
+
)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def get_initializer_prompt(project_dir: Path | None = None) -> str:
    """Load the initializer prompt (project-specific if available).

    Thin wrapper over load_prompt(): the project's prompts/ directory is
    checked first, then the base template; a FileNotFoundError propagates
    when neither exists.

    Args:
        project_dir: Optional project directory for project-specific prompts.

    Returns:
        The initializer prompt text.
    """
    return load_prompt("initializer_prompt", project_dir)
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _strip_browser_testing_sections(prompt: str) -> str:
|
|
75
|
+
"""Strip browser automation and Playwright testing instructions from prompt.
|
|
76
|
+
|
|
77
|
+
Used in YOLO mode where browser testing is skipped entirely. Replaces
|
|
78
|
+
browser-related sections with a brief YOLO-mode note while preserving
|
|
79
|
+
all non-testing instructions (implementation, git, progress notes, etc.).
|
|
80
|
+
|
|
81
|
+
Args:
|
|
82
|
+
prompt: The full coding prompt text.
|
|
83
|
+
|
|
84
|
+
Returns:
|
|
85
|
+
The prompt with browser testing sections replaced by YOLO guidance.
|
|
86
|
+
"""
|
|
87
|
+
original_prompt = prompt
|
|
88
|
+
|
|
89
|
+
# Replace STEP 5 (browser automation verification) with YOLO note
|
|
90
|
+
prompt = re.sub(
|
|
91
|
+
r"### STEP 5: VERIFY WITH BROWSER AUTOMATION.*?(?=### STEP 5\.5:)",
|
|
92
|
+
"### STEP 5: VERIFY FEATURE (YOLO MODE)\n\n"
|
|
93
|
+
"**YOLO mode is active.** Skip browser automation testing. "
|
|
94
|
+
"Instead, verify your feature works by ensuring:\n"
|
|
95
|
+
"- Code compiles without errors (lint and type-check pass)\n"
|
|
96
|
+
"- Server starts without errors after your changes\n"
|
|
97
|
+
"- No obvious runtime errors in server logs\n\n",
|
|
98
|
+
prompt,
|
|
99
|
+
flags=re.DOTALL,
|
|
100
|
+
)
|
|
101
|
+
|
|
102
|
+
# Replace the screenshots-only marking rule with YOLO-appropriate wording
|
|
103
|
+
prompt = prompt.replace(
|
|
104
|
+
"**ONLY MARK A FEATURE AS PASSING AFTER VERIFICATION WITH SCREENSHOTS.**",
|
|
105
|
+
"**YOLO mode: Mark a feature as passing after lint/type-check succeeds and server starts cleanly.**",
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
# Replace the BROWSER AUTOMATION reference section
|
|
109
|
+
prompt = re.sub(
|
|
110
|
+
r"## BROWSER AUTOMATION\n\n.*?(?=---)",
|
|
111
|
+
"## VERIFICATION (YOLO MODE)\n\n"
|
|
112
|
+
"Browser automation is disabled in YOLO mode. "
|
|
113
|
+
"Verify features by running lint, type-check, and confirming the dev server starts without errors.\n\n",
|
|
114
|
+
prompt,
|
|
115
|
+
flags=re.DOTALL,
|
|
116
|
+
)
|
|
117
|
+
|
|
118
|
+
# In STEP 4, replace browser automation reference with YOLO guidance
|
|
119
|
+
prompt = prompt.replace(
|
|
120
|
+
"2. Test manually using browser automation (see Step 5)",
|
|
121
|
+
"2. Verify code compiles (lint and type-check pass)",
|
|
122
|
+
)
|
|
123
|
+
|
|
124
|
+
if prompt == original_prompt:
|
|
125
|
+
print("[YOLO] Warning: No browser testing sections found to strip. "
|
|
126
|
+
"Project-specific prompt may need manual YOLO adaptation.")
|
|
127
|
+
|
|
128
|
+
return prompt
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def get_coding_prompt(project_dir: Path | None = None, yolo_mode: bool = False) -> str:
    """Load the coding agent prompt (project-specific if available).

    Args:
        project_dir: Optional project directory for project-specific prompts.
        yolo_mode: If True, strip browser automation / Playwright testing
            instructions and substitute YOLO-mode guidance, reducing prompt
            tokens since YOLO mode skips browser testing anyway.

    Returns:
        The coding prompt, optionally stripped of testing instructions.
    """
    prompt = load_prompt("coding_prompt", project_dir)
    return _strip_browser_testing_sections(prompt) if yolo_mode else prompt
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def get_testing_prompt(
    project_dir: Path | None = None,
    testing_feature_id: int | None = None,
    testing_feature_ids: list[int] | None = None,
) -> str:
    """Load the testing agent prompt (project-specific if available).

    The template carries a {{TESTING_FEATURE_IDS}} placeholder that is
    filled according to the assignment mode:
      - batch mode (testing_feature_ids given and non-empty): comma-separated
        ID list; takes precedence over testing_feature_id
      - legacy single mode (testing_feature_id given): that one ID
      - no assignment: the literal text "(none assigned)"

    Args:
        project_dir: Optional project directory for project-specific prompts.
        testing_feature_id: Pre-assigned feature ID (legacy single mode).
        testing_feature_ids: Feature IDs to test (batch mode).

    Returns:
        The testing prompt with feature assignment populated.
    """
    template = load_prompt("testing_prompt", project_dir)

    if testing_feature_ids:
        # Batch mode wins when both assignment styles are supplied.
        filler = ", ".join(str(fid) for fid in testing_feature_ids)
    elif testing_feature_id is not None:
        filler = str(testing_feature_id)
    else:
        filler = "(none assigned)"

    return template.replace("{{TESTING_FEATURE_IDS}}", filler)
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def get_single_feature_prompt(feature_id: int, project_dir: Path | None = None, yolo_mode: bool = False) -> str:
    """Build the coding prompt for one specific pre-assigned feature.

    Parallel-mode helper: the base coding prompt already describes the full
    workflow, so only a short assignment header naming the feature is
    prepended.

    Args:
        feature_id: The specific feature ID to work on.
        project_dir: Optional project directory for project-specific prompts.
        yolo_mode: If True, strip browser testing instructions from the base
            coding prompt for reduced token usage.

    Returns:
        The coding prompt with the single-feature header prepended.
    """
    header = f"""## ASSIGNED FEATURE: #{feature_id}

Work ONLY on this feature. Other agents are handling other features.
Use `feature_claim_and_get` with ID {feature_id} to claim it and get details.
If blocked, use `feature_skip` and document the blocker.

---

"""
    return header + get_coding_prompt(project_dir, yolo_mode=yolo_mode)
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
def get_batch_feature_prompt(
    feature_ids: list[int],
    project_dir: Path | None = None,
    yolo_mode: bool = False,
) -> str:
    """Build the coding prompt for a batch of pre-assigned features.

    Parallel-mode helper: prepends a header instructing the agent to work
    through the given feature IDs sequentially, on top of the full base
    coding prompt.

    Args:
        feature_ids: Feature IDs to implement, in order.
        project_dir: Optional project directory for project-specific prompts.
        yolo_mode: If True, strip browser testing instructions from the base prompt.

    Returns:
        The coding prompt with the batch-assignment header prepended.
    """
    ids_str = ", ".join("#" + str(fid) for fid in feature_ids)
    base_prompt = get_coding_prompt(project_dir, yolo_mode=yolo_mode)

    batch_header = f"""## ASSIGNED FEATURES (BATCH): {ids_str}

You have been assigned {len(feature_ids)} features to implement sequentially.
Process them IN ORDER: {ids_str}

### Workflow for each feature:
1. Call `feature_claim_and_get` with the feature ID to get its details
2. Implement the feature fully
3. Verify it works (browser testing if applicable)
4. Call `feature_mark_passing` to mark it complete
5. Git commit the changes
6. Move to the next feature

### Important:
- Complete each feature fully before starting the next
- Mark each feature passing individually as you go
- If blocked on a feature, use `feature_skip` and move to the next one
- Other agents are handling other features - focus only on yours

---

"""
    return batch_header + base_prompt
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
def get_app_spec(project_dir: Path) -> str:
    """
    Load the app spec from the project.

    Locations checked, in order:
      1. {project_dir}/prompts/app_spec.txt   (project prompts directory)
      2. {project_dir}/app_spec.txt           (legacy project root)

    Args:
        project_dir: The project directory

    Returns:
        The app spec content

    Raises:
        FileNotFoundError: If no app_spec.txt is found, or an existing one
            cannot be read.
    """
    candidates = [
        get_project_prompts_dir(project_dir) / "app_spec.txt",
        project_dir / "app_spec.txt",  # legacy location
    ]
    for spec_path in candidates:
        if spec_path.exists():
            try:
                return spec_path.read_text(encoding="utf-8")
            except (OSError, PermissionError) as e:
                raise FileNotFoundError(f"Could not read {spec_path}: {e}") from e

    raise FileNotFoundError(f"No app_spec.txt found for project: {project_dir}")
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
def scaffold_project_prompts(project_dir: Path) -> Path:
    """
    Create the project prompts directory and copy base templates.

    This sets up a new project with template files that can be customized.
    Existing destination files are never overwritten, so user customizations
    survive repeated calls. Copy failures are reported as warnings rather
    than raised.

    Args:
        project_dir: The absolute path to the project directory

    Returns:
        The path to the project prompts directory
    """
    project_prompts = get_project_prompts_dir(project_dir)
    project_prompts.mkdir(parents=True, exist_ok=True)

    # Create .autoforge directory with .gitignore for runtime files
    from autoforge_paths import ensure_autoforge_dir
    autoforge_dir = ensure_autoforge_dir(project_dir)

    # Define template mappings: (source_template, destination_name)
    templates = [
        ("app_spec.template.txt", "app_spec.txt"),
        ("coding_prompt.template.md", "coding_prompt.md"),
        ("initializer_prompt.template.md", "initializer_prompt.md"),
        ("testing_prompt.template.md", "testing_prompt.md"),
    ]

    copied_files = []
    for template_name, dest_name in templates:
        template_path = TEMPLATES_DIR / template_name
        dest_path = project_prompts / dest_name

        # Only copy if template exists and destination doesn't
        if template_path.exists() and not dest_path.exists():
            try:
                shutil.copy(template_path, dest_path)
                copied_files.append(dest_name)
            except (OSError, PermissionError) as e:
                print(f" Warning: Could not copy {dest_name}: {e}")

    # Copy allowed_commands.yaml template to .autoforge/
    examples_dir = Path(__file__).parent / "examples"
    allowed_commands_template = examples_dir / "project_allowed_commands.yaml"
    allowed_commands_dest = autoforge_dir / "allowed_commands.yaml"
    if allowed_commands_template.exists() and not allowed_commands_dest.exists():
        try:
            shutil.copy(allowed_commands_template, allowed_commands_dest)
            copied_files.append(".autoforge/allowed_commands.yaml")
        except (OSError, PermissionError) as e:
            print(f" Warning: Could not copy allowed_commands.yaml: {e}")

    # Summarize what was scaffolded (silent when nothing was new).
    if copied_files:
        print(f" Created project files: {', '.join(copied_files)}")

    return project_prompts
|
|
358
|
+
|
|
359
|
+
|
|
360
|
+
def has_project_prompts(project_dir: Path) -> bool:
|
|
361
|
+
"""
|
|
362
|
+
Check if a project has valid prompts set up.
|
|
363
|
+
|
|
364
|
+
A project has valid prompts if:
|
|
365
|
+
1. The prompts directory exists, AND
|
|
366
|
+
2. app_spec.txt exists within it, AND
|
|
367
|
+
3. app_spec.txt contains the <project_specification> tag
|
|
368
|
+
|
|
369
|
+
Args:
|
|
370
|
+
project_dir: The project directory to check
|
|
371
|
+
|
|
372
|
+
Returns:
|
|
373
|
+
True if valid project prompts exist, False otherwise
|
|
374
|
+
"""
|
|
375
|
+
project_prompts = get_project_prompts_dir(project_dir)
|
|
376
|
+
app_spec = project_prompts / "app_spec.txt"
|
|
377
|
+
|
|
378
|
+
if not app_spec.exists():
|
|
379
|
+
# Also check legacy location in project root
|
|
380
|
+
legacy_spec = project_dir / "app_spec.txt"
|
|
381
|
+
if legacy_spec.exists():
|
|
382
|
+
try:
|
|
383
|
+
content = legacy_spec.read_text(encoding="utf-8")
|
|
384
|
+
return "<project_specification>" in content
|
|
385
|
+
except (OSError, PermissionError):
|
|
386
|
+
return False
|
|
387
|
+
return False
|
|
388
|
+
|
|
389
|
+
# Check for valid spec content
|
|
390
|
+
try:
|
|
391
|
+
content = app_spec.read_text(encoding="utf-8")
|
|
392
|
+
return "<project_specification>" in content
|
|
393
|
+
except (OSError, PermissionError):
|
|
394
|
+
return False
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
def copy_spec_to_project(project_dir: Path) -> None:
    """
    Copy prompts/app_spec.txt into the project root for the agent to read.

    Backwards-compatibility shim: the agent expects app_spec.txt in the
    project root. An existing root-level spec is never overwritten; copy
    failures and a missing source spec only print warnings.

    Args:
        project_dir: The project directory
    """
    destination = project_dir / "app_spec.txt"

    # Respect any spec already present in the project root.
    if destination.exists():
        return

    source = get_project_prompts_dir(project_dir) / "app_spec.txt"
    if not source.exists():
        print("Warning: No app_spec.txt found to copy to project directory")
        return

    try:
        shutil.copy(source, destination)
        print("Copied app_spec.txt to project directory")
    except (OSError, PermissionError) as e:
        print(f"Warning: Could not copy app_spec.txt: {e}")
|
package/pyproject.toml
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
[tool.ruff]
|
|
2
|
+
line-length = 120
|
|
3
|
+
target-version = "py311"
|
|
4
|
+
|
|
5
|
+
[tool.ruff.lint]
|
|
6
|
+
select = ["E", "F", "I", "W"]
|
|
7
|
+
ignore = [
|
|
8
|
+
"E501", # Line length handled separately
|
|
9
|
+
"E402", # Allow imports after load_dotenv()
|
|
10
|
+
"E712", # SQLAlchemy requires == True/False syntax
|
|
11
|
+
]
|
|
12
|
+
|
|
13
|
+
[tool.mypy]
|
|
14
|
+
python_version = "3.11"
|
|
15
|
+
ignore_missing_imports = true
|
|
16
|
+
warn_return_any = true
|
|
17
|
+
warn_unused_ignores = true
|