ralphx 0.3.4__py3-none-any.whl → 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ralphx/__init__.py +1 -1
- ralphx/adapters/base.py +10 -2
- ralphx/adapters/claude_cli.py +222 -82
- ralphx/api/routes/auth.py +780 -98
- ralphx/api/routes/config.py +3 -56
- ralphx/api/routes/export_import.py +6 -9
- ralphx/api/routes/loops.py +4 -4
- ralphx/api/routes/planning.py +882 -19
- ralphx/api/routes/resources.py +528 -6
- ralphx/api/routes/stream.py +58 -56
- ralphx/api/routes/templates.py +2 -2
- ralphx/api/routes/workflows.py +258 -47
- ralphx/cli.py +4 -1
- ralphx/core/auth.py +372 -172
- ralphx/core/database.py +588 -164
- ralphx/core/executor.py +170 -19
- ralphx/core/loop.py +15 -2
- ralphx/core/loop_templates.py +29 -3
- ralphx/core/planning_iteration_executor.py +633 -0
- ralphx/core/planning_service.py +119 -24
- ralphx/core/preview.py +9 -25
- ralphx/core/project_db.py +864 -121
- ralphx/core/project_export.py +1 -5
- ralphx/core/project_import.py +14 -29
- ralphx/core/resources.py +28 -2
- ralphx/core/sample_project.py +1 -5
- ralphx/core/templates.py +9 -9
- ralphx/core/workflow_executor.py +32 -3
- ralphx/core/workflow_export.py +4 -7
- ralphx/core/workflow_import.py +3 -27
- ralphx/mcp/__init__.py +6 -2
- ralphx/mcp/registry.py +3 -3
- ralphx/mcp/tools/diagnostics.py +1 -1
- ralphx/mcp/tools/monitoring.py +10 -16
- ralphx/mcp/tools/workflows.py +115 -33
- ralphx/mcp_server.py +6 -2
- ralphx/static/assets/index-BuLI7ffn.css +1 -0
- ralphx/static/assets/index-DWvlqOTb.js +264 -0
- ralphx/static/assets/index-DWvlqOTb.js.map +1 -0
- ralphx/static/index.html +2 -2
- ralphx/templates/loop_templates/consumer.md +2 -2
- {ralphx-0.3.4.dist-info → ralphx-0.4.0.dist-info}/METADATA +33 -12
- {ralphx-0.3.4.dist-info → ralphx-0.4.0.dist-info}/RECORD +45 -44
- ralphx/static/assets/index-CcRDyY3b.css +0 -1
- ralphx/static/assets/index-CcxfTosc.js +0 -251
- ralphx/static/assets/index-CcxfTosc.js.map +0 -1
- {ralphx-0.3.4.dist-info → ralphx-0.4.0.dist-info}/WHEEL +0 -0
- {ralphx-0.3.4.dist-info → ralphx-0.4.0.dist-info}/entry_points.txt +0 -0
ralphx/api/routes/stream.py
CHANGED
```diff
@@ -92,7 +92,7 @@ async def event_generator(
                 yield sse_event
             return
 
-    # If watching a loop, get latest session
+    # If watching a loop, get latest session and follow new sessions
     if loop_name:
         # Check for active run
         runs = project_db.list_runs(
@@ -113,26 +113,47 @@ async def event_generator(
             "mode": run.get("current_mode"),
         })
 
-        # If running,
+        # If running, tail latest session then follow new sessions
         if run_status in [RunStatus.RUNNING.value, RunStatus.PAUSED.value]:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            run_id = run.get("id")
+            last_session_id = None
+
+            # Keep following new sessions until the run is no longer active.
+            # Without this loop, the SSE stream would stop streaming after
+            # the first iteration's session completes, leaving the client
+            # receiving only heartbeats for subsequent iterations.
+            while True:
+                session = session_manager.get_latest_session(
+                    run_id=run_id,
+                )
+
+                if session and session.session_id != last_session_id:
+                    last_session_id = session.session_id
+                    async for sse_event in _tail_session(
+                        session_manager=session_manager,
+                        session_id=session.session_id,
+                        project_path=project_path,
+                        project_db=project_db,
+                        from_beginning=from_beginning,
+                        run_id=run_id,
+                        iteration=session.iteration,
+                    ):
+                        yield sse_event
+                    # After first session, always start from beginning for new sessions
+                    from_beginning = True
+                else:
+                    # No new session yet - check if run is still active
+                    current_run = project_db.get_run(run_id)
+                    if not current_run or current_run.get("status") not in [
+                        RunStatus.RUNNING.value,
+                        RunStatus.PAUSED.value,
+                    ]:
+                        break
+                    # Wait briefly before checking for a new session
+                    await asyncio.sleep(2)
+                    yield await format_sse("heartbeat", {
+                        "timestamp": asyncio.get_event_loop().time(),
+                    })
        else:
            yield await format_sse("info", {
                "message": f"Loop not running (status: {run_status})"
```
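The follow-new-sessions loop changes what an SSE client observes: events keep flowing across iterations instead of degrading to heartbeats after the first session completes. A minimal consumer sketch, assuming a local server and a made-up route path (the actual route prefix is not shown in this diff):

```python
import httpx

# Hypothetical endpoint URL - the real route prefix may differ.
URL = "http://localhost:8000/api/projects/my-project/loops/my-loop/stream"

def follow_loop_events(url: str = URL) -> None:
    """Read the SSE stream line by line, printing each event as it arrives."""
    with httpx.stream("GET", url, timeout=None) as response:
        event_name = None
        for line in response.iter_lines():
            if line.startswith("event:"):
                event_name = line.split(":", 1)[1].strip()
            elif line.startswith("data:"):
                data = line.split(":", 1)[1].strip()
                # With the change above, "text"/"tool_call" events keep
                # arriving for every iteration; previously only heartbeats
                # followed the first completed session.
                print(f"{event_name}: {data}")

if __name__ == "__main__":
    follow_loop_events()
```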
```diff
@@ -259,25 +280,14 @@ async def _tail_session(
         if event.type == SessionEventType.UNKNOWN:
             continue
 
-        #
+        # Stream events to client (persistence handled by executor)
         if event.type == SessionEventType.TEXT:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="text",
-                content=event.text,
-            )
             yield await format_sse("text", {
                 "content": event.text,
                 **event_meta,
             })
 
         elif event.type == SessionEventType.TOOL_CALL:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="tool_call",
-                tool_name=event.tool_name,
-                tool_input=event.tool_input,
-            )
             yield await format_sse("tool_call", {
                 "name": event.tool_name,
                 "input": event.tool_input,
@@ -285,12 +295,6 @@ async def _tail_session(
             })
 
         elif event.type == SessionEventType.TOOL_RESULT:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="tool_result",
-                tool_name=event.tool_name,
-                tool_result=event.tool_result[:1000] if event.tool_result else None,
-            )
             yield await format_sse("tool_result", {
                 "name": event.tool_name,
                 "result": event.tool_result[:1000] if event.tool_result else None,
@@ -298,30 +302,16 @@ async def _tail_session(
             })
 
         elif event.type == SessionEventType.ERROR:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="error",
-                error_message=event.error_message,
-            )
             yield await format_sse("error", {
                 "message": event.error_message,
                 **event_meta,
             })
 
         elif event.type == SessionEventType.COMPLETE:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="complete",
-            )
             yield await format_sse("complete", event_meta)
             break
 
         elif event.type == SessionEventType.INIT:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="init",
-                raw_data=event.raw_data,
-            )
             yield await format_sse("init", {
                 "data": event.raw_data,
                 **event_meta,
@@ -538,11 +528,18 @@ async def get_grouped_events(
     slug: str,
     loop_name: str,
     limit_runs: int = Query(5, ge=1, le=50, description="Max runs to return"),
+    limit_sessions: int = Query(20, ge=1, le=100, description="Max sessions per run"),
+    limit_events: int = Query(200, ge=1, le=1000, description="Max events per session"),
 ):
     """Get events grouped by run and iteration.
 
     Returns events organized in a tree structure:
     - runs: { run_id: { status, iterations: { iteration: { events, session_id, is_live } } } }
+
+    Limits are applied at each level to prevent unbounded data loads:
+    - limit_runs: max runs to return (default 5)
+    - limit_sessions: max sessions per run (default 20)
+    - limit_events: max events per session (default 200)
     """
     manager, project, project_db = get_project(slug)
     session_manager = SessionManager(project_db)
@@ -559,15 +556,17 @@ async def get_grouped_events(
         run_id = run.get("id")
         run_status = run.get("status", "unknown")
 
-        # Get
-        sessions = session_manager.list_sessions(run_id=run_id, limit=
+        # Get sessions for this run (bounded)
+        sessions = session_manager.list_sessions(run_id=run_id, limit=limit_sessions)
 
         iterations = {}
         for session in sessions:
             iter_num = session.iteration
 
-            # Get events for this session
-            events = project_db.get_session_events(
+            # Get events for this session (bounded to prevent memory issues)
+            events = project_db.get_session_events(
+                session.session_id, limit=limit_events
+            )
 
             # Determine if this is the live session
             is_live = (
@@ -581,6 +580,7 @@
                 "status": session.status,
                 "is_live": is_live,
                 "events": events,
+                "events_truncated": len(events) >= limit_events,
             }
 
         result["runs"][run_id] = {
@@ -589,6 +589,8 @@
             "started_at": run.get("started_at"),
             "completed_at": run.get("completed_at"),
             "iterations_completed": run.get("iterations_completed", 0),
+            "items_generated": run.get("items_generated", 0),
+            "error_message": run.get("error_message"),
             "iterations": iterations,
         }
 
```
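A client of the bounded grouped-events endpoint can now pass all three limits explicitly and use the new `events_truncated` flag to decide whether to fetch more. A sketch under the same assumptions as above (base URL and route path are guesses, not taken from the diff):

```python
import httpx

# Hypothetical base URL and route shape - the real path may differ.
BASE = "http://localhost:8000/api/projects/my-project/loops/my-loop"

def fetch_grouped_events() -> None:
    """Fetch the grouped-events tree with explicit bounds at every level."""
    resp = httpx.get(
        f"{BASE}/events/grouped",
        params={"limit_runs": 5, "limit_sessions": 20, "limit_events": 200},
    )
    resp.raise_for_status()
    for run_id, run in resp.json()["runs"].items():
        # items_generated and error_message are new in 0.4.0
        print(run_id, run["status"], "items:", run.get("items_generated", 0))
        for iter_num, it in run["iterations"].items():
            marker = " (truncated)" if it.get("events_truncated") else ""
            print(f"  iteration {iter_num}: {len(it['events'])} events{marker}")
```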
ralphx/api/routes/templates.py
CHANGED
```diff
@@ -4,7 +4,7 @@ Templates are global, read-only, and shipped with RalphX.
 No authentication required - templates are public.
 
 Includes:
-- Loop templates (
+- Loop templates (extractgen_requirements, implementation, etc.)
 - Loop builder templates (planning, implementation with Phase 1)
 - Permission templates (planning, implementation, read_only, etc.)
 """
@@ -87,7 +87,7 @@ async def get_template_by_name(name: str) -> TemplateDetail:
     No authentication required - templates are public.
 
     Args:
-        name: Template name (e.g., '
+        name: Template name (e.g., 'extractgen_requirements', 'implementation')
 
     Returns:
         Full template with config and YAML representation
```
ralphx/api/routes/workflows.py
CHANGED
```diff
@@ -11,6 +11,59 @@ from ralphx.core.project_db import ProjectDatabase
 
 router = APIRouter()
 
+# Processing type configurations - maps processing_type to step_type and config
+# Matches frontend StepSettings.tsx and mcp/tools/workflows.py
+PROCESSING_TYPES = {
+    "design_doc": {
+        "step_type": "interactive",
+        "config": {
+            "loopType": "design_doc",
+            "allowedTools": ["WebSearch", "WebFetch", "Bash", "Read", "Glob", "Grep", "Edit", "Write"],
+            "model": "opus",
+            "timeout": 300,
+        },
+    },
+    "extractgen_requirements": {
+        "step_type": "autonomous",
+        "config": {
+            "loopType": "generator",
+            "template": "extractgen_requirements",
+            "allowedTools": ["WebSearch", "WebFetch"],
+            "model": "opus",
+            "timeout": 600,
+            "max_iterations": 100,
+            "cooldown_between_iterations": 5,
+            "max_consecutive_errors": 5,
+        },
+    },
+    "webgen_requirements": {
+        "step_type": "autonomous",
+        "config": {
+            "loopType": "generator",
+            "template": "webgen_requirements",
+            "allowedTools": ["WebSearch", "WebFetch"],
+            "model": "opus",
+            "timeout": 900,
+            "max_iterations": 15,
+            "cooldown_between_iterations": 15,
+            "max_consecutive_errors": 3,
+        },
+    },
+    "implementation": {
+        "step_type": "autonomous",
+        "config": {
+            "loopType": "consumer",
+            "template": "implementation",
+            "allowedTools": ["Read", "Write", "Edit", "Bash", "Glob", "Grep"],
+            "model": "opus",
+            "timeout": 1800,
+            "max_iterations": 50,
+            "cooldown_between_iterations": 5,
+            "max_consecutive_errors": 3,
+        },
+    },
+}
+
 
 # ============================================================================
 # Request/Response Models
```
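The resolution order matters here: a `processing_type` preset wins, the legacy explicit `type` field is the fallback, and an explicit `loopType` in the template overrides the preset's default. A self-contained sketch of that order, mirroring the `create_workflow` hunk further down (this helper itself does not exist in the package):

```python
from typing import Any

def resolve_step(
    step_def: dict[str, Any], processing_types: dict[str, Any]
) -> tuple[str, dict[str, Any]]:
    """Sketch of the step resolution order used in create_workflow."""
    ptype = step_def.get("processing_type")
    if ptype and ptype in processing_types:
        step_type = processing_types[ptype]["step_type"]
        config = {**processing_types[ptype]["config"]}  # copy, don't mutate the preset
    else:
        # Legacy fallback: explicit "type" field, defaulting to autonomous
        step_type = step_def.get("type", "autonomous")
        config = {}
    # An explicit loopType in the template overrides the preset default
    if step_def.get("loopType"):
        config["loopType"] = step_def["loopType"]
    return step_type, config

# e.g. a template step with processing_type="implementation" resolves to
# ("autonomous", {"loopType": "consumer", "timeout": 1800, ...})
```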
```diff
@@ -62,7 +115,6 @@ class WorkflowResponse(BaseModel):
     id: str
     template_id: Optional[str] = None
     name: str
-    namespace: str
     status: str
     current_step: int
     created_at: str
@@ -138,6 +190,8 @@ class CreateStepRequest(BaseModel):
     max_consecutive_errors: Optional[int] = Field(None, ge=1, le=100)
     # Custom prompt (autonomous steps only)
     custom_prompt: Optional[str] = Field(None, max_length=50000)
+    # Cross-step context: step IDs whose items should be visible as existing context
+    context_from_steps: Optional[list[int]] = None
 
 
 class UpdateStepRequest(BaseModel):
@@ -162,6 +216,11 @@ class UpdateStepRequest(BaseModel):
     max_consecutive_errors: Optional[int] = Field(None, ge=1, le=100)
     # Custom prompt (autonomous steps only)
     custom_prompt: Optional[str] = Field(None, max_length=50000)
+    # Cross-step context: step IDs whose items should be visible as existing context
+    context_from_steps: Optional[list[int]] = None
+    # Design doc file path (for design_doc interactive steps)
+    # Path is relative to .ralphx/resources/design_doc/ (e.g., "PROJECT_DESIGN.md")
+    design_doc_path: Optional[str] = Field(None, max_length=255)
 
 
 # Valid tools for autonomous steps
@@ -222,23 +281,6 @@ def _get_project_db(slug: str) -> ProjectDatabase:
     return ProjectDatabase(project["path"])
 
 
-def _generate_namespace(name: str) -> str:
-    """Generate a valid namespace from a workflow name."""
-    import re
-
-    # Convert to lowercase, replace spaces with dashes
-    ns = name.lower().replace(" ", "-")
-    # Remove invalid characters
-    ns = re.sub(r"[^a-z0-9_-]", "", ns)
-    # Ensure it starts with a letter
-    if not ns or not ns[0].isalpha():
-        ns = "w" + ns
-    # Truncate to 64 chars and add unique suffix
-    ns = ns[:56]
-    suffix = uuid.uuid4().hex[:7]
-    return f"{ns}-{suffix}"
-
-
 def _workflow_to_response(
     workflow: dict, steps: list[dict], pdb: Optional[ProjectDatabase] = None
 ) -> WorkflowResponse:
@@ -292,9 +334,9 @@ def _workflow_to_response(
         total_iterations = sum(
             r.get("iterations_completed", 0) or 0 for r in step_runs
         )
-
-
-        )
+        # Count items from work_items table (includes imports + generated)
+        step_item_counts = item_counts_by_step_id.get(step_id, {})
+        total_items = sum(step_item_counts.values())
 
         # Check for active run
         running_run = next(
@@ -333,13 +375,15 @@ def _workflow_to_response(
         total = sum(item_stats.values())
         # Status mapping for display:
         # - "completed" in DB = ready for consumer (display as "pending")
+        # - "claimed" in DB = actively being processed (display as "in_progress")
         # - "processed" in DB = already done (display as "completed")
         # - "pending" in DB = not yet ready (shouldn't happen much)
         input_items = {
             "total": total,
             # "completed" in DB means ready-to-process, so count as pending for display
             "pending": item_stats.get("pending", 0) + item_stats.get("completed", 0),
-
+            # "claimed" in DB means actively being processed by a consumer loop
+            "in_progress": item_stats.get("in_progress", 0) + item_stats.get("claimed", 0),
             # "processed" in DB means actually done
             "completed": item_stats.get("processed", 0),
             "skipped": item_stats.get("skipped", 0),
@@ -348,6 +392,11 @@ def _workflow_to_response(
             "rejected": item_stats.get("rejected", 0),
         }
 
+        # Derive loop_name fallback from runs if step record is missing it
+        derived_loop_name = step.get("loop_name") or next(
+            (r.get("loop_name") for r in step_runs), None
+        )
+
         step_progress[step_id] = {
             "has_active_run": running_run is not None,
             "iterations_completed": total_iterations,
@@ -360,13 +409,13 @@ def _workflow_to_response(
             "items_generated": total_items,
             "has_guardrails": step_has_guardrails or guardrails_count > 0,
             "input_items": input_items,
+            "loop_name": derived_loop_name,
         }
 
     return WorkflowResponse(
         id=workflow["id"],
         template_id=workflow.get("template_id"),
         name=workflow["name"],
-        namespace=workflow["namespace"],
         status=workflow["status"],
         current_step=workflow["current_step"],
         created_at=workflow["created_at"],
@@ -381,7 +430,7 @@ def _workflow_to_response(
                 step_type=s["step_type"],
                 status=s["status"],
                 config=s.get("config"),
-                loop_name=s.get("loop_name"),
+                loop_name=step_progress.get(s["id"], {}).get("loop_name") or s.get("loop_name"),
                 artifacts=s.get("artifacts"),
                 started_at=s.get("started_at"),
                 completed_at=s.get("completed_at"),
@@ -511,9 +560,8 @@ async def create_workflow(slug: str, request: CreateWorkflowRequest):
     """
     pdb = _get_project_db(slug)
 
-    # Generate unique ID
+    # Generate unique ID
     workflow_id = f"wf-{uuid.uuid4().hex[:12]}"
-    namespace = _generate_namespace(request.name)
 
     # Get template steps if template specified (templates still use "phases" internally)
     template_steps = []
@@ -531,7 +579,6 @@ async def create_workflow(slug: str, request: CreateWorkflowRequest):
     workflow = pdb.create_workflow(
         id=workflow_id,
         name=request.name,
-        namespace=namespace,
         template_id=request.template_id,
         status="draft",
     )
@@ -539,19 +586,36 @@ async def create_workflow(slug: str, request: CreateWorkflowRequest):
     # Create steps from template
     created_steps = []
     for step_def in template_steps:
+        # Get step_type and config from processing_type if available
+        processing_type = step_def.get("processing_type")
+        if processing_type and processing_type in PROCESSING_TYPES:
+            type_config = PROCESSING_TYPES[processing_type]
+            step_type = type_config["step_type"]
+            # Merge template config with processing_type defaults
+            config = {**type_config["config"]}
+        else:
+            # Fallback to explicit type field (for backwards compatibility)
+            step_type = step_def.get("type", "autonomous")
+            config = {}
+
+        # Add template-specific config
+        config.update({
+            "description": step_def.get("description"),
+            "inputs": step_def.get("inputs", []),
+            "outputs": step_def.get("outputs", []),
+            "skippable": step_def.get("skippable", False),
+            "skipCondition": step_def.get("skipCondition"),
+        })
+        # Override loopType if explicitly specified in template
+        if step_def.get("loopType"):
+            config["loopType"] = step_def["loopType"]
+
         step = pdb.create_workflow_step(
             workflow_id=workflow_id,
             step_number=step_def["number"],
             name=step_def["name"],
-            step_type=
-            config=
-                "description": step_def.get("description"),
-                "loopType": step_def.get("loopType"),
-                "inputs": step_def.get("inputs", []),
-                "outputs": step_def.get("outputs", []),
-                "skippable": step_def.get("skippable", False),
-                "skipCondition": step_def.get("skipCondition"),
-            },
+            step_type=step_type,
+            config=config,
             status="pending",
         )
         created_steps.append(step)
```
```diff
@@ -747,6 +811,70 @@ async def advance_workflow_step(
     return _workflow_to_response(workflow, steps, pdb)
 
 
+@router.post("/workflows/{workflow_id}/steps/{step_id}/reopen", response_model=WorkflowResponse)
+async def reopen_workflow_step(slug: str, workflow_id: str, step_id: int):
+    """Reopen a completed or skipped step.
+
+    Sets the step back to active, resets all later steps to pending,
+    and moves the workflow's current_step back.
+    """
+    pdb = _get_project_db(slug)
+    workflow = pdb.get_workflow(workflow_id)
+    if not workflow:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"Workflow '{workflow_id}' not found",
+        )
+
+    if workflow["status"] == "completed":
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail="Cannot reopen steps of a completed workflow.",
+        )
+
+    step = pdb.get_workflow_step(step_id)
+    if not step or step["workflow_id"] != workflow_id:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"Step '{step_id}' not found in workflow '{workflow_id}'",
+        )
+
+    if step["status"] not in ("completed", "skipped"):
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=f"Cannot reopen step in status '{step['status']}'. Step must be 'completed' or 'skipped'.",
+        )
+
+    # Abort any active/paused runs on the reopened step and all later steps
+    from datetime import datetime
+
+    all_steps = pdb.list_workflow_steps(workflow_id)
+    affected_step_ids = {
+        s["id"]
+        for s in all_steps
+        if s["step_number"] >= step["step_number"]
+    }
+    runs = pdb.list_runs(workflow_id=workflow_id, status=["running", "paused"])
+    for run in runs:
+        if run.get("step_id") in affected_step_ids:
+            pdb.update_run(
+                run["id"],
+                status="aborted",
+                completed_at=datetime.utcnow().isoformat(),
+                error_message="Step reopened by user",
+            )
+
+    pdb.reopen_workflow_step_atomic(
+        workflow_id=workflow_id,
+        step_id=step["id"],
+        step_number=step["step_number"],
+    )
+
+    workflow = pdb.get_workflow(workflow_id)
+    steps = pdb.list_workflow_steps(workflow_id)
+    return _workflow_to_response(workflow, steps, pdb)
+
+
 @router.post("/workflows/{workflow_id}/start", response_model=WorkflowResponse)
 async def start_workflow(slug: str, workflow_id: str):
     """Start a workflow by activating the first step."""
```
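Calling the new reopen endpoint from a client is a single POST; 400 covers both an invalid step state and a completed workflow. A sketch, assuming a local server and a guessed project-scoped prefix:

```python
import httpx

# Hypothetical base URL - the project-scoped route prefix is an assumption.
BASE = "http://localhost:8000/api/projects/my-project"

def reopen_step(workflow_id: str, step_id: int) -> dict:
    """Reopen a completed/skipped step; later steps reset to pending and
    any active runs on them are aborted server-side."""
    resp = httpx.post(f"{BASE}/workflows/{workflow_id}/steps/{step_id}/reopen")
    if resp.status_code == 400:
        # e.g. step not in completed/skipped state, or workflow completed
        raise RuntimeError(resp.json()["detail"])
    resp.raise_for_status()
    return resp.json()
```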
```diff
@@ -792,17 +920,24 @@ async def start_workflow(slug: str, workflow_id: str):
     from ralphx.core.project import Project
     from ralphx.core.workflow_executor import WorkflowExecutor
 
-
-
-
-
-
-
-
-
-        )
-
-
+    # Guard against double-execution
+    existing_runs = pdb.list_runs(
+        workflow_id=workflow_id,
+        step_id=first_pending["id"],
+        status=["running", "paused"],
+    )
+    if not existing_runs:
+        db = Database()
+        project = db.get_project(slug)
+        if project:
+            project_obj = Project.from_dict(project)
+            executor = WorkflowExecutor(
+                project=project_obj,
+                db=pdb,
+                workflow_id=workflow_id,
+            )
+            # Start autonomous step in background
+            asyncio.create_task(executor._start_autonomous_step(first_pending))
 
     # Return updated workflow
     workflow = pdb.get_workflow(workflow_id)
@@ -868,6 +1003,18 @@ async def run_workflow_step(slug: str, workflow_id: str):
             detail=f"Step must be active to run. Current status: {current_step['status']}",
         )
 
+    # Guard against double-execution: check if there's already a running run for this step
+    existing_runs = pdb.list_runs(
+        workflow_id=workflow_id,
+        step_id=current_step["id"],
+        status=["running", "paused"],
+    )
+    if existing_runs:
+        raise HTTPException(
+            status_code=status.HTTP_409_CONFLICT,
+            detail=f"Step already has a running executor (run {existing_runs[0]['id']}). Stop it first.",
+        )
+
     # Create and use WorkflowExecutor to start the autonomous step
     project_obj = Project.from_dict(project)
     executor = WorkflowExecutor(
@@ -947,6 +1094,21 @@ async def run_specific_step(slug: str, workflow_id: str, step_number: int):
 
     # If autonomous step, trigger the loop execution
     if target_step["step_type"] == "autonomous":
+        # Guard against double-start: check if there's already a running run
+        # for this step's loop. Without this check, rapid double-clicks could
+        # spawn two concurrent loop executions for the same step.
+        step_loop_name = target_step.get("loop_name")
+        if step_loop_name:
+            existing_runs = pdb.list_runs(
+                loop_name=step_loop_name,
+                status=["running"],
+            )
+            if existing_runs:
+                raise HTTPException(
+                    status_code=status.HTTP_409_CONFLICT,
+                    detail=f"Step already has an active run (run_id: {existing_runs[0]['id']})",
+                )
+
         project_obj = Project.from_dict(project)
         executor = WorkflowExecutor(
             project=project_obj,
```
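All three guards give the same client-visible contract: a second start while a run is active either becomes a no-op or fails fast with 409 Conflict, instead of spawning a duplicate executor. A sketch of that behavior (the URL is illustrative only, and the exact route shape is an assumption):

```python
import httpx

# Hypothetical run-step URL; illustrates the new 409 contract, not the exact route.
RUN_URL = "http://localhost:8000/api/projects/my-project/workflows/wf-abc123/run"

def start_step_once() -> None:
    """A rapid second start now fails with 409 Conflict rather than
    launching a second concurrent executor for the same step."""
    first = httpx.post(RUN_URL)
    first.raise_for_status()
    second = httpx.post(RUN_URL)
    assert second.status_code == 409, second.status_code
    print(second.json()["detail"])  # "Step already has a running executor ..."
```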
```diff
@@ -1098,6 +1260,32 @@ async def create_step(slug: str, workflow_id: str, request: CreateStepRequest):
     if request.max_consecutive_errors is not None:
         config["max_consecutive_errors"] = request.max_consecutive_errors
 
+    # Cross-step context links
+    if request.context_from_steps is not None:
+        # Validate step IDs belong to the same workflow
+        if request.context_from_steps:
+            existing_step_ids = {s["id"] for s in pdb.list_workflow_steps(workflow_id)}
+            invalid_ids = [sid for sid in request.context_from_steps if sid not in existing_step_ids]
+            if invalid_ids:
+                raise HTTPException(
+                    status_code=status.HTTP_400_BAD_REQUEST,
+                    detail=f"context_from_steps contains invalid step IDs: {invalid_ids}",
+                )
+        config["context_from_steps"] = request.context_from_steps
+
+    # Auto-link: webgen steps should see extractgen items as context
+    if (
+        stripped_template == "webgen_requirements"
+        and not request.context_from_steps
+    ):
+        # Find preceding extractgen step in the same workflow
+        all_steps = pdb.list_workflow_steps(workflow_id)
+        for s in all_steps:
+            s_config = s.get("config") or {}
+            if s_config.get("template") == "extractgen_requirements":
+                config["context_from_steps"] = [s["id"]]
+                break
+
     # Create step atomically (step_number calculated inside transaction)
     step = pdb.create_workflow_step_atomic(
         workflow_id=workflow_id,
@@ -1187,6 +1375,18 @@ async def update_step(slug: str, workflow_id: str, step_id: int, request: UpdateStepRequest):
         config_updates["template"] = stripped_template if stripped_template else None
     if request.skippable is not None:
         config_updates["skippable"] = request.skippable
+    if request.design_doc_path is not None:
+        # Empty string means unlink, otherwise store the path
+        stripped_path = request.design_doc_path.strip()
+        if stripped_path:
+            # Security: validate design_doc_path is a safe filename (no traversal)
+            if (".." in stripped_path or "/" in stripped_path or "\\" in stripped_path
+                    or "\0" in stripped_path or stripped_path.startswith(".")):
+                raise HTTPException(
+                    status_code=status.HTTP_400_BAD_REQUEST,
+                    detail="Invalid design_doc_path: must be a simple filename (e.g., 'PROJECT_DESIGN.md')",
+                )
+        config_updates["design_doc_path"] = stripped_path if stripped_path else None
 
     # Include autonomous settings only for autonomous steps
     if effective_step_type == "autonomous":
```
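The design_doc_path checks amount to a "simple filename only" rule: no path separators, no parent references, no NUL bytes, no dotfiles, so the value can only name a file directly inside .ralphx/resources/design_doc/. Extracted as a standalone predicate for illustration (this helper does not exist in the package; empty strings are handled upstream as unlink):

```python
def is_safe_doc_filename(name: str) -> bool:
    """Mirror of the design_doc_path validation above (sketch only)."""
    return not (
        ".." in name          # parent-directory traversal
        or "/" in name        # POSIX path separator
        or "\\" in name       # Windows path separator
        or "\0" in name       # NUL byte
        or name.startswith(".")  # hidden/dotfile
    )

assert is_safe_doc_filename("PROJECT_DESIGN.md")
assert not is_safe_doc_filename("../secrets.md")
assert not is_safe_doc_filename("docs/PROJECT_DESIGN.md")
assert not is_safe_doc_filename(".hidden.md")
```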
```diff
@@ -1203,6 +1403,17 @@
             config_updates["cooldown_between_iterations"] = request.cooldown_between_iterations
         if request.max_consecutive_errors is not None:
             config_updates["max_consecutive_errors"] = request.max_consecutive_errors
+        # Cross-step context links
+        if request.context_from_steps is not None:
+            if request.context_from_steps:
+                existing_step_ids = {s["id"] for s in pdb.list_workflow_steps(workflow_id)}
+                invalid_ids = [sid for sid in request.context_from_steps if sid not in existing_step_ids]
+                if invalid_ids:
+                    raise HTTPException(
+                        status_code=status.HTTP_400_BAD_REQUEST,
+                        detail=f"context_from_steps contains invalid step IDs: {invalid_ids}",
+                    )
+            config_updates["context_from_steps"] = request.context_from_steps
     # Custom prompt
     if request.custom_prompt is not None:
         # Empty string means clear custom prompt
```
|