ralphx 0.3.5-py3-none-any.whl → 0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ralphx/__init__.py +1 -1
- ralphx/adapters/base.py +10 -2
- ralphx/adapters/claude_cli.py +222 -82
- ralphx/api/routes/auth.py +105 -32
- ralphx/api/routes/planning.py +865 -16
- ralphx/api/routes/resources.py +528 -6
- ralphx/api/routes/stream.py +58 -56
- ralphx/api/routes/workflows.py +257 -25
- ralphx/core/auth.py +32 -7
- ralphx/core/executor.py +170 -16
- ralphx/core/loop_templates.py +26 -0
- ralphx/core/planning_iteration_executor.py +633 -0
- ralphx/core/planning_service.py +10 -3
- ralphx/core/project_db.py +770 -79
- ralphx/core/resources.py +28 -2
- ralphx/core/workflow_executor.py +32 -3
- ralphx/mcp/tools/diagnostics.py +1 -1
- ralphx/mcp/tools/monitoring.py +10 -16
- ralphx/mcp/tools/workflows.py +3 -3
- ralphx/static/assets/index-BuLI7ffn.css +1 -0
- ralphx/static/assets/index-DWvlqOTb.js +264 -0
- ralphx/static/assets/index-DWvlqOTb.js.map +1 -0
- ralphx/static/index.html +2 -2
- ralphx/templates/loop_templates/consumer.md +2 -2
- {ralphx-0.3.5.dist-info → ralphx-0.4.0.dist-info}/METADATA +1 -1
- {ralphx-0.3.5.dist-info → ralphx-0.4.0.dist-info}/RECORD +28 -27
- ralphx/static/assets/index-0ovNnfOq.css +0 -1
- ralphx/static/assets/index-CY9s08ZB.js +0 -251
- ralphx/static/assets/index-CY9s08ZB.js.map +0 -1
- {ralphx-0.3.5.dist-info → ralphx-0.4.0.dist-info}/WHEEL +0 -0
- {ralphx-0.3.5.dist-info → ralphx-0.4.0.dist-info}/entry_points.txt +0 -0
ralphx/api/routes/stream.py
CHANGED
@@ -92,7 +92,7 @@ async def event_generator(
             yield sse_event
         return

-    # If watching a loop, get latest session
+    # If watching a loop, get latest session and follow new sessions
     if loop_name:
         # Check for active run
        runs = project_db.list_runs(
@@ -113,26 +113,47 @@ async def event_generator(
             "mode": run.get("current_mode"),
         })

-        # If running,
+        # If running, tail latest session then follow new sessions
         if run_status in [RunStatus.RUNNING.value, RunStatus.PAUSED.value]:
-            … (old lines 118-135 not shown in this view)
+            run_id = run.get("id")
+            last_session_id = None
+
+            # Keep following new sessions until the run is no longer active.
+            # Without this loop, the SSE stream would stop streaming after
+            # the first iteration's session completes, leaving the client
+            # receiving only heartbeats for subsequent iterations.
+            while True:
+                session = session_manager.get_latest_session(
+                    run_id=run_id,
+                )
+
+                if session and session.session_id != last_session_id:
+                    last_session_id = session.session_id
+                    async for sse_event in _tail_session(
+                        session_manager=session_manager,
+                        session_id=session.session_id,
+                        project_path=project_path,
+                        project_db=project_db,
+                        from_beginning=from_beginning,
+                        run_id=run_id,
+                        iteration=session.iteration,
+                    ):
+                        yield sse_event
+                    # After first session, always start from beginning for new sessions
+                    from_beginning = True
+                else:
+                    # No new session yet - check if run is still active
+                    current_run = project_db.get_run(run_id)
+                    if not current_run or current_run.get("status") not in [
+                        RunStatus.RUNNING.value,
+                        RunStatus.PAUSED.value,
+                    ]:
+                        break
+                    # Wait briefly before checking for a new session
+                    await asyncio.sleep(2)
+                    yield await format_sse("heartbeat", {
+                        "timestamp": asyncio.get_event_loop().time(),
+                    })
         else:
             yield await format_sse("info", {
                 "message": f"Loop not running (status: {run_status})"
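With this change a single SSE connection spans every session of a run instead of going quiet after the first one. A minimal consumer sketch, assuming a hypothetical /stream route (only the event names above come from the diff; the URL and client choice are assumptions):

import json
import httpx  # any SSE-capable HTTP client would do

async def follow_loop(base_url: str, slug: str, loop_name: str) -> None:
    # Hypothetical route; the real path is defined elsewhere in stream.py.
    url = f"{base_url}/api/projects/{slug}/loops/{loop_name}/stream"
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("GET", url) as response:
            event_type = None
            async for line in response.aiter_lines():
                if line.startswith("event:"):
                    event_type = line.split(":", 1)[1].strip()
                elif line.startswith("data:") and event_type != "heartbeat":
                    # heartbeats only keep the connection alive between sessions
                    print(event_type, json.loads(line.split(":", 1)[1]))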
@@ -259,25 +280,14 @@ async def _tail_session(
        if event.type == SessionEventType.UNKNOWN:
            continue

-        #
+        # Stream events to client (persistence handled by executor)
        if event.type == SessionEventType.TEXT:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="text",
-                content=event.text,
-            )
            yield await format_sse("text", {
                "content": event.text,
                **event_meta,
            })

        elif event.type == SessionEventType.TOOL_CALL:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="tool_call",
-                tool_name=event.tool_name,
-                tool_input=event.tool_input,
-            )
            yield await format_sse("tool_call", {
                "name": event.tool_name,
                "input": event.tool_input,
@@ -285,12 +295,6 @@ async def _tail_session(
            })

        elif event.type == SessionEventType.TOOL_RESULT:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="tool_result",
-                tool_name=event.tool_name,
-                tool_result=event.tool_result[:1000] if event.tool_result else None,
-            )
            yield await format_sse("tool_result", {
                "name": event.tool_name,
                "result": event.tool_result[:1000] if event.tool_result else None,
@@ -298,30 +302,16 @@ async def _tail_session(
            })

        elif event.type == SessionEventType.ERROR:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="error",
-                error_message=event.error_message,
-            )
            yield await format_sse("error", {
                "message": event.error_message,
                **event_meta,
            })

        elif event.type == SessionEventType.COMPLETE:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="complete",
-            )
            yield await format_sse("complete", event_meta)
            break

        elif event.type == SessionEventType.INIT:
-            project_db.add_session_event(
-                session_id=session_id,
-                event_type="init",
-                raw_data=event.raw_data,
-            )
            yield await format_sse("init", {
                "data": event.raw_data,
                **event_meta,
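The deleted add_session_event calls are the notable part here: persistence moves out of the streaming path, so the executor becomes the single writer and _tail_session is read-only. A toy sketch of that contract, using stand-in types rather than ralphx's real API:

from dataclasses import dataclass, field

@dataclass
class EventStore:
    rows: list[tuple[str, dict]] = field(default_factory=list)

    def add(self, event_type: str, payload: dict) -> None:
        # Executor-side write path: the only place events are persisted,
        # so rows are not duplicated however many SSE clients are tailing.
        self.rows.append((event_type, payload))

def tail(store: EventStore) -> list[str]:
    # Streaming-side read path: format only, never write.
    return [f"event: {etype}" for etype, _ in store.rows]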
@@ -538,11 +528,18 @@ async def get_grouped_events(
    slug: str,
    loop_name: str,
    limit_runs: int = Query(5, ge=1, le=50, description="Max runs to return"),
+    limit_sessions: int = Query(20, ge=1, le=100, description="Max sessions per run"),
+    limit_events: int = Query(200, ge=1, le=1000, description="Max events per session"),
 ):
    """Get events grouped by run and iteration.

    Returns events organized in a tree structure:
    - runs: { run_id: { status, iterations: { iteration: { events, session_id, is_live } } } }
+
+    Limits are applied at each level to prevent unbounded data loads:
+    - limit_runs: max runs to return (default 5)
+    - limit_sessions: max sessions per run (default 20)
+    - limit_events: max events per session (default 200)
    """
    manager, project, project_db = get_project(slug)
    session_manager = SessionManager(project_db)
@@ -559,15 +556,17 @@ async def get_grouped_events(
        run_id = run.get("id")
        run_status = run.get("status", "unknown")

-        # Get
-        sessions = session_manager.list_sessions(run_id=run_id, limit=
+        # Get sessions for this run (bounded)
+        sessions = session_manager.list_sessions(run_id=run_id, limit=limit_sessions)

        iterations = {}
        for session in sessions:
            iter_num = session.iteration

-            # Get events for this session
-            events = project_db.get_session_events(
+            # Get events for this session (bounded to prevent memory issues)
+            events = project_db.get_session_events(
+                session.session_id, limit=limit_events
+            )

            # Determine if this is the live session
            is_live = (
@@ -581,6 +580,7 @@ async def get_grouped_events(
                "status": session.status,
                "is_live": is_live,
                "events": events,
+                "events_truncated": len(events) >= limit_events,
            }

        result["runs"][run_id] = {
@@ -589,6 +589,8 @@ async def get_grouped_events(
            "started_at": run.get("started_at"),
            "completed_at": run.get("completed_at"),
            "iterations_completed": run.get("iterations_completed", 0),
+            "items_generated": run.get("items_generated", 0),
+            "error_message": run.get("error_message"),
            "iterations": iterations,
        }
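The new limits make payload size a client decision. A request sketch; the route path is an assumption, while the parameter names and defaults come from the signature above:

import httpx

def fetch_grouped_events(base_url: str, slug: str, loop_name: str) -> dict:
    resp = httpx.get(
        # Hypothetical path for the get_grouped_events route.
        f"{base_url}/api/projects/{slug}/loops/{loop_name}/events/grouped",
        params={"limit_runs": 5, "limit_sessions": 20, "limit_events": 200},
    )
    resp.raise_for_status()
    data = resp.json()
    # Sessions where events_truncated is true hit limit_events, so treat
    # their event lists as a prefix rather than the full history.
    return data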
|
ralphx/api/routes/workflows.py
CHANGED
@@ -11,6 +11,59 @@ from ralphx.core.project_db import ProjectDatabase

 router = APIRouter()

+# Processing type configurations - maps processing_type to step_type and config
+# Matches frontend StepSettings.tsx and mcp/tools/workflows.py
+PROCESSING_TYPES = {
+    "design_doc": {
+        "step_type": "interactive",
+        "config": {
+            "loopType": "design_doc",
+            "allowedTools": ["WebSearch", "WebFetch", "Bash", "Read", "Glob", "Grep", "Edit", "Write"],
+            "model": "opus",
+            "timeout": 300,
+        },
+    },
+    "extractgen_requirements": {
+        "step_type": "autonomous",
+        "config": {
+            "loopType": "generator",
+            "template": "extractgen_requirements",
+            "allowedTools": ["WebSearch", "WebFetch"],
+            "model": "opus",
+            "timeout": 600,
+            "max_iterations": 100,
+            "cooldown_between_iterations": 5,
+            "max_consecutive_errors": 5,
+        },
+    },
+    "webgen_requirements": {
+        "step_type": "autonomous",
+        "config": {
+            "loopType": "generator",
+            "template": "webgen_requirements",
+            "allowedTools": ["WebSearch", "WebFetch"],
+            "model": "opus",
+            "timeout": 900,
+            "max_iterations": 15,
+            "cooldown_between_iterations": 15,
+            "max_consecutive_errors": 3,
+        },
+    },
+    "implementation": {
+        "step_type": "autonomous",
+        "config": {
+            "loopType": "consumer",
+            "template": "implementation",
+            "allowedTools": ["Read", "Write", "Edit", "Bash", "Glob", "Grep"],
+            "model": "opus",
+            "timeout": 1800,
+            "max_iterations": 50,
+            "cooldown_between_iterations": 5,
+            "max_consecutive_errors": 3,
+        },
+    },
+}
+

 # ============================================================================
 # Request/Response Models
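The create_workflow change further down consumes this table; condensed, the rule is that a step's processing_type takes precedence over its explicit type. A sketch of the same resolution:

def resolve_step(step_def: dict) -> tuple[str, dict]:
    # Mirrors create_workflow below: processing_type wins; otherwise fall
    # back to the explicit "type" field for backwards compatibility.
    processing_type = step_def.get("processing_type")
    if processing_type in PROCESSING_TYPES:
        entry = PROCESSING_TYPES[processing_type]
        return entry["step_type"], {**entry["config"]}
    return step_def.get("type", "autonomous"), {}

# resolve_step({"processing_type": "implementation"})
# -> ("autonomous", {"loopType": "consumer", "template": "implementation", ...})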
@@ -137,6 +190,8 @@ class CreateStepRequest(BaseModel):
    max_consecutive_errors: Optional[int] = Field(None, ge=1, le=100)
    # Custom prompt (autonomous steps only)
    custom_prompt: Optional[str] = Field(None, max_length=50000)
+    # Cross-step context: step IDs whose items should be visible as existing context
+    context_from_steps: Optional[list[int]] = None


 class UpdateStepRequest(BaseModel):
@@ -161,6 +216,11 @@ class UpdateStepRequest(BaseModel):
    max_consecutive_errors: Optional[int] = Field(None, ge=1, le=100)
    # Custom prompt (autonomous steps only)
    custom_prompt: Optional[str] = Field(None, max_length=50000)
+    # Cross-step context: step IDs whose items should be visible as existing context
+    context_from_steps: Optional[list[int]] = None
+    # Design doc file path (for design_doc interactive steps)
+    # Path is relative to .ralphx/resources/design_doc/ (e.g., "PROJECT_DESIGN.md")
+    design_doc_path: Optional[str] = Field(None, max_length=255)


 # Valid tools for autonomous steps
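An update exercising both new fields might look like the following; the HTTP method, URL, and IDs are illustrative, and only the field names and semantics come from the models above:

import httpx

payload = {
    # Items from step 12 (an earlier step in the same workflow) become
    # visible as existing context for this step.
    "context_from_steps": [12],
    # Resolved under .ralphx/resources/design_doc/; "" would unlink it.
    "design_doc_path": "PROJECT_DESIGN.md",
}
httpx.patch(
    "http://localhost:8000/api/projects/demo/workflows/wf-1/steps/14",
    json=payload,
)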
@@ -274,9 +334,9 @@ def _workflow_to_response(
        total_iterations = sum(
            r.get("iterations_completed", 0) or 0 for r in step_runs
        )
-        … (old lines 277-278 not shown in this view)
-        )
+        # Count items from work_items table (includes imports + generated)
+        step_item_counts = item_counts_by_step_id.get(step_id, {})
+        total_items = sum(step_item_counts.values())

        # Check for active run
        running_run = next(
@@ -315,13 +375,15 @@ def _workflow_to_response(
        total = sum(item_stats.values())
        # Status mapping for display:
        # - "completed" in DB = ready for consumer (display as "pending")
+        # - "claimed" in DB = actively being processed (display as "in_progress")
        # - "processed" in DB = already done (display as "completed")
        # - "pending" in DB = not yet ready (shouldn't happen much)
        input_items = {
            "total": total,
            # "completed" in DB means ready-to-process, so count as pending for display
            "pending": item_stats.get("pending", 0) + item_stats.get("completed", 0),
-            … (old line 324 not shown in this view)
+            # "claimed" in DB means actively being processed by a consumer loop
+            "in_progress": item_stats.get("in_progress", 0) + item_stats.get("claimed", 0),
            # "processed" in DB means actually done
            "completed": item_stats.get("processed", 0),
            "skipped": item_stats.get("skipped", 0),
@@ -330,6 +392,11 @@ def _workflow_to_response(
            "rejected": item_stats.get("rejected", 0),
        }

+        # Derive loop_name fallback from runs if step record is missing it
+        derived_loop_name = step.get("loop_name") or next(
+            (r.get("loop_name") for r in step_runs), None
+        )
+
        step_progress[step_id] = {
            "has_active_run": running_run is not None,
            "iterations_completed": total_iterations,
@@ -342,6 +409,7 @@ def _workflow_to_response(
            "items_generated": total_items,
            "has_guardrails": step_has_guardrails or guardrails_count > 0,
            "input_items": input_items,
+            "loop_name": derived_loop_name,
        }

    return WorkflowResponse(
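The DB-to-display mapping above is the part worth internalizing; flattened into one lookup table (derived directly from the comments in the diff):

# DB status -> display bucket
DB_TO_DISPLAY = {
    "pending": "pending",        # not yet ready
    "completed": "pending",      # ready for consumer, not yet processed
    "claimed": "in_progress",    # actively being processed by a consumer loop
    "in_progress": "in_progress",
    "processed": "completed",    # actually done
    "skipped": "skipped",
    "rejected": "rejected",
}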
@@ -362,7 +430,7 @@ def _workflow_to_response(
            step_type=s["step_type"],
            status=s["status"],
            config=s.get("config"),
-            loop_name=s.get("loop_name"),
+            loop_name=step_progress.get(s["id"], {}).get("loop_name") or s.get("loop_name"),
            artifacts=s.get("artifacts"),
            started_at=s.get("started_at"),
            completed_at=s.get("completed_at"),
@@ -518,19 +586,36 @@ async def create_workflow(slug: str, request: CreateWorkflowRequest):
    # Create steps from template
    created_steps = []
    for step_def in template_steps:
+        # Get step_type and config from processing_type if available
+        processing_type = step_def.get("processing_type")
+        if processing_type and processing_type in PROCESSING_TYPES:
+            type_config = PROCESSING_TYPES[processing_type]
+            step_type = type_config["step_type"]
+            # Merge template config with processing_type defaults
+            config = {**type_config["config"]}
+        else:
+            # Fallback to explicit type field (for backwards compatibility)
+            step_type = step_def.get("type", "autonomous")
+            config = {}
+
+        # Add template-specific config
+        config.update({
+            "description": step_def.get("description"),
+            "inputs": step_def.get("inputs", []),
+            "outputs": step_def.get("outputs", []),
+            "skippable": step_def.get("skippable", False),
+            "skipCondition": step_def.get("skipCondition"),
+        })
+        # Override loopType if explicitly specified in template
+        if step_def.get("loopType"):
+            config["loopType"] = step_def["loopType"]
+
        step = pdb.create_workflow_step(
            workflow_id=workflow_id,
            step_number=step_def["number"],
            name=step_def["name"],
-            step_type=
-            config=
-                "description": step_def.get("description"),
-                "loopType": step_def.get("loopType"),
-                "inputs": step_def.get("inputs", []),
-                "outputs": step_def.get("outputs", []),
-                "skippable": step_def.get("skippable", False),
-                "skipCondition": step_def.get("skipCondition"),
-            },
+            step_type=step_type,
+            config=config,
            status="pending",
        )
        created_steps.append(step)
@@ -726,6 +811,70 @@ async def advance_workflow_step(
    return _workflow_to_response(workflow, steps, pdb)


+@router.post("/workflows/{workflow_id}/steps/{step_id}/reopen", response_model=WorkflowResponse)
+async def reopen_workflow_step(slug: str, workflow_id: str, step_id: int):
+    """Reopen a completed or skipped step.
+
+    Sets the step back to active, resets all later steps to pending,
+    and moves the workflow's current_step back.
+    """
+    pdb = _get_project_db(slug)
+    workflow = pdb.get_workflow(workflow_id)
+    if not workflow:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"Workflow '{workflow_id}' not found",
+        )
+
+    if workflow["status"] == "completed":
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail="Cannot reopen steps of a completed workflow.",
+        )
+
+    step = pdb.get_workflow_step(step_id)
+    if not step or step["workflow_id"] != workflow_id:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=f"Step '{step_id}' not found in workflow '{workflow_id}'",
+        )
+
+    if step["status"] not in ("completed", "skipped"):
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=f"Cannot reopen step in status '{step['status']}'. Step must be 'completed' or 'skipped'.",
+        )
+
+    # Abort any active/paused runs on the reopened step and all later steps
+    from datetime import datetime
+
+    all_steps = pdb.list_workflow_steps(workflow_id)
+    affected_step_ids = {
+        s["id"]
+        for s in all_steps
+        if s["step_number"] >= step["step_number"]
+    }
+    runs = pdb.list_runs(workflow_id=workflow_id, status=["running", "paused"])
+    for run in runs:
+        if run.get("step_id") in affected_step_ids:
+            pdb.update_run(
+                run["id"],
+                status="aborted",
+                completed_at=datetime.utcnow().isoformat(),
+                error_message="Step reopened by user",
+            )
+
+    pdb.reopen_workflow_step_atomic(
+        workflow_id=workflow_id,
+        step_id=step["id"],
+        step_number=step["step_number"],
+    )
+
+    workflow = pdb.get_workflow(workflow_id)
+    steps = pdb.list_workflow_steps(workflow_id)
+    return _workflow_to_response(workflow, steps, pdb)
+
+
 @router.post("/workflows/{workflow_id}/start", response_model=WorkflowResponse)
 async def start_workflow(slug: str, workflow_id: str):
    """Start a workflow by activating the first step."""
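reopen_workflow_step_atomic itself lands in project_db.py (+770 lines, not expanded here). Judging from the endpoint docstring, its effect is plausibly this single transaction; the table and column names below are guesses, not ralphx's actual schema:

import sqlite3

def reopen_step_sketch(conn: sqlite3.Connection, workflow_id: str, step_id: int, step_number: int) -> None:
    with conn:  # commits on success, rolls back on error
        conn.execute(
            "UPDATE workflow_steps SET status = 'active' WHERE workflow_id = ? AND id = ?",
            (workflow_id, step_id),
        )
        conn.execute(
            "UPDATE workflow_steps SET status = 'pending' WHERE workflow_id = ? AND step_number > ?",
            (workflow_id, step_number),
        )
        conn.execute(
            "UPDATE workflows SET current_step = ? WHERE id = ?",
            (step_number, workflow_id),
        )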
@@ -771,17 +920,24 @@ async def start_workflow(slug: str, workflow_id: str):
    from ralphx.core.project import Project
    from ralphx.core.workflow_executor import WorkflowExecutor

-    … (old lines 774-781 not shown in this view)
-        )
-    … (old lines 783-784 not shown in this view)
+    # Guard against double-execution
+    existing_runs = pdb.list_runs(
+        workflow_id=workflow_id,
+        step_id=first_pending["id"],
+        status=["running", "paused"],
+    )
+    if not existing_runs:
+        db = Database()
+        project = db.get_project(slug)
+        if project:
+            project_obj = Project.from_dict(project)
+            executor = WorkflowExecutor(
+                project=project_obj,
+                db=pdb,
+                workflow_id=workflow_id,
+            )
+            # Start autonomous step in background
+            asyncio.create_task(executor._start_autonomous_step(first_pending))

    # Return updated workflow
    workflow = pdb.get_workflow(workflow_id)
@@ -847,6 +1003,18 @@ async def run_workflow_step(slug: str, workflow_id: str):
            detail=f"Step must be active to run. Current status: {current_step['status']}",
        )

+    # Guard against double-execution: check if there's already a running run for this step
+    existing_runs = pdb.list_runs(
+        workflow_id=workflow_id,
+        step_id=current_step["id"],
+        status=["running", "paused"],
+    )
+    if existing_runs:
+        raise HTTPException(
+            status_code=status.HTTP_409_CONFLICT,
+            detail=f"Step already has a running executor (run {existing_runs[0]['id']}). Stop it first.",
+        )
+
    # Create and use WorkflowExecutor to start the autonomous step
    project_obj = Project.from_dict(project)
    executor = WorkflowExecutor(
@@ -926,6 +1094,21 @@ async def run_specific_step(slug: str, workflow_id: str, step_number: int):

    # If autonomous step, trigger the loop execution
    if target_step["step_type"] == "autonomous":
+        # Guard against double-start: check if there's already a running run
+        # for this step's loop. Without this check, rapid double-clicks could
+        # spawn two concurrent loop executions for the same step.
+        step_loop_name = target_step.get("loop_name")
+        if step_loop_name:
+            existing_runs = pdb.list_runs(
+                loop_name=step_loop_name,
+                status=["running"],
+            )
+            if existing_runs:
+                raise HTTPException(
+                    status_code=status.HTTP_409_CONFLICT,
+                    detail=f"Step already has an active run (run_id: {existing_runs[0]['id']})",
+                )
+
        project_obj = Project.from_dict(project)
        executor = WorkflowExecutor(
            project=project_obj,
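The same list-then-409 guard now appears three times (start_workflow, run_workflow_step, run_specific_step). A shared helper would keep the call sites consistent; note that all of them remain check-then-act, so two requests racing through the window between list_runs and run creation could still both proceed without a DB-level constraint. A refactoring sketch, not code from the diff:

from fastapi import HTTPException, status

def assert_no_active_run(pdb, **filters) -> None:
    # Raise 409 if any run matching the filters is still active,
    # mirroring the guards added in this release.
    existing = pdb.list_runs(status=["running", "paused"], **filters)
    if existing:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"Step already has an active run (run_id: {existing[0]['id']})",
        )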
@@ -1077,6 +1260,32 @@ async def create_step(slug: str, workflow_id: str, request: CreateStepRequest):
    if request.max_consecutive_errors is not None:
        config["max_consecutive_errors"] = request.max_consecutive_errors

+    # Cross-step context links
+    if request.context_from_steps is not None:
+        # Validate step IDs belong to the same workflow
+        if request.context_from_steps:
+            existing_step_ids = {s["id"] for s in pdb.list_workflow_steps(workflow_id)}
+            invalid_ids = [sid for sid in request.context_from_steps if sid not in existing_step_ids]
+            if invalid_ids:
+                raise HTTPException(
+                    status_code=status.HTTP_400_BAD_REQUEST,
+                    detail=f"context_from_steps contains invalid step IDs: {invalid_ids}",
+                )
+        config["context_from_steps"] = request.context_from_steps
+
+    # Auto-link: webgen steps should see extractgen items as context
+    if (
+        stripped_template == "webgen_requirements"
+        and not request.context_from_steps
+    ):
+        # Find preceding extractgen step in the same workflow
+        all_steps = pdb.list_workflow_steps(workflow_id)
+        for s in all_steps:
+            s_config = s.get("config") or {}
+            if s_config.get("template") == "extractgen_requirements":
+                config["context_from_steps"] = [s["id"]]
+                break
+
    # Create step atomically (step_number calculated inside transaction)
    step = pdb.create_workflow_step_atomic(
        workflow_id=workflow_id,
@@ -1166,6 +1375,18 @@ async def update_step(slug: str, workflow_id: str, step_id: int, request: UpdateStepRequest):
        config_updates["template"] = stripped_template if stripped_template else None
    if request.skippable is not None:
        config_updates["skippable"] = request.skippable
+    if request.design_doc_path is not None:
+        # Empty string means unlink, otherwise store the path
+        stripped_path = request.design_doc_path.strip()
+        if stripped_path:
+            # Security: validate design_doc_path is a safe filename (no traversal)
+            if (".." in stripped_path or "/" in stripped_path or "\\" in stripped_path
+                    or "\0" in stripped_path or stripped_path.startswith(".")):
+                raise HTTPException(
+                    status_code=status.HTTP_400_BAD_REQUEST,
+                    detail="Invalid design_doc_path: must be a simple filename (e.g., 'PROJECT_DESIGN.md')",
+                )
+        config_updates["design_doc_path"] = stripped_path if stripped_path else None

    # Include autonomous settings only for autonomous steps
    if effective_step_type == "autonomous":
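The traversal check is a deny-list over the raw string. Pulled out as a standalone predicate (not code from the diff) with examples of what passes and fails:

def is_safe_design_doc_name(name: str) -> bool:
    # Same rules as the endpoint: no "..", no path separators, no NUL,
    # no leading dot - the value must be a plain filename.
    return not (
        ".." in name or "/" in name or "\\" in name
        or "\0" in name or name.startswith(".")
    )

assert is_safe_design_doc_name("PROJECT_DESIGN.md")
assert not is_safe_design_doc_name("../secrets.md")  # traversal
assert not is_safe_design_doc_name("docs/plan.md")   # separator
assert not is_safe_design_doc_name(".hidden.md")     # dotfile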
@@ -1182,6 +1403,17 @@ async def update_step(slug: str, workflow_id: str, step_id: int, request: UpdateStepRequest):
            config_updates["cooldown_between_iterations"] = request.cooldown_between_iterations
        if request.max_consecutive_errors is not None:
            config_updates["max_consecutive_errors"] = request.max_consecutive_errors
+        # Cross-step context links
+        if request.context_from_steps is not None:
+            if request.context_from_steps:
+                existing_step_ids = {s["id"] for s in pdb.list_workflow_steps(workflow_id)}
+                invalid_ids = [sid for sid in request.context_from_steps if sid not in existing_step_ids]
+                if invalid_ids:
+                    raise HTTPException(
+                        status_code=status.HTTP_400_BAD_REQUEST,
+                        detail=f"context_from_steps contains invalid step IDs: {invalid_ids}",
+                    )
+            config_updates["context_from_steps"] = request.context_from_steps
        # Custom prompt
        if request.custom_prompt is not None:
            # Empty string means clear custom prompt