ralphx 0.4.0__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ralphx/__init__.py +1 -1
- ralphx/adapters/base.py +8 -0
- ralphx/adapters/claude_cli.py +377 -452
- ralphx/api/routes/items.py +4 -0
- ralphx/api/routes/loops.py +101 -15
- ralphx/api/routes/planning.py +1 -1
- ralphx/api/routes/stream.py +104 -59
- ralphx/api/routes/templates.py +1 -0
- ralphx/api/routes/workflows.py +2 -2
- ralphx/core/checkpoint.py +118 -0
- ralphx/core/executor.py +134 -81
- ralphx/core/loop_templates.py +33 -14
- ralphx/core/planning_service.py +1 -1
- ralphx/core/project_db.py +66 -7
- ralphx/core/session.py +62 -10
- ralphx/core/templates.py +74 -87
- ralphx/core/workflow_executor.py +3 -0
- ralphx/mcp/tools/workflows.py +2 -2
- ralphx/models/loop.py +1 -1
- ralphx/models/session.py +5 -0
- ralphx/static/assets/index-DnihHetG.js +265 -0
- ralphx/static/assets/index-DnihHetG.js.map +1 -0
- ralphx/static/assets/index-nIDWmtzm.css +1 -0
- ralphx/static/index.html +2 -2
- {ralphx-0.4.0.dist-info → ralphx-0.4.1.dist-info}/METADATA +1 -1
- {ralphx-0.4.0.dist-info → ralphx-0.4.1.dist-info}/RECORD +28 -28
- ralphx/static/assets/index-BuLI7ffn.css +0 -1
- ralphx/static/assets/index-DWvlqOTb.js +0 -264
- ralphx/static/assets/index-DWvlqOTb.js.map +0 -1
- {ralphx-0.4.0.dist-info → ralphx-0.4.1.dist-info}/WHEEL +0 -0
- {ralphx-0.4.0.dist-info → ralphx-0.4.1.dist-info}/entry_points.txt +0 -0
ralphx/core/executor.py
CHANGED
@@ -11,6 +11,7 @@ Implements the main loop execution with:
 """

 import asyncio
+import json
 import os
 import random
 import re
@@ -33,6 +34,7 @@ from ralphx.core.workspace import get_loop_settings_path
 from ralphx.models.loop import LoopConfig, Mode, ModeSelectionStrategy
 from ralphx.models.project import Project
 from ralphx.models.run import Run, RunStatus
+from ralphx.core.auth import get_effective_account_for_project
 from ralphx.core.logger import run_log, iteration_log


@@ -555,8 +557,8 @@ class LoopExecutor:

         Priority order:
         1. Loop-level LOOP_TEMPLATE resource with position=template_body (from loop_resources table)
-        2. Project-level LOOP_TEMPLATE resource with position=TEMPLATE_BODY (from resources table)
-        3. Mode's prompt_template file path
+        2. Mode's prompt_template file path (step-specific prompt)
+        3. Project-level LOOP_TEMPLATE resource with position=TEMPLATE_BODY (from resources table)
         4. Default template from ralphx/templates/loop_templates/{loop_type}.md

         Args:
@@ -576,22 +578,22 @@ class LoopExecutor:
             if content:
                 return content

-        # Priority 2: Check for project-level LOOP_TEMPLATE resource with TEMPLATE_BODY position
+        # Priority 2: Mode's prompt_template file (step-specific prompt)
+        template_path = self.project.path / mode.prompt_template
+        if template_path.exists():
+            return template_path.read_text()
+
+        # Priority 3: Check for project-level LOOP_TEMPLATE resource with TEMPLATE_BODY position
         resource_manager = ResourceManager(self.project.path, db=self.db)
         resource_set = resource_manager.load_for_loop(self.config)
         template_resources = resource_set.by_position(InjectionPosition.TEMPLATE_BODY)

         if template_resources:
-            # Use the first LOOP_TEMPLATE resource as the base template
             resource = template_resources[0]
             if resource.content:
+                run_log.debug(f"Using project-level template resource: {resource.name}")
                 return resource.content

-        # Priority 3: Mode's prompt_template file
-        template_path = self.project.path / mode.prompt_template
-        if template_path.exists():
-            return template_path.read_text()
-
         # Priority 4: Default template for loop type
         default_template_path = (
             Path(__file__).parent.parent / "templates" / "loop_templates" / f"{self.config.type.value}.md"
@@ -698,35 +700,50 @@ class LoopExecutor:
         else:
             template = after_design_doc + "\n\n" + template

-        # Get design doc
-        #
+        # Get design doc for substitution - prefer file reference over inline content
+        # to avoid bloating the prompt with large documents.
+        # Claude can read files directly from the project directory.
+        design_doc_path: Optional[str] = None
         design_doc_content = ""
         for resource in loop_resources:
             if resource.get("resource_type") == "design_doc":
+                # Prefer file path if available (keeps prompt small)
+                if resource.get("source_type") == "project_file" and resource.get("source_path"):
+                    design_doc_path = resource.get("source_path")
                 design_doc_content = resource.get("_resolved_content", "")
                 break
-        if not design_doc_content:
+        if not design_doc_content and not design_doc_path:
             # Fallback to project-level resources
             for resource in resource_set.resources:
                 if resource.resource_type.value == "design_doc":
+                    if resource.file_path:
+                        design_doc_path = resource.file_path
                     design_doc_content = resource.content or ""
                     break

-        #
-
-
-
+        # Build design doc substitution - use file reference for large docs
+        # Threshold: 50KB (inline) vs file reference (keeps prompt manageable)
+        DESIGN_DOC_INLINE_THRESHOLD = 50_000
+        if design_doc_path and len(design_doc_content) > DESIGN_DOC_INLINE_THRESHOLD:
+            # Use file reference - Claude will read the file directly
+            design_doc_ref = (
+                f"[Design document is in file: {design_doc_path}]\n"
+                f"Read this file to understand the project requirements and architecture."
+            )
+        elif design_doc_content:
+            design_doc_ref = self._escape_template_vars(design_doc_content)
+        else:
+            design_doc_ref = ""
+
+        # Substitute {DESIGN_DOC} (hank-rcm style)
+        if "{DESIGN_DOC}" in template:
+            template = template.replace("{DESIGN_DOC}", design_doc_ref)

-        # Substitute {{design_doc}} (RalphX style)
+        # Substitute {{design_doc}} (RalphX style)
         # Note: {{design_doc}} may have already been used as a position marker for
         # after_design_doc resources above, but the marker text itself remains.
-        # Replace it with the actual design doc content (or empty string if none).
         if "{{design_doc}}" in template:
-
-            escaped_design_doc = self._escape_template_vars(design_doc_content)
-            template = template.replace("{{design_doc}}", escaped_design_doc)
-        else:
-            template = template.replace("{{design_doc}}", "")
+            template = template.replace("{{design_doc}}", design_doc_ref)

         # BEFORE_TASK: Insert before the main task instruction
         # Look for {{task}} marker or insert near the end
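Note: the hunk above replaces inline substitution of large design documents with a short file pointer. A minimal standalone sketch of that decision logic, for illustration only (the helper name build_design_doc_ref and the escape callback are not part of the package; the 50 KB cutoff mirrors DESIGN_DOC_INLINE_THRESHOLD above):

from typing import Callable, Optional

DESIGN_DOC_INLINE_THRESHOLD = 50_000  # characters of inline content before switching to a file reference

def build_design_doc_ref(content: str, path: Optional[str], escape: Callable[[str], str]) -> str:
    # Large doc with a known file path: point the model at the file instead of inlining it.
    if path and len(content) > DESIGN_DOC_INLINE_THRESHOLD:
        return (
            f"[Design document is in file: {path}]\n"
            f"Read this file to understand the project requirements and architecture."
        )
    # Small doc: escape template markers and inline it; no doc at all yields an empty string.
    return escape(content) if content else ""

# Example: a 60,000-character document is referenced rather than inlined.
print(build_design_doc_ref("x" * 60_000, "docs/design.md", escape=lambda s: s)[:45])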
@@ -1002,17 +1019,33 @@ class LoopExecutor:
             })

         # 4. Handle inputs_list (input files for this loop)
-
+        from ralphx.core.workspace import get_loop_inputs_path
+        inputs_dir = get_loop_inputs_path(self.project.path, self.config.name)
         if inputs_dir.exists():
-            input_files = [f
-            inputs_list = "\n".join(f"- {f}" for f in sorted(input_files))
+            input_files = [f for f in inputs_dir.iterdir() if f.is_file()]
+            inputs_list = "\n".join(f"- {f.name} ({f})" for f in sorted(input_files))
         else:
             inputs_list = "(No input files found)"

-        # 5. Substitute template variables
-
-
-
+        # 5. Substitute template variables
+        # For large story lists (>50 items), write to file and reference it
+        # to avoid bloating the prompt
+        STORIES_INLINE_THRESHOLD = 50
+
+        if len(stories_summary) > STORIES_INLINE_THRESHOLD:
+            # Write stories to a temp file in .ralphx directory
+            stories_file = Path(self.project.path) / ".ralphx" / "temp" / "existing_stories.json"
+            stories_file.parent.mkdir(parents=True, exist_ok=True)
+            stories_file.write_text(json.dumps(stories_summary, indent=2))
+            existing_stories_ref = (
+                f"[{len(stories_summary)} existing stories - see .ralphx/temp/existing_stories.json]\n"
+                f"Read this file to see all existing story IDs and avoid duplicates."
+            )
+        else:
+            existing_stories_ref = self._escape_template_vars(
+                json.dumps(stories_summary, indent=2)
+            )
+
         category_stats_json = self._escape_template_vars(
             json.dumps(category_stats, indent=2)
         )
@@ -1020,7 +1053,7 @@ class LoopExecutor:
         # Escape inputs_list to prevent template injection from filenames
         inputs_list_escaped = self._escape_template_vars(inputs_list)

-        template = template.replace("{{existing_stories}}",
+        template = template.replace("{{existing_stories}}", existing_stories_ref)
         template = template.replace("{{category_stats}}", category_stats_json)
         template = template.replace("{{total_stories}}", str(len(existing_items)))
         template = template.replace("{{inputs_list}}", inputs_list_escaped)
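Note: the same inline-vs-file pattern is applied to the existing-story list above, except the threshold is an item count and the data is spilled to a JSON file first. A standalone sketch under those assumptions (the function name render_existing_stories and the escape callback are illustrative, not part of the package):

import json
from pathlib import Path
from typing import Callable

STORIES_INLINE_THRESHOLD = 50  # number of stories to inline before spilling to a file

def render_existing_stories(stories: list[dict], project_path: Path, escape: Callable[[str], str]) -> str:
    if len(stories) > STORIES_INLINE_THRESHOLD:
        # Write the full list to .ralphx/temp and hand the model a pointer plus instructions.
        stories_file = project_path / ".ralphx" / "temp" / "existing_stories.json"
        stories_file.parent.mkdir(parents=True, exist_ok=True)
        stories_file.write_text(json.dumps(stories, indent=2))
        return (
            f"[{len(stories)} existing stories - see .ralphx/temp/existing_stories.json]\n"
            f"Read this file to see all existing story IDs and avoid duplicates."
        )
    # Short lists stay inline so no tool call is needed to read them.
    return escape(json.dumps(stories, indent=2))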
@@ -1541,7 +1574,7 @@ class LoopExecutor:
                 # Extract known fields explicitly
                 known_fields = {
                     'id', 'content', 'story', 'title', 'priority', 'category',
-                    'tags', 'dependencies',
+                    'tags', 'dependencies',
                 }
                 extracted.append({
                     'id': str(item['id']),
@@ -1556,31 +1589,40 @@ class LoopExecutor:
                 })
             return extracted

+        def _try_parse_json_items(parsed) -> list[dict]:
+            """Extract items from a parsed JSON value (dict or list)."""
+            if isinstance(parsed, dict):
+                stories_list = parsed.get('stories') or parsed.get('items') or []
+                if isinstance(stories_list, list):
+                    return extract_items_from_list(stories_list)
+            elif isinstance(parsed, list):
+                return extract_items_from_list(parsed)
+            return []
+
         try:
-            # Try 1:
-
-
-
+            # Try 1: Extract JSON from markdown code fences (```json ... ``` or ``` ... ```)
+            fence_match = re.search(r'```(?:json)?\s*\n([\s\S]*?)\n```', output)
+            if fence_match:
+                fenced = fence_match.group(1).strip()
                 try:
-                    parsed = json.loads(
-
-
-
-                    items = extract_items_from_list(stories_list)
-                    if items:
-                        return items
+                    parsed = json.loads(fenced)
+                    items = _try_parse_json_items(parsed)
+                    if items:
+                        return items
                 except json.JSONDecodeError:
                     pass

-            # Try 2:
-
-
-
-
-
-
-
-
+            # Try 2: raw_decode from first { or [ (handles JSON embedded in prose)
+            decoder = json.JSONDecoder()
+            for i, ch in enumerate(output):
+                if ch in ('{', '['):
+                    try:
+                        parsed, _ = decoder.raw_decode(output, i)
+                        items = _try_parse_json_items(parsed)
+                        if items:
+                            return items
+                    except json.JSONDecodeError:
+                        continue
         except (json.JSONDecodeError, ValueError):
             pass

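Note: the rewritten extraction above is a two-stage parse. A small self-contained version of the same idea (the function name extract_json_payload is illustrative; the fence regex and raw_decode usage mirror the diff):

import json
import re

def extract_json_payload(output: str):
    # Stage 1: prefer JSON inside a markdown code fence (```json ... ``` or ``` ... ```).
    fence = re.search(r'```(?:json)?\s*\n([\s\S]*?)\n```', output)
    if fence:
        try:
            return json.loads(fence.group(1).strip())
        except json.JSONDecodeError:
            pass
    # Stage 2: raw_decode from the first '{' or '[' so JSON embedded in prose is still found.
    decoder = json.JSONDecoder()
    for i, ch in enumerate(output):
        if ch in ('{', '['):
            try:
                parsed, _ = decoder.raw_decode(output, i)
                return parsed
            except json.JSONDecodeError:
                continue
    return None

# Example: JSON wrapped in prose still parses.
print(extract_json_payload('Here you go: {"stories": [{"id": "A-001"}]} Done.'))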
@@ -1740,18 +1782,36 @@ class LoopExecutor:
         # Mutable holder for session_id (set by INIT event before other events)
         session_id_holder: list[Optional[str]] = [None]

+        # Get effective account for logging and session tracking
+        account = get_effective_account_for_project(self.project.id)
+        account_email = account.get("email") if account else None
+        if account_email:
+            run_log.info(
+                "account_selected",
+                f"Using Claude account: {account_email}",
+                email=account_email,
+                iteration=self._iteration,
+            )
+
         # Callback to register session immediately when it starts
         # This enables live streaming in the UI before execution completes
         def register_session_early(session_id: str) -> None:
             session_id_holder[0] = session_id
             if self._run:
-
-
-
-
-
-
-
+                try:
+                    self.db.create_session(
+                        session_id=session_id,
+                        run_id=self._run.id,
+                        iteration=self._iteration,
+                        mode=mode_name,
+                        status="running",
+                        account_email=account_email,
+                    )
+                except Exception as e:
+                    run_log.warning(
+                        "session_register_failed",
+                        f"Failed to register session {session_id}: {e}",
+                    )

         # Callback to persist events to DB for history/debugging
         def persist_event(event: StreamEvent) -> None:
@@ -1785,6 +1845,18 @@ class LoopExecutor:
                        tool_name=event.tool_name,
                        tool_result=event.tool_result[:1000] if event.tool_result else None,
                    )
+                elif event.type == AdapterEvent.THINKING:
+                    self.db.add_session_event(
+                        session_id=sid,
+                        event_type="thinking",
+                        content=event.thinking[:2000] if event.thinking else None,
+                    )
+                elif event.type == AdapterEvent.USAGE:
+                    self.db.add_session_event(
+                        session_id=sid,
+                        event_type="usage",
+                        raw_data=event.usage,
+                    )
                 elif event.type == AdapterEvent.ERROR:
                     self.db.add_session_event(
                         session_id=sid,
@@ -1796,16 +1868,13 @@ class LoopExecutor:
                        session_id=sid,
                        event_type="complete",
                    )
-            except Exception as exc:
-
-                logging.getLogger(__name__).debug(
-                    f"[PERSIST] Failed to persist {event.type} event for session {sid}: {exc}"
-                )
+            except Exception:
+                pass  # Don't let event persistence failures break execution

         exec_result = await self.adapter.execute(
             prompt=prompt,
             model=mode.model,
-            tools=mode.tools
+            tools=mode.tools,
             timeout=mode.timeout,
             json_schema=json_schema,
             on_session_start=register_session_early,
@@ -1820,36 +1889,20 @@ class LoopExecutor:
             result.error_message = exec_result.error_message

         # Extract work items from output
-        import logging
-        _log = logging.getLogger(__name__)
-        _log.warning(f"[EXTRACT] text_output len={len(exec_result.text_output) if exec_result.text_output else 0}")
         if exec_result.text_output:
-            _log.warning(f"[EXTRACT] text_output[:200]={exec_result.text_output[:200]}")
-            # Check if JSON with stories is present
-            has_stories = '"stories"' in exec_result.text_output
-            has_json_start = '```json' in exec_result.text_output or '{"stories"' in exec_result.text_output
-            _log.warning(f"[EXTRACT] has_stories={has_stories}, has_json_start={has_json_start}")
-            if has_stories:
-                # Find and log the stories section
-                idx = exec_result.text_output.find('"stories"')
-                _log.warning(f"[EXTRACT] stories found at idx={idx}, context: {exec_result.text_output[max(0,idx-50):idx+200]}")
             items = self.extract_work_items(exec_result.text_output)
-            _log.warning(f"[EXTRACT] extracted {len(items)} items")
             if items:
                 saved = self._save_work_items(items)
                 result.items_added = items
                 self._items_generated += saved
                 self._no_items_streak = 0  # Reset streak when items are generated
-                _log.warning(f"[EXTRACT] saved {saved} items")
             else:
                 # No items generated this iteration - track for completion detection
                 self._no_items_streak += 1
-                _log.warning(f"[EXTRACT] no items extracted, streak={self._no_items_streak}")

             # Check for explicit completion signal from Claude
             if "[GENERATION_COMPLETE]" in exec_result.text_output:
                 result.generator_complete = True  # type: ignore
-                _log.warning("[EXTRACT] found [GENERATION_COMPLETE] signal")

         # Update session status (session was registered early via callback)
         if exec_result.session_id and self._run:
ralphx/core/loop_templates.py
CHANGED
@@ -73,7 +73,7 @@ modes:
     description: "Extract stories from design documents"
     model: sonnet
     timeout: 300
-    tools: []
+    tools: [Read, Glob, Grep]
     prompt_template: prompts/extract.md

   research:
@@ -102,6 +102,10 @@ PLANNING_EXTRACT_PROMPT = """# Story Extraction Mode

 You are analyzing design documents to extract user stories for implementation.

+## Design Document
+
+{{design_doc}}
+
 ## Existing Stories (DO NOT DUPLICATE)

 Total stories generated so far: {{total_stories}}
@@ -116,14 +120,15 @@ Use these to assign the next available ID for each category:

 ## Input Documents

-The following
+The following input files are available. Use the Read tool to read any files you need:
 {{inputs_list}}

 ## Your Task

-1. Read
-2.
-3.
+1. Read any input documents listed above using the Read tool
+2. Analyze the design document and input files thoroughly
+3. Generate NEW stories (do not duplicate existing ones above)
+4. For each story, provide:
    - **ID**: Use format CATEGORY-NNN (see category stats for next number)
    - A clear title
    - User story format: "As a [user], I want [feature] so that [benefit]"
@@ -345,7 +350,7 @@ modes:
     description: "Analyze all stories for Phase 1 grouping"
     model: sonnet
     timeout: 600
-    tools: []
+    tools: [Read, Glob, Grep]
     prompt_template: prompts/phase1-analyze.md
     phase: phase_1

@@ -719,6 +724,7 @@ def generate_simple_planning_config(
     max_iterations: Optional[int] = None,
     cooldown_between_iterations: Optional[int] = None,
     max_consecutive_errors: Optional[int] = None,
+    tools: Optional[list[str]] = None,
 ) -> str:
     """Generate YAML config for a simple planning loop.

@@ -729,6 +735,7 @@
         max_iterations: Override for max iterations (default: 100).
         cooldown_between_iterations: Override for cooldown in seconds (default: 5).
         max_consecutive_errors: Override for max consecutive errors (default: 5).
+        tools: Override for allowed tools (default: [Read, Glob, Grep]).

     Returns:
         YAML configuration string.
@@ -739,6 +746,14 @@
     max_iter = max_iterations if max_iterations is not None else 100
     cooldown = cooldown_between_iterations if cooldown_between_iterations is not None else 5
     max_errors = max_consecutive_errors if max_consecutive_errors is not None else 5
+    tool_list = tools if tools is not None else ["Read", "Glob", "Grep"]
+
+    # Build tools YAML block
+    # Empty list must produce "tools: []" (disable all), not "tools:" (parsed as None/use defaults)
+    if tool_list:
+        tools_yaml = "tools:\n" + "\n".join(f"      - {t}" for t in tool_list)
+    else:
+        tools_yaml = "tools: []"

     return f"""name: {name}
 display_name: "{display_name}"
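Note: the hunk above builds the tools block as a string so that an explicit empty list renders as "tools: []" (all tools disabled) rather than a bare "tools:" key, which YAML loads as None and the config loader would treat as "use defaults". A sketch of that behaviour in isolation (the helper name build_tools_yaml and the list-item indentation are assumptions; the indentation inside the generated template is not visible in this rendering):

from typing import Optional

def build_tools_yaml(tools: Optional[list[str]], default: list[str]) -> str:
    tool_list = tools if tools is not None else default
    if tool_list:
        # Indentation here is assumed; it must match the surrounding mode block in the template.
        return "tools:\n" + "\n".join(f"  - {t}" for t in tool_list)
    return "tools: []"  # explicit empty list disables all tools

# None falls back to the defaults, [] stays an explicit empty list.
print(build_tools_yaml(None, ["Read", "Glob", "Grep"]))
print(build_tools_yaml([], ["Read", "Glob", "Grep"]))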
@@ -757,7 +772,7 @@ modes:
     model: sonnet
     timeout: 300
     prompt_template: .ralphx/loops/{name}/prompts/planning.md
-
+    {tools_yaml}

 mode_selection:
   strategy: fixed
@@ -782,6 +797,7 @@ def generate_simple_implementation_config(
     max_iterations: Optional[int] = None,
     cooldown_between_iterations: Optional[int] = None,
     max_consecutive_errors: Optional[int] = None,
+    tools: Optional[list[str]] = None,
 ) -> str:
     """Generate YAML config for a simple implementation loop.

@@ -793,6 +809,7 @@
         max_iterations: Override for max iterations (default: 50).
         cooldown_between_iterations: Override for cooldown in seconds (default: 5).
         max_consecutive_errors: Override for max consecutive errors (default: 3).
+        tools: Override for allowed tools (default: [Read, Write, Edit, Bash, Glob, Grep]).

     Returns:
         YAML configuration string.
@@ -804,6 +821,14 @@
     max_iter = max_iterations if max_iterations is not None else 50
     cooldown = cooldown_between_iterations if cooldown_between_iterations is not None else 5
     max_errors = max_consecutive_errors if max_consecutive_errors is not None else 3
+    tool_list = tools if tools is not None else ["Read", "Write", "Edit", "Bash", "Glob", "Grep"]
+
+    # Build tools YAML block
+    # Empty list must produce "tools: []" (disable all), not "tools:" (parsed as None/use defaults)
+    if tool_list:
+        tools_yaml = "tools:\n" + "\n".join(f"      - {t}" for t in tool_list)
+    else:
+        tools_yaml = "tools: []"

     return f"""name: {name}
 display_name: "{display_name}"
@@ -827,13 +852,7 @@ modes:
     model: sonnet
     timeout: 1800
     prompt_template: .ralphx/loops/{name}/prompts/implement.md
-    tools:
-      - Read
-      - Write
-      - Edit
-      - Bash
-      - Glob
-      - Grep
+    {tools_yaml}

 mode_selection:
   strategy: fixed
ralphx/core/planning_service.py
CHANGED
@@ -412,7 +412,7 @@ Start your response with <design_doc> immediately.
         async for event in adapter.stream(
             prompt=prompt,
             model=model,
-            tools=
+            tools=[],  # Explicitly disable tools for artifact generation
             timeout=180,  # Allow more time for artifact generation
         ):
             yield event
ralphx/core/project_db.py
CHANGED
@@ -27,7 +27,7 @@ logger = logging.getLogger(__name__)


 # Schema version for project DB
-PROJECT_SCHEMA_VERSION = 20
+PROJECT_SCHEMA_VERSION = 21

 # Project database schema - all project-specific data
 PROJECT_SCHEMA_SQL = """
@@ -67,7 +67,8 @@ CREATE TABLE IF NOT EXISTS sessions (
     started_at TIMESTAMP,
     duration_seconds REAL,
     status TEXT,
-    items_added TEXT
+    items_added TEXT,
+    account_email TEXT
 );

 -- Session events table (stores parsed events for history and streaming)
@@ -630,9 +631,26 @@ class ProjectDatabase:
         # Run migrations (for future versions > 6)
         self._run_migrations(conn, current_version)

+        # Self-heal: verify critical columns exist even if migrations
+        # partially applied (e.g., version bumped but ALTER TABLE lost)
+        self._verify_schema_columns(conn)
+
         # Create indexes AFTER migrations so all columns exist
         conn.executescript(PROJECT_INDEXES_SQL)

+    def _verify_schema_columns(self, conn: sqlite3.Connection) -> None:
+        """Verify expected columns exist and repair if missing.
+
+        Handles edge cases where migrations bumped the schema version
+        but the ALTER TABLE didn't persist (e.g., due to executescript()
+        transaction semantics).
+        """
+        cursor = conn.execute("PRAGMA table_info(sessions)")
+        columns = {row[1] for row in cursor.fetchall()}
+        if "account_email" not in columns:
+            conn.execute("ALTER TABLE sessions ADD COLUMN account_email TEXT")
+            logger.warning("Repaired missing account_email column in sessions table")
+
     def _backup_before_migration(self, from_version: int) -> None:
         """Create a backup of the database before running migrations.

@@ -733,6 +751,11 @@ class ProjectDatabase:
         # Migration from v19 to v20: Add doc_before/doc_after columns
         if from_version == 19:
             self._migrate_v19_to_v20(conn)
+            from_version = 20  # Continue to next migration
+
+        # Migration from v20 to v21: Add account_email to sessions
+        if from_version == 20:
+            self._migrate_v20_to_v21(conn)

         # Seed workflow templates for fresh databases
         self._seed_workflow_templates(conn)
@@ -1198,6 +1221,20 @@ class ProjectDatabase:
             "ALTER TABLE planning_iterations ADD COLUMN doc_after TEXT"
         )

+    def _migrate_v20_to_v21(self, conn: sqlite3.Connection) -> None:
+        """Migrate from schema v20 to v21.
+
+        Adds:
+        - account_email column to sessions for tracking which Claude account was used
+        """
+        # Idempotent: check if column already exists before adding
+        cursor = conn.execute("PRAGMA table_info(sessions)")
+        columns = {row[1] for row in cursor.fetchall()}
+        if "account_email" not in columns:
+            conn.execute(
+                "ALTER TABLE sessions ADD COLUMN account_email TEXT"
+            )
+
     # ========== Loops ==========

     def create_loop(
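Note: both the v20→v21 migration and the new self-heal pass rely on the same idempotent check: PRAGMA table_info is consulted before the ALTER TABLE, so re-running the step is safe. A standalone sketch of that check (ensure_column is a hypothetical helper, not part of the package):

import sqlite3

def ensure_column(conn: sqlite3.Connection, table: str, column: str, ddl_type: str) -> bool:
    # PRAGMA table_info returns one row per column; index 1 is the column name.
    existing = {row[1] for row in conn.execute(f"PRAGMA table_info({table})")}
    if column not in existing:
        conn.execute(f"ALTER TABLE {table} ADD COLUMN {column} {ddl_type}")
        return True  # column was added (repair or migration)
    return False  # already present, nothing to do

# Example against an in-memory database.
conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE sessions (session_id TEXT)")
print(ensure_column(conn, "sessions", "account_email", "TEXT"))  # True: column added
print(ensure_column(conn, "sessions", "account_email", "TEXT"))  # False: second run is a no-op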
@@ -1406,6 +1443,7 @@
         iteration: int,
         mode: Optional[str] = None,
         status: str = "running",
+        account_email: Optional[str] = None,
     ) -> dict:
         """Create a new session.

@@ -1415,15 +1453,16 @@
             iteration: Iteration number.
             mode: Mode name for this session.
             status: Session status (running, completed, error).
+            account_email: Email of the Claude account used for this session.
         """
         with self._writer() as conn:
             now = datetime.utcnow().isoformat()
             conn.execute(
                 """
-                INSERT INTO sessions (session_id, run_id, iteration, mode, started_at, status)
-                VALUES (?, ?, ?, ?, ?, ?)
+                INSERT OR IGNORE INTO sessions (session_id, run_id, iteration, mode, started_at, status, account_email)
+                VALUES (?, ?, ?, ?, ?, ?, ?)
                 """,
-                (session_id, run_id, iteration, mode, now, status),
+                (session_id, run_id, iteration, mode, now, status, account_email),
             )
             return self.get_session(session_id)

@@ -1483,7 +1522,7 @@
         return [dict(row) for row in cursor.fetchall()]

     _SESSION_UPDATE_COLS = frozenset({
-        "duration_seconds", "status", "items_added"
+        "duration_seconds", "status", "items_added", "account_email"
     })

     def update_session(self, session_id: str, **kwargs) -> bool:
@@ -1705,6 +1744,11 @@
                 return item
         return None

+    # Whitelist of columns allowed for sorting (prevents SQL injection)
+    _WORK_ITEM_SORT_COLUMNS = frozenset({
+        "created_at", "updated_at", "priority", "title", "status", "category", "id"
+    })
+
     def list_work_items(
         self,
         status: Optional[str] = None,
@@ -1715,6 +1759,8 @@
         unclaimed_only: bool = False,
         limit: int = 100,
         offset: int = 0,
+        sort_by: str = "priority",
+        sort_order: str = "desc",
     ) -> tuple[list[dict], int]:
         """List work items with optional filters.

@@ -1727,6 +1773,8 @@
             unclaimed_only: If True, only return items not claimed by any loop.
             limit: Maximum items to return.
             offset: Pagination offset.
+            sort_by: Column to sort by (default: priority).
+            sort_order: Sort order, "asc" or "desc" (default: desc).

         Returns:
             Tuple of (items list, total count).
@@ -1763,10 +1811,21 @@
             )
             total = cursor.fetchone()[0]

+            # Validate and build ORDER BY clause
+            if sort_by not in self._WORK_ITEM_SORT_COLUMNS:
+                sort_by = "priority"
+            order_dir = "ASC" if (sort_order or "desc").lower() == "asc" else "DESC"
+
+            # Build ORDER BY: primary sort + secondary sort by priority
+            if sort_by == "priority":
+                order_clause = f"ORDER BY priority {order_dir} NULLS LAST"
+            else:
+                order_clause = f"ORDER BY {sort_by} {order_dir}, priority ASC NULLS LAST"
+
             # Get items
             query = f"""
                 SELECT * FROM work_items WHERE {where_clause}
-
+                {order_clause}
                 LIMIT ? OFFSET ?
             """
             cursor = conn.execute(query, params + [limit, offset])