code-puppy 0.0.380__py3-none-any.whl → 0.0.382__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- code_puppy/callbacks.py +70 -0
- code_puppy/cli_runner.py +93 -1
- code_puppy/command_line/core_commands.py +61 -0
- code_puppy/command_line/motd.py +26 -3
- code_puppy/command_line/wiggum_state.py +78 -0
- code_puppy/config.py +3 -2
- code_puppy/model_factory.py +43 -0
- code_puppy/tools/browser/browser_manager.py +66 -5
- {code_puppy-0.0.380.dist-info → code_puppy-0.0.382.dist-info}/METADATA +1 -1
- {code_puppy-0.0.380.dist-info → code_puppy-0.0.382.dist-info}/RECORD +15 -22
- code_puppy/plugins/ralph/__init__.py +0 -13
- code_puppy/plugins/ralph/agents.py +0 -433
- code_puppy/plugins/ralph/commands.py +0 -208
- code_puppy/plugins/ralph/loop_controller.py +0 -289
- code_puppy/plugins/ralph/models.py +0 -125
- code_puppy/plugins/ralph/register_callbacks.py +0 -140
- code_puppy/plugins/ralph/state_manager.py +0 -322
- code_puppy/plugins/ralph/tools.py +0 -451
- {code_puppy-0.0.380.data → code_puppy-0.0.382.data}/data/code_puppy/models.json +0 -0
- {code_puppy-0.0.380.data → code_puppy-0.0.382.data}/data/code_puppy/models_dev_api.json +0 -0
- {code_puppy-0.0.380.dist-info → code_puppy-0.0.382.dist-info}/WHEEL +0 -0
- {code_puppy-0.0.380.dist-info → code_puppy-0.0.382.dist-info}/entry_points.txt +0 -0
- {code_puppy-0.0.380.dist-info → code_puppy-0.0.382.dist-info}/licenses/LICENSE +0 -0
code_puppy/plugins/ralph/commands.py (removed)
@@ -1,208 +0,0 @@

```python
"""Ralph plugin slash commands - registered via custom_command callback."""

from typing import Any, List, Optional, Tuple

from code_puppy.messaging import emit_error, emit_info, emit_success, emit_warning
from code_puppy.plugins.customizable_commands.register_callbacks import (
    MarkdownCommandResult,
)

from .state_manager import get_state_manager


def get_ralph_help() -> List[Tuple[str, str]]:
    """Get help entries for Ralph commands.

    Returns:
        List of (command_name, description) tuples.
    """
    return [
        ("ralph", "Show Ralph help and usage"),
        ("ralph status", "Show current prd.json status"),
        ("ralph prd", "Switch to PRD Generator agent to create a new PRD"),
        ("ralph convert", "Switch to Ralph Converter agent to convert PRD to JSON"),
        ("ralph start [N]", "Start the autonomous loop (optional: max N iterations)"),
        ("ralph stop", "Stop the running Ralph loop after current iteration"),
        ("ralph reset", "Archive current run and reset for a new PRD"),
    ]


def handle_ralph_command(command: str, name: str) -> Optional[Any]:
    """Handle /ralph commands.

    Args:
        command: Full command string (e.g., "/ralph status")
        name: Command name without slash (e.g., "ralph")

    Returns:
        - True if handled (no further action)
        - String to process as agent input
        - None if not a ralph command
    """
    if not name.startswith("ralph"):
        return None

    # Parse subcommand
    parts = command.strip().split(maxsplit=2)
    subcommand = parts[1] if len(parts) > 1 else "help"
    args = parts[2] if len(parts) > 2 else ""

    # Route to handler
    handlers = {
        "help": _handle_help,
        "status": _handle_status,
        "prd": _handle_prd,
        "convert": _handle_convert,
        "start": _handle_start,
        "stop": _handle_stop,
        "reset": _handle_reset,
    }

    handler = handlers.get(subcommand, _handle_help)
    return handler(args)


def _handle_help(args: str) -> bool:
    """Show Ralph help."""
    help_text = """
🐺 **Ralph - Autonomous AI Agent Loop**

Ralph runs AI coding agents repeatedly until all PRD items are complete.
Based on Geoffrey Huntley's Ralph pattern: https://ghuntley.com/ralph/

**Commands:**

`/ralph status` - Show current prd.json status and progress
`/ralph prd` - Create a new PRD (Product Requirements Document)
`/ralph convert` - Convert a markdown PRD to prd.json format
`/ralph start [N]` - Start the autonomous loop (max N iterations, default 10)
`/ralph stop` - Stop the loop after current iteration
`/ralph reset` - Archive current run and start fresh

**Workflow:**

1. `/ralph prd` - Create a detailed PRD with user stories
2. `/ralph convert` - Convert it to prd.json format
3. `/ralph start` - Let Ralph autonomously implement each story

**Key Files:**

- `prd.json` - User stories with completion status
- `progress.txt` - Learnings and patterns for future iterations
- `archive/` - Previous runs (auto-archived on branch change)
"""
    emit_info(help_text)
    return True


def _handle_status(args: str) -> bool:
    """Show PRD status."""
    manager = get_state_manager()

    if not manager.prd_exists():
        emit_warning("No prd.json found in current directory.")
        emit_info(
            "Use `/ralph prd` to create a PRD, then `/ralph convert` to generate prd.json"
        )
        return True

    status = manager.get_status_summary()
    emit_info(status)

    # Also show patterns if any
    patterns = manager.read_codebase_patterns()
    if patterns and "<!--" not in patterns:  # Skip if just the placeholder
        emit_info("\n**Codebase Patterns:**")
        emit_info(patterns)

    return True


def _handle_prd(args: str) -> MarkdownCommandResult:
    """Switch to PRD Generator agent."""
    emit_info("🐺 Switching to Ralph PRD Generator...")

    # Return a MarkdownCommandResult so it's processed as agent input
    return MarkdownCommandResult(
        "/agent ralph-prd-generator\nI want to create a new PRD. Please help me define the requirements."
    )


def _handle_convert(args: str) -> MarkdownCommandResult:
    """Switch to Ralph Converter agent."""
    emit_info("🐺 Switching to Ralph Converter...")

    # Return a MarkdownCommandResult so it's processed as agent input
    if args:
        return MarkdownCommandResult(
            f"/agent ralph-converter\nPlease convert the PRD in {args} to prd.json format."
        )
    else:
        return MarkdownCommandResult(
            "/agent ralph-converter\nPlease help me convert my PRD to prd.json format."
        )


def _handle_start(args: str) -> str | bool:
    """Start the Ralph autonomous loop."""
    manager = get_state_manager()

    if not manager.prd_exists():
        emit_error("No prd.json found!")
        emit_info(
            "First create a PRD with `/ralph prd` and convert it with `/ralph convert`"
        )
        return True

    # Check if there's work to do
    prd = manager.read_prd()
    if prd and prd.all_complete():
        emit_success("🎉 All stories are already complete!")
        return True

    # Parse max iterations if provided
    max_iter = 10
    if args:
        try:
            max_iter = int(args)
        except ValueError:
            emit_warning(f"Invalid max_iterations '{args}', using default 10")

    emit_info(f"🐺 Starting Ralph with max {max_iter} iterations...")

    # Return a prompt that tells the agent to run the loop
    return f"Call the ralph_run_loop tool with max_iterations={max_iter} to start the autonomous Ralph loop. This will implement all pending stories from prd.json one by one."


def _handle_stop(args: str) -> bool:
    """Stop the running Ralph loop."""
    emit_info("To stop the Ralph loop, press Ctrl+C or your configured cancel key.")
    emit_info("The loop will halt after the current iteration completes.")
    return True


def _handle_reset(args: str) -> bool:
    """Archive current run and reset."""
    manager = get_state_manager()

    if not manager.prd_exists():
        emit_info("Nothing to reset - no prd.json found.")
        return True

    # Archive if there's content
    progress = manager.read_progress()
    if progress and len(progress) > 100:
        archive_path = manager.archive_current_run()
        if archive_path:
            emit_success(f"📦 Archived to: {archive_path}")

    # Reset
    manager.reset_for_new_run()

    # Delete prd.json
    if manager.prd_file.exists():
        manager.prd_file.unlink()
        emit_info("🗑️ Removed prd.json")

    emit_success("✨ Reset complete! Ready for a new PRD.")
    return True
```
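The removed dispatcher is a plain subcommand table: split off the subcommand, look it up, and fall back to help. A minimal, self-contained sketch of that pattern follows; the handler bodies are stand-ins for illustration, not the plugin's real handlers.

```python
from typing import Optional


def dispatch(command: str) -> Optional[str]:
    """Toy version of the /ralph dispatch logic shown in the removed commands.py."""
    parts = command.strip().split(maxsplit=2)
    if not parts or parts[0] != "/ralph":
        return None  # not a ralph command
    subcommand = parts[1] if len(parts) > 1 else "help"
    args = parts[2] if len(parts) > 2 else ""
    handlers = {
        "help": lambda a: "show help",
        "start": lambda a: f"start loop, max_iterations={a or 10}",
        "stop": lambda a: "stop after current iteration",
    }
    # Unknown subcommands fall back to help, mirroring handlers.get(subcommand, _handle_help).
    return handlers.get(subcommand, handlers["help"])(args)


print(dispatch("/ralph start 5"))  # start loop, max_iterations=5
print(dispatch("/ralph bogus"))    # show help
print(dispatch("/commit"))         # None
```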
code_puppy/plugins/ralph/loop_controller.py (removed)
@@ -1,289 +0,0 @@

```python
"""Ralph Loop Controller - Manages the autonomous iteration loop.

This is the "outer loop" that spawns fresh agent instances per iteration,
just like the original ralph.sh bash script does.
"""

import asyncio
import logging
from typing import Awaitable, Callable, Optional

from code_puppy.messaging import emit_error, emit_info, emit_success, emit_warning

from .state_manager import get_state_manager

logger = logging.getLogger(__name__)


class RalphLoopController:
    """Controls the Ralph autonomous loop.

    Each iteration:
    1. Checks if there's work to do
    2. Invokes the ralph-orchestrator agent with a FRESH session
    3. Waits for completion
    4. Checks if all stories are done or if we should continue
    """

    def __init__(self, max_iterations: int = 10):
        self.max_iterations = max_iterations
        self.current_iteration = 0
        self.is_complete = False
        self.is_running = False
        self._stop_requested = False

    def request_stop(self) -> None:
        """Request the loop to stop after current iteration."""
        self._stop_requested = True
        emit_warning("🛑 Stop requested - will halt after current iteration")

    async def run(
        self,
        invoke_func: Callable[[str, str, Optional[str]], Awaitable[dict]],
    ) -> dict:
        """Run the Ralph loop until completion or max iterations.

        Args:
            invoke_func: Async function to invoke an agent.
                Signature: (agent_name, prompt, session_id) -> result_dict
                The result_dict should have 'response' and 'error' keys.

        Returns:
            dict with 'success', 'iterations', 'message' keys
        """
        self.is_running = True
        self.is_complete = False
        self._stop_requested = False

        manager = get_state_manager()

        # Pre-flight checks
        if not manager.prd_exists():
            self.is_running = False
            return {
                "success": False,
                "iterations": 0,
                "message": "No prd.json found. Create one with /ralph prd first.",
            }

        if manager.all_stories_complete():
            self.is_running = False
            return {
                "success": True,
                "iterations": 0,
                "message": "All stories already complete!",
            }

        prd = manager.read_prd()
        emit_info("🐺 Starting Ralph Loop")
        emit_info(f"📋 Project: {prd.project if prd else 'Unknown'}")
        emit_info(f"📊 Progress: {prd.get_progress_summary() if prd else 'Unknown'}")
        emit_info(f"🔄 Max iterations: {self.max_iterations}")
        emit_info("─" * 50)

        try:
            for iteration in range(1, self.max_iterations + 1):
                self.current_iteration = iteration

                # Check for stop request
                if self._stop_requested:
                    emit_warning(f"🛑 Stopped at iteration {iteration}")
                    break

                # Check if already complete
                if manager.all_stories_complete():
                    self.is_complete = True
                    emit_success("🎉 All stories complete!")
                    break

                # Get current story for logging
                story = manager.get_next_story()
                if story is None:
                    self.is_complete = True
                    emit_success("🎉 All stories complete!")
                    break

                emit_info(f"\n{'=' * 60}")
                emit_info(f"🐺 RALPH ITERATION {iteration} of {self.max_iterations}")
                emit_info(f"📌 Working on: [{story.id}] {story.title}")
                emit_info(f"{'=' * 60}\n")

                # Build the prompt for this iteration
                iteration_prompt = self._build_iteration_prompt(story)

                # Invoke orchestrator with FRESH session (unique per iteration)
                session_id = f"ralph-iter-{iteration}"

                try:
                    result = await invoke_func(
                        "ralph-orchestrator",
                        iteration_prompt,
                        session_id,
                    )

                    response = result.get("response", "")
                    error = result.get("error")

                    if error:
                        emit_error(f"Iteration {iteration} error: {error}")
                        # Continue to next iteration despite error
                        continue

                    # Check for completion signal in response
                    if response and "<promise>COMPLETE</promise>" in response:
                        self.is_complete = True
                        emit_success("🎉 Ralph signaled COMPLETE - all stories done!")
                        break

                except asyncio.CancelledError:
                    emit_warning(f"🛑 Iteration {iteration} cancelled")
                    break
                except Exception as e:
                    emit_error(f"Iteration {iteration} failed: {e}")
                    logger.exception(f"Ralph iteration {iteration} failed")
                    # Continue to next iteration
                    continue

                # Brief pause between iterations
                await asyncio.sleep(1)

            else:
                # Loop completed without break (max iterations reached)
                emit_warning(f"⚠️ Reached max iterations ({self.max_iterations})")

        finally:
            self.is_running = False

        # Final status
        prd = manager.read_prd()
        final_progress = prd.get_progress_summary() if prd else "Unknown"

        return {
            "success": self.is_complete,
            "iterations": self.current_iteration,
            "message": f"Completed {self.current_iteration} iterations. {final_progress}",
            "all_complete": self.is_complete,
        }

    def _build_iteration_prompt(self, story) -> str:
        """Build the prompt for a single iteration."""
        # Find VERIFY criteria
        verify_criteria = [
            c for c in story.acceptance_criteria if c.startswith("VERIFY:")
        ]
        other_criteria = [
            c for c in story.acceptance_criteria if not c.startswith("VERIFY:")
        ]

        verify_section = ""
        if verify_criteria:
            verify_section = f"""
## MANDATORY VERIFICATION COMMANDS
You MUST run these commands and they MUST succeed before marking complete:
{chr(10).join(f"  {c}" for c in verify_criteria)}

If ANY verification fails, fix the code and re-run until it passes!
"""

        return f"""Execute ONE iteration of the Ralph loop.

## Current Story
- **ID:** {story.id}
- **Title:** {story.title}
- **Description:** {story.description}

## Acceptance Criteria (implement ALL of these):
{chr(10).join(f"  - {c}" for c in other_criteria)}
{verify_section}
## Requires UI Verification: {story.has_ui_verification()}
{"If yes, invoke qa-kitten to verify UI changes work correctly." if story.has_ui_verification() else ""}

## Your Task

1. Call `ralph_read_patterns()` to get context from previous iterations
2. Implement this ONE story completely
3. **RUN ALL VERIFY COMMANDS** - they must pass!
4. If checks pass, commit with: `git commit -m "feat: {story.id} - {story.title}"`
5. Call `ralph_mark_story_complete("{story.id}", "Verified: <what you tested>")`
6. Call `ralph_log_progress(...)` with what you learned
7. Call `ralph_check_all_complete()` to see if we're done

If ALL stories are complete, output: <promise>COMPLETE</promise>

⚠️ DO NOT mark complete until verification passes! Actually run the VERIFY commands!
"""


# Global controller instance
_controller: Optional[RalphLoopController] = None


def get_loop_controller(max_iterations: int = 10) -> RalphLoopController:
    """Get or create the loop controller."""
    global _controller
    if _controller is None or not _controller.is_running:
        _controller = RalphLoopController(max_iterations)
    return _controller


async def run_ralph_loop(
    max_iterations: int = 10,
    invoke_func: Optional[Callable] = None,
) -> dict:
    """Convenience function to run the Ralph loop.

    Args:
        max_iterations: Maximum number of iterations
        invoke_func: Function to invoke agents. If None, uses default.

    Returns:
        Result dict from the controller
    """
    if invoke_func is None:
        # Use the default agent invocation mechanism
        invoke_func = _default_invoke_agent

    controller = get_loop_controller(max_iterations)
    return await controller.run(invoke_func)


async def _default_invoke_agent(
    agent_name: str,
    prompt: str,
    session_id: Optional[str] = None,
) -> dict:
    """Default agent invocation using code_puppy's agent system."""
    try:
        from code_puppy.agents import get_current_agent, load_agent, set_current_agent

        # Save current agent to restore later
        original_agent = get_current_agent()

        try:
            # Load the target agent
            target_agent = load_agent(agent_name)
            if target_agent is None:
                return {"response": None, "error": f"Agent '{agent_name}' not found"}

            # Run the agent with the prompt
            # Note: This creates a fresh run with no message history
            result = await target_agent.run_with_mcp(prompt)

            # Extract response text
            response_text = ""
            if result is not None:
                if hasattr(result, "data"):
                    response_text = str(result.data) if result.data else ""
                else:
                    response_text = str(result)

            return {"response": response_text, "error": None}

        finally:
            # Restore original agent
            if original_agent:
                set_current_agent(original_agent.name)

    except Exception as e:
        logger.exception(f"Failed to invoke agent {agent_name}")
        return {"response": None, "error": str(e)}
```
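`RalphLoopController.run()` depends only on the `invoke_func` contract documented in its docstring: an async callable taking (agent_name, prompt, session_id) and returning a dict with "response" and "error" keys. A self-contained, hedged sketch of a conforming stub is shown below; it never calls a real agent, and only in code-puppy 0.0.380 or earlier (where the plugin still ships) could something like it be passed to `run_ralph_loop(invoke_func=...)`.

```python
import asyncio
from typing import Optional


async def fake_invoke(agent_name: str, prompt: str, session_id: Optional[str] = None) -> dict:
    """Stub matching the documented contract: a dict with 'response' and 'error' keys."""
    print(f"[{session_id}] would run {agent_name} on a {len(prompt)}-char prompt")
    # Returning the completion marker is how an iteration signals the controller to stop.
    return {"response": "<promise>COMPLETE</promise>", "error": None}


async def main() -> None:
    result = await fake_invoke(
        "ralph-orchestrator", "Execute ONE iteration of the Ralph loop.", "ralph-iter-1"
    )
    assert result["error"] is None
    assert "<promise>COMPLETE</promise>" in result["response"]
    print(result)


asyncio.run(main())
```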
code_puppy/plugins/ralph/models.py (removed)
@@ -1,125 +0,0 @@

```python
"""Data models for the Ralph plugin."""

import json
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Optional


@dataclass
class UserStory:
    """A single user story in the PRD."""

    id: str
    title: str
    description: str
    acceptance_criteria: List[str]
    priority: int
    passes: bool = False
    notes: str = ""

    def to_dict(self) -> dict:
        return {
            "id": self.id,
            "title": self.title,
            "description": self.description,
            "acceptanceCriteria": self.acceptance_criteria,
            "priority": self.priority,
            "passes": self.passes,
            "notes": self.notes,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "UserStory":
        return cls(
            id=data.get("id", ""),
            title=data.get("title", ""),
            description=data.get("description", ""),
            acceptance_criteria=data.get("acceptanceCriteria", []),
            priority=data.get("priority", 0),
            passes=data.get("passes", False),
            notes=data.get("notes", ""),
        )

    def has_ui_verification(self) -> bool:
        """Check if this story requires browser/UI verification."""
        ui_keywords = ["browser", "ui", "verify in browser", "qa-kitten", "visual"]
        criteria_text = " ".join(self.acceptance_criteria).lower()
        return any(kw in criteria_text for kw in ui_keywords)


@dataclass
class PRDConfig:
    """Configuration for a PRD project."""

    project: str
    branch_name: str
    description: str
    user_stories: List[UserStory] = field(default_factory=list)

    def to_dict(self) -> dict:
        return {
            "project": self.project,
            "branchName": self.branch_name,
            "description": self.description,
            "userStories": [s.to_dict() for s in self.user_stories],
        }

    @classmethod
    def from_dict(cls, data: dict) -> "PRDConfig":
        return cls(
            project=data.get("project", ""),
            branch_name=data.get("branchName", ""),
            description=data.get("description", ""),
            user_stories=[UserStory.from_dict(s) for s in data.get("userStories", [])],
        )

    def to_json(self, indent: int = 2) -> str:
        return json.dumps(self.to_dict(), indent=indent)

    @classmethod
    def from_json(cls, json_str: str) -> "PRDConfig":
        return cls.from_dict(json.loads(json_str))

    def get_next_story(self) -> Optional[UserStory]:
        """Get the highest priority story that hasn't passed yet."""
        pending = [s for s in self.user_stories if not s.passes]
        if not pending:
            return None
        return min(pending, key=lambda s: s.priority)

    def all_complete(self) -> bool:
        """Check if all stories have passed."""
        return all(s.passes for s in self.user_stories)

    def get_progress_summary(self) -> str:
        """Get a summary of progress."""
        total = len(self.user_stories)
        done = sum(1 for s in self.user_stories if s.passes)
        return f"{done}/{total} stories complete"


@dataclass
class ProgressEntry:
    """An entry in the progress log."""

    timestamp: datetime
    story_id: str
    summary: str
    files_changed: List[str] = field(default_factory=list)
    learnings: List[str] = field(default_factory=list)

    def to_markdown(self) -> str:
        """Convert to markdown format for progress.txt."""
        lines = [
            f"## {self.timestamp.strftime('%Y-%m-%d %H:%M')} - {self.story_id}",
            f"- {self.summary}",
        ]
        if self.files_changed:
            lines.append(f"- Files changed: {', '.join(self.files_changed)}")
        if self.learnings:
            lines.append("- **Learnings for future iterations:**")
            for learning in self.learnings:
                lines.append(f"  - {learning}")
        lines.append("---")
        return "\n".join(lines)
```