groknroll-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- groknroll/__init__.py +36 -0
- groknroll/__main__.py +9 -0
- groknroll/agents/__init__.py +18 -0
- groknroll/agents/agent_manager.py +187 -0
- groknroll/agents/base_agent.py +118 -0
- groknroll/agents/build_agent.py +231 -0
- groknroll/agents/plan_agent.py +215 -0
- groknroll/cli/__init__.py +7 -0
- groknroll/cli/enhanced_cli.py +372 -0
- groknroll/cli/large_codebase_cli.py +413 -0
- groknroll/cli/main.py +331 -0
- groknroll/cli/rlm_commands.py +258 -0
- groknroll/clients/__init__.py +63 -0
- groknroll/clients/anthropic.py +112 -0
- groknroll/clients/azure_openai.py +142 -0
- groknroll/clients/base_lm.py +33 -0
- groknroll/clients/gemini.py +162 -0
- groknroll/clients/litellm.py +105 -0
- groknroll/clients/openai.py +129 -0
- groknroll/clients/portkey.py +94 -0
- groknroll/core/__init__.py +9 -0
- groknroll/core/agent.py +339 -0
- groknroll/core/comms_utils.py +264 -0
- groknroll/core/context.py +251 -0
- groknroll/core/exceptions.py +181 -0
- groknroll/core/large_codebase.py +564 -0
- groknroll/core/lm_handler.py +206 -0
- groknroll/core/rlm.py +446 -0
- groknroll/core/rlm_codebase.py +448 -0
- groknroll/core/rlm_integration.py +256 -0
- groknroll/core/types.py +276 -0
- groknroll/environments/__init__.py +34 -0
- groknroll/environments/base_env.py +182 -0
- groknroll/environments/constants.py +32 -0
- groknroll/environments/docker_repl.py +336 -0
- groknroll/environments/local_repl.py +388 -0
- groknroll/environments/modal_repl.py +502 -0
- groknroll/environments/prime_repl.py +588 -0
- groknroll/logger/__init__.py +4 -0
- groknroll/logger/rlm_logger.py +63 -0
- groknroll/logger/verbose.py +393 -0
- groknroll/operations/__init__.py +15 -0
- groknroll/operations/bash_ops.py +447 -0
- groknroll/operations/file_ops.py +473 -0
- groknroll/operations/git_ops.py +620 -0
- groknroll/oracle/__init__.py +11 -0
- groknroll/oracle/codebase_indexer.py +238 -0
- groknroll/oracle/oracle_agent.py +278 -0
- groknroll/setup.py +34 -0
- groknroll/storage/__init__.py +14 -0
- groknroll/storage/database.py +272 -0
- groknroll/storage/models.py +128 -0
- groknroll/utils/__init__.py +0 -0
- groknroll/utils/parsing.py +168 -0
- groknroll/utils/prompts.py +146 -0
- groknroll/utils/rlm_utils.py +19 -0
- groknroll-2.0.0.dist-info/METADATA +246 -0
- groknroll-2.0.0.dist-info/RECORD +62 -0
- groknroll-2.0.0.dist-info/WHEEL +5 -0
- groknroll-2.0.0.dist-info/entry_points.txt +3 -0
- groknroll-2.0.0.dist-info/licenses/LICENSE +21 -0
- groknroll-2.0.0.dist-info/top_level.txt +1 -0
groknroll/logger/verbose.py
@@ -0,0 +1,393 @@
+"""
+Verbose printing for RLM using rich. Modify this however you please :)
+I was mainly using this for debugging, and a lot of it is vibe-coded.
+
+Provides console output for debugging and understanding RLM execution.
+Uses a "Tokyo Night" inspired color theme.
+"""
+
+from typing import Any
+
+from rich.console import Console, Group
+from rich.panel import Panel
+from rich.rule import Rule
+from rich.style import Style
+from rich.table import Table
+from rich.text import Text
+
+from groknroll.core.types import CodeBlock, RLMIteration, RLMMetadata
+
+# ============================================================================
+# Tokyo Night Color Theme
+# ============================================================================
+COLORS = {
+    "primary": "#7AA2F7",  # Soft blue - headers, titles
+    "secondary": "#BB9AF7",  # Soft purple - emphasis
+    "success": "#9ECE6A",  # Soft green - success, code
+    "warning": "#E0AF68",  # Soft amber - warnings
+    "error": "#F7768E",  # Soft red/pink - errors
+    "text": "#A9B1D6",  # Soft gray-blue - regular text
+    "muted": "#565F89",  # Muted gray - less important
+    "accent": "#7DCFFF",  # Bright cyan - accents
+    "bg_subtle": "#1A1B26",  # Dark background
+    "border": "#3B4261",  # Border color
+    "code_bg": "#24283B",  # Code background
+}
+
+# Rich styles
+STYLE_PRIMARY = Style(color=COLORS["primary"], bold=True)
+STYLE_SECONDARY = Style(color=COLORS["secondary"])
+STYLE_SUCCESS = Style(color=COLORS["success"])
+STYLE_WARNING = Style(color=COLORS["warning"])
+STYLE_ERROR = Style(color=COLORS["error"])
+STYLE_TEXT = Style(color=COLORS["text"])
+STYLE_MUTED = Style(color=COLORS["muted"])
+STYLE_ACCENT = Style(color=COLORS["accent"], bold=True)
+
+
+def _to_str(value: Any) -> str:
+    """Convert any value to string safely."""
+    if isinstance(value, str):
+        return value
+    return str(value)
+
+
+class VerbosePrinter:
+    """
+    Rich console printer for RLM verbose output.
+
+    Displays beautiful, structured output showing the RLM's execution:
+    - Initial configuration panel
+    - Each iteration with response summaries
+    - Code execution with results
+    - Sub-calls to other models
+    """
+
+    def __init__(self, enabled: bool = True):
+        """
+        Initialize the verbose printer.
+
+        Args:
+            enabled: Whether verbose printing is enabled. If False, all methods are no-ops.
+        """
+        self.enabled = enabled
+        self.console = Console() if enabled else None
+        self._iteration_count = 0
+
+    def print_header(
+        self,
+        backend: str,
+        model: str,
+        environment: str,
+        max_iterations: int,
+        max_depth: int,
+        other_backends: list[str] | None = None,
+    ) -> None:
+        """Print the initial RLM configuration header."""
+        if not self.enabled:
+            return
+
+        # Main title
+        title = Text()
+        title.append("◆ ", style=STYLE_ACCENT)
+        title.append("RLM", style=Style(color=COLORS["primary"], bold=True))
+        title.append(" ━ Recursive Language Model", style=STYLE_MUTED)
+
+        # Configuration table
+        config_table = Table(
+            show_header=False,
+            show_edge=False,
+            box=None,
+            padding=(0, 2),
+            expand=True,
+        )
+        config_table.add_column("key", style=STYLE_MUTED, width=16)
+        config_table.add_column("value", style=STYLE_TEXT)
+        config_table.add_column("key2", style=STYLE_MUTED, width=16)
+        config_table.add_column("value2", style=STYLE_TEXT)
+
+        config_table.add_row(
+            "Backend",
+            Text(backend, style=STYLE_SECONDARY),
+            "Environment",
+            Text(environment, style=STYLE_SECONDARY),
+        )
+        config_table.add_row(
+            "Model",
+            Text(model, style=STYLE_ACCENT),
+            "Max Iterations",
+            Text(str(max_iterations), style=STYLE_WARNING),
+        )
+
+        if other_backends:
+            backends_text = Text(", ".join(other_backends), style=STYLE_SECONDARY)
+            config_table.add_row(
+                "Sub-models",
+                backends_text,
+                "Max Depth",
+                Text(str(max_depth), style=STYLE_WARNING),
+            )
+        else:
+            config_table.add_row(
+                "Max Depth",
+                Text(str(max_depth), style=STYLE_WARNING),
+                "",
+                "",
+            )
+
+        # Wrap in panel
+        panel = Panel(
+            config_table,
+            title=title,
+            title_align="left",
+            border_style=COLORS["border"],
+            padding=(1, 2),
+        )
+
+        self.console.print()
+        self.console.print(panel)
+        self.console.print()
+
+    def print_metadata(self, metadata: RLMMetadata) -> None:
+        """Print RLM metadata as header."""
+        if not self.enabled:
+            return
+
+        model = metadata.backend_kwargs.get("model_name", "unknown")
+        other = list(metadata.other_backends) if metadata.other_backends else None
+
+        self.print_header(
+            backend=metadata.backend,
+            model=model,
+            environment=metadata.environment_type,
+            max_iterations=metadata.max_iterations,
+            max_depth=metadata.max_depth,
+            other_backends=other,
+        )
+
+    def print_iteration_start(self, iteration: int) -> None:
+        """Print the start of a new iteration."""
+        if not self.enabled:
+            return
+
+        self._iteration_count = iteration
+
+        rule = Rule(
+            Text(f" Iteration {iteration} ", style=STYLE_PRIMARY),
+            style=COLORS["border"],
+            characters="─",
+        )
+        self.console.print(rule)
+
+    def print_completion(self, response: Any, iteration_time: float | None = None) -> None:
+        """Print a completion response."""
+        if not self.enabled:
+            return
+
+        # Header with timing
+        header = Text()
+        header.append("◇ ", style=STYLE_ACCENT)
+        header.append("LLM Response", style=STYLE_PRIMARY)
+        if iteration_time:
+            header.append(f" ({iteration_time:.2f}s)", style=STYLE_MUTED)
+
+        # Response content
+        response_str = _to_str(response)
+        response_text = Text(response_str, style=STYLE_TEXT)
+
+        # Count words roughly
+        word_count = len(response_str.split())
+        footer = Text(f"~{word_count} words", style=STYLE_MUTED)
+
+        panel = Panel(
+            Group(response_text, Text(), footer),
+            title=header,
+            title_align="left",
+            border_style=COLORS["muted"],
+            padding=(0, 1),
+        )
+        self.console.print(panel)
+
+    def print_code_execution(self, code_block: CodeBlock) -> None:
+        """Print code execution details."""
+        if not self.enabled:
+            return
+
+        result = code_block.result
+
+        # Header
+        header = Text()
+        header.append("▸ ", style=STYLE_SUCCESS)
+        header.append("Code Execution", style=Style(color=COLORS["success"], bold=True))
+        if result.execution_time:
+            header.append(f" ({result.execution_time:.3f}s)", style=STYLE_MUTED)
+
+        # Build content
+        content_parts = []
+
+        # Code snippet
+        code_text = Text()
+        code_text.append("Code:\n", style=STYLE_MUTED)
+        code_text.append(_to_str(code_block.code), style=STYLE_TEXT)
+        content_parts.append(code_text)
+
+        # Stdout if present
+        stdout_str = _to_str(result.stdout) if result.stdout else ""
+        if stdout_str.strip():
+            stdout_text = Text()
+            stdout_text.append("\nOutput:\n", style=STYLE_MUTED)
+            stdout_text.append(stdout_str, style=STYLE_SUCCESS)
+            content_parts.append(stdout_text)
+
+        # Stderr if present (error)
+        stderr_str = _to_str(result.stderr) if result.stderr else ""
+        if stderr_str.strip():
+            stderr_text = Text()
+            stderr_text.append("\nError:\n", style=STYLE_MUTED)
+            stderr_text.append(stderr_str, style=STYLE_ERROR)
+            content_parts.append(stderr_text)
+
+        # Sub-calls summary
+        if result.rlm_calls:
+            calls_text = Text()
+            calls_text.append(f"\n↳ {len(result.rlm_calls)} sub-call(s)", style=STYLE_SECONDARY)
+            content_parts.append(calls_text)
+
+        panel = Panel(
+            Group(*content_parts),
+            title=header,
+            title_align="left",
+            border_style=COLORS["success"],
+            padding=(0, 1),
+        )
+        self.console.print(panel)
+
+    def print_subcall(
+        self,
+        model: str,
+        prompt_preview: str,
+        response_preview: str,
+        execution_time: float | None = None,
+    ) -> None:
+        """Print a sub-call to another model."""
+        if not self.enabled:
+            return
+
+        # Header
+        header = Text()
+        header.append(" ↳ ", style=STYLE_SECONDARY)
+        header.append("Sub-call: ", style=STYLE_SECONDARY)
+        header.append(_to_str(model), style=STYLE_ACCENT)
+        if execution_time:
+            header.append(f" ({execution_time:.2f}s)", style=STYLE_MUTED)
+
+        # Content
+        content = Text()
+        content.append("Prompt: ", style=STYLE_MUTED)
+        content.append(_to_str(prompt_preview), style=STYLE_TEXT)
+        content.append("\nResponse: ", style=STYLE_MUTED)
+        content.append(_to_str(response_preview), style=STYLE_TEXT)
+
+        panel = Panel(
+            content,
+            title=header,
+            title_align="left",
+            border_style=COLORS["secondary"],
+            padding=(0, 1),
+        )
+        self.console.print(panel)
+
+    def print_iteration(self, iteration: RLMIteration, iteration_num: int) -> None:
+        """
+        Print a complete iteration including response and code executions.
+        This is the main entry point for printing an iteration.
+        """
+        if not self.enabled:
+            return
+
+        # Print iteration header
+        self.print_iteration_start(iteration_num)
+
+        # Print the LLM response
+        self.print_completion(iteration.response, iteration.iteration_time)
+
+        # Print each code block execution
+        for code_block in iteration.code_blocks:
+            self.print_code_execution(code_block)
+
+            # Print any sub-calls made during this code block
+            for call in code_block.result.rlm_calls:
+                self.print_subcall(
+                    model=call.root_model,
+                    prompt_preview=_to_str(call.prompt) if call.prompt else "",
+                    response_preview=_to_str(call.response) if call.response else "",
+                    execution_time=call.execution_time,
+                )
+
+    def print_final_answer(self, answer: Any) -> None:
+        """Print the final answer."""
+        if not self.enabled:
+            return
+
+        # Title
+        title = Text()
+        title.append("★ ", style=STYLE_WARNING)
+        title.append("Final Answer", style=Style(color=COLORS["warning"], bold=True))
+
+        # Answer content
+        answer_text = Text(_to_str(answer), style=STYLE_TEXT)
+
+        panel = Panel(
+            answer_text,
+            title=title,
+            title_align="left",
+            border_style=COLORS["warning"],
+            padding=(1, 2),
+        )
+
+        self.console.print()
+        self.console.print(panel)
+        self.console.print()
+
+    def print_summary(
+        self,
+        total_iterations: int,
+        total_time: float,
+        usage_summary: dict[str, Any] | None = None,
+    ) -> None:
+        """Print a summary at the end of execution."""
+        if not self.enabled:
+            return
+
+        # Summary table
+        summary_table = Table(
+            show_header=False,
+            show_edge=False,
+            box=None,
+            padding=(0, 2),
+        )
+        summary_table.add_column("metric", style=STYLE_MUTED)
+        summary_table.add_column("value", style=STYLE_ACCENT)
+
+        summary_table.add_row("Iterations", str(total_iterations))
+        summary_table.add_row("Total Time", f"{total_time:.2f}s")
+
+        if usage_summary:
+            total_input = sum(
+                m.get("total_input_tokens", 0)
+                for m in usage_summary.get("model_usage_summaries", {}).values()
+            )
+            total_output = sum(
+                m.get("total_output_tokens", 0)
+                for m in usage_summary.get("model_usage_summaries", {}).values()
+            )
+            if total_input or total_output:
+                summary_table.add_row("Input Tokens", f"{total_input:,}")
+                summary_table.add_row("Output Tokens", f"{total_output:,}")
+
+        # Wrap in rule
+        self.console.print()
+        self.console.print(Rule(style=COLORS["border"], characters="═"))
+        self.console.print(summary_table, justify="center")
+        self.console.print(Rule(style=COLORS["border"], characters="═"))
+        self.console.print()
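For orientation, a minimal usage sketch of the VerbosePrinter added above (this sketch is not part of the diff; the backend, model, and environment strings are placeholder values):

from groknroll.logger.verbose import VerbosePrinter

printer = VerbosePrinter(enabled=True)  # enabled=False turns every method into a no-op
printer.print_header(
    backend="openai",       # placeholder values for illustration only
    model="gpt-4o",
    environment="local",
    max_iterations=10,
    max_depth=2,
)
# ... during the RLM loop, each completed iteration would be passed to
# printer.print_iteration(iteration, iteration_num) ...
printer.print_final_answer("42")
printer.print_summary(total_iterations=3, total_time=12.5)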
groknroll/operations/__init__.py
@@ -0,0 +1,15 @@
+"""
+Operations modules for groknroll
+
+File, bash, and git operations for the coding agent.
+"""
+
+from groknroll.operations.file_ops import FileOperations
+from groknroll.operations.bash_ops import BashOperations
+from groknroll.operations.git_ops import GitOperations
+
+__all__ = [
+    "FileOperations",
+    "BashOperations",
+    "GitOperations",
+]
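Since this __init__.py re-exports all three classes, downstream code can import them from the package root rather than the submodules (the class constructors are not shown in this diff, so no instantiation is sketched here):

from groknroll.operations import FileOperations, BashOperations, GitOperations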