groknroll 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- groknroll/__init__.py +36 -0
- groknroll/__main__.py +9 -0
- groknroll/agents/__init__.py +18 -0
- groknroll/agents/agent_manager.py +187 -0
- groknroll/agents/base_agent.py +118 -0
- groknroll/agents/build_agent.py +231 -0
- groknroll/agents/plan_agent.py +215 -0
- groknroll/cli/__init__.py +7 -0
- groknroll/cli/enhanced_cli.py +372 -0
- groknroll/cli/large_codebase_cli.py +413 -0
- groknroll/cli/main.py +331 -0
- groknroll/cli/rlm_commands.py +258 -0
- groknroll/clients/__init__.py +63 -0
- groknroll/clients/anthropic.py +112 -0
- groknroll/clients/azure_openai.py +142 -0
- groknroll/clients/base_lm.py +33 -0
- groknroll/clients/gemini.py +162 -0
- groknroll/clients/litellm.py +105 -0
- groknroll/clients/openai.py +129 -0
- groknroll/clients/portkey.py +94 -0
- groknroll/core/__init__.py +9 -0
- groknroll/core/agent.py +339 -0
- groknroll/core/comms_utils.py +264 -0
- groknroll/core/context.py +251 -0
- groknroll/core/exceptions.py +181 -0
- groknroll/core/large_codebase.py +564 -0
- groknroll/core/lm_handler.py +206 -0
- groknroll/core/rlm.py +446 -0
- groknroll/core/rlm_codebase.py +448 -0
- groknroll/core/rlm_integration.py +256 -0
- groknroll/core/types.py +276 -0
- groknroll/environments/__init__.py +34 -0
- groknroll/environments/base_env.py +182 -0
- groknroll/environments/constants.py +32 -0
- groknroll/environments/docker_repl.py +336 -0
- groknroll/environments/local_repl.py +388 -0
- groknroll/environments/modal_repl.py +502 -0
- groknroll/environments/prime_repl.py +588 -0
- groknroll/logger/__init__.py +4 -0
- groknroll/logger/rlm_logger.py +63 -0
- groknroll/logger/verbose.py +393 -0
- groknroll/operations/__init__.py +15 -0
- groknroll/operations/bash_ops.py +447 -0
- groknroll/operations/file_ops.py +473 -0
- groknroll/operations/git_ops.py +620 -0
- groknroll/oracle/__init__.py +11 -0
- groknroll/oracle/codebase_indexer.py +238 -0
- groknroll/oracle/oracle_agent.py +278 -0
- groknroll/setup.py +34 -0
- groknroll/storage/__init__.py +14 -0
- groknroll/storage/database.py +272 -0
- groknroll/storage/models.py +128 -0
- groknroll/utils/__init__.py +0 -0
- groknroll/utils/parsing.py +168 -0
- groknroll/utils/prompts.py +146 -0
- groknroll/utils/rlm_utils.py +19 -0
- groknroll-2.0.0.dist-info/METADATA +246 -0
- groknroll-2.0.0.dist-info/RECORD +62 -0
- groknroll-2.0.0.dist-info/WHEEL +5 -0
- groknroll-2.0.0.dist-info/entry_points.txt +3 -0
- groknroll-2.0.0.dist-info/licenses/LICENSE +21 -0
- groknroll-2.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,448 @@
|
|
|
1
|
+
"""
|
|
2
|
+
RLM-Powered Codebase Analysis
|
|
3
|
+
|
|
4
|
+
Use RLM's unlimited context to work with massive codebases.
|
|
5
|
+
No chunking needed - RLM recursively processes everything!
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Dict, List, Any, Optional
|
|
10
|
+
from dataclasses import dataclass
|
|
11
|
+
|
|
12
|
+
from groknroll.core.rlm_integration import RLMIntegration, RLMConfig
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class CodebaseContext:
    """Context for RLM to explore codebase.

    A static snapshot of the project handed to RLM as orientation;
    RLM then reads individual files on demand.
    """
    # Resolved project root directory.
    project_path: Path
    file_tree: str  # Directory tree structure
    file_list: List[str]  # All indexed files
    # Count of files that were successfully read during indexing.
    total_files: int
    # Sum of line counts over the indexed files.
    total_lines: int
    # Language name -> number of files (see _detect_language).
    languages: Dict[str, int]
25
|
+
|
|
26
|
+
class RLMCodebaseAnalyzer:
    """
    Use RLM to analyze massive codebases without context limits

    Strategy:
    1. Give RLM the full file tree and list
    2. Let RLM recursively explore files as needed
    3. RLM decides what to read based on the task
    4. No pre-chunking - RLM handles context intelligently
    """

    def __init__(
        self,
        project_path: Path,
        model: str = "gpt-4o-mini",
        max_cost: float = 10.0
    ):
        """
        Initialize RLM codebase analyzer

        Args:
            project_path: Project root
            model: LLM model to use
            max_cost: Maximum cost per task
        """
        # resolve() normalizes the path so relative_to() in _build_context
        # works against a stable absolute root.
        self.project_path = project_path.resolve()

        # Initialize RLM
        rlm_config = RLMConfig(
            model=model,
            max_cost=max_cost,
            timeout_seconds=600  # 10 minutes for large tasks
        )
        self.rlm = RLMIntegration(rlm_config)

        # Build codebase context (walks the tree once, up front; can be
        # slow on very large projects).
        self.context = self._build_context()
|
64
|
+
def analyze_entire_codebase(self, focus: Optional[str] = None) -> str:
    """
    Analyze entire codebase with RLM

    RLM will recursively explore files as needed.

    Args:
        focus: Optional focus area (e.g., "security", "architecture")

    Returns:
        Analysis results, or an "Error: ..." string on failure.
    """
    # The triple-quoted text below is runtime prompt data sent to RLM,
    # not documentation - do not edit casually.
    task = f"""Analyze this entire codebase.

Project: {self.project_path.name}
Total Files: {self.context.total_files:,}
Total Lines: {self.context.total_lines:,}

Available to you in the Python environment:
- project_path = Path("{self.project_path}")
- You can read ANY file: Path("file.py").read_text()
- You can list directories: list(Path("dir").glob("*"))
- Explore recursively as needed

File Tree:
{self.context.file_tree}

{f"Focus on: {focus}" if focus else ""}

Instructions:
1. Start by understanding the project structure
2. Read key files to understand architecture
3. Explore dependencies and relationships
4. Provide comprehensive analysis

You have unlimited context - explore everything you need!
"""

    # Hand the prompt plus structured context to RLM.
    result = self.rlm.complete(task, context={
        "project_path": str(self.project_path),
        "file_list": self.context.file_list,
        "languages": self.context.languages
    })

    return result.response if result.success else f"Error: {result.error}"
|
110
|
+
def find_and_fix(self, issue_description: str) -> str:
    """
    Find and fix an issue across the entire codebase

    RLM will:
    1. Search for relevant code
    2. Understand the issue
    3. Make fixes
    4. Verify the fix

    Args:
        issue_description: Description of the issue to fix

    Returns:
        Fix results, or an "Error: ..." string on failure.
    """
    # Runtime prompt sent to RLM. NOTE(review): it advertises file_ops /
    # bash_ops / git_ops helpers - confirm the RLM environment actually
    # injects those instances.
    task = f"""Find and fix this issue in the codebase:

Issue: {issue_description}

Project: {self.project_path.name}
Files: {self.context.total_files:,}
Lines: {self.context.total_lines:,}

You have access to:
- file_ops: FileOperations instance (read, write, edit files)
- bash_ops: BashOperations instance (run tests, commands)
- git_ops: GitOperations instance (git operations)

File Tree:
{self.context.file_tree}

Instructions:
1. Search through files to find the issue
2. Understand the root cause
3. Make necessary fixes using file_ops.edit_file()
4. Run tests to verify the fix
5. Report what you fixed

You can read and modify ANY file. Explore as needed!
"""

    result = self.rlm.complete(task, context={
        "project_path": str(self.project_path),
        "issue": issue_description
    })

    return result.response if result.success else f"Error: {result.error}"
|
159
|
+
def implement_feature(self, feature_description: str) -> str:
    """
    Implement a feature across the entire codebase

    RLM will:
    1. Understand existing architecture
    2. Plan the implementation
    3. Create/modify files as needed
    4. Integrate with existing code

    Args:
        feature_description: Description of feature to implement

    Returns:
        Implementation results, or an "Error: ..." string on failure.
    """
    # Runtime prompt sent to RLM; mirrors the structure of find_and_fix.
    task = f"""Implement this feature in the codebase:

Feature: {feature_description}

Project: {self.project_path.name}
Files: {self.context.total_files:,}

You have full access to:
- file_ops: Read/write/edit files
- bash_ops: Run commands and tests
- git_ops: Git operations

File Tree:
{self.context.file_tree}

Instructions:
1. Understand the existing codebase architecture
2. Plan where the feature should be implemented
3. Create new files or modify existing ones
4. Integrate with existing code
5. Run tests to verify
6. Report what you implemented

Explore the codebase as needed. You have unlimited context!
"""

    result = self.rlm.complete(task, context={
        "project_path": str(self.project_path),
        "feature": feature_description
    })

    return result.response if result.success else f"Error: {result.error}"
|
208
|
+
def refactor(self, refactor_description: str) -> str:
    """
    Refactor code across the entire codebase

    Args:
        refactor_description: What to refactor

    Returns:
        Refactoring results, or an "Error: ..." string on failure.
    """
    # Runtime prompt sent to RLM. NOTE(review): "automatic backups" is a
    # claim about file_ops behavior defined elsewhere - confirm.
    task = f"""Refactor the codebase:

Refactoring: {refactor_description}

Project: {self.project_path.name}
Files: {self.context.total_files:,}

Available operations:
- Read any file
- Edit files (with automatic backups)
- Run tests to verify changes
- Git operations

File Tree:
{self.context.file_tree}

Instructions:
1. Find all code that needs refactoring
2. Make changes carefully
3. Ensure tests still pass
4. Report all changes made

You can explore and modify ANY file!
"""

    result = self.rlm.complete(task, context={
        "project_path": str(self.project_path),
        "refactor": refactor_description
    })

    return result.response if result.success else f"Error: {result.error}"
|
250
|
+
def answer_question(self, question: str) -> str:
    """
    Answer a question about the codebase

    RLM will explore files to find the answer.

    Args:
        question: Question about the codebase

    Returns:
        Answer, or an "Error: ..." string on failure.
    """
    # Runtime prompt sent to RLM (read-only task: no ops helpers offered).
    task = f"""Answer this question about the codebase:

Question: {question}

Project: {self.project_path.name}
Files: {self.context.total_files:,}

File Tree:
{self.context.file_tree}

Instructions:
1. Read relevant files to understand the answer
2. Explore related code if needed
3. Provide a clear, detailed answer

You can read ANY file to find the answer!
"""

    result = self.rlm.complete(task, context={
        "project_path": str(self.project_path),
        "question": question
    })

    return result.response if result.success else f"Error: {result.error}"
|
287
|
+
def generate_documentation(self, doc_type: str = "overview") -> str:
    """
    Generate documentation for the codebase

    Args:
        doc_type: Type of documentation (overview, API, architecture)

    Returns:
        Generated documentation, or an "Error: ..." string on failure.
    """
    # Runtime prompt sent to RLM; doc_type is interpolated twice (header
    # and instruction 3).
    task = f"""Generate {doc_type} documentation for this codebase.

Project: {self.project_path.name}
Files: {self.context.total_files:,}
Lines: {self.context.total_lines:,}

File Tree:
{self.context.file_tree}

Instructions:
1. Explore the codebase structure
2. Read key files to understand functionality
3. Generate comprehensive {doc_type} documentation
4. Include code examples where relevant

Explore as needed - you have unlimited context!
"""

    result = self.rlm.complete(task, context={
        "project_path": str(self.project_path),
        "doc_type": doc_type
    })

    return result.response if result.success else f"Error: {result.error}"
|
322
|
+
# =========================================================================
|
|
323
|
+
# Helper Methods
|
|
324
|
+
# =========================================================================
|
|
325
|
+
|
|
326
|
+
def _build_context(self) -> CodebaseContext:
    """Build a CodebaseContext snapshot for RLM.

    Walks the project tree once, skipping well-known build/VCS directories,
    and records the indexed file list, total line count and a per-language
    file count.

    Bug fix: files that cannot be read are now skipped entirely. Previously
    they were appended to ``file_list`` but excluded from ``total_files`` /
    ``total_lines``, so the list and the counts disagreed.

    Returns:
        A fully populated CodebaseContext.
    """
    import os

    # Directories that never hold interesting source code.
    ignored_dirs = {
        '.git', 'node_modules', '.venv', 'venv', '__pycache__',
        'build', 'dist', '.next', 'target', '.groknroll',
        '.pytest_cache', 'coverage', '.mypy_cache'
    }

    file_list: List[str] = []
    total_lines = 0
    languages: Dict[str, int] = {}

    # Walk directory tree
    for root, dirs, files in os.walk(self.project_path):
        # Prune in place so os.walk never descends into ignored dirs.
        dirs[:] = [d for d in dirs if d not in ignored_dirs]

        for file in files:
            if not self._should_include(file):
                continue
            file_path = Path(root) / file

            try:
                lines = len(file_path.read_text(errors='ignore').splitlines())
            except Exception:
                # Unreadable file: skip it so list and counts stay in sync.
                continue

            file_list.append(str(file_path.relative_to(self.project_path)))
            total_lines += lines

            # Track language by extension.
            lang = self._detect_language(file_path.suffix)
            languages[lang] = languages.get(lang, 0) + 1

    return CodebaseContext(
        project_path=self.project_path,
        file_tree=self._generate_tree(),
        file_list=file_list,
        total_files=len(file_list),
        total_lines=total_lines,
        languages=languages
    )
|
373
|
+
def _generate_tree(self, max_depth: int = 3) -> str:
|
|
374
|
+
"""Generate directory tree structure"""
|
|
375
|
+
import os
|
|
376
|
+
|
|
377
|
+
tree_lines = [f"{self.project_path.name}/"]
|
|
378
|
+
|
|
379
|
+
def walk_dir(path: Path, prefix: str = "", depth: int = 0):
|
|
380
|
+
if depth >= max_depth:
|
|
381
|
+
return
|
|
382
|
+
|
|
383
|
+
try:
|
|
384
|
+
entries = sorted(path.iterdir(), key=lambda p: (not p.is_dir(), p.name))
|
|
385
|
+
|
|
386
|
+
# Filter out ignored directories
|
|
387
|
+
entries = [
|
|
388
|
+
e for e in entries
|
|
389
|
+
if e.name not in {
|
|
390
|
+
'.git', 'node_modules', '.venv', 'venv', '__pycache__',
|
|
391
|
+
'build', 'dist', '.next', 'target', '.groknroll'
|
|
392
|
+
}
|
|
393
|
+
]
|
|
394
|
+
|
|
395
|
+
for i, entry in enumerate(entries[:50]): # Limit to 50 entries per dir
|
|
396
|
+
is_last = i == len(entries) - 1
|
|
397
|
+
current_prefix = "└── " if is_last else "├── "
|
|
398
|
+
next_prefix = " " if is_last else "│ "
|
|
399
|
+
|
|
400
|
+
tree_lines.append(f"{prefix}{current_prefix}{entry.name}")
|
|
401
|
+
|
|
402
|
+
if entry.is_dir():
|
|
403
|
+
walk_dir(entry, prefix + next_prefix, depth + 1)
|
|
404
|
+
|
|
405
|
+
except PermissionError:
|
|
406
|
+
pass
|
|
407
|
+
|
|
408
|
+
walk_dir(self.project_path)
|
|
409
|
+
|
|
410
|
+
return "\n".join(tree_lines[:500]) # Limit tree size
|
|
411
|
+
|
|
412
|
+
def _should_include(self, filename: str) -> bool:
|
|
413
|
+
"""Check if file should be included"""
|
|
414
|
+
skip_extensions = {
|
|
415
|
+
'.pyc', '.pyo', '.so', '.dylib', '.dll',
|
|
416
|
+
'.jpg', '.jpeg', '.png', '.gif', '.svg', '.ico',
|
|
417
|
+
'.pdf', '.zip', '.tar', '.gz', '.bz2',
|
|
418
|
+
'.lock', '.log', '.tmp', '.cache',
|
|
419
|
+
'.min.js', '.min.css'
|
|
420
|
+
}
|
|
421
|
+
|
|
422
|
+
ext = Path(filename).suffix.lower()
|
|
423
|
+
return ext not in skip_extensions and not filename.startswith('.')
|
|
424
|
+
|
|
425
|
+
def _detect_language(self, extension: str) -> str:
|
|
426
|
+
"""Detect language from extension"""
|
|
427
|
+
lang_map = {
|
|
428
|
+
'.py': 'Python',
|
|
429
|
+
'.js': 'JavaScript',
|
|
430
|
+
'.ts': 'TypeScript',
|
|
431
|
+
'.jsx': 'React',
|
|
432
|
+
'.tsx': 'React',
|
|
433
|
+
'.go': 'Go',
|
|
434
|
+
'.rs': 'Rust',
|
|
435
|
+
'.java': 'Java',
|
|
436
|
+
'.cpp': 'C++',
|
|
437
|
+
'.c': 'C',
|
|
438
|
+
'.rb': 'Ruby',
|
|
439
|
+
'.php': 'PHP',
|
|
440
|
+
'.md': 'Markdown',
|
|
441
|
+
'.json': 'JSON',
|
|
442
|
+
'.yaml': 'YAML',
|
|
443
|
+
'.yml': 'YAML',
|
|
444
|
+
'.html': 'HTML',
|
|
445
|
+
'.css': 'CSS',
|
|
446
|
+
'.sql': 'SQL',
|
|
447
|
+
}
|
|
448
|
+
return lang_map.get(extension.lower(), 'Other')
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
"""
|
|
2
|
+
RLM Integration - Direct, native integration with RLM core
|
|
3
|
+
|
|
4
|
+
No bridges, no HTTP, no external services. Pure Python.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Any, Dict, List, Optional
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
|
|
10
|
+
from groknroll.core.rlm import RLM
|
|
11
|
+
from groknroll.core.exceptions import CostLimitExceededError, CompletionTimeoutError
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class RLMConfig:
    """Configuration for RLM"""
    # Backend model identifier forwarded to RLM via backend_kwargs.
    model: str = "gpt-4o-mini"
    # Cost ceiling per completion; exceeding it raises
    # CostLimitExceededError (handled in RLMIntegration.complete).
    max_cost: float = 5.0
    # Overall wall-clock limit for one completion, in seconds.
    timeout_seconds: int = 300
    # NOTE(review): not forwarded to RLM() in RLMIntegration.initialize()
    # - confirm whether any other caller reads this field.
    iteration_timeout_seconds: int = 30
|
+
|
|
23
|
+
@dataclass
class RLMResult:
    """Result from RLM completion"""
    # Final text answer; empty string on failure.
    response: str
    # Execution trace; currently always "" (RLM does not expose it).
    trace_log: str
    # Accumulated cost; currently 0.0 except on cost-limit failures,
    # where it is set to the configured max_cost.
    total_cost: float
    # Wall-clock seconds when the backend reports execution_time, else 0.0.
    total_time: float
    # Iteration count when the backend reports iterations, else 0.
    iterations: int
    # True when the completion ran without raising.
    success: bool
    # Human-readable failure description when success is False.
    error: Optional[str] = None
|
+
|
|
35
|
+
class RLMIntegration:
    """
    Native RLM integration for groknroll

    Provides unlimited context reasoning directly integrated into the agent.
    No external services, everything runs in-process.
    """

    def __init__(self, config: Optional[RLMConfig] = None):
        """Initialize RLM integration.

        Args:
            config: Optional RLMConfig; a default-valued one is created
                when omitted.
        """
        self.config = config or RLMConfig()
        # Created lazily by initialize() on first completion.
        self.rlm = None
        # NOTE(review): env is never assigned anywhere in this module -
        # confirm whether it is still needed.
        self.env = None
|
49
|
+
def initialize(self) -> None:
    """Lazily construct the underlying RLM instance.

    Idempotent: subsequent calls are no-ops once ``self.rlm`` exists.
    """
    if self.rlm is not None:
        return

    self.rlm = RLM(
        backend="openai",
        backend_kwargs={"model_name": self.config.model},
        environment="local",
        max_cost=self.config.max_cost,
        timeout=self.config.timeout_seconds
    )
|
|
60
|
+
def complete(
    self,
    task: str,
    context: Optional[Dict[str, Any]] = None,
    code_context: Optional[str] = None
) -> RLMResult:
    """
    Execute RLM completion with unlimited context

    Args:
        task: The task description
        context: Additional context dict
        code_context: Code context to include

    Returns:
        RLMResult with response and metrics
    """
    self.initialize()

    def failure(message: str, *, cost: float = 0.0, elapsed: float = 0.0) -> RLMResult:
        # Uniform failure result; only cost/elapsed vary per failure mode.
        return RLMResult(
            response="",
            trace_log="",
            total_cost=cost,
            total_time=elapsed,
            iterations=0,
            success=False,
            error=message
        )

    try:
        # Assemble the prompt (task + context dict + optional code).
        prompt = self._build_message(task, context, code_context)

        # RLM.completion takes a plain string prompt.
        outcome = self.rlm.completion(prompt)

        return RLMResult(
            response=outcome.response,
            trace_log="",  # RLM doesn't expose trace_log in public API
            total_cost=0.0,  # Would need to track via usage_summary
            total_time=getattr(outcome, 'execution_time', 0.0),
            iterations=len(getattr(outcome, 'iterations', ())),
            success=True
        )

    except CostLimitExceededError as e:
        return failure(f"Cost limit exceeded: {e}", cost=self.config.max_cost)

    except CompletionTimeoutError as e:
        return failure(f"Timeout exceeded: {e}", elapsed=self.config.timeout_seconds)

    except Exception as e:
        return failure(f"RLM execution failed: {e}")
|
|
128
|
+
def chat(
    self,
    messages: List[Dict[str, str]],
    context: Optional[Dict[str, Any]] = None
) -> RLMResult:
    """
    Chat with RLM using conversation history

    Args:
        messages: List of {role, content} dicts
        context: Additional context

    Returns:
        RLMResult with response
    """
    self.initialize()

    try:
        # Flatten the conversation into a single prompt string.
        prompt = "\n\n".join(
            f"{msg['role']}: {msg['content']}" for msg in messages
        )

        # Append key/value context, if any.
        if context:
            prompt += "\n\nContext:\n" + "\n".join(
                f"- {k}: {v}" for k, v in context.items()
            )

        outcome = self.rlm.completion(prompt)

        return RLMResult(
            response=outcome.response,
            trace_log="",
            total_cost=0.0,
            total_time=getattr(outcome, 'execution_time', 0.0),
            iterations=len(getattr(outcome, 'iterations', ())),
            success=True
        )

    except Exception as e:
        return RLMResult(
            response="",
            trace_log="",
            total_cost=0.0,
            total_time=0.0,
            iterations=0,
            success=False,
            error=str(e)
        )
|
|
183
|
+
def analyze_code(
    self,
    code: str,
    analysis_type: str = "review",
    language: str = "python"
) -> RLMResult:
    """
    Analyze code using RLM

    Args:
        code: Code to analyze
        analysis_type: Type of analysis (review, security, complexity, etc)
        language: Programming language

    Returns:
        RLMResult with analysis
    """
    # Build a type-specific prompt, then run an ordinary completion with
    # the code attached as code_context (so it appears twice: in the task
    # text and as the code block).
    task = self._build_analysis_task(code, analysis_type, language)
    return self.complete(task, code_context=code)
|
|
203
|
+
def _build_message(
|
|
204
|
+
self,
|
|
205
|
+
task: str,
|
|
206
|
+
context: Optional[Dict[str, Any]],
|
|
207
|
+
code_context: Optional[str]
|
|
208
|
+
) -> str:
|
|
209
|
+
"""Build message content with context"""
|
|
210
|
+
parts = [task]
|
|
211
|
+
|
|
212
|
+
if context:
|
|
213
|
+
parts.append("\n\nContext:")
|
|
214
|
+
for key, value in context.items():
|
|
215
|
+
parts.append(f"- {key}: {value}")
|
|
216
|
+
|
|
217
|
+
if code_context:
|
|
218
|
+
parts.append(f"\n\nCode:\n```\n{code_context}\n```")
|
|
219
|
+
|
|
220
|
+
return "\n".join(parts)
|
|
221
|
+
|
|
222
|
+
def _build_analysis_task(
|
|
223
|
+
self,
|
|
224
|
+
code: str,
|
|
225
|
+
analysis_type: str,
|
|
226
|
+
language: str
|
|
227
|
+
) -> str:
|
|
228
|
+
"""Build analysis task prompt"""
|
|
229
|
+
prompts = {
|
|
230
|
+
"review": f"Review this {language} code for quality, bugs, and improvements:",
|
|
231
|
+
"security": f"Perform a security audit of this {language} code:",
|
|
232
|
+
"complexity": f"Analyze the complexity of this {language} code:",
|
|
233
|
+
"refactor": f"Suggest refactorings for this {language} code:",
|
|
234
|
+
"explain": f"Explain what this {language} code does:",
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
prompt = prompts.get(analysis_type, f"Analyze this {language} code:")
|
|
238
|
+
return f"{prompt}\n\n```{language}\n{code}\n```"
|
|
239
|
+
|
|
240
|
+
def reset(self) -> None:
    """Reset RLM environment"""
    # Drop the current RLM instance and eagerly build a fresh one so any
    # accumulated in-process state is discarded.
    self.rlm = None
    self.initialize()
|
|
246
|
+
def get_stats(self) -> Dict[str, Any]:
    """Report configuration stats.

    Returns ``{"initialized": False}`` until initialize() has run;
    afterwards includes the model name, cost ceiling and timeout.
    """
    if self.rlm is None:
        return {"initialized": False}

    cfg = self.config
    return {
        "initialized": True,
        "model": cfg.model,
        "max_cost": cfg.max_cost,
        "timeout": cfg.timeout_seconds,
    }