groknroll-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. groknroll/__init__.py +36 -0
  2. groknroll/__main__.py +9 -0
  3. groknroll/agents/__init__.py +18 -0
  4. groknroll/agents/agent_manager.py +187 -0
  5. groknroll/agents/base_agent.py +118 -0
  6. groknroll/agents/build_agent.py +231 -0
  7. groknroll/agents/plan_agent.py +215 -0
  8. groknroll/cli/__init__.py +7 -0
  9. groknroll/cli/enhanced_cli.py +372 -0
  10. groknroll/cli/large_codebase_cli.py +413 -0
  11. groknroll/cli/main.py +331 -0
  12. groknroll/cli/rlm_commands.py +258 -0
  13. groknroll/clients/__init__.py +63 -0
  14. groknroll/clients/anthropic.py +112 -0
  15. groknroll/clients/azure_openai.py +142 -0
  16. groknroll/clients/base_lm.py +33 -0
  17. groknroll/clients/gemini.py +162 -0
  18. groknroll/clients/litellm.py +105 -0
  19. groknroll/clients/openai.py +129 -0
  20. groknroll/clients/portkey.py +94 -0
  21. groknroll/core/__init__.py +9 -0
  22. groknroll/core/agent.py +339 -0
  23. groknroll/core/comms_utils.py +264 -0
  24. groknroll/core/context.py +251 -0
  25. groknroll/core/exceptions.py +181 -0
  26. groknroll/core/large_codebase.py +564 -0
  27. groknroll/core/lm_handler.py +206 -0
  28. groknroll/core/rlm.py +446 -0
  29. groknroll/core/rlm_codebase.py +448 -0
  30. groknroll/core/rlm_integration.py +256 -0
  31. groknroll/core/types.py +276 -0
  32. groknroll/environments/__init__.py +34 -0
  33. groknroll/environments/base_env.py +182 -0
  34. groknroll/environments/constants.py +32 -0
  35. groknroll/environments/docker_repl.py +336 -0
  36. groknroll/environments/local_repl.py +388 -0
  37. groknroll/environments/modal_repl.py +502 -0
  38. groknroll/environments/prime_repl.py +588 -0
  39. groknroll/logger/__init__.py +4 -0
  40. groknroll/logger/rlm_logger.py +63 -0
  41. groknroll/logger/verbose.py +393 -0
  42. groknroll/operations/__init__.py +15 -0
  43. groknroll/operations/bash_ops.py +447 -0
  44. groknroll/operations/file_ops.py +473 -0
  45. groknroll/operations/git_ops.py +620 -0
  46. groknroll/oracle/__init__.py +11 -0
  47. groknroll/oracle/codebase_indexer.py +238 -0
  48. groknroll/oracle/oracle_agent.py +278 -0
  49. groknroll/setup.py +34 -0
  50. groknroll/storage/__init__.py +14 -0
  51. groknroll/storage/database.py +272 -0
  52. groknroll/storage/models.py +128 -0
  53. groknroll/utils/__init__.py +0 -0
  54. groknroll/utils/parsing.py +168 -0
  55. groknroll/utils/prompts.py +146 -0
  56. groknroll/utils/rlm_utils.py +19 -0
  57. groknroll-2.0.0.dist-info/METADATA +246 -0
  58. groknroll-2.0.0.dist-info/RECORD +62 -0
  59. groknroll-2.0.0.dist-info/WHEEL +5 -0
  60. groknroll-2.0.0.dist-info/entry_points.txt +3 -0
  61. groknroll-2.0.0.dist-info/licenses/LICENSE +21 -0
  62. groknroll-2.0.0.dist-info/top_level.txt +1 -0
groknroll/environments/local_repl.py
@@ -0,0 +1,388 @@
+ import copy
+ import io
+ import json
+ import os
+ import shutil
+ import sys
+ import tempfile
+ import threading
+ import time
+ import uuid
+ from contextlib import contextmanager
+ from typing import Any
+
+ from groknroll.core.comms_utils import LMRequest, send_lm_request, send_lm_request_batched
+ from groknroll.core.exceptions import (
+     EnvironmentCleanupError,
+     LMHandlerConnectionError,
+     REPLExecutionError,
+ )
+ from groknroll.core.types import REPLResult, RLMChatCompletion
+ from groknroll.environments.base_env import NonIsolatedEnv
+
+ # =============================================================================
+ # Safe Builtins
+ # =============================================================================
+
+ # Safe builtins - blocks dangerous operations like eval/exec/input
+ _SAFE_BUILTINS = {
+     # Core types and functions
+     "print": print,
+     "len": len,
+     "str": str,
+     "int": int,
+     "float": float,
+     "list": list,
+     "dict": dict,
+     "set": set,
+     "tuple": tuple,
+     "bool": bool,
+     "type": type,
+     "isinstance": isinstance,
+     "issubclass": issubclass,
+     "enumerate": enumerate,
+     "zip": zip,
+     "map": map,
+     "filter": filter,
+     "sorted": sorted,
+     "reversed": reversed,
+     "range": range,
+     "min": min,
+     "max": max,
+     "sum": sum,
+     "abs": abs,
+     "round": round,
+     "any": any,
+     "all": all,
+     "pow": pow,
+     "divmod": divmod,
+     "chr": chr,
+     "ord": ord,
+     "hex": hex,
+     "bin": bin,
+     "oct": oct,
+     "repr": repr,
+     "ascii": ascii,
+     "format": format,
+     "hash": hash,
+     "id": id,
+     "iter": iter,
+     "next": next,
+     "slice": slice,
+     "callable": callable,
+     "hasattr": hasattr,
+     "getattr": getattr,
+     "setattr": setattr,
+     "delattr": delattr,
+     "dir": dir,
+     "vars": vars,
+     "bytes": bytes,
+     "bytearray": bytearray,
+     "memoryview": memoryview,
+     "complex": complex,
+     "object": object,
+     "super": super,
+     "property": property,
+     "staticmethod": staticmethod,
+     "classmethod": classmethod,
+     "__import__": __import__,
+     "open": open,
+     # Exceptions
+     "Exception": Exception,
+     "BaseException": BaseException,
+     "ValueError": ValueError,
+     "TypeError": TypeError,
+     "KeyError": KeyError,
+     "IndexError": IndexError,
+     "AttributeError": AttributeError,
+     "FileNotFoundError": FileNotFoundError,
+     "OSError": OSError,
+     "IOError": IOError,
+     "RuntimeError": RuntimeError,
+     "NameError": NameError,
+     "ImportError": ImportError,
+     "StopIteration": StopIteration,
+     "AssertionError": AssertionError,
+     "NotImplementedError": NotImplementedError,
+     "ArithmeticError": ArithmeticError,
+     "LookupError": LookupError,
+     "Warning": Warning,
+     # Blocked
+     "input": None,
+     "eval": None,
+     "exec": None,
+     "compile": None,
+     "globals": None,
+     "locals": None,
+ }
+
+
+ class LocalREPL(NonIsolatedEnv):
+     """
+     Local REPL environment with persistent Python namespace.
+     Executes code in a sandboxed namespace with access to context data.
+     """
+
+     def __init__(
+         self,
+         lm_handler_address: tuple[str, int] | None = None,
+         context_payload: dict | list | str | None = None,
+         setup_code: str | None = None,
+         persistent: bool = False,
+         depth: int = 1,
+         **kwargs,
+     ):
+         super().__init__(persistent=persistent, depth=depth, **kwargs)
+
+         self.lm_handler_address = lm_handler_address
+         self.original_cwd = os.getcwd()
+         self.temp_dir = tempfile.mkdtemp(prefix=f"repl_env_{uuid.uuid4()}_")
+         self._lock = threading.Lock()
+         self._context_count: int = 0
+         self._history_count: int = 0
+
+         # Setup globals, locals, and modules in environment.
+         self.setup()
+
+         # Load context if provided
+         if context_payload is not None:
+             self.load_context(context_payload)
+
+         # Run setup code if provided
+         if setup_code:
+             self.execute_code(setup_code)
+
+     def setup(self):
+         """Setup the environment."""
+         # Create sandboxed globals
+         self.globals: dict[str, Any] = {
+             "__builtins__": _SAFE_BUILTINS.copy(),
+             "__name__": "__main__",
+         }
+         self.locals: dict[str, Any] = {}
+
+         # Track LLM calls made during code execution
+         self._pending_llm_calls: list[RLMChatCompletion] = []
+
+         # Add helper functions
+         self.globals["FINAL_VAR"] = self._final_var
+         self.globals["llm_query"] = self._llm_query
+         self.globals["llm_query_batched"] = self._llm_query_batched
+
+     def _final_var(self, variable_name: str) -> str:
+         """Return the value of a variable as a final answer."""
+         variable_name = variable_name.strip().strip("\"'")
+         if variable_name in self.locals:
+             return str(self.locals[variable_name])
+         return f"Error: Variable '{variable_name}' not found"
+
+     def _llm_query(self, prompt: str, model: str | None = None) -> str:
+         """Query the LM via socket connection to the handler.
+
+         Args:
+             prompt: The prompt to send to the LM.
+             model: Optional model name to use (if handler has multiple clients).
+         """
+         if not self.lm_handler_address:
+             return "Error: No LM handler configured"
+
+         try:
+             request = LMRequest(prompt=prompt, model=model, depth=self.depth)
+             response = send_lm_request(self.lm_handler_address, request)
+
+             if not response.success:
+                 return f"Error: {response.error}"
+
+             # Track this LLM call
+             self._pending_llm_calls.append(
+                 response.chat_completion,
+             )
+
+             return response.chat_completion.response
+         except Exception as e:
+             return f"Error: LM query failed - {e}"
+
+     def _llm_query_batched(self, prompts: list[str], model: str | None = None) -> list[str]:
+         """Query the LM with multiple prompts concurrently.
+
+         Args:
+             prompts: List of prompts to send to the LM.
+             model: Optional model name to use (if handler has multiple clients).
+
+         Returns:
+             List of responses in the same order as input prompts.
+         """
+         if not self.lm_handler_address:
+             return ["Error: No LM handler configured"] * len(prompts)
+
+         try:
+             responses = send_lm_request_batched(
+                 self.lm_handler_address, prompts, model=model, depth=self.depth
+             )
+
+             results = []
+             for response in responses:
+                 if not response.success:
+                     results.append(f"Error: {response.error}")
+                 else:
+                     # Track this LLM call in list of all calls -- we may want to do this hierarchically
+                     self._pending_llm_calls.append(response.chat_completion)
+                     results.append(response.chat_completion.response)
+
+             return results
+         except Exception as e:
+             return [f"Error: LM query failed - {e}"] * len(prompts)
+
+     def load_context(self, context_payload: dict | list | str):
+         """Load context into the environment as context_0 (and 'context' alias)."""
+         self.add_context(context_payload, 0)
+
+     def add_context(
+         self, context_payload: dict | list | str, context_index: int | None = None
+     ) -> int:
+         """
+         Add a context with versioned variable name.
+
+         Args:
+             context_payload: The context data to add
+             context_index: Optional explicit index. If None, auto-increments.
+
+         Returns:
+             The context index used.
+         """
+         if context_index is None:
+             context_index = self._context_count
+
+         var_name = f"context_{context_index}"
+
+         if isinstance(context_payload, str):
+             context_path = os.path.join(self.temp_dir, f"context_{context_index}.txt")
+             with open(context_path, "w") as f:
+                 f.write(context_payload)
+             self.execute_code(f"with open(r'{context_path}', 'r') as f:\n {var_name} = f.read()")
+         else:
+             context_path = os.path.join(self.temp_dir, f"context_{context_index}.json")
+             with open(context_path, "w") as f:
+                 json.dump(context_payload, f)
+             self.execute_code(
+                 f"import json\nwith open(r'{context_path}', 'r') as f:\n {var_name} = json.load(f)"
+             )
+
+         # Alias context_0 as 'context' for backward compatibility
+         if context_index == 0:
+             self.execute_code(f"context = {var_name}")
+
+         self._context_count = max(self._context_count, context_index + 1)
+         return context_index
+
+     def update_handler_address(self, address: tuple[str, int]) -> None:
+         """Update the LM handler address for a new completion call."""
+         self.lm_handler_address = address
+
+     def get_context_count(self) -> int:
+         """Return the number of contexts loaded."""
+         return self._context_count
+
+     def add_history(
+         self, message_history: list[dict[str, Any]], history_index: int | None = None
+     ) -> int:
+         """
+         Store a conversation's message history as a versioned variable.
+
+         Args:
+             message_history: The list of message dicts from a completion call
+             history_index: Optional explicit index. If None, auto-increments.
+
+         Returns:
+             The history index used.
+         """
+         if history_index is None:
+             history_index = self._history_count
+
+         var_name = f"history_{history_index}"
+
+         # Store deep copy to avoid reference issues with nested dicts
+         self.locals[var_name] = copy.deepcopy(message_history)
+
+         # Alias history_0 as 'history' for convenience
+         if history_index == 0:
+             self.locals["history"] = self.locals[var_name]
+
+         self._history_count = max(self._history_count, history_index + 1)
+         return history_index
+
+     def get_history_count(self) -> int:
+         """Return the number of conversation histories stored."""
+         return self._history_count
+
+     @contextmanager
+     def _capture_output(self):
+         """Thread-safe context manager to capture stdout/stderr."""
+         with self._lock:
+             old_stdout, old_stderr = sys.stdout, sys.stderr
+             stdout_buf, stderr_buf = io.StringIO(), io.StringIO()
+             try:
+                 sys.stdout, sys.stderr = stdout_buf, stderr_buf
+                 yield stdout_buf, stderr_buf
+             finally:
+                 sys.stdout, sys.stderr = old_stdout, old_stderr
+
+     @contextmanager
+     def _temp_cwd(self):
+         """Temporarily change to temp directory for execution."""
+         old_cwd = os.getcwd()
+         try:
+             os.chdir(self.temp_dir)
+             yield
+         finally:
+             os.chdir(old_cwd)
+
+     def execute_code(self, code: str) -> REPLResult:
+         """Execute code in the persistent namespace and return result."""
+         start_time = time.perf_counter()
+
+         # Clear pending LLM calls from previous execution
+         self._pending_llm_calls = []
+
+         with self._capture_output() as (stdout_buf, stderr_buf), self._temp_cwd():
+             try:
+                 combined = {**self.globals, **self.locals}
+                 exec(code, combined, combined)
+
+                 # Update locals with new variables
+                 for key, value in combined.items():
+                     if key not in self.globals and not key.startswith("_"):
+                         self.locals[key] = value
+
+                 stdout = stdout_buf.getvalue()
+                 stderr = stderr_buf.getvalue()
+             except Exception as e:
+                 stdout = stdout_buf.getvalue()
+                 stderr = stderr_buf.getvalue() + f"\n{type(e).__name__}: {e}"
+
+         return REPLResult(
+             stdout=stdout,
+             stderr=stderr,
+             locals=self.locals.copy(),
+             execution_time=time.perf_counter() - start_time,
+             rlm_calls=self._pending_llm_calls.copy(),
+         )
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.cleanup()
+         return False
+
+     def cleanup(self):
+         """Clean up temp directory and reset state."""
+         try:
+             shutil.rmtree(self.temp_dir)
+         except Exception:
+             pass
+         self.globals.clear()
+         self.locals.clear()
+
+     def __del__(self):
+         self.cleanup()
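
For orientation, the sketch below shows how the LocalREPL added in this file might be driven directly, using only what the diff shows: the context_payload constructor argument, execute_code returning a REPLResult (assumed here to expose the stdout and locals values it is constructed with), and the FINAL_VAR helper injected into the sandbox. It is an illustrative usage sketch, not code shipped in the package; without an lm_handler_address the injected llm_query helpers simply return an error string, so everything stays local.

    from groknroll.environments.local_repl import LocalREPL

    # A dict payload is written to the REPL's temp dir and loaded into the
    # sandbox as `context_0`, aliased to `context`.
    with LocalREPL(context_payload={"numbers": [3, 1, 4, 1, 5]}) as repl:
        result = repl.execute_code(
            "total = sum(context['numbers'])\n"
            "print('total =', total)"
        )
        print(result.stdout)            # captured stdout from the sandboxed exec
        print(result.locals["total"])   # assumed REPLResult field holding a copy of locals

        # FINAL_VAR reads a sandbox variable back as a string.
        answer = repl.execute_code("print(FINAL_VAR('total'))")
        print(answer.stdout)

Each execute_code call shares the same persistent namespace, which is why total from the first call is still visible to the second.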