groknroll-2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. groknroll/__init__.py +36 -0
  2. groknroll/__main__.py +9 -0
  3. groknroll/agents/__init__.py +18 -0
  4. groknroll/agents/agent_manager.py +187 -0
  5. groknroll/agents/base_agent.py +118 -0
  6. groknroll/agents/build_agent.py +231 -0
  7. groknroll/agents/plan_agent.py +215 -0
  8. groknroll/cli/__init__.py +7 -0
  9. groknroll/cli/enhanced_cli.py +372 -0
  10. groknroll/cli/large_codebase_cli.py +413 -0
  11. groknroll/cli/main.py +331 -0
  12. groknroll/cli/rlm_commands.py +258 -0
  13. groknroll/clients/__init__.py +63 -0
  14. groknroll/clients/anthropic.py +112 -0
  15. groknroll/clients/azure_openai.py +142 -0
  16. groknroll/clients/base_lm.py +33 -0
  17. groknroll/clients/gemini.py +162 -0
  18. groknroll/clients/litellm.py +105 -0
  19. groknroll/clients/openai.py +129 -0
  20. groknroll/clients/portkey.py +94 -0
  21. groknroll/core/__init__.py +9 -0
  22. groknroll/core/agent.py +339 -0
  23. groknroll/core/comms_utils.py +264 -0
  24. groknroll/core/context.py +251 -0
  25. groknroll/core/exceptions.py +181 -0
  26. groknroll/core/large_codebase.py +564 -0
  27. groknroll/core/lm_handler.py +206 -0
  28. groknroll/core/rlm.py +446 -0
  29. groknroll/core/rlm_codebase.py +448 -0
  30. groknroll/core/rlm_integration.py +256 -0
  31. groknroll/core/types.py +276 -0
  32. groknroll/environments/__init__.py +34 -0
  33. groknroll/environments/base_env.py +182 -0
  34. groknroll/environments/constants.py +32 -0
  35. groknroll/environments/docker_repl.py +336 -0
  36. groknroll/environments/local_repl.py +388 -0
  37. groknroll/environments/modal_repl.py +502 -0
  38. groknroll/environments/prime_repl.py +588 -0
  39. groknroll/logger/__init__.py +4 -0
  40. groknroll/logger/rlm_logger.py +63 -0
  41. groknroll/logger/verbose.py +393 -0
  42. groknroll/operations/__init__.py +15 -0
  43. groknroll/operations/bash_ops.py +447 -0
  44. groknroll/operations/file_ops.py +473 -0
  45. groknroll/operations/git_ops.py +620 -0
  46. groknroll/oracle/__init__.py +11 -0
  47. groknroll/oracle/codebase_indexer.py +238 -0
  48. groknroll/oracle/oracle_agent.py +278 -0
  49. groknroll/setup.py +34 -0
  50. groknroll/storage/__init__.py +14 -0
  51. groknroll/storage/database.py +272 -0
  52. groknroll/storage/models.py +128 -0
  53. groknroll/utils/__init__.py +0 -0
  54. groknroll/utils/parsing.py +168 -0
  55. groknroll/utils/prompts.py +146 -0
  56. groknroll/utils/rlm_utils.py +19 -0
  57. groknroll-2.0.0.dist-info/METADATA +246 -0
  58. groknroll-2.0.0.dist-info/RECORD +62 -0
  59. groknroll-2.0.0.dist-info/WHEEL +5 -0
  60. groknroll-2.0.0.dist-info/entry_points.txt +3 -0
  61. groknroll-2.0.0.dist-info/licenses/LICENSE +21 -0
  62. groknroll-2.0.0.dist-info/top_level.txt +1 -0
groknroll/environments/docker_repl.py
@@ -0,0 +1,336 @@
+"""
+Docker REPL environment that runs Python code in a Docker container.
+
+Setup:
+    docker build -t rlm-sandbox -f Dockerfile.sandbox .
+
+Or use any Python 3.11+ image with: pip install dill requests
+"""
+
+import base64
+import json
+import os
+import subprocess
+import tempfile
+import textwrap
+import threading
+import time
+from http.server import BaseHTTPRequestHandler, HTTPServer
+
+from groknroll.core.comms_utils import LMRequest, send_lm_request, send_lm_request_batched
+from groknroll.core.types import REPLResult, RLMChatCompletion
+from groknroll.environments.base_env import NonIsolatedEnv
+
+
+class LLMProxyHandler(BaseHTTPRequestHandler):
+    """HTTP handler for LLM requests from the container."""
+
+    lm_handler_address: tuple[str, int] | None = None
+    pending_calls: list[RLMChatCompletion] = []
+    lock: threading.Lock = threading.Lock()
+    depth: int = 1
+
+    def log_message(self, *args):
+        pass
+
+    def do_POST(self):
+        body = json.loads(self.rfile.read(int(self.headers["Content-Length"])))
+
+        if self.path == "/llm_query":
+            result = self._handle_single(body)
+        elif self.path == "/llm_query_batched":
+            result = self._handle_batched(body)
+        else:
+            self._respond(404, {"error": "Not found"})
+            return
+
+        self._respond(200, result)
+
+    def _respond(self, status: int, data: dict):
+        self.send_response(status)
+        self.send_header("Content-Type", "application/json")
+        self.end_headers()
+        self.wfile.write(json.dumps(data).encode())
+
+    def _handle_single(self, body: dict) -> dict:
+        if not self.lm_handler_address:
+            return {"error": "No LM handler configured"}
+
+        request = LMRequest(prompt=body.get("prompt"), model=body.get("model"), depth=self.depth)
+        response = send_lm_request(self.lm_handler_address, request)
+
+        if not response.success:
+            return {"error": response.error}
+
+        with self.lock:
+            self.pending_calls.append(response.chat_completion)
+
+        return {"response": response.chat_completion.response}
+
+    def _handle_batched(self, body: dict) -> dict:
+        if not self.lm_handler_address:
+            return {"error": "No LM handler configured"}
+
+        prompts = body.get("prompts", [])
+        responses = send_lm_request_batched(
+            self.lm_handler_address, prompts, model=body.get("model"), depth=self.depth
+        )
+
+        results = []
+        for resp in responses:
+            if not resp.success:
+                results.append(f"Error: {resp.error}")
+            else:
+                with self.lock:
+                    self.pending_calls.append(resp.chat_completion)
+                results.append(resp.chat_completion.response)
+
+        return {"responses": results}
+
+
+def _build_exec_script(code: str, proxy_port: int, depth: int = 1) -> str:
+    """Build execution script for the container."""
+    code_b64 = base64.b64encode(code.encode()).decode()
+
+    return textwrap.dedent(
+        f'''
+        import sys, io, json, base64, traceback, os, requests
+        try:
+            import dill
+        except ImportError:
+            import pickle as dill
+
+        PROXY = "http://host.docker.internal:{proxy_port}"
+        STATE = "/workspace/state.dill"
+
+        def llm_query(prompt, model=None):
+            try:
+                r = requests.post(f"{{PROXY}}/llm_query", json={{"prompt": prompt, "model": model, "depth": {depth}}}, timeout=300)
+                d = r.json()
+                return d.get("response") or f"Error: {{d.get('error')}}"
+            except Exception as e:
+                return f"Error: {{e}}"
+
+        def llm_query_batched(prompts, model=None):
+            try:
+                r = requests.post(f"{{PROXY}}/llm_query_batched", json={{"prompts": prompts, "model": model, "depth": {depth}}}, timeout=300)
+                d = r.json()
+                return d.get("responses") or [f"Error: {{d.get('error')}}"] * len(prompts)
+            except Exception as e:
+                return [f"Error: {{e}}"] * len(prompts)
+
+        def load_state():
+            if os.path.exists(STATE):
+                try:
+                    with open(STATE, "rb") as f:
+                        return dill.load(f)
+                except:
+                    pass
+            return {{}}
+
+        def save_state(s):
+            clean = {{k: v for k, v in s.items() if not k.startswith("_")}}
+            for k in list(clean.keys()):
+                try:
+                    dill.dumps(clean[k])
+                except:
+                    del clean[k]
+            with open(STATE, "wb") as f:
+                dill.dump(clean, f)
+
+        _locals = load_state()
+
+        def FINAL_VAR(name):
+            name = name.strip().strip("\\"\\'")
+            return str(_locals.get(name, f"Error: Variable '{{name}}' not found"))
+
+        _globals = {{"__builtins__": __builtins__, "__name__": "__main__", "llm_query": llm_query, "llm_query_batched": llm_query_batched, "FINAL_VAR": FINAL_VAR}}
+
+        code = base64.b64decode("{code_b64}").decode()
+        stdout_buf, stderr_buf = io.StringIO(), io.StringIO()
+        old_stdout, old_stderr = sys.stdout, sys.stderr
+
+        try:
+            sys.stdout, sys.stderr = stdout_buf, stderr_buf
+            combined = {{**_globals, **_locals}}
+            exec(code, combined, combined)
+            for k, v in combined.items():
+                if k not in _globals and not k.startswith("_"):
+                    _locals[k] = v
+        except:
+            traceback.print_exc(file=stderr_buf)
+        finally:
+            sys.stdout, sys.stderr = old_stdout, old_stderr
+
+        save_state(_locals)
+        print(json.dumps({{"stdout": stdout_buf.getvalue(), "stderr": stderr_buf.getvalue(), "locals": {{k: repr(v) for k, v in _locals.items() if not k.startswith("_")}}}}, ensure_ascii=False))
+        '''
+    )
+
+
+class DockerREPL(NonIsolatedEnv):
+    """
+    Docker REPL - runs Python in a Docker container with LLM support.
+
+    Requires: Docker with a Python 3.11+ image (default: python:3.11-slim).
+    """
+
+    def __init__(
+        self,
+        image: str = "python:3.11-slim",
+        lm_handler_address: tuple[str, int] | None = None,
+        context_payload: dict | list | str | None = None,
+        setup_code: str | None = None,
+        persistent: bool = False,
+        depth: int = 1,
+        **kwargs,
+    ):
+        if persistent:
+            raise NotImplementedError(
+                "Persistent REPLs are currently not supported for environment: DockerREPL"
+            )
+        super().__init__(persistent=persistent, depth=depth, **kwargs)
+
+        self.image = image
+        self.lm_handler_address = lm_handler_address
+        self.container_id: str | None = None
+        self.proxy_server: HTTPServer | None = None
+        self.proxy_thread: threading.Thread | None = None
+        self.proxy_port: int = 0
+        base_dir = os.environ.get(
+            "RLM_DOCKER_WORKSPACE_DIR", os.path.join(os.getcwd(), ".rlm_workspace")
+        )
+        os.makedirs(base_dir, exist_ok=True)
+        self.temp_dir = tempfile.mkdtemp(prefix="docker_repl_", dir=base_dir)
+        self.pending_calls: list[RLMChatCompletion] = []
+        self._calls_lock = threading.Lock()
+
+        self.setup()
+
+        if context_payload:
+            self.load_context(context_payload)
+        if setup_code:
+            self.execute_code(setup_code)
+
+    def setup(self):
+        """Start the proxy server and Docker container."""
+        # Start LLM proxy server
+        handler = type(
+            "Handler",
+            (LLMProxyHandler,),
+            {
+                "lm_handler_address": self.lm_handler_address,
+                "pending_calls": self.pending_calls,
+                "lock": self._calls_lock,
+                "depth": self.depth,
+            },
+        )
+        self.proxy_server = HTTPServer(("127.0.0.1", 0), handler)
+        self.proxy_port = self.proxy_server.server_address[1]
+        self.proxy_thread = threading.Thread(target=self.proxy_server.serve_forever, daemon=True)
+        self.proxy_thread.start()
+
+        # Start Docker container
+        result = subprocess.run(
+            [
+                "docker",
+                "run",
+                "-d",
+                "--rm",
+                "-v",
+                f"{self.temp_dir}:/workspace",
+                "--add-host",
+                "host.docker.internal:host-gateway",
+                self.image,
+                "tail",
+                "-f",
+                "/dev/null",
+            ],
+            capture_output=True,
+            text=True,
+        )
+        if result.returncode != 0:
+            raise RuntimeError(f"Failed to start container: {result.stderr}")
+
+        self.container_id = result.stdout.strip()
+
+        # Install dependencies
+        subprocess.run(
+            ["docker", "exec", self.container_id, "pip", "install", "-q", "dill", "requests"],
+            capture_output=True,
+        )
+
+    def load_context(self, context_payload: dict | list | str):
+        """Load context by writing to a file in the mounted workspace."""
+        if isinstance(context_payload, str):
+            context_path = os.path.join(self.temp_dir, "context.txt")
+            with open(context_path, "w") as f:
+                f.write(context_payload)
+            self.execute_code(
+                "with open('/workspace/context.txt', 'r') as f:\n context = f.read()"
+            )
+        else:
+            context_path = os.path.join(self.temp_dir, "context.json")
+            with open(context_path, "w") as f:
+                json.dump(context_payload, f)
+            self.execute_code(
+                "import json\nwith open('/workspace/context.json', 'r') as f:\n context = json.load(f)"
+            )
+
+    def execute_code(self, code: str) -> REPLResult:
+        start = time.perf_counter()
+
+        with self._calls_lock:
+            self.pending_calls.clear()
+
+        script = _build_exec_script(code, self.proxy_port, self.depth)
+        result = subprocess.run(
+            ["docker", "exec", self.container_id, "python", "-c", script],
+            capture_output=True,
+            text=True,
+        )
+
+        with self._calls_lock:
+            calls = self.pending_calls.copy()
+            self.pending_calls.clear()
+
+        try:
+            lines = result.stdout.strip().split("\n")
+            data = json.loads(lines[-1]) if lines else {}
+            return REPLResult(
+                stdout=data.get("stdout", ""),
+                stderr=data.get("stderr", "") + result.stderr,
+                locals=data.get("locals", {}),
+                execution_time=time.perf_counter() - start,
+                rlm_calls=calls,
+            )
+        except json.JSONDecodeError:
+            return REPLResult(
+                stdout=result.stdout,
+                stderr=result.stderr or "Parse error",
+                locals={},
+                execution_time=time.perf_counter() - start,
+                rlm_calls=calls,
+            )
+
+    def cleanup(self):
+        if hasattr(self, "container_id") and self.container_id:
+            subprocess.run(["docker", "stop", self.container_id], capture_output=True)
+            self.container_id = None
+        if hasattr(self, "proxy_server") and self.proxy_server:
+            self.proxy_server.shutdown()
+            self.proxy_server = None
+        if hasattr(self, "temp_dir") and os.path.exists(self.temp_dir):
+            import shutil

+            shutil.rmtree(self.temp_dir, ignore_errors=True)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, *args):
+        self.cleanup()
+        return False
+
+    def __del__(self):
+        self.cleanup()
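
For orientation, a minimal usage sketch of the DockerREPL class added above. This is not part of the package: it assumes a local Docker daemon and that the module is importable as groknroll.environments.docker_repl; without an lm_handler_address, any llm_query() call made inside the container simply receives an error string back from the proxy.

# Sketch only, not package code: runs a snippet in a throwaway container.
# Assumes Docker is running locally; llm_query()/llm_query_batched() inside the
# container would need an LM handler address passed to the constructor.
from groknroll.environments.docker_repl import DockerREPL

with DockerREPL(image="python:3.11-slim") as repl:
    result = repl.execute_code("x = 2 + 2\nprint(x)")
    print(result.stdout)  # expected: "4"
    print(result.locals)  # persisted locals, e.g. {'x': '4'}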