groknroll-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- groknroll/__init__.py +36 -0
- groknroll/__main__.py +9 -0
- groknroll/agents/__init__.py +18 -0
- groknroll/agents/agent_manager.py +187 -0
- groknroll/agents/base_agent.py +118 -0
- groknroll/agents/build_agent.py +231 -0
- groknroll/agents/plan_agent.py +215 -0
- groknroll/cli/__init__.py +7 -0
- groknroll/cli/enhanced_cli.py +372 -0
- groknroll/cli/large_codebase_cli.py +413 -0
- groknroll/cli/main.py +331 -0
- groknroll/cli/rlm_commands.py +258 -0
- groknroll/clients/__init__.py +63 -0
- groknroll/clients/anthropic.py +112 -0
- groknroll/clients/azure_openai.py +142 -0
- groknroll/clients/base_lm.py +33 -0
- groknroll/clients/gemini.py +162 -0
- groknroll/clients/litellm.py +105 -0
- groknroll/clients/openai.py +129 -0
- groknroll/clients/portkey.py +94 -0
- groknroll/core/__init__.py +9 -0
- groknroll/core/agent.py +339 -0
- groknroll/core/comms_utils.py +264 -0
- groknroll/core/context.py +251 -0
- groknroll/core/exceptions.py +181 -0
- groknroll/core/large_codebase.py +564 -0
- groknroll/core/lm_handler.py +206 -0
- groknroll/core/rlm.py +446 -0
- groknroll/core/rlm_codebase.py +448 -0
- groknroll/core/rlm_integration.py +256 -0
- groknroll/core/types.py +276 -0
- groknroll/environments/__init__.py +34 -0
- groknroll/environments/base_env.py +182 -0
- groknroll/environments/constants.py +32 -0
- groknroll/environments/docker_repl.py +336 -0
- groknroll/environments/local_repl.py +388 -0
- groknroll/environments/modal_repl.py +502 -0
- groknroll/environments/prime_repl.py +588 -0
- groknroll/logger/__init__.py +4 -0
- groknroll/logger/rlm_logger.py +63 -0
- groknroll/logger/verbose.py +393 -0
- groknroll/operations/__init__.py +15 -0
- groknroll/operations/bash_ops.py +447 -0
- groknroll/operations/file_ops.py +473 -0
- groknroll/operations/git_ops.py +620 -0
- groknroll/oracle/__init__.py +11 -0
- groknroll/oracle/codebase_indexer.py +238 -0
- groknroll/oracle/oracle_agent.py +278 -0
- groknroll/setup.py +34 -0
- groknroll/storage/__init__.py +14 -0
- groknroll/storage/database.py +272 -0
- groknroll/storage/models.py +128 -0
- groknroll/utils/__init__.py +0 -0
- groknroll/utils/parsing.py +168 -0
- groknroll/utils/prompts.py +146 -0
- groknroll/utils/rlm_utils.py +19 -0
- groknroll-2.0.0.dist-info/METADATA +246 -0
- groknroll-2.0.0.dist-info/RECORD +62 -0
- groknroll-2.0.0.dist-info/WHEEL +5 -0
- groknroll-2.0.0.dist-info/entry_points.txt +3 -0
- groknroll-2.0.0.dist-info/licenses/LICENSE +21 -0
- groknroll-2.0.0.dist-info/top_level.txt +1 -0
groknroll/core/types.py
ADDED

@@ -0,0 +1,276 @@

import json
from dataclasses import dataclass
from types import ModuleType
from typing import Any, Literal

ClientBackend = Literal[
    "openai",
    "portkey",
    "openrouter",
    "vercel",
    "vllm",
    "litellm",
    "anthropic",
    "azure_openai",
    "gemini",
]
EnvironmentType = Literal["local", "docker", "modal", "prime"]


def _serialize_value(value: Any) -> Any:
    """Convert a value to a JSON-serializable representation."""
    if value is None or isinstance(value, (bool, int, float, str)):
        return value
    if isinstance(value, ModuleType):
        return f"<module '{value.__name__}'>"
    if isinstance(value, (list, tuple)):
        return [_serialize_value(v) for v in value]
    if isinstance(value, dict):
        return {str(k): _serialize_value(v) for k, v in value.items()}
    if callable(value):
        return f"<{type(value).__name__} '{getattr(value, '__name__', repr(value))}'>"
    # Fall back to repr() for all other types
    try:
        return repr(value)
    except Exception:
        return f"<{type(value).__name__}>"


########################################################
######## Types for LM Cost Tracking #########
########################################################


@dataclass
class ModelUsageSummary:
    total_calls: int
    total_input_tokens: int
    total_output_tokens: int

    def to_dict(self):
        return {
            "total_calls": self.total_calls,
            "total_input_tokens": self.total_input_tokens,
            "total_output_tokens": self.total_output_tokens,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ModelUsageSummary":
        return cls(
            total_calls=data.get("total_calls", 0),
            total_input_tokens=data.get("total_input_tokens", 0),
            total_output_tokens=data.get("total_output_tokens", 0),
        )


@dataclass
class UsageSummary:
    model_usage_summaries: dict[str, ModelUsageSummary]

    @property
    def total_cost(self) -> float:
        """Calculate total cost across all models.

        Note: This is a placeholder that returns 0.0 until cost calculation
        is fully implemented with model-specific pricing data.
        """
        # TODO: Implement actual cost calculation using model pricing
        # For now, return 0 to allow cost limiting infrastructure to work
        return 0.0

    def to_dict(self):
        return {
            "model_usage_summaries": {
                model: usage_summary.to_dict()
                for model, usage_summary in self.model_usage_summaries.items()
            },
        }

    @classmethod
    def from_dict(cls, data: dict) -> "UsageSummary":
        return cls(
            model_usage_summaries={
                model: ModelUsageSummary.from_dict(usage_summary)
                for model, usage_summary in data.get("model_usage_summaries", {}).items()
            },
        )


########################################################
######## Types for REPL and RLM Iterations #########
########################################################
@dataclass
class RLMChatCompletion:
    """Record of a single LLM call made from within the environment."""

    root_model: str
    prompt: str | dict[str, Any]
    response: str
    usage_summary: UsageSummary
    execution_time: float

    def to_dict(self):
        return {
            "root_model": self.root_model,
            "prompt": self.prompt,
            "response": self.response,
            "usage_summary": self.usage_summary.to_dict(),
            "execution_time": self.execution_time,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "RLMChatCompletion":
        return cls(
            root_model=data.get("root_model"),
            prompt=data.get("prompt"),
            response=data.get("response"),
            usage_summary=UsageSummary.from_dict(data.get("usage_summary", {})),
            execution_time=data.get("execution_time"),
        )


@dataclass
class REPLResult:
    stdout: str
    stderr: str
    locals: dict
    execution_time: float | None
    rlm_calls: list["RLMChatCompletion"]

    def __init__(
        self,
        stdout: str,
        stderr: str,
        locals: dict,
        execution_time: float | None = None,
        rlm_calls: list["RLMChatCompletion"] | None = None,
    ):
        self.stdout = stdout
        self.stderr = stderr
        self.locals = locals
        self.execution_time = execution_time
        self.rlm_calls = rlm_calls or []

    def __str__(self):
        return f"REPLResult(stdout={self.stdout}, stderr={self.stderr}, locals={self.locals}, execution_time={self.execution_time}, rlm_calls={len(self.rlm_calls)})"

    def to_dict(self):
        return {
            "stdout": self.stdout,
            "stderr": self.stderr,
            "locals": {k: _serialize_value(v) for k, v in self.locals.items()},
            "execution_time": self.execution_time,
            "rlm_calls": [call.to_dict() for call in self.rlm_calls],
        }


@dataclass
class CodeBlock:
    code: str
    result: REPLResult

    def to_dict(self):
        return {"code": self.code, "result": self.result.to_dict()}


@dataclass
class RLMIteration:
    prompt: str | dict[str, Any]
    response: str
    code_blocks: list[CodeBlock]
    final_answer: str | None = None
    iteration_time: float | None = None

    def to_dict(self):
        return {
            "prompt": self.prompt,
            "response": self.response,
            "code_blocks": [code_block.to_dict() for code_block in self.code_blocks],
            "final_answer": self.final_answer,
            "iteration_time": self.iteration_time,
        }


########################################################
######## Types for RLM Metadata #########
########################################################


@dataclass
class RLMMetadata:
    """Metadata about the RLM configuration."""

    root_model: str
    max_depth: int
    max_iterations: int
    backend: str
    backend_kwargs: dict[str, Any]
    environment_type: str
    environment_kwargs: dict[str, Any]
    other_backends: list[str] | None = None

    def to_dict(self):
        return {
            "root_model": self.root_model,
            "max_depth": self.max_depth,
            "max_iterations": self.max_iterations,
            "backend": self.backend,
            "backend_kwargs": {k: _serialize_value(v) for k, v in self.backend_kwargs.items()},
            "environment_type": self.environment_type,
            "environment_kwargs": {
                k: _serialize_value(v) for k, v in self.environment_kwargs.items()
            },
            "other_backends": self.other_backends,
        }


########################################################
######## Types for RLM Prompting #########
########################################################


@dataclass
class QueryMetadata:
    context_lengths: list[int]
    context_total_length: int
    context_type: str

    def __init__(self, prompt: str | list[str] | dict[Any, Any] | list[dict[Any, Any]]):
        if isinstance(prompt, str):
            self.context_lengths = [len(prompt)]
            self.context_type = "str"
        elif isinstance(prompt, dict):
            self.context_type = "dict"
            self.context_lengths = []
            for chunk in prompt.values():
                if isinstance(chunk, str):
                    self.context_lengths.append(len(chunk))
                    continue
                try:
                    self.context_lengths.append(len(json.dumps(chunk, default=str)))
                except Exception:
                    self.context_lengths.append(len(repr(chunk)))
        elif isinstance(prompt, list):
            self.context_type = "list"
            if len(prompt) == 0:
                self.context_lengths = [0]
            elif isinstance(prompt[0], dict):
                if "content" in prompt[0]:
                    self.context_lengths = [len(str(chunk.get("content", ""))) for chunk in prompt]
                else:
                    self.context_lengths = []
                    for chunk in prompt:
                        try:
                            self.context_lengths.append(len(json.dumps(chunk, default=str)))
                        except Exception:
                            self.context_lengths.append(len(repr(chunk)))
            else:
                self.context_lengths = [len(chunk) for chunk in prompt]
        else:
            raise ValueError(f"Invalid prompt type: {type(prompt)}")

        self.context_total_length = sum(self.context_lengths)
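These types are deliberately plain: everything round-trips through to_dict/from_dict, and _serialize_value downgrades non-JSON values (modules, callables) to descriptive strings instead of raising. A minimal usage sketch, as a standalone illustration with a made-up model name — inside the package these objects are constructed by the RLM itself:

    from groknroll.core.types import (
        ModelUsageSummary,
        QueryMetadata,
        REPLResult,
        UsageSummary,
    )

    # Per-model token accounting, keyed by model name ("gpt-4o" is a stand-in).
    usage = UsageSummary(
        model_usage_summaries={
            "gpt-4o": ModelUsageSummary(total_calls=2, total_input_tokens=800, total_output_tokens=150),
        }
    )
    restored = UsageSummary.from_dict(usage.to_dict())
    assert restored.model_usage_summaries["gpt-4o"].total_input_tokens == 800

    # REPLResult.to_dict() survives non-JSON locals: callables and modules
    # become descriptive strings via _serialize_value instead of raising.
    result = REPLResult(stdout="4\n", stderr="", locals={"f": len}, execution_time=0.01)
    print(result.to_dict()["locals"])  # {'f': "<builtin_function_or_method 'len'>"}

    # QueryMetadata measures the character length of each context chunk.
    meta = QueryMetadata([{"role": "user", "content": "hello"}])
    print(meta.context_type, meta.context_lengths, meta.context_total_length)  # list [5] 5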
groknroll/environments/__init__.py
ADDED

@@ -0,0 +1,34 @@

from typing import Any, Literal

from groknroll.environments.base_env import BaseEnv, SupportsPersistence
from groknroll.environments.local_repl import LocalREPL

__all__ = ["BaseEnv", "LocalREPL", "SupportsPersistence", "get_environment"]


def get_environment(
    environment: Literal["local", "modal", "docker", "prime"],
    environment_kwargs: dict[str, Any],
) -> BaseEnv:
    """
    Route an environment name and its args (as a dict) to the matching environment class.
    Currently supported environments: ['local', 'modal', 'docker', 'prime']
    """
    if environment == "local":
        return LocalREPL(**environment_kwargs)
    elif environment == "modal":
        from groknroll.environments.modal_repl import ModalREPL

        return ModalREPL(**environment_kwargs)
    elif environment == "docker":
        from groknroll.environments.docker_repl import DockerREPL

        return DockerREPL(**environment_kwargs)
    elif environment == "prime":
        from groknroll.environments.prime_repl import PrimeREPL

        return PrimeREPL(**environment_kwargs)
    else:
        raise ValueError(
            f"Unknown environment: {environment}. Supported: ['local', 'modal', 'docker', 'prime']"
        )
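Note that only LocalREPL is imported eagerly; the Modal, Docker, and Prime backends are imported lazily inside get_environment, so their dependencies are only needed when that backend is actually selected. A minimal sketch of the local path (the kwargs shown are hypothetical; the exact options LocalREPL accepts live in local_repl.py):

    from groknroll.environments import get_environment

    env = get_environment("local", {"persistent": True})  # kwargs are splatted into LocalREPL(...)
    env.setup()
    env.load_context({"notes": "some context string"})
    result = env.execute_code("print(2 + 2)")  # returns a groknroll.core.types.REPLResult
    print(result.stdout)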
groknroll/environments/base_env.py
ADDED

@@ -0,0 +1,182 @@

from abc import ABC, abstractmethod
from typing import Any, Protocol, runtime_checkable

from groknroll.core.types import REPLResult


class BaseEnv(ABC):
    """
    Base REPL-like environment that the RLM interacts with. The two primary variants are isolated
    and non-isolated, where isolated environments run on a separate machine from the LM.
    """

    def __init__(self, persistent: bool = False, depth: int = 1, **kwargs):
        self.persistent = persistent
        self.depth = depth
        self.kwargs = kwargs

    @abstractmethod
    def setup(self):
        raise NotImplementedError

    @abstractmethod
    def load_context(self, context_payload: dict | list | str):
        raise NotImplementedError

    @abstractmethod
    def execute_code(self, code: str) -> REPLResult:
        raise NotImplementedError


class IsolatedEnv(BaseEnv, ABC):
    """
    These environments (e.g. Prime Envs, Modal Envs) sit on a completely separate machine from the LM,
    guaranteeing complete isolation from the LM process.
    """

    def __init__(self, persistent: bool = False, **kwargs):
        super().__init__(persistent=persistent, **kwargs)

    @abstractmethod
    def setup(self):
        raise NotImplementedError

    @abstractmethod
    def load_context(self, context_payload: dict | list | str):
        raise NotImplementedError

    @abstractmethod
    def execute_code(self, code: str) -> REPLResult:
        raise NotImplementedError


class NonIsolatedEnv(BaseEnv, ABC):
    """
    These environments run on the same machine as the LM and provide different levels of isolation
    depending on the choice of environment. The simplest, and the default, is a local Python REPL
    that runs as a subprocess.
    """

    def __init__(self, persistent: bool = False, **kwargs):
        super().__init__(persistent=persistent, **kwargs)

    @abstractmethod
    def setup(self):
        raise NotImplementedError

    @abstractmethod
    def load_context(self, context_payload: dict | list | str):
        raise NotImplementedError

    @abstractmethod
    def execute_code(self, code: str) -> REPLResult:
        raise NotImplementedError


@runtime_checkable
class SupportsPersistence(Protocol):
    """Protocol for environments that support persistent multi-turn sessions.

    CHECKING SUPPORT:
        Use isinstance(env, SupportsPersistence) to check if an environment
        supports persistence capabilities.

    IMPLEMENTING THIS PROTOCOL:
        To add persistence to your environment, implement these 5 methods.
        See tests/test_local_repl_persistent.py for expected behavior.

    VERSIONING BEHAVIOR:
        Contexts and histories are versioned with numeric suffixes:
        - Contexts -> context_0, context_1, context_2, ...
        - Histories -> history_0, history_1, history_2, ...

    ALIASING BEHAVIOR:
        The unversioned names always point to index 0:
        - context -> context_0 (first context)
        - history -> history_0 (first history)

    EXAMPLE IMPLEMENTATION:
        See groknroll/environments/local_repl.py for a complete reference.

    TESTS:
        - Unit tests: tests/test_local_repl_persistent.py
        - Integration tests: tests/test_multi_turn_integration.py

        Run: uv run pytest tests/test_local_repl_persistent.py -v
    """

    def update_handler_address(self, address: tuple[str, int]) -> None:
        """Update the LM handler address for nested LLM calls.

        Called by RLM when the handler address changes between completions.
        Store the address so llm_query() calls from executed code can reach
        the LM handler.

        Args:
            address: (host, port) tuple for the LM handler server.
        """
        ...

    def add_context(
        self, context_payload: dict | list | str, context_index: int | None = None
    ) -> int:
        """Add a context payload, making it available as context_N in code.

        Versioning:
            - context_index=None: auto-increment (0, 1, 2, ...)
            - context_index=N: use specific index N

        Storage:
            Must store so executed code can access:
            - context_0, context_1, etc. (versioned)
            - context (alias to context_0)

        Args:
            context_payload: The context data (string, dict, or list).
            context_index: Optional specific index, or None to auto-increment.

        Returns:
            The index used (for auto-increment, returns the assigned index).
        """
        ...

    def get_context_count(self) -> int:
        """Return the number of contexts added so far.

        Used by RLM to inform the model how many contexts are available.
        """
        ...

    def add_history(
        self, message_history: list[dict[str, Any]], history_index: int | None = None
    ) -> int:
        """Add a message history, making it available as history_N in code.

        Versioning:
            - history_index=None: auto-increment (0, 1, 2, ...)
            - history_index=N: use specific index N

        Storage:
            Must store so executed code can access:
            - history_0, history_1, etc. (versioned)
            - history (alias to history_0)

        IMPORTANT: Store a deep copy, not a reference. The caller may
        modify the list after calling this method.

        Args:
            message_history: List of message dicts (role, content).
            history_index: Optional specific index, or None to auto-increment.

        Returns:
            The index used.
        """
        ...

    def get_history_count(self) -> int:
        """Return the number of histories added so far.

        Used by RLM to inform the model how many conversation histories
        are available.
        """
        ...
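Because the protocol is decorated with @runtime_checkable, isinstance performs a structural check (method presence, not signatures), so no inheritance is required. A toy in-memory implementation of the five methods — purely illustrative, not part of the package:

    import copy
    from typing import Any

    from groknroll.environments.base_env import SupportsPersistence


    class ToyPersistentEnv:
        """Hypothetical minimal object satisfying the protocol."""

        def __init__(self):
            self._contexts: dict[int, Any] = {}
            self._histories: dict[int, list[dict[str, Any]]] = {}
            self._handler_address: tuple[str, int] | None = None

        def update_handler_address(self, address: tuple[str, int]) -> None:
            self._handler_address = address

        def add_context(self, context_payload, context_index: int | None = None) -> int:
            idx = context_index if context_index is not None else len(self._contexts)
            self._contexts[idx] = context_payload  # exposed to code as context_{idx}
            return idx

        def get_context_count(self) -> int:
            return len(self._contexts)

        def add_history(self, message_history, history_index: int | None = None) -> int:
            idx = history_index if history_index is not None else len(self._histories)
            self._histories[idx] = copy.deepcopy(message_history)  # deep copy, per the docstring
            return idx

        def get_history_count(self) -> int:
            return len(self._histories)


    assert isinstance(ToyPersistentEnv(), SupportsPersistence)  # structural check, no subclassing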
groknroll/environments/constants.py
ADDED

@@ -0,0 +1,32 @@

# Default packages for isolated REPL environments (Modal, Prime, etc.)

APT_PACKAGES = [
    "build-essential",
    "git",
    "curl",
    "wget",
    "libopenblas-dev",
    "liblapack-dev",
]

PIP_PACKAGES = [
    # Data science essentials
    "numpy>=1.26.0",
    "pandas>=2.1.0",
    "scipy>=1.11.0",
    # Math & symbolic computation
    "sympy>=1.12",
    # HTTP & APIs
    "requests>=2.31.0",
    "httpx>=0.25.0",
    "flask>=3.0.0",
    # Data formats
    "pyyaml>=6.0",
    "toml>=0.10.2",
    # Utilities
    "tqdm>=4.66.0",
    "python-dateutil>=2.8.2",
    "regex>=2023.0.0",
    # For state serialization
    "dill>=0.3.7",
]
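These defaults give isolated sandboxes a reasonable scientific-Python baseline. One plausible way a backend could render them into container build commands (illustrative only; the real wiring for each backend lives in its respective *_repl.py module):

    import shlex

    from groknroll.environments.constants import APT_PACKAGES, PIP_PACKAGES

    # Quote pip specs: ">=" would otherwise be treated as shell redirection.
    apt_cmd = "apt-get install -y " + " ".join(APT_PACKAGES)
    pip_cmd = "pip install " + " ".join(shlex.quote(spec) for spec in PIP_PACKAGES)
    print(apt_cmd)
    print(pip_cmd)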