letta-nightly 0.6.48.dev20250406104033__py3-none-any.whl → 0.6.49.dev20250408030511__py3-none-any.whl
This diff shows the differences between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of letta-nightly may be problematic.
- letta/__init__.py +1 -1
- letta/agent.py +47 -12
- letta/agents/base_agent.py +7 -4
- letta/agents/helpers.py +52 -0
- letta/agents/letta_agent.py +105 -42
- letta/agents/voice_agent.py +2 -2
- letta/constants.py +13 -1
- letta/errors.py +10 -3
- letta/functions/function_sets/base.py +65 -0
- letta/functions/interface.py +2 -2
- letta/functions/mcp_client/base_client.py +18 -1
- letta/{dynamic_multi_agent.py → groups/dynamic_multi_agent.py} +3 -0
- letta/groups/helpers.py +113 -0
- letta/{round_robin_multi_agent.py → groups/round_robin_multi_agent.py} +2 -0
- letta/groups/sleeptime_multi_agent.py +259 -0
- letta/{supervisor_multi_agent.py → groups/supervisor_multi_agent.py} +1 -0
- letta/helpers/converters.py +109 -7
- letta/helpers/message_helper.py +1 -0
- letta/helpers/tool_rule_solver.py +40 -23
- letta/interface.py +12 -5
- letta/interfaces/anthropic_streaming_interface.py +329 -0
- letta/llm_api/anthropic.py +12 -1
- letta/llm_api/anthropic_client.py +65 -14
- letta/llm_api/azure_openai.py +2 -2
- letta/llm_api/google_ai_client.py +13 -2
- letta/llm_api/google_constants.py +3 -0
- letta/llm_api/google_vertex_client.py +2 -2
- letta/llm_api/llm_api_tools.py +1 -1
- letta/llm_api/llm_client.py +7 -0
- letta/llm_api/llm_client_base.py +2 -7
- letta/llm_api/openai.py +7 -1
- letta/llm_api/openai_client.py +250 -0
- letta/orm/__init__.py +4 -0
- letta/orm/agent.py +6 -0
- letta/orm/block.py +32 -2
- letta/orm/block_history.py +46 -0
- letta/orm/custom_columns.py +60 -0
- letta/orm/enums.py +7 -0
- letta/orm/group.py +6 -0
- letta/orm/groups_blocks.py +13 -0
- letta/orm/llm_batch_items.py +55 -0
- letta/orm/llm_batch_job.py +48 -0
- letta/orm/message.py +7 -1
- letta/orm/organization.py +2 -0
- letta/orm/sqlalchemy_base.py +18 -15
- letta/prompts/system/memgpt_sleeptime_chat.txt +52 -0
- letta/prompts/system/sleeptime.txt +26 -0
- letta/schemas/agent.py +13 -1
- letta/schemas/enums.py +17 -2
- letta/schemas/group.py +14 -1
- letta/schemas/letta_message.py +5 -3
- letta/schemas/llm_batch_job.py +53 -0
- letta/schemas/llm_config.py +14 -4
- letta/schemas/message.py +44 -0
- letta/schemas/tool.py +3 -0
- letta/schemas/usage.py +1 -0
- letta/server/db.py +2 -0
- letta/server/rest_api/app.py +1 -1
- letta/server/rest_api/chat_completions_interface.py +8 -3
- letta/server/rest_api/interface.py +36 -7
- letta/server/rest_api/routers/v1/agents.py +53 -39
- letta/server/rest_api/routers/v1/runs.py +14 -2
- letta/server/rest_api/utils.py +15 -4
- letta/server/server.py +120 -71
- letta/services/agent_manager.py +70 -6
- letta/services/block_manager.py +190 -2
- letta/services/group_manager.py +68 -0
- letta/services/helpers/agent_manager_helper.py +6 -4
- letta/services/llm_batch_manager.py +139 -0
- letta/services/message_manager.py +17 -31
- letta/services/tool_executor/tool_execution_sandbox.py +1 -3
- letta/services/tool_executor/tool_executor.py +9 -20
- letta/services/tool_manager.py +14 -3
- letta/services/tool_sandbox/__init__.py +0 -0
- letta/services/tool_sandbox/base.py +188 -0
- letta/services/tool_sandbox/e2b_sandbox.py +116 -0
- letta/services/tool_sandbox/local_sandbox.py +221 -0
- letta/sleeptime_agent.py +61 -0
- letta/streaming_interface.py +20 -10
- letta/utils.py +4 -0
- {letta_nightly-0.6.48.dev20250406104033.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/METADATA +2 -2
- {letta_nightly-0.6.48.dev20250406104033.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/RECORD +85 -69
- letta/offline_memory_agent.py +0 -173
- letta/services/tool_executor/async_tool_execution_sandbox.py +0 -397
- {letta_nightly-0.6.48.dev20250406104033.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.48.dev20250406104033.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.48.dev20250406104033.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/entry_points.txt +0 -0
letta/services/tool_executor/async_tool_execution_sandbox.py (removed)
@@ -1,397 +0,0 @@
-import ast
-import asyncio
-import base64
-import os
-import pickle
-import sys
-import tempfile
-import uuid
-from typing import Any, Dict, Optional, Tuple
-
-from letta.functions.helpers import generate_model_from_args_json_schema
-from letta.schemas.agent import AgentState
-from letta.schemas.sandbox_config import SandboxRunResult, SandboxType
-from letta.services.helpers.tool_execution_helper import (
-    add_imports_and_pydantic_schemas_for_args,
-    create_venv_for_local_sandbox,
-    find_python_executable,
-    install_pip_requirements_for_sandbox,
-)
-from letta.services.organization_manager import OrganizationManager
-from letta.services.sandbox_config_manager import SandboxConfigManager
-from letta.services.tool_manager import ToolManager
-from letta.tracing import log_event, trace_method
-from letta.utils import get_friendly_error_msg
-
-
-class AsyncToolExecutionSandbox:
-    METADATA_CONFIG_STATE_KEY = "config_state"
-    REQUIREMENT_TXT_NAME = "requirements.txt"
-
-    # For generating long, random marker hashes
-    NAMESPACE = uuid.NAMESPACE_DNS
-    LOCAL_SANDBOX_RESULT_START_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-start-marker"))
-    LOCAL_SANDBOX_RESULT_END_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-end-marker"))
-
-    # This is the variable name in the auto-generated code that contains the function results
-    # We make this a long random string to avoid collisions with any variables in the user's code
-    LOCAL_SANDBOX_RESULT_VAR_NAME = "result_ZQqiequkcFwRwwGQMqkt"
-
-    def __init__(self, tool_name: str, args: dict, user, force_recreate=True, force_recreate_venv=False, tool_object=None):
-        self.tool_name = tool_name
-        self.args = args
-        self.user = user
-        # get organization
-        self.organization = OrganizationManager().get_organization_by_id(self.user.organization_id)
-        self.privileged_tools = self.organization.privileged_tools
-
-        # If a tool object is provided, we use it directly, otherwise pull via name
-        if tool_object is not None:
-            self.tool = tool_object
-        else:
-            # Get the tool via name
-            self.tool = ToolManager().get_tool_by_name(tool_name=tool_name, actor=self.user)
-            if not self.tool:
-                raise ValueError(
-                    f"Agent attempted to invoke tool {self.tool_name} that does not exist for organization {self.user.organization_id}"
-                )
-
-        self.sandbox_config_manager = SandboxConfigManager()
-        self.force_recreate = force_recreate
-        self.force_recreate_venv = force_recreate_venv
-
-    async def run(
-        self, agent_state: Optional[AgentState] = None, additional_env_vars: Optional[Dict] = None, inject_agent_state: bool = False
-    ) -> SandboxRunResult:
-        """
-        Run the tool in a sandbox environment asynchronously,
-        *always* using a subprocess for execution.
-        """
-        result = await self.run_local_dir_sandbox(
-            agent_state=agent_state, additional_env_vars=additional_env_vars, inject_agent_state=inject_agent_state
-        )
-
-        # Simple console logging for demonstration
-        for log_line in (result.stdout or []) + (result.stderr or []):
-            print(f"Tool execution log: {log_line}")
-
-        return result
-
-    @trace_method
-    async def run_local_dir_sandbox(
-        self, agent_state: Optional[AgentState], additional_env_vars: Optional[Dict], inject_agent_state: bool
-    ) -> SandboxRunResult:
-        """
-        Unified asynchronous method to run the tool in a local sandbox environment,
-        always via subprocess for multi-core parallelism.
-        """
-        # Get sandbox configuration
-        sbx_config = self.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.LOCAL, actor=self.user)
-        local_configs = sbx_config.get_local_config()
-        use_venv = local_configs.use_venv
-
-        # Prepare environment variables
-        env = os.environ.copy()
-        env_vars = self.sandbox_config_manager.get_sandbox_env_vars_as_dict(sandbox_config_id=sbx_config.id, actor=self.user, limit=100)
-        env.update(env_vars)
-
-        if agent_state:
-            env.update(agent_state.get_agent_env_vars_as_dict())
-
-        if additional_env_vars:
-            env.update(additional_env_vars)
-
-        # Make sure sandbox directory exists
-        sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)
-        if not os.path.exists(sandbox_dir) or not os.path.isdir(sandbox_dir):
-            os.makedirs(sandbox_dir)
-
-        # If using a virtual environment, ensure it's prepared in parallel
-        venv_preparation_task = None
-        if use_venv:
-            venv_path = str(os.path.join(sandbox_dir, local_configs.venv_name))
-            if self.force_recreate_venv or not os.path.isdir(venv_path):
-                venv_preparation_task = asyncio.create_task(self._prepare_venv(local_configs, venv_path, env))
-
-        # Generate and write execution script (always with markers, since we rely on stdout)
-        with tempfile.NamedTemporaryFile(mode="w", dir=sandbox_dir, suffix=".py", delete=False) as temp_file:
-            code = self.generate_execution_script(agent_state=agent_state, inject_agent_state=inject_agent_state)
-            temp_file.write(code)
-            temp_file.flush()
-            temp_file_path = temp_file.name
-
-        try:
-            # If we started a venv preparation task, wait for it to complete
-            if venv_preparation_task:
-                await venv_preparation_task
-
-            # Determine the python executable and environment for the subprocess
-            exec_env = env.copy()
-            if use_venv:
-                venv_path = str(os.path.join(sandbox_dir, local_configs.venv_name))
-                python_executable = find_python_executable(local_configs)
-                exec_env["VIRTUAL_ENV"] = venv_path
-                exec_env["PATH"] = os.path.join(venv_path, "bin") + ":" + exec_env["PATH"]
-            else:
-                # If not using venv, use whatever Python we are running on
-                python_executable = sys.executable
-
-            exec_env["PYTHONWARNINGS"] = "ignore"
-
-            # Execute in subprocess
-            return await self._execute_tool_subprocess(
-                sbx_config=sbx_config,
-                python_executable=python_executable,
-                temp_file_path=temp_file_path,
-                env=exec_env,
-                cwd=sandbox_dir,
-            )
-
-        except Exception as e:
-            print(f"Executing tool {self.tool_name} has an unexpected error: {e}")
-            print(f"Auto-generated code for debugging:\n\n{code}")
-            raise e
-        finally:
-            # Clean up the temp file
-            os.remove(temp_file_path)
-
-    async def _prepare_venv(self, local_configs, venv_path: str, env: Dict[str, str]):
-        """
-        Prepare virtual environment asynchronously (in a background thread).
-        """
-        sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)
-        log_event(name="start create_venv_for_local_sandbox", attributes={"venv_path": venv_path})
-
-        await asyncio.to_thread(
-            create_venv_for_local_sandbox,
-            sandbox_dir_path=sandbox_dir,
-            venv_path=venv_path,
-            env=env,
-            force_recreate=self.force_recreate_venv,
-        )
-        log_event(name="finish create_venv_for_local_sandbox")
-
-        log_event(name="start install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
-        await asyncio.to_thread(install_pip_requirements_for_sandbox, local_configs, upgrade=True, user_install_if_no_venv=False, env=env)
-        log_event(name="finish install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
-
-    @trace_method
-    async def _execute_tool_subprocess(
-        self, sbx_config, python_executable: str, temp_file_path: str, env: Dict[str, str], cwd: str
-    ) -> SandboxRunResult:
-        """
-        Execute user code in a subprocess, always capturing stdout and stderr.
-        We parse special markers to extract the pickled result string.
-        """
-        try:
-            log_event(name="start subprocess")
-
-            process = await asyncio.create_subprocess_exec(
-                python_executable, temp_file_path, env=env, cwd=cwd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
-            )
-
-            try:
-                stdout_bytes, stderr_bytes = await asyncio.wait_for(process.communicate(), timeout=60)
-            except asyncio.TimeoutError:
-                # Terminate the process on timeout
-                if process.returncode is None:
-                    process.terminate()
-                    try:
-                        await asyncio.wait_for(process.wait(), timeout=5)
-                    except asyncio.TimeoutError:
-                        process.kill()
-
-                raise TimeoutError(f"Executing tool {self.tool_name} timed out after 60 seconds.")
-
-            stdout = stdout_bytes.decode("utf-8") if stdout_bytes else ""
-            stderr = stderr_bytes.decode("utf-8") if stderr_bytes else ""
-            log_event(name="finish subprocess")
-
-            # Parse markers to isolate the function result
-            func_result, stdout_text = self.parse_out_function_results_markers(stdout)
-            func_return, agent_state = self.parse_best_effort(func_result)
-
-            return SandboxRunResult(
-                func_return=func_return,
-                agent_state=agent_state,
-                stdout=[stdout_text] if stdout_text else [],
-                stderr=[stderr] if stderr else [],
-                status="success" if process.returncode == 0 else "error",
-                sandbox_config_fingerprint=sbx_config.fingerprint(),
-            )
-
-        except (TimeoutError, Exception) as e:
-            # Distinguish between timeouts and other exceptions for clarity
-            if isinstance(e, TimeoutError):
-                raise e
-
-            print(f"Subprocess execution for tool {self.tool_name} encountered an error: {e}")
-            func_return = get_friendly_error_msg(
-                function_name=self.tool_name,
-                exception_name=type(e).__name__,
-                exception_message=str(e),
-            )
-            return SandboxRunResult(
-                func_return=func_return,
-                agent_state=None,
-                stdout=[],
-                stderr=[str(e)],
-                status="error",
-                sandbox_config_fingerprint=sbx_config.fingerprint(),
-            )
-
-    def parse_out_function_results_markers(self, text: str) -> Tuple[str, str]:
-        """
-        Parse the function results out of the stdout using special markers.
-        Returns (function_result_str, stripped_stdout).
-        """
-        if self.LOCAL_SANDBOX_RESULT_START_MARKER not in text:
-            # No markers found, so nothing to parse
-            return "", text
-
-        marker_len = len(self.LOCAL_SANDBOX_RESULT_START_MARKER)
-        start_index = text.index(self.LOCAL_SANDBOX_RESULT_START_MARKER) + marker_len
-        end_index = text.index(self.LOCAL_SANDBOX_RESULT_END_MARKER)
-
-        # The actual pickled base64 is between start_index and end_index
-        results_str = text[start_index:end_index]
-        # The rest of stdout (minus the markers)
-        remainder = text[: start_index - marker_len] + text[end_index + marker_len :]
-        return results_str, remainder
-
-    def parse_best_effort(self, text: str) -> Tuple[Any, Optional[AgentState]]:
-        """
-        Decode and unpickle the result from the function execution if possible.
-        Returns (function_return_value, agent_state).
-        """
-        if not text:
-            return None, None
-
-        result = pickle.loads(base64.b64decode(text))
-        agent_state = result["agent_state"] if result["agent_state"] is not None else None
-        return result["results"], agent_state
-
-    def parse_function_arguments(self, source_code: str, tool_name: str) -> list:
-        """
-        Get arguments of the given function from its source code via AST.
-        """
-        tree = ast.parse(source_code)
-        args = []
-        for node in ast.walk(tree):
-            if isinstance(node, ast.FunctionDef) and node.name == tool_name:
-                for arg in node.args.args:
-                    args.append(arg.arg)
-        return args
-
-    def generate_execution_script(self, agent_state: Optional[AgentState], inject_agent_state: bool) -> str:
-        """
-        Generate code to run inside of execution sandbox.
-        Serialize the agent state and arguments, call the tool,
-        then base64-encode/pickle the result.
-        """
-        code = "from typing import *\n"
-        code += "import pickle\n"
-        code += "import sys\n"
-        code += "import base64\n"
-
-        # Additional imports to support agent state
-        if inject_agent_state:
-            code += "import letta\n"
-            code += "from letta import * \n"
-
-        # Add schema code if available
-        if self.tool.args_json_schema:
-            schema_code = add_imports_and_pydantic_schemas_for_args(self.tool.args_json_schema)
-            if "from __future__ import annotations" in schema_code:
-                schema_code = schema_code.replace("from __future__ import annotations", "").lstrip()
-                code = "from __future__ import annotations\n\n" + code
-            code += schema_code + "\n"
-
-        # Load the agent state
-        if inject_agent_state:
-            agent_state_pickle = pickle.dumps(agent_state)
-            code += f"agent_state = pickle.loads({agent_state_pickle})\n"
-        else:
-            code += "agent_state = None\n"
-
-        # Initialize arguments
-        if self.tool.args_json_schema:
-            args_schema = generate_model_from_args_json_schema(self.tool.args_json_schema)
-            code += f"args_object = {args_schema.__name__}(**{self.args})\n"
-            for param in self.args:
-                code += f"{param} = args_object.{param}\n"
-        else:
-            for param in self.args:
-                code += self.initialize_param(param, self.args[param])
-
-        # Insert the tool's source code
-        code += "\n" + self.tool.source_code + "\n"
-
-        # Invoke the function and store the result in a global variable
-        code += (
-            f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME}"
-            + ' = {"results": '
-            + self.invoke_function_call(inject_agent_state=inject_agent_state)
-            + ', "agent_state": agent_state}\n'
-        )
-        code += (
-            f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME} = base64.b64encode("
-            f"pickle.dumps({self.LOCAL_SANDBOX_RESULT_VAR_NAME})"
-            ").decode('utf-8')\n"
-        )
-
-        # If we're always in a subprocess, we must rely on markers to parse out the result
-        code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_START_MARKER}')\n"
-        code += f"sys.stdout.write(str({self.LOCAL_SANDBOX_RESULT_VAR_NAME}))\n"
-        code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_END_MARKER}')\n"
-
-        return code
-
-    def _convert_param_to_value(self, param_type: str, raw_value: str) -> str:
-        """
-        Convert parameter to Python code representation based on JSON schema type.
-        """
-        if param_type == "string":
-            # Safely inject a Python string via pickle
-            value = "pickle.loads(" + str(pickle.dumps(raw_value)) + ")"
-        elif param_type in ["integer", "boolean", "number", "array", "object"]:
-            # This is simplistic. In real usage, ensure correct type-casting or sanitization.
-            value = raw_value
-        else:
-            raise TypeError(f"Unsupported type: {param_type}, raw_value={raw_value}")
-
-        return str(value)
-
-    def initialize_param(self, name: str, raw_value: str) -> str:
-        """
-        Produce code for initializing a single parameter in the generated script.
-        """
-        params = self.tool.json_schema["parameters"]["properties"]
-        spec = params.get(name)
-        if spec is None:
-            # Possibly an extra param like 'self' that we ignore
-            return ""
-
-        param_type = spec.get("type")
-        if param_type is None and spec.get("parameters"):
-            param_type = spec["parameters"].get("type")
-
-        value = self._convert_param_to_value(param_type, raw_value)
-        return f"{name} = {value}\n"
-
-    def invoke_function_call(self, inject_agent_state: bool) -> str:
-        """
-        Generate the function call code string with the appropriate arguments.
-        """
-        kwargs = []
-        for name in self.args:
-            if name in self.tool.json_schema["parameters"]["properties"]:
-                kwargs.append(name)
-
-        param_list = [f"{arg}={arg}" for arg in kwargs]
-        if inject_agent_state:
-            param_list.append("agent_state=agent_state")
-
-        params = ", ".join(param_list)
-        func_call_str = self.tool.name + "(" + params + ")"
-        return func_call_str
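The core mechanism of the removed sandbox is easy to miss in the full listing: the generated script pickles and base64-encodes the tool's return value (together with the agent state), writes it to stdout between two long UUID-derived markers, and the parent process slices the payload back out of the captured stdout and unpickles it. Below is a minimal, self-contained sketch of that round trip; the marker strings and the emit/extract helpers are illustrative stand-ins, not part of the letta package.

import base64
import pickle
import uuid

# Illustrative markers; the removed class derives its own via uuid.uuid5 over fixed names.
START = str(uuid.uuid5(uuid.NAMESPACE_DNS, "example-result-start"))
END = str(uuid.uuid5(uuid.NAMESPACE_DNS, "example-result-end"))

def emit(result) -> str:
    # What the generated script does: pickle + base64 the result dict, bracket it with markers on stdout.
    payload = base64.b64encode(pickle.dumps({"results": result, "agent_state": None})).decode("utf-8")
    return f"ordinary tool output\n{START}{payload}{END}more ordinary output\n"

def extract(stdout_text: str):
    # What the parent process does: slice the payload out from between the markers and unpickle it.
    if START not in stdout_text:
        return None, stdout_text
    start = stdout_text.index(START) + len(START)
    end = stdout_text.index(END)
    payload = stdout_text[start:end]
    remainder = stdout_text[: start - len(START)] + stdout_text[end + len(END):]
    result = pickle.loads(base64.b64decode(payload))
    return result["results"], remainder

value, logs = extract(emit({"answer": 42}))
print(value)  # {'answer': 42}
print(logs)   # the tool's ordinary stdout, with the marker span removed

This is the same slicing that parse_out_function_results_markers performs above, which is why the tool's ordinary print output is preserved in SandboxRunResult.stdout separately from the pickled return value.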