letta-nightly 0.7.5.dev20250428110034__py3-none-any.whl → 0.7.6.dev20250429062643__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- letta/__init__.py +1 -1
- letta/agents/base_agent.py +1 -1
- letta/agents/ephemeral_memory_agent.py +353 -43
- letta/agents/voice_agent.py +196 -62
- letta/constants.py +2 -0
- letta/helpers/datetime_helpers.py +7 -0
- letta/interfaces/openai_chat_completions_streaming_interface.py +16 -12
- letta/llm_api/google_ai_client.py +4 -0
- letta/llm_api/llm_api_tools.py +5 -2
- letta/llm_api/openai.py +2 -1
- letta/llm_api/openai_client.py +3 -2
- letta/schemas/llm_config.py +5 -1
- letta/schemas/openai/chat_completion_request.py +1 -0
- letta/schemas/providers.py +4 -3
- letta/schemas/sandbox_config.py +4 -4
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +4 -10
- letta/server/rest_api/routers/v1/voice.py +8 -18
- letta/server/rest_api/utils.py +26 -20
- letta/server/server.py +67 -26
- letta/services/helpers/agent_manager_helper.py +2 -2
- letta/services/helpers/tool_execution_helper.py +30 -3
- letta/services/summarizer/summarizer.py +121 -54
- letta/services/tool_executor/tool_execution_sandbox.py +13 -9
- letta/services/tool_sandbox/local_sandbox.py +4 -4
- letta/services/user_manager.py +5 -2
- letta/settings.py +4 -2
- letta/system.py +0 -1
- letta/tracing.py +1 -0
- {letta_nightly-0.7.5.dev20250428110034.dist-info → letta_nightly-0.7.6.dev20250429062643.dist-info}/METADATA +1 -1
- {letta_nightly-0.7.5.dev20250428110034.dist-info → letta_nightly-0.7.6.dev20250429062643.dist-info}/RECORD +33 -33
- {letta_nightly-0.7.5.dev20250428110034.dist-info → letta_nightly-0.7.6.dev20250429062643.dist-info}/LICENSE +0 -0
- {letta_nightly-0.7.5.dev20250428110034.dist-info → letta_nightly-0.7.6.dev20250429062643.dist-info}/WHEEL +0 -0
- {letta_nightly-0.7.5.dev20250428110034.dist-info → letta_nightly-0.7.6.dev20250429062643.dist-info}/entry_points.txt +0 -0
letta/server/rest_api/routers/openai/chat_completions/chat_completions.py
CHANGED
@@ -6,14 +6,14 @@ from fastapi.responses import StreamingResponse
 from openai.types.chat.completion_create_params import CompletionCreateParams

 from letta.agent import Agent
-from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
+from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, LETTA_MODEL_ENDPOINT
 from letta.log import get_logger
 from letta.schemas.message import Message, MessageCreate
 from letta.schemas.user import User
 from letta.server.rest_api.chat_completions_interface import ChatCompletionsStreamingInterface

 # TODO this belongs in a controller!
-from letta.server.rest_api.utils import get_letta_server,
+from letta.server.rest_api.utils import get_letta_server, get_user_message_from_chat_completions_request, sse_async_generator

 if TYPE_CHECKING:
     from letta.server.server import SyncServer
@@ -43,10 +43,6 @@ async def create_chat_completions(
     user_id: Optional[str] = Header(None, alias="user_id"),
 ):
     # Validate and process fields
-    messages = get_messages_from_completion_request(completion_request)
-    input_message = messages[-1]
-
-    # Process remaining fields
     if not completion_request["stream"]:
         raise HTTPException(status_code=400, detail="Must be streaming request: `stream` was set to `False` in the request.")

@@ -54,7 +50,7 @@ async def create_chat_completions(

     letta_agent = server.load_agent(agent_id=agent_id, actor=actor)
     llm_config = letta_agent.agent_state.llm_config
-    if llm_config.model_endpoint_type != "openai" or
+    if llm_config.model_endpoint_type != "openai" or llm_config.model_endpoint == LETTA_MODEL_ENDPOINT:
         error_msg = f"You can only use models with type 'openai' for chat completions. This agent {agent_id} has llm_config: \n{llm_config.model_dump_json(indent=4)}"
         logger.error(error_msg)
         raise HTTPException(status_code=400, detail=error_msg)
@@ -65,13 +61,11 @@ async def create_chat_completions(
         logger.warning(f"Defaulting to {llm_config.model}...")
         logger.warning(warning_msg)

-    logger.info(f"Received input message: {input_message}")
-
    return await send_message_to_agent_chat_completions(
        server=server,
        letta_agent=letta_agent,
        actor=actor,
-        messages=
+        messages=get_user_message_from_chat_completions_request(completion_request),
    )


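Taken together, these hunks drop the inline parsing of `completion_request["messages"]` in favor of the `get_user_message_from_chat_completions_request` helper (added in `letta/server/rest_api/utils.py` below), and extend the endpoint check so agents pointed at the hosted `LETTA_MODEL_ENDPOINT` are rejected as well. A minimal sketch of the resulting validation gate; the endpoint constant is stubbed here because its literal value is not part of this diff:

```python
from fastapi import HTTPException

# Placeholder value; the real constant is defined in letta/constants.py.
LETTA_MODEL_ENDPOINT = "https://example.invalid/letta"


def validate_chat_completions_request(completion_request: dict, llm_config) -> None:
    # The route only supports streaming responses.
    if not completion_request.get("stream"):
        raise HTTPException(status_code=400, detail="Must be streaming request: `stream` was set to `False` in the request.")

    # Only OpenAI-type backends are allowed, and the hosted Letta endpoint is excluded.
    if llm_config.model_endpoint_type != "openai" or llm_config.model_endpoint == LETTA_MODEL_ENDPOINT:
        raise HTTPException(status_code=400, detail="You can only use models with type 'openai' for chat completions.")
```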
letta/server/rest_api/routers/v1/voice.py
CHANGED
@@ -1,6 +1,5 @@
 from typing import TYPE_CHECKING, Optional

-import httpx
 import openai
 from fastapi import APIRouter, Body, Depends, Header
 from fastapi.responses import StreamingResponse
@@ -8,8 +7,7 @@ from openai.types.chat.completion_create_params import CompletionCreateParams

 from letta.agents.voice_agent import VoiceAgent
 from letta.log import get_logger
-from letta.
-from letta.server.rest_api.utils import get_letta_server, get_messages_from_completion_request
+from letta.server.rest_api.utils import get_letta_server, get_user_message_from_chat_completions_request
 from letta.settings import model_settings

 if TYPE_CHECKING:
@@ -42,22 +40,11 @@ async def create_voice_chat_completions(
 ):
     actor = server.user_manager.get_user_or_default(user_id=user_id)

-    # Also parse the user's new input
-    input_message = UserMessage(**get_messages_from_completion_request(completion_request)[-1])
-
     # Create OpenAI async client
     client = openai.AsyncClient(
         api_key=model_settings.openai_api_key,
         max_retries=0,
-        http_client=
-            timeout=httpx.Timeout(connect=15.0, read=30.0, write=15.0, pool=15.0),
-            follow_redirects=True,
-            limits=httpx.Limits(
-                max_connections=50,
-                max_keepalive_connections=50,
-                keepalive_expiry=120,
-            ),
-        ),
+        http_client=server.httpx_client,
     )

     # Instantiate our LowLatencyAgent
@@ -67,10 +54,13 @@ async def create_voice_chat_completions(
         message_manager=server.message_manager,
         agent_manager=server.agent_manager,
         block_manager=server.block_manager,
+        passage_manager=server.passage_manager,
         actor=actor,
-        message_buffer_limit=
-        message_buffer_min=
+        message_buffer_limit=40,
+        message_buffer_min=15,
     )

     # Return the streaming generator
-    return StreamingResponse(
+    return StreamingResponse(
+        agent.step_stream(input_messages=get_user_message_from_chat_completions_request(completion_request)), media_type="text/event-stream"
+    )
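The route no longer builds its own `httpx.AsyncClient` per request; it reuses the long-lived client that `SyncServer` now constructs (see the `server.py` diff below), passed to the OpenAI SDK via its `http_client` parameter so connection pools are shared across requests. A minimal sketch of that pattern, with pool settings copied from the new server code:

```python
import httpx
import openai

# One long-lived HTTP client per process (values mirror SyncServer.__init__ in this diff).
shared_http_client = httpx.AsyncClient(
    timeout=httpx.Timeout(connect=10.0, read=20.0, write=10.0, pool=10.0),
    follow_redirects=True,
    limits=httpx.Limits(max_connections=100, max_keepalive_connections=80, keepalive_expiry=300),
)

# Per-request OpenAI clients are then cheap: they ride on the shared connection pool.
client = openai.AsyncClient(api_key="sk-...", max_retries=0, http_client=shared_http_client)
```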
letta/server/rest_api/utils.py
CHANGED
@@ -210,19 +210,20 @@ def create_letta_messages_from_llm_response(

     # TODO: Use ToolReturnContent instead of TextContent
     # TODO: This helps preserve ordering
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if function_response:
+        tool_message = Message(
+            role=MessageRole.tool,
+            content=[TextContent(text=package_function_response(function_call_success, function_response))],
+            organization_id=actor.organization_id,
+            agent_id=agent_id,
+            model=model,
+            tool_calls=[],
+            tool_call_id=tool_call_id,
+            created_at=get_utc_time(),
+        )
+        if pre_computed_tool_message_id:
+            tool_message.id = pre_computed_tool_message_id
+        messages.append(tool_message)

     if add_heartbeat_request_system_message:
         heartbeat_system_message = create_heartbeat_system_message(
@@ -278,7 +279,7 @@ def create_assistant_messages_from_openai_response(
     )


-def convert_letta_messages_to_openai(messages: List[Message]) -> List[dict]:
+def convert_in_context_letta_messages_to_openai(in_context_messages: List[Message], exclude_system_messages: bool = False) -> List[dict]:
    """
    Flattens Letta's messages (with system, user, assistant, tool roles, etc.)
    into standard OpenAI chat messages (system, user, assistant).
@@ -289,10 +290,15 @@ def convert_letta_messages_to_openai(messages: List[Message]) -> List[dict]:
    3. User messages might store actual text inside JSON => parse that into content
    4. System => pass through as normal
    """
+    # Always include the system prompt
+    # TODO: This is brittle
+    openai_messages = [in_context_messages[0].to_openai_dict()]

-
+    for msg in in_context_messages[1:]:
+        if msg.role == MessageRole.system and exclude_system_messages:
+            # Skip if exclude_system_messages is set to True
+            continue

-    for msg in messages:
        # 1. Assistant + 'send_message' tool_calls => flatten
        if msg.role == MessageRole.assistant and msg.tool_calls:
            # Find any 'send_message' tool_calls
@@ -350,15 +356,13 @@ def convert_letta_messages_to_openai(messages: List[Message]) -> List[dict]:
        except json.JSONDecodeError:
            pass  # It's not JSON, leave as-is

-        # 4. System is left as-is (or any other role that doesn't need special handling)
-        #
        # Finally, convert to dict using your existing method
        openai_messages.append(msg.to_openai_dict())

    return openai_messages


-def
+def get_user_message_from_chat_completions_request(completion_request: CompletionCreateParams) -> List[MessageCreate]:
    try:
        messages = list(cast(Iterable[ChatCompletionMessageParam], completion_request["messages"]))
    except KeyError:
@@ -380,4 +384,6 @@ def get_messages_from_completion_request(completion_request: CompletionCreatePar
        logger.error(f"The input message does not have valid content: {input_message}")
        raise HTTPException(status_code=400, detail="'messages[-1].content' must be a 'string'")

-
+    for message in reversed(messages):
+        if message["role"] == "user":
+            return [MessageCreate(role=MessageRole.user, content=[TextContent(text=message["content"])])]
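The renamed helper no longer hands back the raw OpenAI message list; it scans the request's messages from newest to oldest and wraps the most recent user turn in Letta's `MessageCreate` schema. A rough illustration of that selection logic, using plain dicts in place of the Pydantic models:

```python
from typing import List, Optional


def latest_user_message(messages: List[dict]) -> Optional[dict]:
    """Return the newest message whose role is "user", mirroring the reversed() scan above."""
    for message in reversed(messages):
        if message["role"] == "user":
            return message
    return None


request_messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What's on my calendar today?"},
    {"role": "assistant", "content": "You have two meetings."},
    {"role": "user", "content": "Move the second one to Friday."},
]
print(latest_user_message(request_messages))
# {'role': 'user', 'content': 'Move the second one to Friday.'}
```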
letta/server/server.py
CHANGED
@@ -1,4 +1,3 @@
-# inspecting tools
 import asyncio
 import json
 import os
@@ -6,8 +5,10 @@ import traceback
 import warnings
 from abc import abstractmethod
 from datetime import datetime
+from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union

+import httpx
 from anthropic import AsyncAnthropic
 from composio.client import Composio
 from composio.client.collections import ActionModel, AppModel
@@ -19,6 +20,7 @@ import letta.server.utils as server_utils
 import letta.system as system
 from letta.agent import Agent, save_agent
 from letta.config import LettaConfig
+from letta.constants import LETTA_TOOL_EXECUTION_DIR
 from letta.data_sources.connectors import DataConnector, load_data
 from letta.errors import HandleNotFoundError
 from letta.functions.mcp_client.base_client import BaseMCPClient
@@ -70,7 +72,7 @@ from letta.schemas.providers import (
     VLLMCompletionsProvider,
     XAIProvider,
 )
-from letta.schemas.sandbox_config import SandboxType
+from letta.schemas.sandbox_config import LocalSandboxConfig, SandboxConfigCreate, SandboxType
 from letta.schemas.source import Source
 from letta.schemas.tool import Tool
 from letta.schemas.usage import LettaUsageStatistics
@@ -81,6 +83,7 @@ from letta.server.rest_api.utils import sse_async_generator
 from letta.services.agent_manager import AgentManager
 from letta.services.block_manager import BlockManager
 from letta.services.group_manager import GroupManager
+from letta.services.helpers.tool_execution_helper import prepare_local_sandbox
 from letta.services.identity_manager import IdentityManager
 from letta.services.job_manager import JobManager
 from letta.services.llm_batch_manager import LLMBatchManager
@@ -211,6 +214,11 @@ class SyncServer(Server):
         self.group_manager = GroupManager()
         self.batch_manager = LLMBatchManager()

+        # A resusable httpx client
+        timeout = httpx.Timeout(connect=10.0, read=20.0, write=10.0, pool=10.0)
+        limits = httpx.Limits(max_connections=100, max_keepalive_connections=80, keepalive_expiry=300)
+        self.httpx_client = httpx.AsyncClient(timeout=timeout, follow_redirects=True, limits=limits)
+
         # Make default user and org
         if init_with_default_org_and_user:
             self.default_org = self.organization_manager.create_default_organization()
@@ -229,6 +237,36 @@ class SyncServer(Server):
                 actor=self.default_user,
             )

+        # For OSS users, create a local sandbox config
+        oss_default_user = self.user_manager.get_default_user()
+        use_venv = False if not tool_settings.tool_exec_venv_name else True
+        venv_name = tool_settings.tool_exec_venv_name or "venv"
+        tool_dir = tool_settings.tool_exec_dir or LETTA_TOOL_EXECUTION_DIR
+
+        venv_dir = Path(tool_dir) / venv_name
+        if not Path(tool_dir).is_dir():
+            logger.error(f"Provided LETTA_TOOL_SANDBOX_DIR is not a valid directory: {tool_dir}")
+        else:
+            if tool_settings.tool_exec_venv_name and not venv_dir.is_dir():
+                logger.warning(
+                    f"Provided LETTA_TOOL_SANDBOX_VENV_NAME is not a valid venv ({venv_dir}), one will be created for you during tool execution."
+                )
+
+            sandbox_config_create = SandboxConfigCreate(
+                config=LocalSandboxConfig(sandbox_dir=tool_settings.tool_exec_dir, use_venv=use_venv, venv_name=venv_name)
+            )
+            sandbox_config = self.sandbox_config_manager.create_or_update_sandbox_config(
+                sandbox_config_create=sandbox_config_create, actor=oss_default_user
+            )
+            logger.info(f"Successfully created default local sandbox config:\n{sandbox_config.get_local_config().model_dump()}")
+
+            if use_venv and tool_settings.tool_exec_autoreload_venv:
+                prepare_local_sandbox(
+                    sandbox_config.get_local_config(),
+                    env=os.environ.copy(),
+                    force_recreate=True,
+                )
+
         # collect providers (always has Letta as a default)
         self._enabled_providers: List[Provider] = [LettaProvider()]
         if model_settings.openai_api_key:
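On startup the server now derives a default local sandbox configuration from the tool-execution settings and registers it through `SandboxConfigCreate`. The derivation itself is plain Python; a standalone sketch of just that decision logic, with the default directory stubbed since the value of `LETTA_TOOL_EXECUTION_DIR` is not shown in this diff:

```python
from typing import Optional, Tuple

# Placeholder; the real constant lives in letta/constants.py.
LETTA_TOOL_EXECUTION_DIR = "~/.letta/tool_execution_dir"


def derive_sandbox_settings(tool_exec_venv_name: Optional[str], tool_exec_dir: Optional[str]) -> Tuple[bool, str, str]:
    """Mirror the use_venv / venv_name / tool_dir derivation in SyncServer.__init__."""
    use_venv = bool(tool_exec_venv_name)       # a venv is only used when a name is configured
    venv_name = tool_exec_venv_name or "venv"  # otherwise fall back to a conventional name
    tool_dir = tool_exec_dir or LETTA_TOOL_EXECUTION_DIR
    return use_venv, venv_name, tool_dir


print(derive_sandbox_settings(None, None))         # (False, 'venv', '~/.letta/tool_execution_dir')
print(derive_sandbox_settings("my-venv", "/srv"))  # (True, 'my-venv', '/srv')
```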
@@ -325,29 +363,29 @@ class SyncServer(Server):

         # For MCP
         """Initialize the MCP clients (there may be multiple)"""
-        mcp_server_configs = self.get_mcp_servers()
+        # mcp_server_configs = self.get_mcp_servers()
         self.mcp_clients: Dict[str, BaseMCPClient] = {}
-
-        for server_name, server_config in mcp_server_configs.items():
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # Print out the tools that are connected
-        for server_name, client in self.mcp_clients.items():
-
-
-
-
+        #
+        # for server_name, server_config in mcp_server_configs.items():
+        # if server_config.type == MCPServerType.SSE:
+        # self.mcp_clients[server_name] = SSEMCPClient(server_config)
+        # elif server_config.type == MCPServerType.STDIO:
+        # self.mcp_clients[server_name] = StdioMCPClient(server_config)
+        # else:
+        # raise ValueError(f"Invalid MCP server config: {server_config}")
+        #
+        # try:
+        # self.mcp_clients[server_name].connect_to_server()
+        # except Exception as e:
+        # logger.error(e)
+        # self.mcp_clients.pop(server_name)
+        #
+        # # Print out the tools that are connected
+        # for server_name, client in self.mcp_clients.items():
+        # logger.info(f"Attempting to fetch tools from MCP server: {server_name}")
+        # mcp_tools = client.list_tools()
+        # logger.info(f"MCP tools connected: {', '.join([t.name for t in mcp_tools])}")
+        # logger.debug(f"MCP tools: {', '.join([str(t) for t in mcp_tools])}")

         # TODO: Remove these in memory caches
         self._llm_config_cache = {}
@@ -1181,6 +1219,8 @@ class SyncServer(Server):
             llm_config.max_reasoning_tokens = max_reasoning_tokens
         if enable_reasoner is not None:
             llm_config.enable_reasoner = enable_reasoner
+            if enable_reasoner and llm_config.model_endpoint_type == "anthropic":
+                llm_config.put_inner_thoughts_in_kwargs = False

         return llm_config

@@ -1562,7 +1602,8 @@ class SyncServer(Server):
         # supports_token_streaming = ["openai", "anthropic", "xai", "deepseek"]
         supports_token_streaming = ["openai", "anthropic", "deepseek"]  # TODO re-enable xAI once streaming is patched
         if stream_tokens and (
-            llm_config.model_endpoint_type not in supports_token_streaming
+            llm_config.model_endpoint_type not in supports_token_streaming
+            or llm_config.model_endpoint == constants.LETTA_MODEL_ENDPOINT
         ):
             warnings.warn(
                 f"Token streaming is only supported for models with type {' or '.join(supports_token_streaming)} in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False."
@@ -1685,7 +1726,7 @@ class SyncServer(Server):
         llm_config = letta_multi_agent.agent_state.llm_config
         supports_token_streaming = ["openai", "anthropic", "deepseek"]
         if stream_tokens and (
-            llm_config.model_endpoint_type not in supports_token_streaming or
+            llm_config.model_endpoint_type not in supports_token_streaming or llm_config.model_endpoint == constants.LETTA_MODEL_ENDPOINT
         ):
             warnings.warn(
                 f"Token streaming is only supported for models with type {' or '.join(supports_token_streaming)} in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False."
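Both streaming entry points (single-agent and multi-agent) now apply the same gate before honoring `stream_tokens`. Restated as a small pure function for clarity; this helper does not exist in the codebase and the endpoint constant is a stand-in:

```python
SUPPORTS_TOKEN_STREAMING = ["openai", "anthropic", "deepseek"]  # xAI temporarily excluded, per the TODO above
LETTA_MODEL_ENDPOINT = "https://example.invalid/letta"  # placeholder for constants.LETTA_MODEL_ENDPOINT


def token_streaming_allowed(model_endpoint_type: str, model_endpoint: str) -> bool:
    """True when token streaming can be honored for this llm_config."""
    return model_endpoint_type in SUPPORTS_TOKEN_STREAMING and model_endpoint != LETTA_MODEL_ENDPOINT


print(token_streaming_allowed("openai", "https://api.openai.com/v1"))  # True
print(token_streaming_allowed("openai", LETTA_MODEL_ENDPOINT))         # False
```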
letta/services/helpers/agent_manager_helper.py
CHANGED
@@ -6,7 +6,7 @@ from sqlalchemy import and_, asc, desc, func, literal, or_, select
 from letta import system
 from letta.constants import IN_CONTEXT_MEMORY_KEYWORD, STRUCTURED_OUTPUT_MODELS
 from letta.helpers import ToolRulesSolver
-from letta.helpers.datetime_helpers import get_local_time
+from letta.helpers.datetime_helpers import get_local_time, get_local_time_fast
 from letta.orm.agent import Agent as AgentModel
 from letta.orm.agents_tags import AgentsTags
 from letta.orm.errors import NoResultFound
@@ -119,7 +119,7 @@ def compile_memory_metadata_block(
     # Create a metadata block of info so the agent knows about the metadata of out-of-context memories
     memory_metadata_block = "\n".join(
         [
-            f"### Memory [last modified: {timestamp_str}]",
+            f"### Current Time: {get_local_time_fast()}" f"### Memory [last modified: {timestamp_str}]",
             f"{previous_message_count} previous messages between you and the user are stored in recall memory (use functions to access them)",
             f"{archival_memory_size} total memories you created are stored in archival memory (use functions to access them)",
             (
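Worth noting when reading the changed line: the two adjacent f-strings are joined by Python's implicit string-literal concatenation, so the compiled header comes out as a single line with no separator between the two `###` sections. A quick illustration with example values:

```python
timestamp_str = "2025-04-28 10:00:00 PM PDT-0700"  # example value
current_time = "2025-04-29 06:26:43 AM PDT-0700"   # stand-in for get_local_time_fast()

# Adjacent string literals are concatenated with nothing in between.
header = f"### Current Time: {current_time}" f"### Memory [last modified: {timestamp_str}]"
print(header)
# ### Current Time: 2025-04-29 06:26:43 AM PDT-0700### Memory [last modified: 2025-04-28 10:00:00 PM PDT-0700]
```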
letta/services/helpers/tool_execution_helper.py
CHANGED
@@ -24,7 +24,7 @@ def find_python_executable(local_configs: LocalSandboxConfig) -> str:
     """
     sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)  # Expand tilde

-    if not local_configs.
+    if not local_configs.use_venv:
         return "python.exe" if platform.system().lower().startswith("win") else "python3"

     venv_path = os.path.join(sandbox_dir, local_configs.venv_name)
@@ -96,7 +96,7 @@ def install_pip_requirements_for_sandbox(
     python_exec = find_python_executable(local_configs)

     # If using a virtual environment, upgrade pip before installing dependencies.
-    if local_configs.
+    if local_configs.use_venv:
         ensure_pip_is_up_to_date(python_exec, env=env)

     # Construct package list
@@ -108,7 +108,7 @@ def install_pip_requirements_for_sandbox(
         pip_cmd.append("--upgrade")
     pip_cmd += packages

-    if user_install_if_no_venv and not local_configs.
+    if user_install_if_no_venv and not local_configs.use_venv:
         pip_cmd.append("--user")

     run_subprocess(pip_cmd, env=env, fail_msg=f"Failed to install packages: {', '.join(packages)}")
@@ -171,3 +171,30 @@ def add_imports_and_pydantic_schemas_for_args(args_json_schema: dict) -> str:
     )
     result = parser.parse()
     return result
+
+
+def prepare_local_sandbox(
+    local_cfg: LocalSandboxConfig,
+    env: Dict[str, str],
+    force_recreate: bool = False,
+) -> None:
+    """
+    Ensure the sandbox virtual-env is freshly created and that
+    requirements are installed. Uses your existing helpers.
+    """
+    sandbox_dir = os.path.expanduser(local_cfg.sandbox_dir)
+    venv_path = os.path.join(sandbox_dir, local_cfg.venv_name)
+
+    create_venv_for_local_sandbox(
+        sandbox_dir_path=sandbox_dir,
+        venv_path=venv_path,
+        env=env,
+        force_recreate=force_recreate,
+    )
+
+    install_pip_requirements_for_sandbox(
+        local_cfg,
+        upgrade=True,
+        user_install_if_no_venv=False,
+        env=env,
+    )
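As the `server.py` hunk earlier shows, `prepare_local_sandbox` is called once at startup when a venv is configured and `tool_exec_autoreload_venv` is enabled. A hedged usage sketch; the `LocalSandboxConfig` values here are illustrative rather than taken from the diff:

```python
import os

from letta.schemas.sandbox_config import LocalSandboxConfig
from letta.services.helpers.tool_execution_helper import prepare_local_sandbox

# Illustrative values; a real deployment derives these from tool_settings.
local_cfg = LocalSandboxConfig(sandbox_dir="~/letta-tools", use_venv=True, venv_name="venv")

# Recreate the venv and reinstall pip requirements before any tool calls are served.
prepare_local_sandbox(local_cfg, env=os.environ.copy(), force_recreate=True)
```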