camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +1001 -205
- camel/agents/mcp_agent.py +30 -27
- camel/configs/__init__.py +6 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +3 -12
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/chunkr_reader.py +9 -0
- camel/memories/__init__.py +2 -1
- camel/memories/agent_memories.py +3 -1
- camel/memories/blocks/chat_history_block.py +21 -3
- camel/memories/records.py +88 -8
- camel/messages/base.py +127 -34
- camel/models/__init__.py +4 -0
- camel/models/amd_model.py +101 -0
- camel/models/azure_openai_model.py +0 -6
- camel/models/base_model.py +30 -0
- camel/models/cometapi_model.py +83 -0
- camel/models/model_factory.py +4 -0
- camel/models/openai_compatible_model.py +0 -6
- camel/models/openai_model.py +0 -6
- camel/models/zhipuai_model.py +61 -2
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/workforce/prompts.py +131 -50
- camel/societies/workforce/single_agent_worker.py +434 -49
- camel/societies/workforce/structured_output_handler.py +30 -18
- camel/societies/workforce/task_channel.py +43 -0
- camel/societies/workforce/utils.py +105 -12
- camel/societies/workforce/workforce.py +1322 -311
- camel/societies/workforce/workforce_logger.py +24 -5
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/oceanbase.py +10 -11
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +4 -3
- camel/toolkits/__init__.py +18 -5
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +194 -34
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/google_drive_mcp_toolkit.py +12 -31
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +79 -2
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +95 -59
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +619 -95
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +7 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +115 -219
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +1 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +405 -131
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +9 -5
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/mcp_toolkit.py +348 -348
- camel/toolkits/message_integration.py +3 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +18 -8
- camel/toolkits/notion_mcp_toolkit.py +16 -26
- camel/toolkits/origene_mcp_toolkit.py +8 -49
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/slack_toolkit.py +50 -1
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +924 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +17 -11
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/types/enums.py +124 -1
- camel/types/unified_model_type.py +5 -0
- camel/utils/commons.py +17 -0
- camel/utils/context_utils.py +804 -0
- camel/utils/mcp.py +136 -2
- camel/utils/token_counting.py +25 -17
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/METADATA +158 -59
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/RECORD +95 -76
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/terminal_toolkit.py +0 -1788
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/licenses/LICENSE +0 -0
camel/agents/mcp_agent.py
CHANGED
@@ -15,16 +15,25 @@
 import asyncio
 import json
 import platform
-import
-
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Union,
+    cast,
+)
 
-from camel.agents import ChatAgent
+from camel.agents.chat_agent import ChatAgent
 from camel.logger import get_logger
 from camel.messages import BaseMessage
-from camel.models import BaseModelBackend
+from camel.models.base_model import BaseModelBackend
+from camel.models.model_factory import ModelFactory
 from camel.prompts import TextPrompt
 from camel.responses import ChatAgentResponse
-from camel.toolkits import FunctionTool
+from camel.toolkits.function_tool import FunctionTool
 from camel.types import (
     BaseMCPRegistryConfig,
     MCPRegistryType,
@@ -33,6 +42,9 @@ from camel.types import (
     RoleType,
 )
 
+if TYPE_CHECKING:
+    from camel.toolkits.mcp_toolkit import MCPToolkit
+
 # AgentOps decorator setting
 try:
     import os
@@ -44,6 +56,8 @@ try:
 except (ImportError, AttributeError):
     from camel.utils import track_agent
 
+from camel.parsers.mcp_tool_call_parser import extract_tool_calls_from_text
+
 logger = get_logger(__name__)
 
 
@@ -168,8 +182,10 @@ class MCPAgent(ChatAgent):
             **kwargs,
         )
 
-    def _initialize_mcp_toolkit(self) -> MCPToolkit:
+    def _initialize_mcp_toolkit(self) -> "MCPToolkit":
         r"""Initialize the MCP toolkit from the provided configuration."""
+        from camel.toolkits.mcp_toolkit import MCPToolkit
+
         config_dict = {}
         for registry_config in self.registry_configs:
             config_dict.update(registry_config.get_config())
@@ -334,27 +350,14 @@
         task = f"## Task:\n {input_message}"
         input_message = str(self._text_tools) + task
         response = await super().astep(input_message, *args, **kwargs)
-
-
-
-
-
-
-
-
-
-            end_match = re.search(r'```', content[json_start:])
-            if not end_match:
-                break
-            json_end = end_match.span()[0] + json_start
-
-            tool_json = content[json_start:json_end].strip('\n')
-            try:
-                tool_calls.append(json.loads(tool_json))
-            except json.JSONDecodeError:
-                logger.warning(f"Failed to parse JSON: {tool_json}")
-                continue
-            content = content[json_end:]
+        raw_content = response.msgs[0].content if response.msgs else ""
+        content = (
+            raw_content
+            if isinstance(raw_content, str)
+            else str(raw_content)
+        )
+
+        tool_calls = extract_tool_calls_from_text(content)
 
         if not tool_calls:
             return response
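The inline fence-scanning and json.loads loop that used to live in MCPAgent.astep is now a single call to extract_tool_calls_from_text from the new camel/parsers/mcp_tool_call_parser.py module. The sketch below only illustrates that kind of extractor, based on the behaviour visible in the removed code (JSON objects inside ``` fences become tool-call dicts, unparseable blocks are skipped with a warning); it is not the actual implementation, whose signature and return type may differ.

import json
import logging
import re
from typing import Any, Dict, List

logger = logging.getLogger(__name__)


def extract_tool_calls_from_text(content: str) -> List[Dict[str, Any]]:
    """Illustrative only: collect JSON objects found in fenced blocks."""
    tool_calls: List[Dict[str, Any]] = []
    # Grab the body of every ``` fenced block, with or without a "json" tag.
    for block in re.findall(r"```(?:json)?\s*(.*?)```", content, re.DOTALL):
        try:
            parsed = json.loads(block.strip())
        except json.JSONDecodeError:
            logger.warning("Failed to parse JSON block: %s", block)
            continue
        if isinstance(parsed, dict):
            tool_calls.append(parsed)
    return tool_calls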
camel/configs/__init__.py
CHANGED
@@ -12,10 +12,12 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .aiml_config import AIML_API_PARAMS, AIMLConfig
+from .amd_config import AMD_API_PARAMS, AMDConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig
 from .cohere_config import COHERE_API_PARAMS, CohereConfig
+from .cometapi_config import COMETAPI_API_PARAMS, CometAPIConfig
 from .crynux_config import CRYNUX_API_PARAMS, CrynuxConfig
 from .deepseek_config import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from .gemini_config import Gemini_API_PARAMS, GeminiConfig
@@ -89,6 +91,8 @@ __all__ = [
     'TOGETHERAI_API_PARAMS',
     'CohereConfig',
     'COHERE_API_PARAMS',
+    'CometAPIConfig',
+    'COMETAPI_API_PARAMS',
     'YiConfig',
     'YI_API_PARAMS',
     'QwenConfig',
@@ -111,6 +115,8 @@ __all__ = [
     'SILICONFLOW_API_PARAMS',
     'AIMLConfig',
     'AIML_API_PARAMS',
+    'AMDConfig',
+    'AMD_API_PARAMS',
     'OpenRouterConfig',
     'OPENROUTER_API_PARAMS',
     'LMSTUDIO_API_PARAMS',
camel/configs/amd_config.py
ADDED
@@ -0,0 +1,70 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from pydantic import Field
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NotGiven
+
+
+class AMDConfig(BaseConfig):
+    r"""Configuration class for AMD API models.
+
+    This class defines the configuration parameters for AMD's language
+    models, including temperature, sampling parameters, and response format
+    settings.
+
+    Args:
+        stream (bool, optional): Whether to stream the response.
+            (default: :obj:`None`)
+        temperature (float, optional): Controls randomness in the response.
+            Higher values make output more random, lower values make it more
+            deterministic. Range: [0.0, 2.0]. (default: :obj:`None`)
+        top_p (float, optional): Controls diversity via nucleus sampling.
+            Range: [0.0, 1.0]. (default: :obj:`None`)
+        presence_penalty (float, optional): Penalizes new tokens based on
+            whether they appear in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        frequency_penalty (float, optional): Penalizes new tokens based on
+            their frequency in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        max_tokens (Union[int, NotGiven], optional): Maximum number of tokens
+            to generate. If not provided, model will use its default maximum.
+            (default: :obj:`None`)
+        seed (Optional[int], optional): Random seed for deterministic sampling.
+            (default: :obj:`None`)
+        tools (Optional[List[Dict]], optional): List of tools available to the
+            model. This includes tools such as a text editor, a calculator, or
+            a search engine. (default: :obj:`None`)
+        tool_choice (Optional[str], optional): Tool choice configuration.
+            (default: :obj:`None`)
+        stop (Optional[List[str]], optional): List of stop sequences.
+            (default: :obj:`None`)
+    """
+
+    stream: Optional[bool] = Field(default=None)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    max_tokens: Optional[Union[int, NotGiven]] = Field(default=None)
+    seed: Optional[int] = Field(default=None)
+    tool_choice: Optional[str] = Field(default=None)
+    stop: Optional[List[str]] = Field(default=None)
+
+
+AMD_API_PARAMS = {param for param in AMDConfig.model_fields.keys()}
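AMDConfig follows the same BaseConfig pattern as the other provider configs: optional pydantic fields defaulting to None, plus a module-level AMD_API_PARAMS set derived from the field names. A minimal usage sketch (assuming pydantic v2's model_dump(), which the use of model_fields above implies; how ModelFactory consumes the config is not shown in this diff):

from camel.configs import AMD_API_PARAMS, AMDConfig

config = AMDConfig(temperature=0.2, max_tokens=1024, stop=["</answer>"])

# Drop unset fields before forwarding the kwargs to an API client.
payload = {k: v for k, v in config.model_dump().items() if v is not None}

# AMD_API_PARAMS is simply the set of accepted parameter names.
assert set(payload) <= AMD_API_PARAMS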
camel/configs/cometapi_config.py
ADDED
@@ -0,0 +1,104 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class CometAPIConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using CometAPI's
+    OpenAI-compatible interface.
+
+    Reference: https://api.cometapi.com/v1/
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help CometAPI to monitor and detect abuse.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
+    user: Optional[str] = None
+    tools: Optional[list] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+COMETAPI_API_PARAMS = {param for param in CometAPIConfig.model_fields.keys()}
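CometAPIConfig mirrors the OpenAI-style parameter set, and COMETAPI_API_PARAMS exposes the accepted parameter names. A hedged sketch of how such a set is typically used to validate caller kwargs before building the config (the helper below is hypothetical, not part of the package):

from camel.configs import COMETAPI_API_PARAMS, CometAPIConfig


def build_cometapi_config(**kwargs) -> CometAPIConfig:
    """Hypothetical helper: reject parameters CometAPIConfig does not declare."""
    unknown = set(kwargs) - COMETAPI_API_PARAMS
    if unknown:
        raise ValueError(f"Unsupported CometAPI parameters: {sorted(unknown)}")
    return CometAPIConfig(**kwargs)


config = build_cometapi_config(
    temperature=0.7,
    response_format={"type": "json_object"},
    tool_choice="auto",
)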
camel/data_collectors/alpaca_collector.py
CHANGED
@@ -70,16 +70,25 @@ class AlpacaDataCollector(BaseDataCollector):
         if not history:
             raise ValueError("No data collected.")
 
-        #
-
-
-
+        # Filter out system and tool-related messages
+        # Keep only user and final assistant messages
+        filtered_history = []
+        for msg in history:
+            if msg.role == "user":
+                filtered_history.append(msg)
+            elif msg.role == "assistant" and msg.message:
+                # Keep assistant messages with actual content
+                # (skip empty ones that only contain tool calls)
+                filtered_history.append(msg)
+
+        # Validate filtered history
+        if len(filtered_history) != 2:
             raise ValueError(
                 f"AlpacaDataCollector only supports one message pair, but "
-                f"got {len(
+                f"got {len(filtered_history)} after filtering tool messages"
             )
 
-        input_message, output_message =
+        input_message, output_message = filtered_history
         instruction = (
             self.system_message.content if self.system_message else ""
         ) + str(input_message.message)
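The effect of the new filtering step in AlpacaDataCollector is that a history interleaved with tool traffic still collapses to exactly one user/assistant pair. A small self-contained sketch of that logic (the _Record class below is a stand-in; only the .role and .message attributes used in the diff are modeled):

from dataclasses import dataclass


@dataclass
class _Record:
    # Stand-in for the collector's message records; only .role and .message
    # (the attributes the filtering code relies on) are modeled here.
    role: str
    message: str


history = [
    _Record("user", "What is 2 + 2?"),
    _Record("assistant", ""),            # empty: carried only a tool call
    _Record("tool", "4"),                # tool output, neither user nor assistant
    _Record("assistant", "2 + 2 = 4."),  # final answer with real content
]

filtered = [
    m for m in history
    if m.role == "user" or (m.role == "assistant" and m.message)
]
assert [m.role for m in filtered] == ["user", "assistant"]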
camel/environments/tic_tac_toe.py
CHANGED
@@ -483,7 +483,7 @@ class TicTacToeEnv(MultiStepEnv):
         # Check all win combinations.
         for a, b, c in TicTacToeEnv.WIN_COMBINATIONS:
             if board[a] != " " and board[a] == board[b] == board[c]:
-                return board[a]
+                return board[a]  # type: ignore[return-value]
         # Check for draw.
         if all(cell != " " for cell in board):
             return "draw"
camel/interpreters/__init__.py
CHANGED
@@ -18,6 +18,7 @@ from .e2b_interpreter import E2BInterpreter
 from .internal_python_interpreter import InternalPythonInterpreter
 from .interpreter_error import InterpreterError
 from .ipython_interpreter import JupyterKernelInterpreter
+from .microsandbox_interpreter import MicrosandboxInterpreter
 from .subprocess_interpreter import SubprocessInterpreter
 
 __all__ = [
@@ -28,4 +29,5 @@ __all__ = [
     'DockerInterpreter',
     'JupyterKernelInterpreter',
     'E2BInterpreter',
+    'MicrosandboxInterpreter',
 ]
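With the re-export in place, the new MicrosandboxInterpreter can be imported from the package root like the existing interpreters. The snippet below is a sketch only: it assumes a no-argument constructor and the run(code, code_type) interface shared by the other interpreters, neither of which is shown in this diff.

from camel.interpreters import MicrosandboxInterpreter

# Assumption: default construction works and the interpreter exposes the
# common run(code, code_type) method; see microsandbox_interpreter.py for
# the actual options.
interpreter = MicrosandboxInterpreter()
print(interpreter.run("print('hello from the sandbox')", "python"))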
camel/interpreters/docker/Dockerfile
CHANGED
@@ -1,11 +1,8 @@
 # syntax=docker/dockerfile:1
-
 FROM ubuntu:22.04
 
-# Set environment variable to avoid interactive prompts
 ENV DEBIAN_FRONTEND=noninteractive
 
-# Update and install base utilities
 RUN apt-get update && apt-get install -y \
     build-essential \
     software-properties-common \
@@ -20,7 +17,6 @@ RUN apt-get update && apt-get install -y \
     && apt-get clean \
     && apt-get autoremove -y
 
-# Install Python 3.10 and its dependencies
 RUN add-apt-repository ppa:deadsnakes/ppa && \
     apt-get update && \
     apt-get install -y \
@@ -34,32 +30,27 @@ RUN add-apt-repository ppa:deadsnakes/ppa && \
     && apt-get clean \
     && apt-get autoremove -y
 
-# Install R
 RUN apt-get update && \
     apt-get install -y r-base && \
     rm -rf /var/lib/apt/lists/* && \
     apt-get clean && \
     apt-get autoremove -y
 
-# Install NodeJS 22.x
 RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && \
     apt-get install -y nodejs && \
     rm -rf /var/lib/apt/lists/* && \
     apt-get clean && \
     apt-get autoremove -y
 
-# Install Poetry
 RUN curl -fsSL https://install.python-poetry.org | python3.10 - && \
     ln -s ~/.local/bin/poetry /usr/local/bin/poetry
 
-# Upgrade pip and install base Python packages
 RUN python3.10 -m pip install --upgrade pip setuptools wheel
-
-# Install uv using pip instead of the shell script
 RUN pip install uv
 
-
+RUN groupadd -r devuser && useradd -r -m -g devuser devuser
 WORKDIR /workspace
+RUN chown -R devuser:devuser /workspace
+USER devuser
 
-# Set default shell
 CMD ["/bin/bash"]