camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +151 -37
- camel/configs/__init__.py +3 -0
- camel/configs/amd_config.py +70 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/models/__init__.py +2 -0
- camel/models/amd_model.py +101 -0
- camel/models/model_factory.py +2 -0
- camel/models/openai_model.py +0 -6
- camel/runtimes/daytona_runtime.py +11 -12
- camel/toolkits/__init__.py +5 -3
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +8 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +33 -14
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +135 -40
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +2 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +43 -207
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +231 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +241 -56
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +5 -1
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/mcp_toolkit.py +39 -14
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/terminal_toolkit.py +12 -2
- camel/toolkits/video_analysis_toolkit.py +16 -10
- camel/types/enums.py +11 -0
- camel/utils/commons.py +2 -0
- camel/utils/mcp.py +136 -2
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/METADATA +5 -3
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/RECORD +38 -31
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a0.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -233,7 +233,7 @@ class StreamingChatAgentResponse:
         r"""Make this object iterable."""
         if self._consumed:
             # If already consumed, iterate over stored responses
-
+            yield from self._responses
         else:
             # If not consumed, consume and yield
             try:

@@ -2665,12 +2665,6 @@ class ChatAgent(BaseAgent):
         stream_completed = False

         for chunk in stream:
-            # Update token usage if available
-            if chunk.usage:
-                self._update_token_usage_tracker(
-                    step_token_usage, safe_model_dump(chunk.usage)
-                )
-
             # Process chunk delta
             if chunk.choices and len(chunk.choices) > 0:
                 choice = chunk.choices[0]

@@ -2703,12 +2697,6 @@ class ChatAgent(BaseAgent):
                     # If we have complete tool calls, execute them with
                     # sync status updates
                     if accumulated_tool_calls:
-                        # Record assistant message with tool calls first
-                        self._record_assistant_tool_calls_message(
-                            accumulated_tool_calls,
-                            content_accumulator.get_full_content(),
-                        )
-
                         # Execute tools synchronously with
                         # optimized status updates
                         for (

@@ -2741,7 +2729,49 @@ class ChatAgent(BaseAgent):
                         )

                         self.record_message(final_message)
-
+            elif chunk.usage and not chunk.choices:
+                # Handle final chunk with usage but empty choices
+                # This happens when stream_options={"include_usage": True}
+                # Update the final usage from this chunk
+                self._update_token_usage_tracker(
+                    step_token_usage, safe_model_dump(chunk.usage)
+                )
+
+                # Create final response with final usage
+                final_content = content_accumulator.get_full_content()
+                if final_content.strip():
+                    final_message = BaseMessage(
+                        role_name=self.role_name,
+                        role_type=self.role_type,
+                        meta_dict={},
+                        content=final_content,
+                    )
+
+                    if response_format:
+                        self._try_format_message(
+                            final_message, response_format
+                        )
+
+                    # Create final response with final usage (not partial)
+                    final_response = ChatAgentResponse(
+                        msgs=[final_message],
+                        terminated=False,
+                        info={
+                            "id": getattr(chunk, 'id', ''),
+                            "usage": step_token_usage.copy(),
+                            "finish_reasons": ["stop"],
+                            "num_tokens": self._get_token_count(final_content),
+                            "tool_calls": tool_call_records or [],
+                            "external_tool_requests": None,
+                            "streaming": False,
+                            "partial": False,
+                        },
+                    )
+                    yield final_response
+                break
+            elif stream_completed:
+                # If we've already seen finish_reason but no usage chunk, exit
+                break

         return stream_completed, tool_calls_complete

@@ -2906,10 +2936,19 @@ class ChatAgent(BaseAgent):
         tool = self._internal_tools[function_name]
         try:
             result = tool(**args)
+            # First, create and record the assistant message with tool
+            # call
+            assist_msg = FunctionCallingMessage(
+                role_name=self.role_name,
+                role_type=self.role_type,
+                meta_dict=None,
+                content="",
+                func_name=function_name,
+                args=args,
+                tool_call_id=tool_call_id,
+            )

-            #
-            # message assistant message with tool_calls was already
-            # recorded in _record_assistant_tool_calls_message
+            # Then create the tool response message
             func_msg = FunctionCallingMessage(
                 role_name=self.role_name,
                 role_type=self.role_type,

@@ -2920,7 +2959,25 @@ class ChatAgent(BaseAgent):
                 tool_call_id=tool_call_id,
             )

-
+            # Record both messages with precise timestamps to ensure
+            # correct ordering
+            import time
+
+            current_time_ns = time.time_ns()
+            base_timestamp = (
+                current_time_ns / 1_000_000_000
+            )  # Convert to seconds
+
+            self.update_memory(
+                assist_msg,
+                OpenAIBackendRole.ASSISTANT,
+                timestamp=base_timestamp,
+            )
+            self.update_memory(
+                func_msg,
+                OpenAIBackendRole.FUNCTION,
+                timestamp=base_timestamp + 1e-6,
+            )

             return ToolCallingRecord(
                 tool_name=function_name,
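A note on the timestamp handling added above (and mirrored in the async path that follows): the assistant tool-call message and the tool result are written to memory back to back, so the code derives one base timestamp from time.time_ns() and offsets the tool result by 1e-6 seconds, presumably so that memory ordered by timestamp always places the tool result strictly after the assistant message. A minimal, self-contained illustration of that ordering trick (not camel code):

    import time

    # Records written back-to-back can otherwise collide on the same float
    # timestamp; a shared base plus a tiny offset fixes the sort order.
    base = time.time_ns() / 1_000_000_000
    records = [
        {"role": "tool", "ts": base + 1e-6},
        {"role": "assistant", "ts": base},
    ]
    ordered = sorted(records, key=lambda r: r["ts"])
    assert [r["role"] for r in ordered] == ["assistant", "tool"]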
@@ -3004,10 +3061,19 @@ class ChatAgent(BaseAgent):
         else:
             # Fallback: synchronous call
             result = tool(**args)
+            # First, create and record the assistant message with tool
+            # call
+            assist_msg = FunctionCallingMessage(
+                role_name=self.role_name,
+                role_type=self.role_type,
+                meta_dict=None,
+                content="",
+                func_name=function_name,
+                args=args,
+                tool_call_id=tool_call_id,
+            )

-            #
-            # message assistant message with tool_calls was already
-            # recorded in _record_assistant_tool_calls_message
+            # Then create the tool response message
             func_msg = FunctionCallingMessage(
                 role_name=self.role_name,
                 role_type=self.role_type,

@@ -3018,7 +3084,25 @@ class ChatAgent(BaseAgent):
                 tool_call_id=tool_call_id,
             )

-
+            # Record both messages with precise timestamps to ensure
+            # correct ordering
+            import time
+
+            current_time_ns = time.time_ns()
+            base_timestamp = (
+                current_time_ns / 1_000_000_000
+            )  # Convert to seconds
+
+            self.update_memory(
+                assist_msg,
+                OpenAIBackendRole.ASSISTANT,
+                timestamp=base_timestamp,
+            )
+            self.update_memory(
+                func_msg,
+                OpenAIBackendRole.FUNCTION,
+                timestamp=base_timestamp + 1e-6,
+            )

             return ToolCallingRecord(
                 tool_name=function_name,

@@ -3369,18 +3453,13 @@ class ChatAgent(BaseAgent):
         response_format: Optional[Type[BaseModel]] = None,
     ) -> AsyncGenerator[Union[ChatAgentResponse, Tuple[bool, bool]], None]:
         r"""Async version of process streaming chunks with
-        content accumulator.
+        content accumulator.
+        """

         tool_calls_complete = False
         stream_completed = False

         async for chunk in stream:
-            # Update token usage if available
-            if chunk.usage:
-                self._update_token_usage_tracker(
-                    step_token_usage, safe_model_dump(chunk.usage)
-                )
-
             # Process chunk delta
             if chunk.choices and len(chunk.choices) > 0:
                 choice = chunk.choices[0]

@@ -3413,13 +3492,6 @@ class ChatAgent(BaseAgent):
                     # If we have complete tool calls, execute them with
                     # async status updates
                     if accumulated_tool_calls:
-                        # Record assistant message with
-                        # tool calls first
-                        self._record_assistant_tool_calls_message(
-                            accumulated_tool_calls,
-                            content_accumulator.get_full_content(),
-                        )
-
                         # Execute tools asynchronously with real-time
                         # status updates
                         async for (

@@ -3454,7 +3526,49 @@ class ChatAgent(BaseAgent):
                         )

                         self.record_message(final_message)
-
+            elif chunk.usage and not chunk.choices:
+                # Handle final chunk with usage but empty choices
+                # This happens when stream_options={"include_usage": True}
+                # Update the final usage from this chunk
+                self._update_token_usage_tracker(
+                    step_token_usage, safe_model_dump(chunk.usage)
+                )
+
+                # Create final response with final usage
+                final_content = content_accumulator.get_full_content()
+                if final_content.strip():
+                    final_message = BaseMessage(
+                        role_name=self.role_name,
+                        role_type=self.role_type,
+                        meta_dict={},
+                        content=final_content,
+                    )
+
+                    if response_format:
+                        self._try_format_message(
+                            final_message, response_format
+                        )
+
+                    # Create final response with final usage (not partial)
+                    final_response = ChatAgentResponse(
+                        msgs=[final_message],
+                        terminated=False,
+                        info={
+                            "id": getattr(chunk, 'id', ''),
+                            "usage": step_token_usage.copy(),
+                            "finish_reasons": ["stop"],
+                            "num_tokens": self._get_token_count(final_content),
+                            "tool_calls": tool_call_records or [],
+                            "external_tool_requests": None,
+                            "streaming": False,
+                            "partial": False,
+                        },
+                    )
+                    yield final_response
+                break
+            elif stream_completed:
+                # If we've already seen finish_reason but no usage chunk, exit
+                break

         # Yield the final status as a tuple
         yield (stream_completed, tool_calls_complete)
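For context on the `elif chunk.usage and not chunk.choices` branch added in both the sync and async streaming paths: when an OpenAI-compatible endpoint is called with stream_options={"include_usage": True}, the last chunk of the stream carries the aggregate token usage and an empty choices list, so the per-chunk usage accounting removed above is replaced by a single update from that terminal chunk. A short sketch of the upstream behavior this handles, assuming the openai Python client and an OPENAI_API_KEY in the environment (the model name is illustrative):

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
    stream = client.chat.completions.create(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
        stream_options={"include_usage": True},
    )
    for chunk in stream:
        if chunk.choices:
            print(chunk.choices[0].delta.content or "", end="")
        elif chunk.usage:
            # Final chunk: empty choices, aggregate usage for the whole stream.
            print("\ntotal tokens:", chunk.usage.total_tokens)

This mirrors the new branch: usage is recorded once from the terminal chunk, and the resulting ChatAgentResponse is marked as non-partial.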
camel/configs/__init__.py
CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .aiml_config import AIML_API_PARAMS, AIMLConfig
+from .amd_config import AMD_API_PARAMS, AMDConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig

@@ -111,6 +112,8 @@ __all__ = [
     'SILICONFLOW_API_PARAMS',
     'AIMLConfig',
     'AIML_API_PARAMS',
+    'AMDConfig',
+    'AMD_API_PARAMS',
     'OpenRouterConfig',
     'OPENROUTER_API_PARAMS',
     'LMSTUDIO_API_PARAMS',

camel/configs/amd_config.py
ADDED

@@ -0,0 +1,70 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from pydantic import Field
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NotGiven
+
+
+class AMDConfig(BaseConfig):
+    r"""Configuration class for AMD API models.
+
+    This class defines the configuration parameters for AMD's language
+    models, including temperature, sampling parameters, and response format
+    settings.
+
+    Args:
+        stream (bool, optional): Whether to stream the response.
+            (default: :obj:`None`)
+        temperature (float, optional): Controls randomness in the response.
+            Higher values make output more random, lower values make it more
+            deterministic. Range: [0.0, 2.0]. (default: :obj:`None`)
+        top_p (float, optional): Controls diversity via nucleus sampling.
+            Range: [0.0, 1.0]. (default: :obj:`None`)
+        presence_penalty (float, optional): Penalizes new tokens based on
+            whether they appear in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        frequency_penalty (float, optional): Penalizes new tokens based on
+            their frequency in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        max_tokens (Union[int, NotGiven], optional): Maximum number of tokens
+            to generate. If not provided, model will use its default maximum.
+            (default: :obj:`None`)
+        seed (Optional[int], optional): Random seed for deterministic sampling.
+            (default: :obj:`None`)
+        tools (Optional[List[Dict]], optional): List of tools available to the
+            model. This includes tools such as a text editor, a calculator, or
+            a search engine. (default: :obj:`None`)
+        tool_choice (Optional[str], optional): Tool choice configuration.
+            (default: :obj:`None`)
+        stop (Optional[List[str]], optional): List of stop sequences.
+            (default: :obj:`None`)
+    """
+
+    stream: Optional[bool] = Field(default=None)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    max_tokens: Optional[Union[int, NotGiven]] = Field(default=None)
+    seed: Optional[int] = Field(default=None)
+    tool_choice: Optional[str] = Field(default=None)
+    stop: Optional[List[str]] = Field(default=None)
+
+
+AMD_API_PARAMS = {param for param in AMDConfig.model_fields.keys()}
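A usage sketch for the new config, not taken from this diff: AMDConfig follows the same shape as the other provider configs, so it would typically be built and passed to the model factory as a plain dict. ModelPlatformType.AMD and the model name below are assumptions; the actual enum member is added in camel/types/enums.py, which this diff lists but does not show.

    from camel.configs import AMDConfig
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    # Unset fields default to None, mirroring the other BaseConfig subclasses.
    config = AMDConfig(temperature=0.2, max_tokens=512)

    # ModelPlatformType.AMD and the model name are illustrative assumptions.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.AMD,
        model_type="llama-3.1-8b-instruct",
        model_config_dict=config.as_dict(),
    )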
camel/interpreters/__init__.py
CHANGED
@@ -18,6 +18,7 @@ from .e2b_interpreter import E2BInterpreter
 from .internal_python_interpreter import InternalPythonInterpreter
 from .interpreter_error import InterpreterError
 from .ipython_interpreter import JupyterKernelInterpreter
+from .microsandbox_interpreter import MicrosandboxInterpreter
 from .subprocess_interpreter import SubprocessInterpreter

 __all__ = [

@@ -28,4 +29,5 @@ __all__ = [
     'DockerInterpreter',
     'JupyterKernelInterpreter',
     'E2BInterpreter',
+    'MicrosandboxInterpreter',
 ]
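A minimal sketch of how the newly exported interpreter might be used, assuming it follows the run(code, code_type) interface shared by the other camel interpreters; its constructor options live in microsandbox_interpreter.py, which this diff lists but does not show, so the no-argument call below is an assumption.

    from camel.interpreters import MicrosandboxInterpreter

    # Constructor arguments are not shown in this diff; defaults are assumed.
    interpreter = MicrosandboxInterpreter()

    # Assuming the common BaseInterpreter interface: run(code, code_type).
    output = interpreter.run("print(1 + 1)", code_type="python")
    print(output)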