camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +159 -38
- camel/configs/__init__.py +3 -0
- camel/configs/amd_config.py +70 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/memories/__init__.py +2 -1
- camel/memories/agent_memories.py +3 -1
- camel/memories/blocks/chat_history_block.py +17 -2
- camel/models/__init__.py +2 -0
- camel/models/amd_model.py +101 -0
- camel/models/model_factory.py +2 -0
- camel/models/openai_model.py +0 -6
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/workforce/single_agent_worker.py +44 -38
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/toolkits/__init__.py +14 -5
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +683 -0
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +194 -34
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +19 -2
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +95 -59
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +619 -95
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +7 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +115 -219
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +1 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +401 -80
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +9 -5
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/mcp_toolkit.py +39 -14
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +18 -8
- camel/toolkits/terminal_toolkit.py +12 -2
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +16 -10
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/types/enums.py +11 -0
- camel/utils/commons.py +2 -0
- camel/utils/context_utils.py +395 -0
- camel/utils/mcp.py +136 -2
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/METADATA +6 -3
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/RECORD +52 -41
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -63,6 +63,7 @@ from camel.memories import (
     MemoryRecord,
     ScoreBasedContextCreator,
 )
+from camel.memories.blocks.chat_history_block import EmptyMemoryWarning
 from camel.messages import (
     BaseMessage,
     FunctionCallingMessage,
@@ -233,7 +234,7 @@ class StreamingChatAgentResponse:
         r"""Make this object iterable."""
         if self._consumed:
             # If already consumed, iterate over stored responses
-
+            yield from self._responses
         else:
            # If not consumed, consume and yield
            try:
@@ -841,7 +842,12 @@ class ChatAgent(BaseAgent):
        current_tokens = token_counter.count_tokens_from_messages(
            [message.to_openai_message(role)]
        )
-
+        import warnings
+
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=EmptyMemoryWarning)
+            _, ctx_tokens = self.memory.get_context()
+
        remaining_budget = max(0, token_limit - ctx_tokens)

        if current_tokens <= remaining_budget:
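The hunk above wraps the context probe in a `warnings.catch_warnings()` guard so that the newly imported `EmptyMemoryWarning` does not surface while the agent is only measuring the remaining token budget. A minimal stand-alone sketch of that suppression pattern; the warning class and `get_context()` below are stand-ins, only the stdlib `warnings` calls mirror the diff:

    import warnings

    class EmptyMemoryWarning(UserWarning):
        """Stand-in for the warning exported from chat_history_block."""

    def get_context():
        # Hypothetical memory probe: warns when the history is still empty.
        warnings.warn("memory is empty", EmptyMemoryWarning)
        return [], 0

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=EmptyMemoryWarning)
        _, ctx_tokens = get_context()  # warning is swallowed inside this block

    print(ctx_tokens)  # 0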
@@ -1035,6 +1041,7 @@ class ChatAgent(BaseAgent):
            None
        """
        self.memory.clear()
+
        if self.system_message is not None:
            self.update_memory(self.system_message, OpenAIBackendRole.SYSTEM)

@@ -2665,12 +2672,6 @@ class ChatAgent(BaseAgent):
        stream_completed = False

        for chunk in stream:
-            # Update token usage if available
-            if chunk.usage:
-                self._update_token_usage_tracker(
-                    step_token_usage, safe_model_dump(chunk.usage)
-                )
-
            # Process chunk delta
            if chunk.choices and len(chunk.choices) > 0:
                choice = chunk.choices[0]
@@ -2703,12 +2704,6 @@ class ChatAgent(BaseAgent):
                    # If we have complete tool calls, execute them with
                    # sync status updates
                    if accumulated_tool_calls:
-                        # Record assistant message with tool calls first
-                        self._record_assistant_tool_calls_message(
-                            accumulated_tool_calls,
-                            content_accumulator.get_full_content(),
-                        )
-
                        # Execute tools synchronously with
                        # optimized status updates
                        for (
@@ -2741,7 +2736,49 @@ class ChatAgent(BaseAgent):
                    )

                    self.record_message(final_message)
-
+            elif chunk.usage and not chunk.choices:
+                # Handle final chunk with usage but empty choices
+                # This happens when stream_options={"include_usage": True}
+                # Update the final usage from this chunk
+                self._update_token_usage_tracker(
+                    step_token_usage, safe_model_dump(chunk.usage)
+                )
+
+                # Create final response with final usage
+                final_content = content_accumulator.get_full_content()
+                if final_content.strip():
+                    final_message = BaseMessage(
+                        role_name=self.role_name,
+                        role_type=self.role_type,
+                        meta_dict={},
+                        content=final_content,
+                    )
+
+                    if response_format:
+                        self._try_format_message(
+                            final_message, response_format
+                        )
+
+                    # Create final response with final usage (not partial)
+                    final_response = ChatAgentResponse(
+                        msgs=[final_message],
+                        terminated=False,
+                        info={
+                            "id": getattr(chunk, 'id', ''),
+                            "usage": step_token_usage.copy(),
+                            "finish_reasons": ["stop"],
+                            "num_tokens": self._get_token_count(final_content),
+                            "tool_calls": tool_call_records or [],
+                            "external_tool_requests": None,
+                            "streaming": False,
+                            "partial": False,
+                        },
+                    )
+                    yield final_response
+                break
+            elif stream_completed:
+                # If we've already seen finish_reason but no usage chunk, exit
+                break

        return stream_completed, tool_calls_complete

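The new `elif chunk.usage and not chunk.choices` branch handles the extra chunk that OpenAI-compatible backends emit when `stream_options={"include_usage": True}` is set: the final chunk carries only aggregate token usage and an empty `choices` list. A rough consumer-side sketch of that chunk shape; the client setup and model name are assumptions, not taken from this diff:

    from openai import OpenAI

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
    stream = client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "Say hi"}],
        stream=True,
        stream_options={"include_usage": True},
    )
    for chunk in stream:
        if chunk.choices:
            print(chunk.choices[0].delta.content or "", end="")
        elif chunk.usage:
            # Final chunk: no choices, only the aggregated token counts.
            print("\nprompt tokens:", chunk.usage.prompt_tokens)
            print("completion tokens:", chunk.usage.completion_tokens)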
@@ -2906,10 +2943,19 @@ class ChatAgent(BaseAgent):
        tool = self._internal_tools[function_name]
        try:
            result = tool(**args)
+            # First, create and record the assistant message with tool
+            # call
+            assist_msg = FunctionCallingMessage(
+                role_name=self.role_name,
+                role_type=self.role_type,
+                meta_dict=None,
+                content="",
+                func_name=function_name,
+                args=args,
+                tool_call_id=tool_call_id,
+            )

-            #
-            # message assistant message with tool_calls was already
-            # recorded in _record_assistant_tool_calls_message
+            # Then create the tool response message
            func_msg = FunctionCallingMessage(
                role_name=self.role_name,
                role_type=self.role_type,
@@ -2920,7 +2966,25 @@ class ChatAgent(BaseAgent):
                tool_call_id=tool_call_id,
            )

-
+            # Record both messages with precise timestamps to ensure
+            # correct ordering
+            import time
+
+            current_time_ns = time.time_ns()
+            base_timestamp = (
+                current_time_ns / 1_000_000_000
+            )  # Convert to seconds
+
+            self.update_memory(
+                assist_msg,
+                OpenAIBackendRole.ASSISTANT,
+                timestamp=base_timestamp,
+            )
+            self.update_memory(
+                func_msg,
+                OpenAIBackendRole.FUNCTION,
+                timestamp=base_timestamp + 1e-6,
+            )

            return ToolCallingRecord(
                tool_name=function_name,
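This hunk records the assistant tool-call message and the tool result with explicit timestamps spaced one microsecond apart, so a timestamp-ordered memory always replays the call before its result (the asynchronous path below receives the same change). A small self-contained illustration of the ordering trick; note that a float of epoch seconds still resolves a 1e-6 offset:

    import time

    base_timestamp = time.time_ns() / 1_000_000_000  # seconds as float

    # Even if written out of order, sorting by timestamp restores
    # call-then-result ordering.
    records = [
        ("tool_result", base_timestamp + 1e-6),
        ("assistant_tool_call", base_timestamp),
    ]
    records.sort(key=lambda r: r[1])

    assert [name for name, _ in records] == ["assistant_tool_call", "tool_result"]
    assert base_timestamp + 1e-6 > base_timestamp  # 1e-6 survives float rounding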
@@ -3004,10 +3068,19 @@ class ChatAgent(BaseAgent):
        else:
            # Fallback: synchronous call
            result = tool(**args)
+            # First, create and record the assistant message with tool
+            # call
+            assist_msg = FunctionCallingMessage(
+                role_name=self.role_name,
+                role_type=self.role_type,
+                meta_dict=None,
+                content="",
+                func_name=function_name,
+                args=args,
+                tool_call_id=tool_call_id,
+            )

-            #
-            # message assistant message with tool_calls was already
-            # recorded in _record_assistant_tool_calls_message
+            # Then create the tool response message
            func_msg = FunctionCallingMessage(
                role_name=self.role_name,
                role_type=self.role_type,
@@ -3018,7 +3091,25 @@ class ChatAgent(BaseAgent):
                tool_call_id=tool_call_id,
            )

-
+            # Record both messages with precise timestamps to ensure
+            # correct ordering
+            import time
+
+            current_time_ns = time.time_ns()
+            base_timestamp = (
+                current_time_ns / 1_000_000_000
+            )  # Convert to seconds
+
+            self.update_memory(
+                assist_msg,
+                OpenAIBackendRole.ASSISTANT,
+                timestamp=base_timestamp,
+            )
+            self.update_memory(
+                func_msg,
+                OpenAIBackendRole.FUNCTION,
+                timestamp=base_timestamp + 1e-6,
+            )

            return ToolCallingRecord(
                tool_name=function_name,
@@ -3369,18 +3460,13 @@ class ChatAgent(BaseAgent):
        response_format: Optional[Type[BaseModel]] = None,
    ) -> AsyncGenerator[Union[ChatAgentResponse, Tuple[bool, bool]], None]:
        r"""Async version of process streaming chunks with
-        content accumulator."""
+        content accumulator.
+        """

        tool_calls_complete = False
        stream_completed = False

        async for chunk in stream:
-            # Update token usage if available
-            if chunk.usage:
-                self._update_token_usage_tracker(
-                    step_token_usage, safe_model_dump(chunk.usage)
-                )
-
            # Process chunk delta
            if chunk.choices and len(chunk.choices) > 0:
                choice = chunk.choices[0]
@@ -3413,13 +3499,6 @@ class ChatAgent(BaseAgent):
                    # If we have complete tool calls, execute them with
                    # async status updates
                    if accumulated_tool_calls:
-                        # Record assistant message with
-                        # tool calls first
-                        self._record_assistant_tool_calls_message(
-                            accumulated_tool_calls,
-                            content_accumulator.get_full_content(),
-                        )
-
                        # Execute tools asynchronously with real-time
                        # status updates
                        async for (
@@ -3454,7 +3533,49 @@ class ChatAgent(BaseAgent):
                    )

                    self.record_message(final_message)
-
+            elif chunk.usage and not chunk.choices:
+                # Handle final chunk with usage but empty choices
+                # This happens when stream_options={"include_usage": True}
+                # Update the final usage from this chunk
+                self._update_token_usage_tracker(
+                    step_token_usage, safe_model_dump(chunk.usage)
+                )
+
+                # Create final response with final usage
+                final_content = content_accumulator.get_full_content()
+                if final_content.strip():
+                    final_message = BaseMessage(
+                        role_name=self.role_name,
+                        role_type=self.role_type,
+                        meta_dict={},
+                        content=final_content,
+                    )
+
+                    if response_format:
+                        self._try_format_message(
+                            final_message, response_format
+                        )
+
+                    # Create final response with final usage (not partial)
+                    final_response = ChatAgentResponse(
+                        msgs=[final_message],
+                        terminated=False,
+                        info={
+                            "id": getattr(chunk, 'id', ''),
+                            "usage": step_token_usage.copy(),
+                            "finish_reasons": ["stop"],
+                            "num_tokens": self._get_token_count(final_content),
+                            "tool_calls": tool_call_records or [],
+                            "external_tool_requests": None,
+                            "streaming": False,
+                            "partial": False,
+                        },
+                    )
+                    yield final_response
+                break
+            elif stream_completed:
+                # If we've already seen finish_reason but no usage chunk, exit
+                break

        # Yield the final status as a tuple
        yield (stream_completed, tool_calls_complete)
camel/configs/__init__.py
CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .aiml_config import AIML_API_PARAMS, AIMLConfig
+from .amd_config import AMD_API_PARAMS, AMDConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig
@@ -111,6 +112,8 @@ __all__ = [
     'SILICONFLOW_API_PARAMS',
     'AIMLConfig',
     'AIML_API_PARAMS',
+    'AMDConfig',
+    'AMD_API_PARAMS',
     'OpenRouterConfig',
     'OPENROUTER_API_PARAMS',
     'LMSTUDIO_API_PARAMS',
camel/configs/amd_config.py
ADDED
@@ -0,0 +1,70 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from pydantic import Field
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NotGiven
+
+
+class AMDConfig(BaseConfig):
+    r"""Configuration class for AMD API models.
+
+    This class defines the configuration parameters for AMD's language
+    models, including temperature, sampling parameters, and response format
+    settings.
+
+    Args:
+        stream (bool, optional): Whether to stream the response.
+            (default: :obj:`None`)
+        temperature (float, optional): Controls randomness in the response.
+            Higher values make output more random, lower values make it more
+            deterministic. Range: [0.0, 2.0]. (default: :obj:`None`)
+        top_p (float, optional): Controls diversity via nucleus sampling.
+            Range: [0.0, 1.0]. (default: :obj:`None`)
+        presence_penalty (float, optional): Penalizes new tokens based on
+            whether they appear in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        frequency_penalty (float, optional): Penalizes new tokens based on
+            their frequency in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        max_tokens (Union[int, NotGiven], optional): Maximum number of tokens
+            to generate. If not provided, model will use its default maximum.
+            (default: :obj:`None`)
+        seed (Optional[int], optional): Random seed for deterministic sampling.
+            (default: :obj:`None`)
+        tools (Optional[List[Dict]], optional): List of tools available to the
+            model. This includes tools such as a text editor, a calculator, or
+            a search engine. (default: :obj:`None`)
+        tool_choice (Optional[str], optional): Tool choice configuration.
+            (default: :obj:`None`)
+        stop (Optional[List[str]], optional): List of stop sequences.
+            (default: :obj:`None`)
+    """
+
+    stream: Optional[bool] = Field(default=None)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    max_tokens: Optional[Union[int, NotGiven]] = Field(default=None)
+    seed: Optional[int] = Field(default=None)
+    tool_choice: Optional[str] = Field(default=None)
+    stop: Optional[List[str]] = Field(default=None)
+
+
+AMD_API_PARAMS = {param for param in AMDConfig.model_fields.keys()}
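For orientation, a hypothetical way the new config could be wired into a model. `ModelPlatformType.AMD` and the model identifier below are assumptions inferred from the new `amd_model.py` and the `enums.py` changes in this release, not confirmed by the excerpt shown here:

    from camel.configs import AMDConfig
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    config = AMDConfig(temperature=0.2, max_tokens=1024)

    model = ModelFactory.create(
        model_platform=ModelPlatformType.AMD,  # assumed new enum member
        model_type="amd-hosted-model",         # placeholder model identifier
        model_config_dict=config.as_dict(),    # BaseConfig serialization helper
    )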
camel/interpreters/__init__.py
CHANGED
@@ -18,6 +18,7 @@ from .e2b_interpreter import E2BInterpreter
 from .internal_python_interpreter import InternalPythonInterpreter
 from .interpreter_error import InterpreterError
 from .ipython_interpreter import JupyterKernelInterpreter
+from .microsandbox_interpreter import MicrosandboxInterpreter
 from .subprocess_interpreter import SubprocessInterpreter

 __all__ = [
@@ -28,4 +29,5 @@ __all__ = [
     'DockerInterpreter',
     'JupyterKernelInterpreter',
     'E2BInterpreter',
+    'MicrosandboxInterpreter',
 ]
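With the export in place, the new interpreter can be imported from `camel.interpreters` directly. A hypothetical usage sketch following the common `BaseInterpreter.run(code, code_type)` interface; the constructor arguments of `MicrosandboxInterpreter` are not shown in this diff, so default construction is assumed:

    from camel.interpreters import MicrosandboxInterpreter

    interpreter = MicrosandboxInterpreter()  # assumed default construction
    output = interpreter.run("print(1 + 1)", "python")  # run(code, code_type)
    print(output)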