camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- camel/__init__.py +1 -1
- camel/agents/_types.py +6 -2
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +4014 -410
- camel/agents/mcp_agent.py +30 -27
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/configs/vllm_config.py +2 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/base_generator.py +39 -10
- camel/environments/__init__.py +12 -0
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +4 -16
- camel/interpreters/docker_interpreter.py +3 -2
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/internal_python_interpreter.py +51 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +9 -0
- camel/loaders/firecrawl_reader.py +4 -4
- camel/logger.py +1 -1
- camel/memories/agent_memories.py +84 -1
- camel/memories/base.py +34 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/blocks/vectordb_block.py +8 -1
- camel/memories/context_creators/score_based.py +29 -237
- camel/memories/records.py +88 -8
- camel/messages/base.py +166 -40
- camel/messages/func_message.py +32 -5
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +117 -18
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +205 -91
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +189 -24
- camel/models/cohere_model.py +5 -17
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +6 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +71 -20
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +49 -32
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/model_manager.py +24 -6
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +185 -19
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +171 -46
- camel/models/openai_model.py +205 -77
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/configs.py +11 -11
- camel/runtimes/daytona_runtime.py +15 -16
- camel/runtimes/docker_runtime.py +6 -6
- camel/runtimes/remote_http_runtime.py +5 -5
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +2 -0
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +249 -38
- camel/societies/workforce/role_playing_worker.py +82 -20
- camel/societies/workforce/single_agent_worker.py +634 -34
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +169 -23
- camel/societies/workforce/utils.py +176 -9
- camel/societies/workforce/worker.py +77 -23
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +3168 -478
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +203 -175
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +4 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/__init__.py +6 -0
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/surreal.py +365 -0
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +244 -27
- camel/toolkits/__init__.py +46 -8
- camel/toolkits/aci_toolkit.py +64 -19
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/base.py +63 -5
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +10 -6
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +901 -67
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +30 -6
- camel/toolkits/github_toolkit.py +107 -20
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/human_toolkit.py +34 -10
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +3 -4
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +370 -45
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/pptx_toolkit.py +25 -12
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +437 -142
- camel/toolkits/slack_toolkit.py +104 -50
- camel/toolkits/sympy_toolkit.py +1 -1
- camel/toolkits/task_planning_toolkit.py +3 -3
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +1 -1
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +106 -26
- camel/toolkits/video_download_toolkit.py +17 -14
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +4 -1
- camel/types/enums.py +316 -40
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +31 -4
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/mcp_client.py +45 -1
- camel/utils/message_summarizer.py +148 -0
- camel/utils/token_counting.py +43 -20
- camel/utils/tool_result.py +44 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/messages/base.py
CHANGED
@@ -64,12 +64,13 @@ class BaseMessage:
         content (str): The content of the message.
         video_bytes (Optional[bytes]): Optional bytes of a video associated
             with the message. (default: :obj:`None`)
-        image_list (Optional[List[Image.Image]]): Optional list of
-            objects associated with the
+        image_list (Optional[List[Union[Image.Image, str]]]): Optional list of
+            PIL Image objects or image URLs (strings) associated with the
+            message. (default: :obj:`None`)
         image_detail (Literal["auto", "low", "high"]): Detail level of the
             images associated with the message. (default: :obj:`auto`)
         video_detail (Literal["auto", "low", "high"]): Detail level of the
-            videos associated with the message. (default: :obj:`
+            videos associated with the message. (default: :obj:`auto`)
         parsed: Optional[Union[Type[BaseModel], dict]]: Optional object which
             is parsed from the content. (default: :obj:`None`)
     """
@@ -80,9 +81,9 @@ class BaseMessage:
     content: str
 
     video_bytes: Optional[bytes] = None
-    image_list: Optional[List[Image.Image]] = None
+    image_list: Optional[List[Union[Image.Image, str]]] = None
     image_detail: Literal["auto", "low", "high"] = "auto"
-    video_detail: Literal["auto", "low", "high"] = "
+    video_detail: Literal["auto", "low", "high"] = "auto"
     parsed: Optional[Union[BaseModel, dict]] = None
 
     @classmethod
@@ -92,7 +93,7 @@ class BaseMessage:
         content: str,
         meta_dict: Optional[Dict[str, str]] = None,
         video_bytes: Optional[bytes] = None,
-        image_list: Optional[List[Image.Image]] = None,
+        image_list: Optional[List[Union[Image.Image, str]]] = None,
         image_detail: Union[
             OpenAIVisionDetailType, str
         ] = OpenAIVisionDetailType.AUTO,
@@ -109,8 +110,9 @@ class BaseMessage:
                 dictionary for the message.
             video_bytes (Optional[bytes]): Optional bytes of a video
                 associated with the message.
-            image_list (Optional[List[Image.Image]]): Optional list
-                Image objects associated with
+            image_list (Optional[List[Union[Image.Image, str]]]): Optional list
+                of PIL Image objects or image URLs (strings) associated with
+                the message.
             image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
                 the images associated with the message.
             video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
@@ -137,7 +139,7 @@ class BaseMessage:
         content: str,
         meta_dict: Optional[Dict[str, str]] = None,
         video_bytes: Optional[bytes] = None,
-        image_list: Optional[List[Image.Image]] = None,
+        image_list: Optional[List[Union[Image.Image, str]]] = None,
         image_detail: Union[
             OpenAIVisionDetailType, str
         ] = OpenAIVisionDetailType.AUTO,
@@ -154,8 +156,9 @@ class BaseMessage:
                 dictionary for the message.
             video_bytes (Optional[bytes]): Optional bytes of a video
                 associated with the message.
-            image_list (Optional[List[Image.Image]]): Optional list
-                Image objects associated with
+            image_list (Optional[List[Union[Image.Image, str]]]): Optional list
+                of PIL Image objects or image URLs (strings) associated with
+                the message.
             image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
                 the images associated with the message.
             video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
@@ -175,6 +178,32 @@ class BaseMessage:
             OpenAIVisionDetailType(video_detail).value,
         )
 
+    @classmethod
+    def make_system_message(
+        cls,
+        content: str,
+        role_name: str = "System",
+        meta_dict: Optional[Dict[str, str]] = None,
+    ) -> "BaseMessage":
+        r"""Create a new system message.
+
+        Args:
+            content (str): The content of the system message.
+            role_name (str): The name of the system role.
+                (default: :obj:`"System"`)
+            meta_dict (Optional[Dict[str, str]]): Additional metadata
+                dictionary for the message.
+
+        Returns:
+            BaseMessage: The new system message.
+        """
+        return cls(
+            role_name,
+            RoleType.SYSTEM,
+            meta_dict,
+            content,
+        )
+
     def create_new_instance(self, content: str) -> "BaseMessage":
         r"""Create a new instance of the :obj:`BaseMessage` with updated
         content.
@@ -436,35 +465,64 @@ class BaseMessage:
             )
         if self.image_list and len(self.image_list) > 0:
             for image in self.image_list:
-                if image
-
-
-
-
-
+                # Check if image is a URL string or PIL Image
+                if isinstance(image, str):
+                    # Image is a URL string
+                    hybrid_content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": image,
+                                "detail": self.image_detail,
+                            },
+                        }
                     )
+                else:
+                    # Image is a PIL Image object
+                    if image.format is None:
+                        # Set default format to PNG as fallback
+                        image.format = 'PNG'
+
+                    image_type: str = image.format.lower()
+                    if image_type not in OpenAIImageType:
+                        raise ValueError(
+                            f"Image type {image.format} "
+                            f"is not supported by OpenAI vision model"
+                        )
+
+                    # Convert RGBA to RGB for formats that don't support
+                    # transparency or when the image has transparency channel
+                    img_to_save = image
+                    if image.mode in ('RGBA', 'LA', 'P') and image_type in (
+                        'jpeg',
+                        'jpg',
+                    ):
+                        # JPEG doesn't support transparency, convert to RGB
+                        img_to_save = image.convert('RGB')
+                    elif (
+                        image.mode in ('RGBA', 'LA', 'P')
+                        and image_type == 'png'
+                    ):
+                        # For PNG with transparency, convert to RGBA if needed
+                        if image.mode in ('LA', 'P'):
+                            img_to_save = image.convert('RGBA')
+                        # else: RGBA mode, keep as-is
 
-
-
-
-
-
-
-
-
-
+                    with io.BytesIO() as buffer:
+                        img_to_save.save(fp=buffer, format=image.format)
+                        encoded_image = base64.b64encode(
+                            buffer.getvalue()
+                        ).decode("utf-8")
+                    image_prefix = f"data:image/{image_type};base64,"
+                    hybrid_content.append(
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"{image_prefix}{encoded_image}",
+                                "detail": self.image_detail,
+                            },
+                        }
                     )
-                image_prefix = f"data:image/{image_type};base64,"
-                hybrid_content.append(
-                    {
-                        "type": "image_url",
-                        "image_url": {
-                            "url": f"{image_prefix}{encoded_image}",
-                            "detail": self.image_detail,
-                        },
-                    }
-                )
 
         if self.video_bytes:
             import imageio.v3 as iio
@@ -537,7 +595,18 @@ class BaseMessage:
             OpenAIAssistantMessage: The converted :obj:`OpenAIAssistantMessage`
             object.
         """
-
+        message_dict: Dict[str, Any] = {
+            "role": "assistant",
+            "content": self.content,
+        }
+
+        # Check if meta_dict contains tool_calls
+        if self.meta_dict and "tool_calls" in self.meta_dict:
+            tool_calls = self.meta_dict["tool_calls"]
+            if tool_calls:
+                message_dict["tool_calls"] = tool_calls
+
+        return message_dict  # type: ignore[return-value]
 
     def to_dict(self) -> Dict:
         r"""Converts the message to a dictionary.
@@ -545,9 +614,66 @@ class BaseMessage:
         Returns:
             dict: The converted dictionary.
         """
-
+        result = {
             "role_name": self.role_name,
-            "role_type": self.role_type.
+            "role_type": self.role_type.value,
             **(self.meta_dict or {}),
             "content": self.content,
         }
+
+        # Include image/video fields if present
+        if self.image_list is not None:
+            # Handle both PIL Images and URL strings
+            import base64
+            from io import BytesIO
+
+            image_data_list = []
+            for img in self.image_list:
+                if isinstance(img, str):
+                    # Image is a URL string, store as-is
+                    image_data_list.append({"type": "url", "data": img})
+                else:
+                    # Image is a PIL Image, convert to base64
+                    # Preserve format, default to PNG if not set
+                    img_format = img.format if img.format else "PNG"
+
+                    # Handle transparency for different formats
+                    img_to_save = img
+                    if img.mode in (
+                        'RGBA',
+                        'LA',
+                        'P',
+                    ) and img_format.upper() in ('JPEG', 'JPG'):
+                        # JPEG doesn't support transparency, convert to RGB
+                        img_to_save = img.convert('RGB')
+                    elif (
+                        img.mode in ('LA', 'P') and img_format.upper() == 'PNG'
+                    ):
+                        # For PNG with transparency, convert to RGBA if needed
+                        img_to_save = img.convert('RGBA')
+                    # else: keep as-is for other combinations
+
+                    buffered = BytesIO()
+                    img_to_save.save(buffered, format=img_format)
+                    img_str = base64.b64encode(buffered.getvalue()).decode()
+                    image_data_list.append(
+                        {
+                            "type": "base64",
+                            "data": img_str,
+                            "format": img_format,  # Preserve format
+                        }
+                    )
+            result["image_list"] = image_data_list
+
+        if self.video_bytes is not None:
+            import base64
+
+            result["video_bytes"] = base64.b64encode(self.video_bytes).decode()
+
+        if self.image_detail is not None:
+            result["image_detail"] = self.image_detail
+
+        if self.video_detail is not None:
+            result["video_detail"] = self.video_detail
+
+        return result
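Taken together, these `base.py` changes let `image_list` mix PIL images with plain image-URL strings, add a `make_system_message()` constructor, and make `to_dict()` also carry image and video data. A minimal usage sketch against the new signatures; the file path and URL below are placeholders, not values from the diff:

from PIL import Image

from camel.messages import BaseMessage

# New convenience constructor for system prompts
sys_msg = BaseMessage.make_system_message(
    content="You are a helpful assistant.",
)

# image_list may now mix PIL Image objects and image URL strings
user_msg = BaseMessage.make_user_message(
    role_name="User",
    content="Describe these two images.",
    image_list=[
        Image.open("photo.png"),                 # encoded as a base64 data URL
        "https://example.com/remote_photo.jpg",  # forwarded as an image_url entry
    ],
    image_detail="low",
)

# to_dict() now also serializes image_list (base64 or URL entries),
# video_bytes, image_detail, and video_detail
snapshot = user_msg.to_dict()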
camel/messages/func_message.py
CHANGED
@@ -47,12 +47,16 @@ class FunctionCallingMessage(BaseMessage):
             (default: :obj:`None`)
         tool_call_id (Optional[str]): The ID of the tool call, if available.
             (default: :obj:`None`)
+        mask_output (Optional[bool]): Whether to return a sanitized placeholder
+            instead of the raw tool output.
+            (default: :obj:`False`)
     """
 
     func_name: Optional[str] = None
     args: Optional[Dict] = None
     result: Optional[Any] = None
     tool_call_id: Optional[str] = None
+    mask_output: Optional[bool] = False
 
     def to_openai_message(
         self,
@@ -105,10 +109,13 @@ class FunctionCallingMessage(BaseMessage):
         # This is a function response
         # TODO: Allow for more flexible setting of tool role,
         # optionally to be the same as assistant messages
-
-
-
-
+        if self.mask_output:
+            content = "[MASKED]"
+        else:
+            content = function_format.format_tool_response(
+                self.func_name,  # type: ignore[arg-type]
+                self.result,  # type: ignore[arg-type]
+            )
         return ShareGPTMessage(from_="tool", value=content)  # type: ignore[call-arg]
 
     def to_openai_assistant_message(self) -> OpenAIAssistantMessage:
@@ -154,10 +161,30 @@ class FunctionCallingMessage(BaseMessage):
                 " due to missing function name."
             )
 
-
+        if self.mask_output:
+            result_content = "[MASKED]"
+        else:
+            result_content = str(self.result)
 
         return {
             "role": "tool",
             "content": result_content,
            "tool_call_id": self.tool_call_id or "null",
         }
+
+    def to_dict(self) -> Dict:
+        r"""Converts the message to a dictionary.
+
+        Returns:
+            dict: The converted dictionary.
+        """
+        base = super().to_dict()
+        base["func_name"] = self.func_name
+        if self.args is not None:
+            base["args"] = self.args
+        if self.result is not None:
+            base["result"] = self.result
+        if self.tool_call_id is not None:
+            base["tool_call_id"] = self.tool_call_id
+        base["mask_output"] = self.mask_output
+        return base
camel/models/__init__.py
CHANGED
@@ -11,13 +11,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .aihubmix_model import AihubMixModel
 from .aiml_model import AIMLModel
+from .amd_model import AMDModel
 from .anthropic_model import AnthropicModel
 from .aws_bedrock_model import AWSBedrockModel
 from .azure_openai_model import AzureOpenAIModel
 from .base_audio_model import BaseAudioModel
 from .base_model import BaseModelBackend
 from .cohere_model import CohereModel
+from .cometapi_model import CometAPIModel
 from .crynux_model import CrynuxModel
 from .deepseek_model import DeepSeekModel
 from .fish_audio_model import FishAudioModel
@@ -26,11 +29,13 @@ from .groq_model import GroqModel
 from .internlm_model import InternLMModel
 from .litellm_model import LiteLLMModel
 from .lmstudio_model import LMStudioModel
+from .minimax_model import MinimaxModel
 from .mistral_model import MistralModel
 from .model_factory import ModelFactory
 from .model_manager import ModelManager, ModelProcessingError
 from .modelscope_model import ModelScopeModel
 from .moonshot_model import MoonshotModel
+from .nebius_model import NebiusModel
 from .nemotron_model import NemotronModel
 from .netmind_model import NetmindModel
 from .novita_model import NovitaModel
@@ -61,11 +66,13 @@ __all__ = [
     'OpenRouterModel',
     'AzureOpenAIModel',
     'AnthropicModel',
+    'AMDModel',
     'MistralModel',
     'GroqModel',
     'StubModel',
     'ZhipuAIModel',
     'CohereModel',
+    'CometAPIModel',
     'ModelFactory',
     'ModelManager',
     'LiteLLMModel',
@@ -87,6 +94,7 @@ __all__ = [
     'QwenModel',
     'AWSBedrockModel',
     'ModelProcessingError',
+    'NebiusModel',
     'DeepSeekModel',
     'FishAudioModel',
     'InternLMModel',
@@ -97,7 +105,9 @@ __all__ = [
     'SiliconFlowModel',
     'VolcanoModel',
     'LMStudioModel',
+    'MinimaxModel',
     'WatsonXModel',
     'QianfanModel',
     'CrynuxModel',
+    'AihubMixModel',
 ]
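With these exports in place, the newly added backends are importable straight from `camel.models`:

from camel.models import (
    AihubMixModel,
    AMDModel,
    CometAPIModel,
    MinimaxModel,
    NebiusModel,
)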
camel/models/aihubmix_model.py
ADDED
@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class AihubMixModel(OpenAICompatibleModel):
+    r"""AihubMix API in a unified OpenAICompatibleModel interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into OpenAI client. If :obj:`None`,
+            :obj:`{}` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            AihubMix service. (default: :obj:`None`)
+        url (Optional[str], optional): The URL to AihubMix service. If
+            not provided, :obj:`https://aihubmix.com/v1` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "AIHUBMIX_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = {}
+        api_key = api_key or os.environ.get("AIHUBMIX_API_KEY")
+        url = url or os.environ.get(
+            "AIHUBMIX_API_BASE_URL",
+            "https://aihubmix.com/v1",
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
camel/models/aiml_model.py
CHANGED
@@ -14,7 +14,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import
+from camel.configs import AIMLConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -82,18 +82,3 @@ class AIMLModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to AIML API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to AIML API.
-        """
-        for param in self.model_config_dict:
-            if param not in AIML_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into AIML model backend."
-                )
camel/models/amd_model.py
ADDED
@@ -0,0 +1,101 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import AMD_API_PARAMS, AMDConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import BaseTokenCounter, api_keys_required
+
+
+class AMDModel(OpenAICompatibleModel):
+    r"""AMD API in a unified OpenAICompatibleModel interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of AMD series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`AMDConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the AMD service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the AMD service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", "AMD_API_KEY"),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = AMDConfig().as_dict()
+        api_key = api_key or os.environ.get("AMD_API_KEY")
+        url = url or os.environ.get(
+            "AMD_API_BASE_URL", "https://llm-api.amd.com"
+        )
+        headers = {'Ocp-Apim-Subscription-Key': api_key}
+        kwargs["default_headers"] = headers
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to AMD API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to AMD API.
+        """
+        for param in self.model_config_dict:
+            if param not in AMD_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into AMD model backend."
+                )
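`AMDModel` follows the same pattern but also injects the `Ocp-Apim-Subscription-Key` header and defines `check_model_config()`, which rejects keys outside `AMD_API_PARAMS`. A minimal sketch; the model name is a placeholder for whatever the AMD endpoint serves:

import os

from camel.models import AMDModel

model = AMDModel(
    model_type="llama-3.1-70b-instruct",   # placeholder model name
    api_key=os.environ["AMD_API_KEY"],     # also sent as Ocp-Apim-Subscription-Key
    model_config_dict={"temperature": 0.2},
)

# check_model_config() raises ValueError for config keys outside AMD_API_PARAMS
model.check_model_config()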