camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_types.py +6 -2
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +4014 -410
- camel/agents/mcp_agent.py +30 -27
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/configs/vllm_config.py +2 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/base_generator.py +39 -10
- camel/environments/__init__.py +12 -0
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +4 -16
- camel/interpreters/docker_interpreter.py +3 -2
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/internal_python_interpreter.py +51 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +9 -0
- camel/loaders/firecrawl_reader.py +4 -4
- camel/logger.py +1 -1
- camel/memories/agent_memories.py +84 -1
- camel/memories/base.py +34 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/blocks/vectordb_block.py +8 -1
- camel/memories/context_creators/score_based.py +29 -237
- camel/memories/records.py +88 -8
- camel/messages/base.py +166 -40
- camel/messages/func_message.py +32 -5
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +117 -18
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +205 -91
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +189 -24
- camel/models/cohere_model.py +5 -17
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +6 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +71 -20
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +49 -32
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/model_manager.py +24 -6
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +185 -19
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +171 -46
- camel/models/openai_model.py +205 -77
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/configs.py +11 -11
- camel/runtimes/daytona_runtime.py +15 -16
- camel/runtimes/docker_runtime.py +6 -6
- camel/runtimes/remote_http_runtime.py +5 -5
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +2 -0
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +249 -38
- camel/societies/workforce/role_playing_worker.py +82 -20
- camel/societies/workforce/single_agent_worker.py +634 -34
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +169 -23
- camel/societies/workforce/utils.py +176 -9
- camel/societies/workforce/worker.py +77 -23
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +3168 -478
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +203 -175
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +4 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/__init__.py +6 -0
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/surreal.py +365 -0
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +244 -27
- camel/toolkits/__init__.py +46 -8
- camel/toolkits/aci_toolkit.py +64 -19
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/base.py +63 -5
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +10 -6
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +901 -67
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +30 -6
- camel/toolkits/github_toolkit.py +107 -20
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/human_toolkit.py +34 -10
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +3 -4
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +370 -45
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/pptx_toolkit.py +25 -12
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +437 -142
- camel/toolkits/slack_toolkit.py +104 -50
- camel/toolkits/sympy_toolkit.py +1 -1
- camel/toolkits/task_planning_toolkit.py +3 -3
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +1 -1
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +106 -26
- camel/toolkits/video_download_toolkit.py +17 -14
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +4 -1
- camel/types/enums.py +316 -40
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +31 -4
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/mcp_client.py +45 -1
- camel/utils/message_summarizer.py +148 -0
- camel/utils/token_counting.py +43 -20
- camel/utils/tool_result.py +44 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/models/groq_model.py
CHANGED

@@ -14,7 +14,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import GROQ_API_PARAMS, GroqConfig
+from camel.configs import GroqConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -80,19 +80,3 @@ class GroqModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to Groq API. But Groq API does not have any additional
-        arguments to check.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Groq API.
-        """
-        for param in self.model_config_dict:
-            if param not in GROQ_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Groq model backend."
-                )
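This release deletes the per-backend `check_model_config` allowlist validation; the same removal appears in the internlm, litellm, lmstudio, mistral, and modelscope diffs below. Unrecognized config keys now pass through to the underlying API instead of raising client-side. A minimal standalone sketch of the removed pattern, with `ALLOWED_PARAMS` as a hypothetical stand-in for the deleted `GROQ_API_PARAMS` set:

# Hypothetical stand-in for the removed GROQ_API_PARAMS allowlist.
ALLOWED_PARAMS = {"temperature", "top_p", "max_tokens", "stream"}


def check_model_config(model_config_dict: dict) -> None:
    # Mirrors the deleted per-backend check: any key outside the
    # allowlist raised a ValueError in 0.2.67.
    for param in model_config_dict:
        if param not in ALLOWED_PARAMS:
            raise ValueError(
                f"Unexpected argument `{param}` is input into the model backend."
            )


check_model_config({"temperature": 0.2})  # accepted
# check_model_config({"foo": 1})  # would have raised ValueError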
camel/models/internlm_model.py
CHANGED

@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from openai import AsyncStream
 from pydantic import BaseModel
 
-from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
+from camel.configs import InternLMConfig
 from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
@@ -101,18 +101,3 @@ class InternLMModel(OpenAICompatibleModel):
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
         raise NotImplementedError("InternLM does not support async inference.")
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to InternLM API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to InternLM API.
-        """
-        for param in self.model_config_dict:
-            if param not in INTERNLM_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into InternLM model backend."
-                )
camel/models/litellm_model.py
CHANGED

@@ -12,11 +12,12 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
+import uuid
 from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
 
-from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
+from camel.configs import LiteLLMConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
@@ -98,23 +99,47 @@ class LiteLLMModel(BaseModelBackend):
         Returns:
             ChatCompletion: The response object in OpenAI's format.
         """
-        return ChatCompletion.construct(
-            id=response.id,
-            choices=[
+
+        converted_choices = []
+        for choice in response.choices:
+            # Build the assistant message dict
+            msg_dict: Dict[str, Any] = {
+                "role": choice.message.role,
+                "content": choice.message.content,
+            }
+
+            if getattr(choice.message, "tool_calls", None):
+                msg_dict["tool_calls"] = choice.message.tool_calls
+
+            elif getattr(choice.message, "function_call", None):
+                func_call = choice.message.function_call
+                msg_dict["tool_calls"] = [
+                    {
+                        "id": f"call_{uuid.uuid4().hex[:24]}",
+                        "type": "function",
+                        "function": {
+                            "name": getattr(func_call, "name", None),
+                            "arguments": getattr(func_call, "arguments", "{}"),
+                        },
+                    }
+                ]
+
+            converted_choices.append(
                 {
-                    "index": response.choices[0].index,
-                    "message": {
-                        "role": response.choices[0].message.role,
-                        "content": response.choices[0].message.content,
-                    },
-                    "finish_reason": response.choices[0].finish_reason,
+                    "index": choice.index,
+                    "message": msg_dict,
+                    "finish_reason": choice.finish_reason,
                 }
-            ],
-            created=response.created,
-            model=response.model,
-            object="chat.completion",
-            system_fingerprint=response.system_fingerprint,
-            usage=response.usage,
+            )
+
+        return ChatCompletion.construct(
+            id=response.id,
+            choices=converted_choices,
+            created=getattr(response, "created", None),
+            model=getattr(response, "model", None),
+            object=getattr(response, "object", None),
+            system_fingerprint=getattr(response, "system_fingerprint", None),
+            usage=getattr(response, "usage", None),
         )
 
     @property
@@ -148,6 +173,13 @@ class LiteLLMModel(BaseModelBackend):
         Returns:
             ChatCompletion
         """
+
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config['tools'] = tools
+        if response_format:
+            request_config['response_format'] = response_format
+
         update_current_observation(
             input={
                 "messages": messages,
@@ -176,7 +208,7 @@ class LiteLLMModel(BaseModelBackend):
             base_url=self._url,
             model=self.model_type,
             messages=messages,
-            **self.model_config_dict,
+            **request_config,
             **self.kwargs,
         )
         response = self._convert_response_from_litellm_to_openai(response)
@@ -185,18 +217,3 @@ class LiteLLMModel(BaseModelBackend):
             usage=response.usage,
         )
         return response
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to LiteLLM API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments.
-        """
-        for param in self.model_config_dict:
-            if param not in LITELLM_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into LiteLLM model backend."
-                )
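The rewritten conversion method above now iterates over every choice and normalizes legacy `function_call` payloads into the modern `tool_calls` shape with a synthetic call id. A self-contained sketch of that normalization; the fake message object stands in for a LiteLLM choice message and is purely illustrative:

import uuid
from types import SimpleNamespace

# Illustrative stand-in for a LiteLLM choice message that still uses
# the deprecated `function_call` field.
message = SimpleNamespace(
    role="assistant",
    content=None,
    tool_calls=None,
    function_call=SimpleNamespace(
        name="get_weather", arguments='{"city": "Paris"}'
    ),
)

msg_dict = {"role": message.role, "content": message.content}
if getattr(message, "tool_calls", None):
    msg_dict["tool_calls"] = message.tool_calls
elif getattr(message, "function_call", None):
    # Same normalization as the new conversion code: wrap the legacy
    # payload in a tool_calls list with a synthetic `call_...` id.
    func_call = message.function_call
    msg_dict["tool_calls"] = [
        {
            "id": f"call_{uuid.uuid4().hex[:24]}",
            "type": "function",
            "function": {
                "name": getattr(func_call, "name", None),
                "arguments": getattr(func_call, "arguments", "{}"),
            },
        }
    ]

print(msg_dict["tool_calls"][0]["function"]["name"])  # -> get_weather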
camel/models/lmstudio_model.py
CHANGED

@@ -14,7 +14,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import LMSTUDIO_API_PARAMS, LMStudioConfig
+from camel.configs import LMStudioConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import BaseTokenCounter
@@ -77,19 +77,3 @@ class LMStudioModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to LMStudio API. But LMStudio API does not have any
-        additional arguments to check.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to LMStudio API.
-        """
-        for param in self.model_config_dict:
-            if param not in LMSTUDIO_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into LMStudio model backend."
-                )
camel/models/minimax_model.py
ADDED

@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import MinimaxConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class MinimaxModel(OpenAICompatibleModel):
+    r"""LLM API served by Minimax in a unified OpenAICompatibleModel
+    interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`.
+            If :obj:`None`, :obj:`MinimaxConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the Minimax service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Minimax M2 service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "MINIMAX_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = MinimaxConfig().as_dict()
+        api_key = api_key or os.environ.get("MINIMAX_API_KEY")
+        url = url or os.environ.get(
+            "MINIMAX_API_BASE_URL", "https://api.minimaxi.com/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
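Once the MINIMAX platform is registered in ModelFactory (see the model_factory.py diff below), the new backend is constructed the usual way. A usage sketch; the API key and model id string are placeholders, so substitute whatever your Minimax account exposes:

import os

from camel.models import ModelFactory
from camel.types import ModelPlatformType

os.environ["MINIMAX_API_KEY"] = "sk-..."  # placeholder key

# The model id string is illustrative; `model_type` also accepts a
# plain string for platforms without a dedicated ModelType entry.
minimax_backend = ModelFactory.create(
    model_platform=ModelPlatformType.MINIMAX,
    model_type="MiniMax-Text-01",
)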
camel/models/mistral_model.py
CHANGED

@@ -24,7 +24,7 @@ if TYPE_CHECKING:
 
 from openai import AsyncStream
 
-from camel.configs import MISTRAL_API_PARAMS, MistralConfig
+from camel.configs import MistralConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -414,21 +414,6 @@ class MistralModel(BaseModelBackend):
 
         return request_config
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Mistral API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Mistral API.
-        """
-        for param in self.model_config_dict:
-            if param not in MISTRAL_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Mistral model backend."
-                )
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
camel/models/model_factory.py
CHANGED

@@ -13,14 +13,17 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import json
 import os
-from typing import ClassVar, Dict, Optional, Type, Union
+from typing import Any, ClassVar, Dict, Optional, Type, Union
 
+from camel.models.aihubmix_model import AihubMixModel
 from camel.models.aiml_model import AIMLModel
+from camel.models.amd_model import AMDModel
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.aws_bedrock_model import AWSBedrockModel
 from camel.models.azure_openai_model import AzureOpenAIModel
 from camel.models.base_model import BaseModelBackend
 from camel.models.cohere_model import CohereModel
+from camel.models.cometapi_model import CometAPIModel
 from camel.models.crynux_model import CrynuxModel
 from camel.models.deepseek_model import DeepSeekModel
 from camel.models.gemini_model import GeminiModel
@@ -28,9 +31,11 @@ from camel.models.groq_model import GroqModel
 from camel.models.internlm_model import InternLMModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.lmstudio_model import LMStudioModel
+from camel.models.minimax_model import MinimaxModel
 from camel.models.mistral_model import MistralModel
 from camel.models.modelscope_model import ModelScopeModel
 from camel.models.moonshot_model import MoonshotModel
+from camel.models.nebius_model import NebiusModel
 from camel.models.netmind_model import NetmindModel
 from camel.models.novita_model import NovitaModel
 from camel.models.nvidia_model import NvidiaModel
@@ -76,6 +81,7 @@ class ModelFactory:
         ModelPlatformType.AWS_BEDROCK: AWSBedrockModel,
         ModelPlatformType.NVIDIA: NvidiaModel,
         ModelPlatformType.SILICONFLOW: SiliconFlowModel,
+        ModelPlatformType.AMD: AMDModel,
         ModelPlatformType.AIML: AIMLModel,
         ModelPlatformType.VOLCANO: VolcanoModel,
         ModelPlatformType.NETMIND: NetmindModel,
@@ -83,7 +89,10 @@ class ModelFactory:
         ModelPlatformType.AZURE: AzureOpenAIModel,
         ModelPlatformType.ANTHROPIC: AnthropicModel,
         ModelPlatformType.GROQ: GroqModel,
+        ModelPlatformType.COMETAPI: CometAPIModel,
+        ModelPlatformType.NEBIUS: NebiusModel,
         ModelPlatformType.LMSTUDIO: LMStudioModel,
+        ModelPlatformType.MINIMAX: MinimaxModel,
         ModelPlatformType.OPENROUTER: OpenRouterModel,
         ModelPlatformType.ZHIPU: ZhipuAIModel,
         ModelPlatformType.GEMINI: GeminiModel,
@@ -101,6 +110,7 @@ class ModelFactory:
         ModelPlatformType.WATSONX: WatsonXModel,
         ModelPlatformType.QIANFAN: QianfanModel,
         ModelPlatformType.CRYNUX: CrynuxModel,
+        ModelPlatformType.AIHUBMIX: AihubMixModel,
     }
 
     @staticmethod
@@ -113,6 +123,8 @@
         url: Optional[str] = None,
         timeout: Optional[float] = None,
         max_retries: int = 3,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
         **kwargs,
     ) -> BaseModelBackend:
         r"""Creates an instance of `BaseModelBackend` of the specified type.
@@ -139,6 +151,14 @@
                 for API calls. (default: :obj:`None`)
             max_retries (int, optional): Maximum number of retries
                 for API calls. (default: :obj:`3`)
+            client (Optional[Any], optional): A custom synchronous client
+                instance. Supported by models that use OpenAI-compatible
+                APIs. The client should implement the appropriate client
+                interface for the platform. (default: :obj:`None`)
+            async_client (Optional[Any], optional): A custom asynchronous
+                client instance. Supported by models that use OpenAI-compatible
+                APIs. The client should implement the appropriate async client
+                interface for the platform. (default: :obj:`None`)
             **kwargs: Additional model-specific parameters that will be passed
                 to the model constructor. For example, Azure OpenAI models may
                 require `api_version`, `azure_deployment_name`,
@@ -184,6 +204,12 @@
         if model_class is None:
             raise ValueError(f"Unknown model platform `{model_platform}`")
 
+        # Pass client and async_client via kwargs if provided
+        if client is not None:
+            kwargs['client'] = client
+        if async_client is not None:
+            kwargs['async_client'] = async_client
+
         return model_class(
             model_type=model_type,
             model_config_dict=model_config_dict,
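The new `client` and `async_client` parameters let callers inject preconfigured SDK clients, which `create` forwards to the backend constructor through kwargs. A sketch assuming an OpenAI-compatible backend that accepts `client`; the key and base_url are placeholders:

from openai import OpenAI

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# A preconfigured client, e.g. pointing at a proxy; values are placeholders.
custom_client = OpenAI(api_key="sk-...", base_url="https://example.com/v1")

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
    client=custom_client,  # forwarded to the backend via kwargs
)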
camel/models/model_manager.py
CHANGED

@@ -27,6 +27,10 @@ from typing import (
 )
 
 from openai import AsyncStream, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
@@ -196,7 +200,11 @@
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Process a list of messages by selecting a model based on
         the scheduling strategy.
         Sends the entire list of messages to the selected model,
@@ -207,9 +215,12 @@
                 history in OpenAI API format.
 
         Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+            Union[ChatCompletion, Stream[ChatCompletionChunk],
+            ChatCompletionStreamManager[BaseModel]]:
                 `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode
+                `Stream[ChatCompletionChunk]` in the stream mode, or
+                `ChatCompletionStreamManager[BaseModel]` for
+                structured-output stream.
         """
         self.current_model = self.scheduling_strategy()
 
@@ -233,7 +244,11 @@
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Process a list of messages by selecting a model based on
         the scheduling strategy.
         Sends the entire list of messages to the selected model,
@@ -244,9 +259,12 @@
                 history in OpenAI API format.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+            AsyncChatCompletionStreamManager[BaseModel]]:
                 `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode
+                `AsyncStream[ChatCompletionChunk]` in the stream mode, or
+                `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured-output stream.
         """
         async with self.lock:
             self.current_model = self.scheduling_strategy()
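With the widened return types, callers now have to handle a third shape: a structured-output stream manager. A hedged sketch of dispatching on the result; the method name `run`, the constructor shape, and the Pydantic model are assumptions rather than part of this diff:

from openai.lib.streaming.chat import ChatCompletionStreamManager
from pydantic import BaseModel

from camel.models import ModelFactory, ModelManager
from camel.types import ModelPlatformType, ModelType


class CityInfo(BaseModel):
    name: str
    country: str


# Assumed constructor shape: ModelManager wrapping a single backend.
manager = ModelManager(
    ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O_MINI,
    )
)

messages = [{"role": "user", "content": "Name one city in France."}]
result = manager.run(messages, response_format=CityInfo)

if isinstance(result, ChatCompletionStreamManager):
    # Structured-output streaming: a context manager yielding parse events.
    with result as stream:
        for event in stream:
            print(event.type)
else:
    print(result)  # ChatCompletion, or Stream[ChatCompletionChunk] when streaming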
camel/models/modelscope_model.py
CHANGED

@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import AsyncStream, Stream
 
-from camel.configs import MODELSCOPE_API_PARAMS, ModelScopeConfig
+from camel.configs import ModelScopeConfig
 from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
@@ -261,18 +261,3 @@ class ModelScopeModel(OpenAICompatibleModel):
             **request_config,
         )
         return self._post_handle_response(response)
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to ModelScope API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to ModelScope API.
-        """
-        for param in self.model_config_dict:
-            if param not in MODELSCOPE_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into ModelScope model backend."
-                )