camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_types.py +6 -2
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +4014 -410
- camel/agents/mcp_agent.py +30 -27
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/configs/vllm_config.py +2 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/base_generator.py +39 -10
- camel/environments/__init__.py +12 -0
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +4 -16
- camel/interpreters/docker_interpreter.py +3 -2
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/internal_python_interpreter.py +51 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +9 -0
- camel/loaders/firecrawl_reader.py +4 -4
- camel/logger.py +1 -1
- camel/memories/agent_memories.py +84 -1
- camel/memories/base.py +34 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/blocks/vectordb_block.py +8 -1
- camel/memories/context_creators/score_based.py +29 -237
- camel/memories/records.py +88 -8
- camel/messages/base.py +166 -40
- camel/messages/func_message.py +32 -5
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +117 -18
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +205 -91
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +189 -24
- camel/models/cohere_model.py +5 -17
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +6 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +71 -20
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +49 -32
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/model_manager.py +24 -6
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +185 -19
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +171 -46
- camel/models/openai_model.py +205 -77
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/configs.py +11 -11
- camel/runtimes/daytona_runtime.py +15 -16
- camel/runtimes/docker_runtime.py +6 -6
- camel/runtimes/remote_http_runtime.py +5 -5
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +2 -0
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +249 -38
- camel/societies/workforce/role_playing_worker.py +82 -20
- camel/societies/workforce/single_agent_worker.py +634 -34
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +169 -23
- camel/societies/workforce/utils.py +176 -9
- camel/societies/workforce/worker.py +77 -23
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +3168 -478
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +203 -175
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +4 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/__init__.py +6 -0
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/surreal.py +365 -0
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +244 -27
- camel/toolkits/__init__.py +46 -8
- camel/toolkits/aci_toolkit.py +64 -19
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/base.py +63 -5
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +10 -6
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +901 -67
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +30 -6
- camel/toolkits/github_toolkit.py +107 -20
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/human_toolkit.py +34 -10
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +3 -4
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +370 -45
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/pptx_toolkit.py +25 -12
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +437 -142
- camel/toolkits/slack_toolkit.py +104 -50
- camel/toolkits/sympy_toolkit.py +1 -1
- camel/toolkits/task_planning_toolkit.py +3 -3
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +1 -1
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +106 -26
- camel/toolkits/video_download_toolkit.py +17 -14
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +4 -1
- camel/types/enums.py +316 -40
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +31 -4
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/mcp_client.py +45 -1
- camel/utils/message_summarizer.py +148 -0
- camel/utils/token_counting.py +43 -20
- camel/utils/tool_result.py +44 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/agents/mcp_agent.py
CHANGED
@@ -15,16 +15,25 @@
 import asyncio
 import json
 import platform
-import re
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Union,
+    cast,
+)
 
-from camel.agents import ChatAgent
+from camel.agents.chat_agent import ChatAgent
 from camel.logger import get_logger
 from camel.messages import BaseMessage
-from camel.models import BaseModelBackend
+from camel.models.base_model import BaseModelBackend
+from camel.models.model_factory import ModelFactory
 from camel.prompts import TextPrompt
 from camel.responses import ChatAgentResponse
-from camel.toolkits import FunctionTool
+from camel.toolkits.function_tool import FunctionTool
 from camel.types import (
     BaseMCPRegistryConfig,
     MCPRegistryType,
@@ -33,6 +42,9 @@ from camel.types import (
     RoleType,
 )
 
+if TYPE_CHECKING:
+    from camel.toolkits.mcp_toolkit import MCPToolkit
+
 # AgentOps decorator setting
 try:
     import os
@@ -44,6 +56,8 @@ try:
 except (ImportError, AttributeError):
     from camel.utils import track_agent
 
+from camel.parsers.mcp_tool_call_parser import extract_tool_calls_from_text
+
 logger = get_logger(__name__)
 
 
@@ -168,8 +182,10 @@ class MCPAgent(ChatAgent):
             **kwargs,
         )
 
-    def _initialize_mcp_toolkit(self) -> MCPToolkit:
+    def _initialize_mcp_toolkit(self) -> "MCPToolkit":
         r"""Initialize the MCP toolkit from the provided configuration."""
+        from camel.toolkits.mcp_toolkit import MCPToolkit
+
         config_dict = {}
         for registry_config in self.registry_configs:
             config_dict.update(registry_config.get_config())
@@ -334,27 +350,14 @@
         task = f"## Task:\n {input_message}"
         input_message = str(self._text_tools) + task
         response = await super().astep(input_message, *args, **kwargs)
-
-        content = response.msgs[0].content
-
-        tool_calls: List[Dict[str, Any]] = []
-        while True:
-            json_match = re.search(r'```json', content)
-            if not json_match:
-                break
-            json_start = json_match.span()[1]
-            end_match = re.search(r'```', content[json_start:])
-            if not end_match:
-                break
-            json_end = end_match.span()[0] + json_start
-
-            tool_json = content[json_start:json_end].strip('\n')
-            try:
-                tool_calls.append(json.loads(tool_json))
-            except json.JSONDecodeError:
-                logger.warning(f"Failed to parse JSON: {tool_json}")
-                continue
-            content = content[json_end:]
+        raw_content = response.msgs[0].content if response.msgs else ""
+        content = (
+            raw_content
+            if isinstance(raw_content, str)
+            else str(raw_content)
+        )
+
+        tool_calls = extract_tool_calls_from_text(content)
 
         if not tool_calls:
             return response
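
The substantive change in `astep` is that the ad-hoc fenced-JSON scanning loop moved into the new camel/parsers/mcp_tool_call_parser.py module (+176 lines in the file list above), behind extract_tool_calls_from_text. As a reference sketch only, not the shipped implementation, an extractor with the same contract could look like the following; note it advances past a malformed block before continuing, whereas the removed loop hit `continue` before advancing `content` and could rescan the same block indefinitely:

import json
import re
from typing import Any, Dict, List


def extract_tool_calls_from_text(text: str) -> List[Dict[str, Any]]:
    """Pull every parseable ```json ... ``` block out of model output."""
    tool_calls: List[Dict[str, Any]] = []
    content = text
    while True:
        start = re.search(r'```json', content)
        if not start:
            break
        json_start = start.span()[1]
        end = re.search(r'```', content[json_start:])
        if not end:
            break
        json_end = end.span()[0] + json_start
        block = content[json_start:json_end].strip('\n')
        content = content[json_end:]  # always advance past the block
        try:
            tool_calls.append(json.loads(block))
        except json.JSONDecodeError:
            # Skip malformed blocks instead of aborting the whole parse.
            pass
    return tool_calls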
camel/agents/repo_agent.py
CHANGED
@@ -21,6 +21,7 @@ if TYPE_CHECKING:
     from pydantic import BaseModel
 
 from camel.agents import ChatAgent
+from camel.agents.chat_agent import StreamingChatAgentResponse
 from camel.logger import get_logger
 from camel.messages import BaseMessage
 from camel.models import BaseModelBackend, ModelFactory
@@ -442,7 +443,7 @@ class RepoAgent(ChatAgent):
 
     def step(
         self, input_message: Union[BaseMessage, str], *args, **kwargs
-    ) -> ChatAgentResponse:
+    ) -> Union[ChatAgentResponse, StreamingChatAgentResponse]:
         r"""Overrides `ChatAgent.step()` to first retrieve relevant context
         from the vector store before passing the input to the language model.
         """
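
Since `step()` may now hand back a stream, callers should narrow the type before use. A hypothetical sketch; it assumes StreamingChatAgentResponse yields incremental responses when iterated, which is how the streaming type added in chat_agent.py is meant to be consumed:

from typing import Union

from camel.agents.chat_agent import StreamingChatAgentResponse
from camel.responses import ChatAgentResponse


def consume(
    response: Union[ChatAgentResponse, StreamingChatAgentResponse],
) -> None:
    if isinstance(response, ChatAgentResponse):
        print(response.msg.content)
    else:
        # Assumption: the streaming response is iterable, yielding
        # incremental chunks as the model produces them.
        for chunk in response:
            ...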
camel/benchmarks/browsecomp.py
CHANGED
@@ -619,20 +619,20 @@ class BrowseCompBenchmark(BaseBenchmark):
             assistant_response, user_response = pipeline.step(
                 input_msg
             )
-            if assistant_response.terminated:  # type: ignore[attr-defined]
+            if assistant_response.terminated:  # type: ignore[union-attr]
                 break
-            if user_response.terminated:  # type: ignore[attr-defined]
+            if user_response.terminated:  # type: ignore[union-attr]
                 break
-            if "CAMEL_TASK_DONE" in user_response.msg.content:  # type: ignore[attr-defined]
+            if "CAMEL_TASK_DONE" in user_response.msg.content:  # type: ignore[union-attr]
                 break
 
             chat_history.append(
-                f"AI User: {user_response.msg.content}"  # type: ignore[attr-defined]
+                f"AI User: {user_response.msg.content}"  # type: ignore[union-attr]
             )
             chat_history.append(
-                f"AI Assistant: {assistant_response.msg.content}"  # type: ignore[attr-defined]
+                f"AI Assistant: {assistant_response.msg.content}"  # type: ignore[union-attr]
             )
-            input_msg = assistant_response.msg  # type: ignore[attr-defined]
+            input_msg = assistant_response.msg  # type: ignore[union-attr]
 
         chat_history_str = "\n".join(chat_history)
         if roleplaying_summarizer:
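
For context on the corrected ignore codes: `union-attr` is the code mypy emits when an attribute is accessed on a value whose type is a union that includes `None`, which is exactly what these `.terminated`/`.msg` accesses on possibly-None responses trigger. A minimal reproduction:

from typing import Optional


class Message:
    content: str = ""


class Response:
    msg: Optional[Message] = None


def last_line(resp: Response) -> str:
    # mypy: Item "None" of "Optional[Message]" has no attribute
    # "content"  [union-attr]
    return resp.msg.content  # type: ignore[union-attr]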
camel/configs/__init__.py
CHANGED
@@ -11,11 +11,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from .aihubmix_config import AIHUBMIX_API_PARAMS, AihubMixConfig
 from .aiml_config import AIML_API_PARAMS, AIMLConfig
+from .amd_config import AMD_API_PARAMS, AMDConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig
 from .cohere_config import COHERE_API_PARAMS, CohereConfig
+from .cometapi_config import COMETAPI_API_PARAMS, CometAPIConfig
 from .crynux_config import CRYNUX_API_PARAMS, CrynuxConfig
 from .deepseek_config import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from .gemini_config import Gemini_API_PARAMS, GeminiConfig
@@ -23,9 +26,11 @@ from .groq_config import GROQ_API_PARAMS, GroqConfig
 from .internlm_config import INTERNLM_API_PARAMS, InternLMConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .lmstudio_config import LMSTUDIO_API_PARAMS, LMStudioConfig
+from .minimax_config import MINIMAX_API_PARAMS, MinimaxConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .modelscope_config import MODELSCOPE_API_PARAMS, ModelScopeConfig
 from .moonshot_config import MOONSHOT_API_PARAMS, MoonshotConfig
+from .nebius_config import NEBIUS_API_PARAMS, NebiusConfig
 from .netmind_config import NETMIND_API_PARAMS, NetmindConfig
 from .novita_config import NOVITA_API_PARAMS, NovitaConfig
 from .nvidia_config import NVIDIA_API_PARAMS, NvidiaConfig
@@ -54,10 +59,14 @@ __all__ = [
     'BaseConfig',
     'ChatGPTConfig',
     'OPENAI_API_PARAMS',
+    'AihubMixConfig',
+    'AIHUBMIX_API_PARAMS',
     'AnthropicConfig',
     'ANTHROPIC_API_PARAMS',
     'GROQ_API_PARAMS',
     'GroqConfig',
+    'NEBIUS_API_PARAMS',
+    'NebiusConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
     'NetmindConfig',
@@ -86,6 +95,8 @@ __all__ = [
     'TOGETHERAI_API_PARAMS',
     'CohereConfig',
     'COHERE_API_PARAMS',
+    'CometAPIConfig',
+    'COMETAPI_API_PARAMS',
     'YiConfig',
     'YI_API_PARAMS',
     'QwenConfig',
@@ -108,10 +119,14 @@ __all__ = [
     'SILICONFLOW_API_PARAMS',
     'AIMLConfig',
     'AIML_API_PARAMS',
+    'AMDConfig',
+    'AMD_API_PARAMS',
     'OpenRouterConfig',
     'OPENROUTER_API_PARAMS',
     'LMSTUDIO_API_PARAMS',
     'LMStudioConfig',
+    'MINIMAX_API_PARAMS',
+    'MinimaxConfig',
     'WatsonXConfig',
     'WATSONX_API_PARAMS',
     'QianfanConfig',
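
The net effect is five new config classes (plus their *_API_PARAMS sets) importable straight from camel.configs. A quick smoke test of the new surface:

from camel.configs import (
    AIHUBMIX_API_PARAMS,
    AihubMixConfig,
    AMDConfig,
    CometAPIConfig,
    MinimaxConfig,
    NebiusConfig,
)

# Each *_API_PARAMS set is built from the config's pydantic fields, so
# membership mirrors the class definition:
assert "temperature" in AIHUBMIX_API_PARAMS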
camel/configs/aihubmix_config.py
ADDED
@@ -0,0 +1,88 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Dict, Optional, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class AihubMixConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    AihubMix API.
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.8`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`1024`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim.
+            (default: :obj:`0`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics.
+            (default: :obj:`0`)
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        web_search_options (dict, optional): Search model's web search options,
+            only supported by specific search models.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+        parallel_tool_calls (bool, optional): A parameter specifying whether
+            the model should call tools in parallel or not.
+            (default: :obj:`None`)
+        extra_headers: Optional[Dict[str, str]]: Extra headers to use for the
+            model. (default: :obj:`None`)
+    """
+
+    temperature: Optional[float] = 0.8
+    max_tokens: Optional[int] = 1024
+    top_p: Optional[float] = 1.0
+    frequency_penalty: Optional[float] = 0.0
+    presence_penalty: Optional[float] = 0.0
+    stream: Optional[bool] = False
+    web_search_options: Optional[Dict] = None
+    tool_choice: Optional[Union[Dict[str, str], str]] = None
+    parallel_tool_calls: Optional[bool] = None
+    extra_headers: Optional[Dict[str, str]] = None
+
+
+AIHUBMIX_API_PARAMS = {param for param in AihubMixConfig.model_fields.keys()}
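
Unlike the other four configs added in this release, AihubMixConfig ships non-None defaults, so even an empty constructor produces a complete parameter set. Because the module already relies on pydantic v2's model_fields, model_dump() shows the result directly:

from camel.configs import AihubMixConfig

config = AihubMixConfig(stream=True)
dumped = config.model_dump()
assert dumped["temperature"] == 0.8  # class default survives
assert dumped["stream"] is True      # explicit override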
camel/configs/amd_config.py
ADDED
@@ -0,0 +1,70 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from pydantic import Field
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NotGiven
+
+
+class AMDConfig(BaseConfig):
+    r"""Configuration class for AMD API models.
+
+    This class defines the configuration parameters for AMD's language
+    models, including temperature, sampling parameters, and response format
+    settings.
+
+    Args:
+        stream (bool, optional): Whether to stream the response.
+            (default: :obj:`None`)
+        temperature (float, optional): Controls randomness in the response.
+            Higher values make output more random, lower values make it more
+            deterministic. Range: [0.0, 2.0]. (default: :obj:`None`)
+        top_p (float, optional): Controls diversity via nucleus sampling.
+            Range: [0.0, 1.0]. (default: :obj:`None`)
+        presence_penalty (float, optional): Penalizes new tokens based on
+            whether they appear in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        frequency_penalty (float, optional): Penalizes new tokens based on
+            their frequency in the text so far. Range: [-2.0, 2.0].
+            (default: :obj:`None`)
+        max_tokens (Union[int, NotGiven], optional): Maximum number of tokens
+            to generate. If not provided, model will use its default maximum.
+            (default: :obj:`None`)
+        seed (Optional[int], optional): Random seed for deterministic sampling.
+            (default: :obj:`None`)
+        tools (Optional[List[Dict]], optional): List of tools available to the
+            model. This includes tools such as a text editor, a calculator, or
+            a search engine. (default: :obj:`None`)
+        tool_choice (Optional[str], optional): Tool choice configuration.
+            (default: :obj:`None`)
+        stop (Optional[List[str]], optional): List of stop sequences.
+            (default: :obj:`None`)
+    """
+
+    stream: Optional[bool] = Field(default=None)
+    temperature: Optional[float] = Field(default=None)
+    top_p: Optional[float] = Field(default=None)
+    presence_penalty: Optional[float] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=None)
+    max_tokens: Optional[Union[int, NotGiven]] = Field(default=None)
+    seed: Optional[int] = Field(default=None)
+    tool_choice: Optional[str] = Field(default=None)
+    stop: Optional[List[str]] = Field(default=None)
+
+
+AMD_API_PARAMS = {param for param in AMDConfig.model_fields.keys()}
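
AMD_API_PARAMS serves the same purpose as every other *_API_PARAMS set in this release: backends validate user-supplied config keys against it before calling the API. A minimal illustration of that guard pattern (a sketch in the spirit of camel's check_model_config hooks, not the shipped amd_model.py code):

from camel.configs import AMD_API_PARAMS


def check_model_config(model_config_dict: dict) -> None:
    # Reject any key the AMD endpoint would not understand.
    for param in model_config_dict:
        if param not in AMD_API_PARAMS:
            raise ValueError(
                f"Unexpected argument `{param}` passed to the AMD backend."
            )


check_model_config({"temperature": 0.7, "max_tokens": 256})  # passes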
camel/configs/cometapi_config.py
ADDED
@@ -0,0 +1,104 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class CometAPIConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using CometAPI's
+    OpenAI-compatible interface.
+
+    Reference: https://api.cometapi.com/v1/
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help CometAPI to monitor and detect abuse.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
+    user: Optional[str] = None
+    tools: Optional[list] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+COMETAPI_API_PARAMS = {param for param in CometAPIConfig.model_fields.keys()}
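
The docstring's JSON-mode warning is worth acting on in practice: pair response_format with an explicit instruction to emit JSON, or the model can stream whitespace until it hits the token limit. For example:

from camel.configs import CometAPIConfig

config = CometAPIConfig(
    response_format={"type": "json_object"},
    max_tokens=512,  # bounds the damage if the model still stalls
)
system_message = (
    "Respond only with a JSON object containing the keys "
    "`answer` and `confidence`."
)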
camel/configs/minimax_config.py
ADDED
@@ -0,0 +1,93 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class MinimaxConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility with Minimax.
+
+    Reference: https://api.minimax.chat/document/guides/chat-model
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0.0` and :obj:`1.0`. Higher values make the output more
+            random, while lower values make it more focused and deterministic.
+            Recommended to use :obj:`1.0`. Values outside this range will
+            return an error. (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. Only supports value :obj:`1`.
+            (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length. (default: :obj:`None`)
+        stream (bool, optional): If set, partial message deltas will be sent,
+            like in ChatGPT. Tokens will be sent as data-only server-sent
+            events as they become available, with the stream terminated by
+            a data: [DONE] message. (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help to monitor and detect abuse.
+            (default: :obj:`None`)
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+
+    Note:
+        Some OpenAI parameters such as presence_penalty, frequency_penalty,
+        and logit_bias will be ignored by Minimax.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    response_format: Optional[dict] = None
+    user: Optional[str] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+MINIMAX_API_PARAMS = {param for param in MinimaxConfig.model_fields.keys()}
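
A config that respects the documented Minimax constraints sets only the honored fields: n must stay at 1, and the penalty parameters are left out entirely because the Note above says Minimax ignores them:

from camel.configs import MinimaxConfig

config = MinimaxConfig(
    temperature=1.0,  # the docstring's recommended value
    n=1,              # only a single completion choice is supported
    stream=True,
)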
camel/configs/nebius_config.py
ADDED
@@ -0,0 +1,103 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class NebiusConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility with Nebius AI Studio.
+
+    Reference: https://nebius.com/docs/ai-studio/api
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
+    user: Optional[str] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+NEBIUS_API_PARAMS = {param for param in NebiusConfig.model_fields.keys()}
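
One small observation that applies to all five new config modules: the closing comprehension is simply set() over the model fields, so each *_API_PARAMS set tracks its class automatically, inherited BaseConfig fields included:

from camel.configs import NEBIUS_API_PARAMS, NebiusConfig

assert NEBIUS_API_PARAMS == set(NebiusConfig.model_fields)
assert {"temperature", "top_p", "max_tokens", "tool_choice"} <= NEBIUS_API_PARAMS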