camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_types.py +6 -2
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +4014 -410
- camel/agents/mcp_agent.py +30 -27
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/configs/vllm_config.py +2 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/base_generator.py +39 -10
- camel/environments/__init__.py +12 -0
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +4 -16
- camel/interpreters/docker_interpreter.py +3 -2
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/internal_python_interpreter.py +51 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +9 -0
- camel/loaders/firecrawl_reader.py +4 -4
- camel/logger.py +1 -1
- camel/memories/agent_memories.py +84 -1
- camel/memories/base.py +34 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/blocks/vectordb_block.py +8 -1
- camel/memories/context_creators/score_based.py +29 -237
- camel/memories/records.py +88 -8
- camel/messages/base.py +166 -40
- camel/messages/func_message.py +32 -5
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +117 -18
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +205 -91
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +189 -24
- camel/models/cohere_model.py +5 -17
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +6 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +71 -20
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +49 -32
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/model_manager.py +24 -6
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +185 -19
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +171 -46
- camel/models/openai_model.py +205 -77
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/configs.py +11 -11
- camel/runtimes/daytona_runtime.py +15 -16
- camel/runtimes/docker_runtime.py +6 -6
- camel/runtimes/remote_http_runtime.py +5 -5
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +2 -0
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +249 -38
- camel/societies/workforce/role_playing_worker.py +82 -20
- camel/societies/workforce/single_agent_worker.py +634 -34
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +169 -23
- camel/societies/workforce/utils.py +176 -9
- camel/societies/workforce/worker.py +77 -23
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +3168 -478
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +203 -175
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +4 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/__init__.py +6 -0
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/surreal.py +365 -0
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +244 -27
- camel/toolkits/__init__.py +46 -8
- camel/toolkits/aci_toolkit.py +64 -19
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/base.py +63 -5
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +10 -6
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +901 -67
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +30 -6
- camel/toolkits/github_toolkit.py +107 -20
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/human_toolkit.py +34 -10
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +3 -4
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +370 -45
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/pptx_toolkit.py +25 -12
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +437 -142
- camel/toolkits/slack_toolkit.py +104 -50
- camel/toolkits/sympy_toolkit.py +1 -1
- camel/toolkits/task_planning_toolkit.py +3 -3
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +1 -1
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +106 -26
- camel/toolkits/video_download_toolkit.py +17 -14
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +4 -1
- camel/types/enums.py +316 -40
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +31 -4
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/mcp_client.py +45 -1
- camel/utils/message_summarizer.py +148 -0
- camel/utils/token_counting.py +43 -20
- camel/utils/tool_result.py +44 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/models/base_audio_model.py
CHANGED
```diff
@@ -16,6 +16,8 @@ import os
 from abc import ABC, abstractmethod
 from typing import Any, Optional
 
+from camel.utils import Constants
+
 
 class BaseAudioModel(ABC):
     r"""Base class for audio models providing Text-to-Speech (TTS) and
@@ -26,7 +28,7 @@ class BaseAudioModel(ABC):
         self,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
-        timeout: Optional[float] = None,
+        timeout: Optional[float] = Constants.TIMEOUT_THRESHOLD,
     ) -> None:
         r"""Initialize an instance of BaseAudioModel.
 
```
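The behavioral change here: audio backends constructed without an explicit `timeout` now default to the shared `Constants.TIMEOUT_THRESHOLD` (defined in `camel/utils/constants.py`, also touched in this release) instead of having no timeout. A minimal sketch; the concrete threshold value is not shown in this diff:

```python
from camel.utils import Constants

# Any BaseAudioModel subclass built without timeout= now inherits this
# package-wide default rather than None.
print(Constants.TIMEOUT_THRESHOLD)
```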
camel/models/base_model.py
CHANGED
```diff
@@ -12,13 +12,19 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import abc
+import os
 import re
 from abc import ABC, abstractmethod
 from typing import Any, Dict, List, Optional, Type, Union
 
 from openai import AsyncStream, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
+from camel.logger import get_logger as camel_get_logger
 from camel.messages import OpenAIMessage
 from camel.types import (
     ChatCompletion,
@@ -27,7 +33,22 @@ from camel.types import (
     ParsedChatCompletion,
     UnifiedModelType,
 )
-from camel.utils import BaseTokenCounter
+from camel.utils import BaseTokenCounter, Constants
+
+if os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import get_logger  # type: ignore[import]
+        from traceroot import trace as observe  # type: ignore[import]
+
+        logger = get_logger('base_model')
+    except ImportError:
+        from camel.utils import observe
+
+        logger = camel_get_logger('base_model')
+else:
+    from camel.utils import observe
+
+    logger = camel_get_logger('base_model')
 
 
 class ModelBackendMeta(abc.ABCMeta):
@@ -82,7 +103,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
-        timeout: Optional[float] = None,
+        timeout: Optional[float] = Constants.TIMEOUT_THRESHOLD,
         max_retries: int = 3,
     ) -> None:
         self.model_type: UnifiedModelType = UnifiedModelType(model_type)
@@ -94,7 +115,12 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         self._token_counter = token_counter
         self._timeout = timeout
         self._max_retries = max_retries
-
+        # Initialize logging configuration
+        self._log_enabled = (
+            os.environ.get("CAMEL_MODEL_LOG_ENABLED", "False").lower()
+            == "true"
+        )
+        self._log_dir = os.environ.get("CAMEL_LOG_DIR", "camel_logs")
 
     @property
     @abstractmethod
@@ -232,13 +258,96 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
 
         return formatted_messages
 
+    def _log_request(self, messages: List[OpenAIMessage]) -> Optional[str]:
+        r"""Log the request messages to a JSON file if logging is enabled.
+
+        Args:
+            messages (List[OpenAIMessage]): The messages to log.
+
+        Returns:
+            Optional[str]: The path to the log file if logging is enabled,
+                None otherwise.
+        """
+        if not self._log_enabled:
+            return None
+
+        import json
+        from datetime import datetime
+
+        os.makedirs(self._log_dir, exist_ok=True)
+
+        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
+        log_file_path = os.path.join(self._log_dir, f"conv_{timestamp}.json")
+
+        log_entry = {
+            "request_timestamp": datetime.now().isoformat(),
+            "model": str(self.model_type),
+            "request": {"messages": messages},
+        }
+
+        with open(log_file_path, "w") as f:
+            json.dump(log_entry, f, indent=4)
+
+        return log_file_path
+
+    def _log_response(self, log_path: str, response: Any) -> None:
+        r"""Log the response to the existing log file.
+
+        Args:
+            log_path (str): The path to the log file.
+            response (Any): The response to log.
+        """
+        if not self._log_enabled or not log_path:
+            return
+
+        import json
+        from datetime import datetime
+
+        with open(log_path, "r+") as f:
+            log_data = json.load(f)
+
+            log_data["response_timestamp"] = datetime.now().isoformat()
+            if isinstance(response, BaseModel):
+                log_data["response"] = response.model_dump()
+            else:
+                try:
+                    json.dumps(response)
+                    log_data["response"] = response
+                except TypeError:
+                    log_data["response"] = str(response)
+
+            f.seek(0)
+            json.dump(log_data, f, indent=4)
+            f.truncate()
+
     @abstractmethod
     def _run(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
+        r"""Runs the query to the backend model in a non-stream mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode,
+                or `ChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
+        """
         pass
 
     @abstractmethod
@@ -247,15 +356,41 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
+        r"""Runs the query to the backend model in async non-stream mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode,
+                or `AsyncChatCompletionStreamManager[BaseModel]` in the
+                structured stream mode.
+        """
         pass
 
+    @observe()
     def run(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs the query to the backend model.
 
         Args:
@@ -269,24 +404,47 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                 (default: :obj:`None`)
 
         Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode,
-                `Stream[ChatCompletionChunk]` in the stream mode
+            Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode,
+                `Stream[ChatCompletionChunk]` in the stream mode, or
+                `ChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
         """
+        # Log the request if logging is enabled
+        log_path = self._log_request(messages)
+
         # None -> use default tools
         if tools is None:
             tools = self.model_config_dict.get("tools", None)
         # Empty -> use no tools
         elif not tools:
             tools = None
-        return self._run(messages, response_format, tools)
 
+        logger.info("Running model: %s", self.model_type)
+        logger.info("Messages: %s", messages)
+        logger.info("Response format: %s", response_format)
+        logger.info("Tools: %s", tools)
+
+        result = self._run(messages, response_format, tools)
+        logger.info("Result: %s", result)
+
+        # Log the response if logging is enabled
+        if log_path:
+            self._log_response(log_path, result)
+
+        return result
+
+    @observe()
     async def arun(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs the query to the backend model asynchronously.
 
         Args:
@@ -300,26 +458,33 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
                 (default: :obj:`None`)
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode,
-                `AsyncStream[ChatCompletionChunk]` in the stream mode
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
+                `ChatCompletion` in the non-stream mode,
+                `AsyncStream[ChatCompletionChunk]` in the stream mode, or
+                `AsyncChatCompletionStreamManager[BaseModel]` in the structured
+                stream mode.
         """
+        # Log the request if logging is enabled
+        log_path = self._log_request(messages)
+
         if tools is None:
             tools = self.model_config_dict.get("tools", None)
         elif not tools:
             tools = None
-        return await self._arun(messages, response_format, tools)
 
-
-
-
-
+        logger.info("Running model: %s", self.model_type)
+        logger.info("Messages: %s", messages)
+        logger.info("Response format: %s", response_format)
+        logger.info("Tools: %s", tools)
 
-
-
-
-
-
+        result = await self._arun(messages, response_format, tools)
+        logger.info("Result: %s", result)
+
+        # Log the response if logging is enabled
+        if log_path:
+            self._log_response(log_path, result)
+
+        return result
 
     def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
         r"""Count the number of tokens in the messages using the specific
```
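Taken together, the hunks above make request/response logging opt-in via two environment variables read in `BaseModelBackend.__init__`. A minimal usage sketch based only on what the diff shows:

```python
import os

# Both variables are read at backend construction time, so set them first.
os.environ["CAMEL_MODEL_LOG_ENABLED"] = "true"
os.environ["CAMEL_LOG_DIR"] = "./camel_logs"  # optional; defaults to "camel_logs"

# Every subsequent run()/arun() call writes a conv_<timestamp>.json file with
# the request messages, then appends the response once the call returns
# (model_dump() for pydantic objects, str() for non-JSON-serializable values).
```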
camel/models/cohere_model.py
CHANGED
```diff
@@ -21,9 +21,12 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
 from pydantic import BaseModel
 
 if TYPE_CHECKING:
-    from cohere.types import ChatMessageV2, ChatResponse
+    from cohere.types import (  # type: ignore[attr-defined]
+        ChatMessageV2,
+        ChatResponse,
+    )
 
-from camel.configs import COHERE_API_PARAMS, CohereConfig
+from camel.configs import CohereConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.models._utils import try_modify_message_with_format
@@ -451,21 +454,6 @@ class CohereModel(BaseModelBackend):
 
         return openai_response
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to Cohere API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Cohere API.
-        """
-        for param in self.model_config_dict:
-            if param not in COHERE_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Cohere model backend."
-                )
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
```
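The removed `check_model_config` follows a pattern repeated in the crynux, deepseek, and gemini diffs below: per-backend validation of `model_config_dict` against a `*_API_PARAMS` allowlist is gone. A sketch of the behavioral difference; the new pass-through behavior is inferred, not shown in this diff:

```python
model = CohereModel(
    model_type="command-r",  # placeholder model id
    model_config_dict={"bogus_param": 1},
)
# 0.2.67: model.check_model_config() raised
#   ValueError: Unexpected argument `bogus_param` is input into Cohere model backend.
# 0.2.80a2: the method no longer exists, so unknown config keys presumably
# travel with the request and are rejected, if at all, by the provider instead.
```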
camel/models/cometapi_model.py
ADDED
```diff
@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import CometAPIConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class CometAPIModel(OpenAICompatibleModel):
+    r"""LLM API served by CometAPI in a unified OpenAICompatibleModel
+    interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`CometAPIConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the CometAPI service. (default: :obj:`None`).
+        url (Optional[str], optional): The url to the CometAPI service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "COMETAPI_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = CometAPIConfig().as_dict()
+        api_key = api_key or os.environ.get("COMETAPI_KEY")
+        url = url or os.environ.get(
+            "COMETAPI_API_BASE_URL", "https://api.cometapi.com/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
```
camel/models/crynux_model.py
CHANGED
```diff
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import CRYNUX_API_PARAMS, CrynuxConfig
+from camel.configs import CrynuxConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -85,18 +85,3 @@ class CrynuxModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Crynux API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Crynux API.
-        """
-        for param in self.model_config_dict:
-            if param not in CRYNUX_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Crynux model backend."
-                )
```
camel/models/deepseek_model.py
CHANGED
```diff
@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from openai import AsyncStream, Stream
 from pydantic import BaseModel
 
-from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
+from camel.configs import DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models._utils import try_modify_message_with_format
@@ -40,6 +40,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
@@ -282,18 +287,3 @@ class DeepSeekModel(OpenAICompatibleModel):
         )
 
         return self._post_handle_response(response)
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to DeepSeek API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to DeepSeek API.
-        """
-        for param in self.model_config_dict:
-            if param not in DEEPSEEK_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into DeepSeek model backend."
-                )
```
camel/models/fish_audio_model.py
CHANGED
```diff
@@ -44,6 +44,12 @@ class FishAudioModel(BaseAudioModel):
         self._url = url or os.environ.get(
             "FISHAUDIO_API_BASE_URL", "https://api.fish.audio"
         )
+        if self._api_key is None:
+            raise ValueError(
+                "API key is required for FishAudio. Please provide it via "
+                "the 'api_key' parameter or set the 'FISHAUDIO_API_KEY' "
+                "environment variable."
+            )
         self.session = Session(apikey=self._api_key, base_url=self._url)
 
     def text_to_speech(
```
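A missing key now fails fast in the constructor instead of surfacing later inside the SDK `Session`. A minimal sketch:

```python
import os

os.environ["FISHAUDIO_API_KEY"] = "..."  # or pass api_key= explicitly

from camel.models.fish_audio_model import FishAudioModel

model = FishAudioModel()  # raises ValueError immediately if no key is found
```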
camel/models/gemini_model.py
CHANGED
```diff
@@ -17,7 +17,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from openai import AsyncStream, Stream
 from pydantic import BaseModel
 
-from camel.configs import Gemini_API_PARAMS, GeminiConfig
+from camel.configs import GeminiConfig
 from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
@@ -37,6 +37,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
@@ -107,13 +112,46 @@ class GeminiModel(OpenAICompatibleModel):
 
     def _process_messages(self, messages) -> List[OpenAIMessage]:
         r"""Process the messages for Gemini API to ensure no empty content,
-        which is not accepted by Gemini.
+        which is not accepted by Gemini. Also preserves thought signatures
+        required for Gemini 3 Pro function calling and adds fallback signatures
+        when they are missing.
         """
+        import copy
+
         processed_messages = []
         for msg in messages:
-            msg_copy = msg.copy()
+            # Use deep copy to preserve all nested structures including
+            # thought signatures in extra_content
+            msg_copy = copy.deepcopy(msg)
             if 'content' in msg_copy and msg_copy['content'] == '':
                 msg_copy['content'] = 'null'
+
+            # Handle missing thought signatures for function calls
+            # This is required for Gemini 3 Pro compatibility
+            # TODO: support multi round thought signatures
+            if (
+                msg_copy.get('role') == 'assistant'
+                and 'tool_calls' in msg_copy
+                and isinstance(msg_copy['tool_calls'], list)
+            ):
+                for i, tool_call in enumerate(msg_copy['tool_calls']):
+                    # Check if this is the first tool call in a parallel set
+                    # or any tool call that's missing a thought signature
+                    if i == 0:  # First tool call should have a signature
+                        # Check if thought signature is missing
+                        extra_content = tool_call.get('extra_content', {})
+                        google_content = extra_content.get('google', {})
+
+                        if 'thought_signature' not in google_content:
+                            # Add fallback signature for missing signatures
+                            if 'extra_content' not in tool_call:
+                                tool_call['extra_content'] = {}
+                            if 'google' not in tool_call['extra_content']:
+                                tool_call['extra_content']['google'] = {}
+                            tool_call['extra_content']['google'][
+                                'thought_signature'
+                            ] = "skip_thought_signature_validator"
+
             processed_messages.append(msg_copy)
         return processed_messages
 
@@ -238,7 +276,7 @@ class GeminiModel(OpenAICompatibleModel):
             function_dict = tool.get('function', {})
             function_dict.pop("strict", None)
 
-            # Process parameters to remove anyOf
+            # Process parameters to remove anyOf and handle enum/format
             if 'parameters' in function_dict:
                 params = function_dict['parameters']
                 if 'properties' in params:
@@ -255,6 +293,20 @@ class GeminiModel(OpenAICompatibleModel):
                             'description'
                         ] = prop_value['description']
 
+                        # Handle enum and format restrictions for Gemini
+                        # API enum: only allowed for string type
+                        if prop_value.get('type') != 'string':
+                            prop_value.pop('enum', None)
+
+                        # format: only allowed for string, integer, and
+                        # number types
+                        if prop_value.get('type') not in [
+                            'string',
+                            'integer',
+                            'number',
+                        ]:
+                            prop_value.pop('format', None)
+
         request_config["tools"] = tools
 
         return self._client.chat.completions.create(
@@ -278,7 +330,7 @@ class GeminiModel(OpenAICompatibleModel):
             function_dict = tool.get('function', {})
             function_dict.pop("strict", None)
 
-            # Process parameters to remove anyOf
+            # Process parameters to remove anyOf and handle enum/format
             if 'parameters' in function_dict:
                 params = function_dict['parameters']
                 if 'properties' in params:
@@ -295,6 +347,20 @@ class GeminiModel(OpenAICompatibleModel):
                             'description'
                         ] = prop_value['description']
 
+                        # Handle enum and format restrictions for Gemini
+                        # API enum: only allowed for string type
+                        if prop_value.get('type') != 'string':
+                            prop_value.pop('enum', None)
+
+                        # format: only allowed for string, integer, and
+                        # number types
+                        if prop_value.get('type') not in [
+                            'string',
+                            'integer',
+                            'number',
+                        ]:
+                            prop_value.pop('format', None)
+
         request_config["tools"] = tools
 
         return await self._async_client.chat.completions.create(
@@ -302,18 +368,3 @@ class GeminiModel(OpenAICompatibleModel):
             model=self.model_type,
             **request_config,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Gemini API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Gemini API.
-        """
-        for param in self.model_config_dict:
-            if param not in Gemini_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Gemini model backend."
-                )
```