camel-ai 0.2.8__py3-none-any.whl → 0.2.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai has been flagged as potentially problematic; see the registry's security advisory for details.
- camel/__init__.py +5 -5
- camel/agents/__init__.py +4 -4
- camel/agents/base.py +4 -4
- camel/agents/chat_agent.py +32 -37
- camel/agents/critic_agent.py +4 -4
- camel/agents/deductive_reasoner_agent.py +4 -4
- camel/agents/embodied_agent.py +4 -4
- camel/agents/knowledge_graph_agent.py +4 -4
- camel/agents/role_assignment_agent.py +4 -4
- camel/agents/search_agent.py +4 -4
- camel/agents/task_agent.py +4 -4
- camel/agents/tool_agents/__init__.py +4 -4
- camel/agents/tool_agents/base.py +4 -4
- camel/agents/tool_agents/hugging_face_tool_agent.py +4 -4
- camel/bots/__init__.py +4 -4
- camel/bots/discord_app.py +4 -4
- camel/bots/slack/__init__.py +4 -4
- camel/bots/slack/models.py +4 -4
- camel/bots/slack/slack_app.py +4 -4
- camel/bots/telegram_bot.py +4 -4
- camel/configs/__init__.py +10 -4
- camel/configs/anthropic_config.py +4 -4
- camel/configs/base_config.py +4 -4
- camel/configs/cohere_config.py +76 -0
- camel/configs/deepseek_config.py +134 -0
- camel/configs/gemini_config.py +85 -127
- camel/configs/groq_config.py +4 -4
- camel/configs/litellm_config.py +4 -4
- camel/configs/mistral_config.py +4 -4
- camel/configs/ollama_config.py +4 -4
- camel/configs/openai_config.py +32 -7
- camel/configs/qwen_config.py +4 -4
- camel/configs/reka_config.py +4 -4
- camel/configs/samba_config.py +4 -4
- camel/configs/togetherai_config.py +4 -4
- camel/configs/vllm_config.py +4 -4
- camel/configs/yi_config.py +4 -4
- camel/configs/zhipuai_config.py +4 -4
- camel/embeddings/__init__.py +6 -4
- camel/embeddings/base.py +4 -4
- camel/embeddings/mistral_embedding.py +4 -4
- camel/embeddings/openai_compatible_embedding.py +91 -0
- camel/embeddings/openai_embedding.py +4 -4
- camel/embeddings/sentence_transformers_embeddings.py +4 -4
- camel/embeddings/vlm_embedding.py +4 -4
- camel/generators.py +4 -4
- camel/human.py +4 -4
- camel/interpreters/__init__.py +4 -4
- camel/interpreters/base.py +4 -4
- camel/interpreters/docker_interpreter.py +4 -4
- camel/interpreters/internal_python_interpreter.py +4 -4
- camel/interpreters/interpreter_error.py +4 -4
- camel/interpreters/ipython_interpreter.py +4 -4
- camel/interpreters/subprocess_interpreter.py +4 -4
- camel/loaders/__init__.py +4 -4
- camel/loaders/apify_reader.py +4 -4
- camel/loaders/base_io.py +4 -4
- camel/loaders/chunkr_reader.py +4 -4
- camel/loaders/firecrawl_reader.py +4 -4
- camel/loaders/jina_url_reader.py +4 -4
- camel/loaders/unstructured_io.py +4 -4
- camel/memories/__init__.py +4 -4
- camel/memories/agent_memories.py +4 -4
- camel/memories/base.py +4 -4
- camel/memories/blocks/__init__.py +4 -4
- camel/memories/blocks/chat_history_block.py +4 -4
- camel/memories/blocks/vectordb_block.py +4 -4
- camel/memories/context_creators/__init__.py +4 -4
- camel/memories/context_creators/score_based.py +4 -4
- camel/memories/records.py +4 -4
- camel/messages/__init__.py +20 -4
- camel/messages/base.py +108 -4
- camel/messages/conversion/__init__.py +29 -0
- camel/messages/conversion/models.py +178 -0
- camel/messages/conversion/sharegpt/__init__.py +20 -0
- camel/messages/conversion/sharegpt/function_call_formatter.py +49 -0
- camel/messages/conversion/sharegpt/hermes/__init__.py +19 -0
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +128 -0
- camel/messages/func_message.py +50 -4
- camel/models/__init__.py +8 -4
- camel/models/anthropic_model.py +4 -4
- camel/models/azure_openai_model.py +4 -4
- camel/models/base_model.py +4 -4
- camel/models/cohere_model.py +282 -0
- camel/models/deepseek_model.py +139 -0
- camel/models/gemini_model.py +61 -146
- camel/models/groq_model.py +4 -4
- camel/models/litellm_model.py +4 -4
- camel/models/mistral_model.py +4 -4
- camel/models/model_factory.py +10 -4
- camel/models/nemotron_model.py +4 -4
- camel/models/ollama_model.py +4 -4
- camel/models/openai_audio_models.py +4 -4
- camel/models/openai_compatible_model.py +4 -4
- camel/models/openai_model.py +42 -4
- camel/models/qwen_model.py +4 -4
- camel/models/reka_model.py +4 -4
- camel/models/samba_model.py +6 -5
- camel/models/stub_model.py +4 -4
- camel/models/togetherai_model.py +4 -4
- camel/models/vllm_model.py +4 -4
- camel/models/yi_model.py +4 -4
- camel/models/zhipuai_model.py +4 -4
- camel/personas/__init__.py +17 -0
- camel/personas/persona.py +103 -0
- camel/personas/persona_hub.py +293 -0
- camel/prompts/__init__.py +6 -4
- camel/prompts/ai_society.py +4 -4
- camel/prompts/base.py +4 -4
- camel/prompts/code.py +4 -4
- camel/prompts/evaluation.py +4 -4
- camel/prompts/generate_text_embedding_data.py +4 -4
- camel/prompts/image_craft.py +4 -4
- camel/prompts/misalignment.py +4 -4
- camel/prompts/multi_condition_image_craft.py +4 -4
- camel/prompts/object_recognition.py +4 -4
- camel/prompts/persona_hub.py +61 -0
- camel/prompts/prompt_templates.py +4 -4
- camel/prompts/role_description_prompt_template.py +4 -4
- camel/prompts/solution_extraction.py +4 -4
- camel/prompts/task_prompt_template.py +4 -4
- camel/prompts/translation.py +4 -4
- camel/prompts/video_description_prompt.py +4 -4
- camel/responses/__init__.py +4 -4
- camel/responses/agent_responses.py +4 -4
- camel/retrievers/__init__.py +4 -4
- camel/retrievers/auto_retriever.py +4 -4
- camel/retrievers/base.py +4 -4
- camel/retrievers/bm25_retriever.py +4 -4
- camel/retrievers/cohere_rerank_retriever.py +7 -9
- camel/retrievers/vector_retriever.py +4 -4
- camel/runtime/__init__.py +29 -0
- camel/runtime/api.py +93 -0
- camel/runtime/base.py +45 -0
- camel/runtime/configs.py +56 -0
- camel/runtime/docker_runtime.py +404 -0
- camel/runtime/llm_guard_runtime.py +199 -0
- camel/runtime/remote_http_runtime.py +204 -0
- camel/runtime/utils/__init__.py +20 -0
- camel/runtime/utils/function_risk_toolkit.py +58 -0
- camel/runtime/utils/ignore_risk_toolkit.py +72 -0
- camel/schemas/__init__.py +17 -0
- camel/schemas/base.py +45 -0
- camel/schemas/openai_converter.py +116 -0
- camel/societies/__init__.py +4 -4
- camel/societies/babyagi_playing.py +4 -4
- camel/societies/role_playing.py +7 -14
- camel/societies/workforce/__init__.py +4 -4
- camel/societies/workforce/base.py +4 -4
- camel/societies/workforce/prompts.py +4 -4
- camel/societies/workforce/role_playing_worker.py +4 -4
- camel/societies/workforce/single_agent_worker.py +4 -4
- camel/societies/workforce/task_channel.py +4 -4
- camel/societies/workforce/utils.py +4 -4
- camel/societies/workforce/worker.py +4 -4
- camel/societies/workforce/workforce.py +7 -7
- camel/storages/__init__.py +4 -4
- camel/storages/graph_storages/__init__.py +4 -4
- camel/storages/graph_storages/base.py +4 -4
- camel/storages/graph_storages/graph_element.py +4 -4
- camel/storages/graph_storages/nebula_graph.py +4 -4
- camel/storages/graph_storages/neo4j_graph.py +4 -4
- camel/storages/key_value_storages/__init__.py +4 -4
- camel/storages/key_value_storages/base.py +4 -4
- camel/storages/key_value_storages/in_memory.py +4 -4
- camel/storages/key_value_storages/json.py +4 -4
- camel/storages/key_value_storages/redis.py +4 -4
- camel/storages/object_storages/__init__.py +4 -4
- camel/storages/object_storages/amazon_s3.py +4 -4
- camel/storages/object_storages/azure_blob.py +4 -4
- camel/storages/object_storages/base.py +4 -4
- camel/storages/object_storages/google_cloud.py +4 -4
- camel/storages/vectordb_storages/__init__.py +4 -4
- camel/storages/vectordb_storages/base.py +4 -4
- camel/storages/vectordb_storages/milvus.py +4 -4
- camel/storages/vectordb_storages/qdrant.py +4 -4
- camel/tasks/__init__.py +4 -4
- camel/tasks/task.py +4 -4
- camel/tasks/task_prompt.py +4 -4
- camel/terminators/__init__.py +4 -4
- camel/terminators/base.py +4 -4
- camel/terminators/response_terminator.py +4 -4
- camel/terminators/token_limit_terminator.py +4 -4
- camel/toolkits/__init__.py +13 -17
- camel/toolkits/arxiv_toolkit.py +4 -4
- camel/toolkits/ask_news_toolkit.py +7 -18
- camel/toolkits/base.py +4 -4
- camel/toolkits/code_execution.py +20 -7
- camel/toolkits/dalle_toolkit.py +4 -7
- camel/toolkits/data_commons_toolkit.py +4 -4
- camel/toolkits/function_tool.py +220 -69
- camel/toolkits/github_toolkit.py +4 -4
- camel/toolkits/google_maps_toolkit.py +4 -4
- camel/toolkits/google_scholar_toolkit.py +4 -4
- camel/toolkits/human_toolkit.py +52 -0
- camel/toolkits/linkedin_toolkit.py +4 -4
- camel/toolkits/math_toolkit.py +4 -7
- camel/toolkits/notion_toolkit.py +4 -4
- camel/toolkits/open_api_specs/biztoc/__init__.py +4 -4
- camel/toolkits/open_api_specs/coursera/__init__.py +4 -4
- camel/toolkits/open_api_specs/create_qr_code/__init__.py +4 -4
- camel/toolkits/open_api_specs/klarna/__init__.py +4 -4
- camel/toolkits/open_api_specs/nasa_apod/__init__.py +4 -4
- camel/toolkits/open_api_specs/outschool/__init__.py +4 -4
- camel/toolkits/open_api_specs/outschool/paths/__init__.py +4 -4
- camel/toolkits/open_api_specs/outschool/paths/get_classes.py +4 -4
- camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +4 -4
- camel/toolkits/open_api_specs/security_config.py +4 -4
- camel/toolkits/open_api_specs/speak/__init__.py +4 -4
- camel/toolkits/open_api_specs/web_scraper/__init__.py +4 -4
- camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +4 -4
- camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +4 -4
- camel/toolkits/open_api_toolkit.py +4 -4
- camel/toolkits/reddit_toolkit.py +4 -4
- camel/toolkits/retrieval_toolkit.py +4 -4
- camel/toolkits/search_toolkit.py +49 -29
- camel/toolkits/slack_toolkit.py +4 -4
- camel/toolkits/twitter_toolkit.py +10 -13
- camel/toolkits/video_toolkit.py +211 -0
- camel/toolkits/weather_toolkit.py +4 -7
- camel/toolkits/whatsapp_toolkit.py +6 -6
- camel/types/__init__.py +6 -4
- camel/types/enums.py +77 -7
- camel/types/openai_types.py +6 -4
- camel/types/unified_model_type.py +9 -4
- camel/utils/__init__.py +35 -33
- camel/utils/async_func.py +4 -4
- camel/utils/commons.py +4 -4
- camel/utils/constants.py +4 -4
- camel/utils/response_format.py +63 -0
- camel/utils/token_counting.py +4 -4
- {camel_ai-0.2.8.dist-info → camel_ai-0.2.10.dist-info}/METADATA +108 -56
- camel_ai-0.2.10.dist-info/RECORD +246 -0
- camel_ai-0.2.8.dist-info/RECORD +0 -215
- {camel_ai-0.2.8.dist-info → camel_ai-0.2.10.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.8.dist-info → camel_ai-0.2.10.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,282 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
import ast
|
|
15
|
+
import json
|
|
16
|
+
import logging
|
|
17
|
+
import os
|
|
18
|
+
import uuid
|
|
19
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
|
20
|
+
|
|
21
|
+
if TYPE_CHECKING:
|
|
22
|
+
from cohere.types import ChatMessageV2, ChatResponse
|
|
23
|
+
|
|
24
|
+
from camel.configs import COHERE_API_PARAMS, CohereConfig
|
|
25
|
+
from camel.messages import OpenAIMessage
|
|
26
|
+
from camel.models import BaseModelBackend
|
|
27
|
+
from camel.types import ChatCompletion, ModelType
|
|
28
|
+
from camel.utils import (
|
|
29
|
+
BaseTokenCounter,
|
|
30
|
+
OpenAITokenCounter,
|
|
31
|
+
api_keys_required,
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
try:
|
|
35
|
+
if os.getenv("AGENTOPS_API_KEY") is not None:
|
|
36
|
+
from agentops import LLMEvent, record
|
|
37
|
+
else:
|
|
38
|
+
raise ImportError
|
|
39
|
+
except (ImportError, AttributeError):
|
|
40
|
+
LLMEvent = None
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class CohereModel(BaseModelBackend):
    r"""Cohere API in a unified BaseModelBackend interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            of parameters forwarded to the Cohere chat endpoint. If
            :obj:`None`, :obj:`CohereConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating
            with the Cohere service. Falls back to the
            :obj:`COHERE_API_KEY` environment variable. (default: :obj:`None`)
        url (Optional[str], optional): The url to the Cohere service. Falls
            back to the :obj:`COHERE_API_BASE_URL` environment variable.
            (default: :obj:`None`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter
            to use for the model. If not provided, :obj:`OpenAITokenCounter`
            will be used. (default: :obj:`None`)
    """

    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ):
        # Imported lazily so the package stays importable without the
        # optional `cohere` dependency installed.
        import cohere

        if model_config_dict is None:
            model_config_dict = CohereConfig().as_dict()

        api_key = api_key or os.environ.get("COHERE_API_KEY")
        url = url or os.environ.get("COHERE_API_BASE_URL")
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
        self._client = cohere.ClientV2(api_key=self._api_key)

    def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
        r"""Convert a Cohere ``ChatResponse`` into an OpenAI-style
        :obj:`ChatCompletion`.

        Args:
            response ('ChatResponse'): The raw response returned by the
                Cohere v2 chat endpoint.

        Returns:
            ChatCompletion: The response mapped onto the OpenAI schema.
        """
        if response.usage and response.usage.tokens:
            input_tokens = response.usage.tokens.input_tokens or 0
            output_tokens = response.usage.tokens.output_tokens or 0
            usage = {
                "prompt_tokens": input_tokens,
                "completion_tokens": output_tokens,
                "total_tokens": input_tokens + output_tokens,
            }
        else:
            usage = {}

        tool_calls = response.message.tool_calls
        choices = []
        if tool_calls:
            # Fix: aggregate ALL tool calls into a single assistant message
            # (one choice), matching the OpenAI response format. The previous
            # implementation rebuilt `openai_tool_calls` with a single
            # element on every loop iteration and appended one choice per
            # tool call, so callers reading `choices[0].message.tool_calls`
            # silently dropped every tool call after the first.
            openai_tool_calls = [
                dict(
                    id=tool_call.id,
                    function={
                        "name": tool_call.function.name,
                        "arguments": tool_call.function.arguments,
                    }
                    if tool_call.function
                    else {},
                    type=tool_call.type,
                )
                for tool_call in tool_calls
            ]
            choice = dict(
                index=None,
                message={
                    "role": "assistant",
                    # Cohere reports the model's reasoning about the tool
                    # calls in `tool_plan`; surface it as message content.
                    "content": response.message.tool_plan,
                    "tool_calls": openai_tool_calls,
                },
                finish_reason=response.finish_reason
                if response.finish_reason
                else None,
            )
            choices.append(choice)
        else:
            choice = dict(
                index=None,
                message={
                    "role": "assistant",
                    "content": response.message.content[0].text,  # type: ignore[union-attr,index]
                    "tool_calls": None,
                },
                finish_reason=response.finish_reason
                if response.finish_reason
                else None,
            )
            choices.append(choice)

        obj = ChatCompletion.construct(
            id=response.id,
            choices=choices,
            created=None,
            model=self.model_type,
            object="chat.completion",
            usage=usage,
        )
        return obj

    def _to_cohere_chatmessage(
        self, messages: List[OpenAIMessage]
    ) -> List["ChatMessageV2"]:
        r"""Convert OpenAI-format chat messages into Cohere v2 messages.

        Args:
            messages (List[OpenAIMessage]): Message list in OpenAI API
                format.

        Returns:
            List["ChatMessageV2"]: The same conversation expressed with
                Cohere v2 message classes.

        Raises:
            ValueError: If a message carries a role other than ``user``,
                ``tool``/``function``, ``assistant``, or ``system``.
        """
        from cohere.types import ToolCallV2Function
        from cohere.types.chat_message_v2 import (
            AssistantChatMessageV2,
            SystemChatMessageV2,
            ToolCallV2,
            ToolChatMessageV2,
            UserChatMessageV2,
        )

        # Cohere requires tool result messages to reference the id of the
        # assistant tool call they answer; OpenAI function messages carry no
        # such id, so a fresh uuid is minted per assistant function_call and
        # threaded to the following tool message.
        # NOTE(review): assumes a tool/function message always follows the
        # assistant function_call it answers — confirm against callers.
        tool_call_id = None
        new_messages = []
        for msg in messages:
            role = msg.get("role")
            content = msg.get("content")
            function_call = msg.get("function_call")

            if role == "user":
                new_message = UserChatMessageV2(role="user", content=content)  # type: ignore[arg-type]
            elif role in {"tool", "function"}:
                new_message = ToolChatMessageV2(
                    role="tool",
                    tool_call_id=tool_call_id,  # type: ignore[arg-type]
                    content=content,  # type: ignore[assignment,arg-type]
                )
            elif role == "assistant":
                if not function_call:
                    new_message = AssistantChatMessageV2(  # type: ignore[assignment]
                        role="assistant",
                        content=content,  # type: ignore[arg-type]
                    )
                else:
                    # Arguments arrive as a Python-literal string; round-trip
                    # through `ast.literal_eval` to emit strict JSON.
                    arguments = function_call.get("arguments")  # type: ignore[attr-defined]
                    arguments_dict = ast.literal_eval(arguments)
                    arguments_json = json.dumps(arguments_dict)

                    assis_tool_call_id = str(uuid.uuid4())
                    tool_call_id = assis_tool_call_id
                    new_message = AssistantChatMessageV2(  # type: ignore[assignment]
                        role="assistant",
                        tool_calls=[
                            ToolCallV2(
                                id=assis_tool_call_id,
                                type="function",
                                function=ToolCallV2Function(
                                    name=function_call.get("name"),  # type: ignore[attr-defined]
                                    arguments=arguments_json,  # type: ignore[attr-defined]
                                ),
                            )
                        ],
                        content=content,  # type: ignore[arg-type]
                    )
            elif role == "system":
                new_message = SystemChatMessageV2(  # type: ignore[assignment]
                    role="system",
                    content=content,  # type: ignore[arg-type]
                )
            else:
                raise ValueError(f"Unsupported message role: {role}")

            new_messages.append(new_message)
        return new_messages  # type: ignore[return-value]

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        # Cohere has no public tokenizer integration here; GPT-4o-mini's
        # counter is used as an approximation.
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(
                model=ModelType.GPT_4O_MINI
            )
        return self._token_counter

    @api_keys_required("COHERE_API_KEY")
    def run(self, messages: List[OpenAIMessage]) -> ChatCompletion:
        r"""Runs inference of Cohere chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.

        Returns:
            ChatCompletion.

        Raises:
            ApiError: If the Cohere API rejects the request.
        """
        from cohere.core.api_error import ApiError

        cohere_messages = self._to_cohere_chatmessage(messages)

        try:
            response = self._client.chat(
                messages=cohere_messages,
                model=self.model_type,
                **self.model_config_dict,
            )
        except ApiError as e:
            logging.error(f"Cohere API Error: {e.status_code}")
            logging.error(f"Error body: {e.body}")
            raise
        except Exception as e:
            logging.error(f"Unexpected error when calling Cohere API: {e!s}")
            raise

        openai_response = self._to_openai_response(response)

        # Add AgentOps LLM Event tracking
        if LLMEvent:
            llm_event = LLMEvent(
                thread_id=openai_response.id,
                prompt=" ".join(
                    [message.get("content") for message in messages]  # type: ignore[misc]
                ),
                prompt_tokens=openai_response.usage.prompt_tokens,  # type: ignore[union-attr]
                completion=openai_response.choices[0].message.content,
                completion_tokens=openai_response.usage.completion_tokens,  # type: ignore[union-attr]
                model=self.model_type,
            )
            record(llm_event)

        return openai_response

    def check_model_config(self):
        r"""Check whether the model configuration contains any unexpected
        arguments to Cohere API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to Cohere API.
        """
        for param in self.model_config_dict:
            if param not in COHERE_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into Cohere model backend."
                )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time. Current it's not supported.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return False
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
|
|
15
|
+
import os
|
|
16
|
+
from typing import Any, Dict, List, Optional, Union
|
|
17
|
+
|
|
18
|
+
from openai import OpenAI, Stream
|
|
19
|
+
|
|
20
|
+
from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
|
|
21
|
+
from camel.messages import OpenAIMessage
|
|
22
|
+
from camel.models.base_model import BaseModelBackend
|
|
23
|
+
from camel.types import (
|
|
24
|
+
ChatCompletion,
|
|
25
|
+
ChatCompletionChunk,
|
|
26
|
+
ModelType,
|
|
27
|
+
)
|
|
28
|
+
from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class DeepSeekModel(BaseModelBackend):
    r"""DeepSeek API in a unified BaseModelBackend interface.

    Args:
        model_type (Union[ModelType, str]): Model for which a backend is
            created.
        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
            that will be fed into:obj:`openai.ChatCompletion.create()`. If
            :obj:`None`, :obj:`DeepSeekConfig().as_dict()` will be used.
            (default: :obj:`None`)
        api_key (Optional[str], optional): The API key for authenticating with
            the DeepSeek service. (default: :obj:`None`)
        url (Optional[str], optional): The url to the DeepSeek service.
            (default: :obj:`https://api.deepseek.com`)
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`OpenAITokenCounter`
            will be used. (default: :obj:`None`)

    References:
        https://api-docs.deepseek.com/
    """

    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ) -> None:
        # Resolve configuration, credentials and endpoint, preferring
        # explicit arguments over environment variables over defaults.
        model_config_dict = (
            DeepSeekConfig().as_dict()
            if model_config_dict is None
            else model_config_dict
        )
        api_key = api_key or os.environ.get("DEEPSEEK_API_KEY")
        url = url or os.environ.get(
            "DEEPSEEK_API_BASE_URL",
            "https://api.deepseek.com",
        )
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )

        # DeepSeek speaks the OpenAI wire protocol, so the standard OpenAI
        # client is pointed at the DeepSeek base url.
        self._client = OpenAI(
            timeout=60,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
        )

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        # Lazily created on first access; GPT-4o-mini tokenization is used
        # as an approximation.
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(
                model=ModelType.GPT_4O_MINI
            )
        return self._token_counter

    @api_keys_required("DEEPSEEK_API_KEY")
    def run(
        self,
        messages: List[OpenAIMessage],
    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
        r"""Runs inference of DeepSeek chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.

        Returns:
            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                `ChatCompletion` in the non-stream mode, or
                `Stream[ChatCompletionChunk]` in the stream mode.
        """
        completion = self._client.chat.completions.create(
            model=self.model_type,
            messages=messages,
            **self.model_config_dict,
        )
        return completion

    def check_model_config(self):
        r"""Check whether the model configuration contains any
        unexpected arguments to DeepSeek API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to DeepSeek API.
        """
        # Reject on the first config key DeepSeek does not recognise.
        unknown = [
            key
            for key in self.model_config_dict
            if key not in DEEPSEEK_API_PARAMS
        ]
        if unknown:
            raise ValueError(
                f"Unexpected argument `{unknown[0]}` is "
                "input into DeepSeek model backend."
            )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get("stream", False)
|