camel-ai 0.2.9__py3-none-any.whl → 0.2.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +10 -5
- camel/agents/__init__.py +4 -4
- camel/agents/base.py +4 -4
- camel/agents/chat_agent.py +106 -42
- camel/agents/critic_agent.py +4 -4
- camel/agents/deductive_reasoner_agent.py +8 -5
- camel/agents/embodied_agent.py +4 -4
- camel/agents/knowledge_graph_agent.py +4 -4
- camel/agents/role_assignment_agent.py +4 -4
- camel/agents/search_agent.py +4 -4
- camel/agents/task_agent.py +4 -4
- camel/agents/tool_agents/__init__.py +4 -4
- camel/agents/tool_agents/base.py +4 -4
- camel/agents/tool_agents/hugging_face_tool_agent.py +4 -4
- camel/bots/__init__.py +4 -4
- camel/bots/discord_app.py +4 -4
- camel/bots/slack/__init__.py +4 -4
- camel/bots/slack/models.py +4 -4
- camel/bots/slack/slack_app.py +4 -4
- camel/bots/telegram_bot.py +4 -4
- camel/configs/__init__.py +13 -4
- camel/configs/anthropic_config.py +4 -4
- camel/configs/base_config.py +4 -4
- camel/configs/cohere_config.py +76 -0
- camel/configs/deepseek_config.py +134 -0
- camel/configs/gemini_config.py +85 -127
- camel/configs/groq_config.py +4 -4
- camel/configs/litellm_config.py +4 -4
- camel/configs/mistral_config.py +4 -7
- camel/configs/nvidia_config.py +70 -0
- camel/configs/ollama_config.py +4 -4
- camel/configs/openai_config.py +32 -7
- camel/configs/qwen_config.py +4 -4
- camel/configs/reka_config.py +4 -4
- camel/configs/samba_config.py +4 -4
- camel/configs/togetherai_config.py +4 -4
- camel/configs/vllm_config.py +14 -5
- camel/configs/yi_config.py +4 -4
- camel/configs/zhipuai_config.py +4 -4
- camel/embeddings/__init__.py +6 -4
- camel/embeddings/base.py +4 -4
- camel/embeddings/mistral_embedding.py +4 -4
- camel/embeddings/openai_compatible_embedding.py +91 -0
- camel/embeddings/openai_embedding.py +4 -4
- camel/embeddings/sentence_transformers_embeddings.py +4 -4
- camel/embeddings/vlm_embedding.py +8 -5
- camel/generators.py +4 -4
- camel/human.py +4 -4
- camel/interpreters/__init__.py +4 -4
- camel/interpreters/base.py +4 -4
- camel/interpreters/docker_interpreter.py +11 -6
- camel/interpreters/internal_python_interpreter.py +4 -4
- camel/interpreters/interpreter_error.py +4 -4
- camel/interpreters/ipython_interpreter.py +4 -4
- camel/interpreters/subprocess_interpreter.py +11 -6
- camel/loaders/__init__.py +4 -4
- camel/loaders/apify_reader.py +4 -4
- camel/loaders/base_io.py +4 -4
- camel/loaders/chunkr_reader.py +4 -4
- camel/loaders/firecrawl_reader.py +4 -7
- camel/loaders/jina_url_reader.py +4 -4
- camel/loaders/unstructured_io.py +4 -4
- camel/logger.py +112 -0
- camel/memories/__init__.py +4 -4
- camel/memories/agent_memories.py +4 -4
- camel/memories/base.py +4 -4
- camel/memories/blocks/__init__.py +4 -4
- camel/memories/blocks/chat_history_block.py +4 -4
- camel/memories/blocks/vectordb_block.py +4 -4
- camel/memories/context_creators/__init__.py +4 -4
- camel/memories/context_creators/score_based.py +4 -4
- camel/memories/records.py +4 -4
- camel/messages/__init__.py +20 -4
- camel/messages/base.py +118 -11
- camel/messages/conversion/__init__.py +31 -0
- camel/messages/conversion/alpaca.py +122 -0
- camel/messages/conversion/conversation_models.py +178 -0
- camel/messages/conversion/sharegpt/__init__.py +20 -0
- camel/messages/conversion/sharegpt/function_call_formatter.py +49 -0
- camel/messages/conversion/sharegpt/hermes/__init__.py +19 -0
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +128 -0
- camel/messages/func_message.py +50 -4
- camel/models/__init__.py +13 -4
- camel/models/anthropic_model.py +4 -4
- camel/models/azure_openai_model.py +4 -4
- camel/models/base_model.py +4 -4
- camel/models/cohere_model.py +282 -0
- camel/models/deepseek_model.py +139 -0
- camel/models/gemini_model.py +61 -146
- camel/models/groq_model.py +4 -4
- camel/models/litellm_model.py +4 -4
- camel/models/mistral_model.py +4 -4
- camel/models/model_factory.py +13 -4
- camel/models/model_manager.py +212 -0
- camel/models/nemotron_model.py +4 -4
- camel/models/nvidia_model.py +141 -0
- camel/models/ollama_model.py +4 -4
- camel/models/openai_audio_models.py +4 -4
- camel/models/openai_compatible_model.py +4 -4
- camel/models/openai_model.py +43 -4
- camel/models/qwen_model.py +4 -4
- camel/models/reka_model.py +4 -4
- camel/models/samba_model.py +6 -5
- camel/models/stub_model.py +4 -4
- camel/models/togetherai_model.py +4 -4
- camel/models/vllm_model.py +4 -4
- camel/models/yi_model.py +4 -4
- camel/models/zhipuai_model.py +4 -4
- camel/personas/__init__.py +17 -0
- camel/personas/persona.py +103 -0
- camel/personas/persona_hub.py +293 -0
- camel/prompts/__init__.py +6 -4
- camel/prompts/ai_society.py +4 -4
- camel/prompts/base.py +4 -4
- camel/prompts/code.py +4 -4
- camel/prompts/evaluation.py +4 -4
- camel/prompts/generate_text_embedding_data.py +4 -4
- camel/prompts/image_craft.py +4 -4
- camel/prompts/misalignment.py +4 -4
- camel/prompts/multi_condition_image_craft.py +4 -4
- camel/prompts/object_recognition.py +4 -4
- camel/prompts/persona_hub.py +61 -0
- camel/prompts/prompt_templates.py +4 -4
- camel/prompts/role_description_prompt_template.py +4 -4
- camel/prompts/solution_extraction.py +4 -4
- camel/prompts/task_prompt_template.py +4 -4
- camel/prompts/translation.py +4 -4
- camel/prompts/video_description_prompt.py +4 -4
- camel/responses/__init__.py +4 -4
- camel/responses/agent_responses.py +4 -4
- camel/retrievers/__init__.py +4 -4
- camel/retrievers/auto_retriever.py +4 -4
- camel/retrievers/base.py +4 -4
- camel/retrievers/bm25_retriever.py +4 -4
- camel/retrievers/cohere_rerank_retriever.py +7 -9
- camel/retrievers/vector_retriever.py +26 -9
- camel/runtime/__init__.py +29 -0
- camel/runtime/api.py +93 -0
- camel/runtime/base.py +45 -0
- camel/runtime/configs.py +56 -0
- camel/runtime/docker_runtime.py +404 -0
- camel/runtime/llm_guard_runtime.py +199 -0
- camel/runtime/remote_http_runtime.py +204 -0
- camel/runtime/utils/__init__.py +20 -0
- camel/runtime/utils/function_risk_toolkit.py +58 -0
- camel/runtime/utils/ignore_risk_toolkit.py +72 -0
- camel/schemas/__init__.py +17 -0
- camel/schemas/base.py +45 -0
- camel/schemas/openai_converter.py +116 -0
- camel/societies/__init__.py +4 -4
- camel/societies/babyagi_playing.py +8 -5
- camel/societies/role_playing.py +4 -4
- camel/societies/workforce/__init__.py +4 -4
- camel/societies/workforce/base.py +4 -4
- camel/societies/workforce/prompts.py +4 -4
- camel/societies/workforce/role_playing_worker.py +4 -4
- camel/societies/workforce/single_agent_worker.py +4 -4
- camel/societies/workforce/task_channel.py +4 -4
- camel/societies/workforce/utils.py +4 -4
- camel/societies/workforce/worker.py +4 -4
- camel/societies/workforce/workforce.py +7 -7
- camel/storages/__init__.py +4 -4
- camel/storages/graph_storages/__init__.py +4 -4
- camel/storages/graph_storages/base.py +4 -4
- camel/storages/graph_storages/graph_element.py +4 -4
- camel/storages/graph_storages/nebula_graph.py +4 -4
- camel/storages/graph_storages/neo4j_graph.py +4 -4
- camel/storages/key_value_storages/__init__.py +4 -4
- camel/storages/key_value_storages/base.py +4 -4
- camel/storages/key_value_storages/in_memory.py +4 -4
- camel/storages/key_value_storages/json.py +4 -4
- camel/storages/key_value_storages/redis.py +4 -4
- camel/storages/object_storages/__init__.py +4 -4
- camel/storages/object_storages/amazon_s3.py +4 -4
- camel/storages/object_storages/azure_blob.py +4 -4
- camel/storages/object_storages/base.py +4 -4
- camel/storages/object_storages/google_cloud.py +4 -4
- camel/storages/vectordb_storages/__init__.py +4 -4
- camel/storages/vectordb_storages/base.py +4 -4
- camel/storages/vectordb_storages/milvus.py +4 -4
- camel/storages/vectordb_storages/qdrant.py +4 -4
- camel/tasks/__init__.py +4 -4
- camel/tasks/task.py +4 -4
- camel/tasks/task_prompt.py +4 -4
- camel/terminators/__init__.py +4 -4
- camel/terminators/base.py +4 -4
- camel/terminators/response_terminator.py +4 -4
- camel/terminators/token_limit_terminator.py +4 -4
- camel/toolkits/__init__.py +16 -17
- camel/toolkits/arxiv_toolkit.py +4 -4
- camel/toolkits/ask_news_toolkit.py +7 -18
- camel/toolkits/base.py +4 -4
- camel/toolkits/code_execution.py +57 -10
- camel/toolkits/dalle_toolkit.py +4 -7
- camel/toolkits/data_commons_toolkit.py +4 -4
- camel/toolkits/function_tool.py +220 -69
- camel/toolkits/github_toolkit.py +4 -4
- camel/toolkits/google_maps_toolkit.py +4 -4
- camel/toolkits/google_scholar_toolkit.py +4 -4
- camel/toolkits/human_toolkit.py +53 -0
- camel/toolkits/linkedin_toolkit.py +4 -4
- camel/toolkits/math_toolkit.py +4 -7
- camel/toolkits/meshy_toolkit.py +185 -0
- camel/toolkits/notion_toolkit.py +4 -4
- camel/toolkits/open_api_specs/biztoc/__init__.py +4 -4
- camel/toolkits/open_api_specs/coursera/__init__.py +4 -4
- camel/toolkits/open_api_specs/create_qr_code/__init__.py +4 -4
- camel/toolkits/open_api_specs/klarna/__init__.py +4 -4
- camel/toolkits/open_api_specs/nasa_apod/__init__.py +4 -4
- camel/toolkits/open_api_specs/outschool/__init__.py +4 -4
- camel/toolkits/open_api_specs/outschool/paths/__init__.py +4 -4
- camel/toolkits/open_api_specs/outschool/paths/get_classes.py +4 -4
- camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +4 -4
- camel/toolkits/open_api_specs/security_config.py +4 -4
- camel/toolkits/open_api_specs/speak/__init__.py +4 -4
- camel/toolkits/open_api_specs/web_scraper/__init__.py +4 -4
- camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +4 -4
- camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +4 -4
- camel/toolkits/open_api_toolkit.py +4 -4
- camel/toolkits/reddit_toolkit.py +4 -4
- camel/toolkits/retrieval_toolkit.py +4 -4
- camel/toolkits/search_toolkit.py +49 -29
- camel/toolkits/slack_toolkit.py +4 -4
- camel/toolkits/twitter_toolkit.py +13 -13
- camel/toolkits/video_toolkit.py +211 -0
- camel/toolkits/weather_toolkit.py +4 -7
- camel/toolkits/whatsapp_toolkit.py +6 -6
- camel/types/__init__.py +6 -4
- camel/types/enums.py +118 -15
- camel/types/openai_types.py +6 -4
- camel/types/unified_model_type.py +9 -4
- camel/utils/__init__.py +35 -33
- camel/utils/async_func.py +4 -4
- camel/utils/commons.py +26 -9
- camel/utils/constants.py +4 -4
- camel/utils/response_format.py +63 -0
- camel/utils/token_counting.py +8 -5
- {camel_ai-0.2.9.dist-info → camel_ai-0.2.11.dist-info}/METADATA +108 -56
- camel_ai-0.2.11.dist-info/RECORD +252 -0
- camel_ai-0.2.9.dist-info/RECORD +0 -215
- {camel_ai-0.2.9.dist-info → camel_ai-0.2.11.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.9.dist-info → camel_ai-0.2.11.dist-info}/WHEEL +0 -0
camel/models/gemini_model.py
CHANGED
|
@@ -1,63 +1,57 @@
|
|
|
1
|
-
#
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
3
|
# you may not use this file except in compliance with the License.
|
|
4
4
|
# You may obtain a copy of the License at
|
|
5
5
|
#
|
|
6
6
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
7
|
#
|
|
8
8
|
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
10
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
|
-
#
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
import os
|
|
15
|
-
from typing import
|
|
15
|
+
from typing import Any, Dict, List, Optional, Union
|
|
16
|
+
|
|
17
|
+
from openai import OpenAI, Stream
|
|
16
18
|
|
|
17
19
|
from camel.configs import Gemini_API_PARAMS, GeminiConfig
|
|
18
20
|
from camel.messages import OpenAIMessage
|
|
19
21
|
from camel.models import BaseModelBackend
|
|
20
22
|
from camel.types import (
|
|
21
23
|
ChatCompletion,
|
|
22
|
-
|
|
23
|
-
Choice,
|
|
24
|
+
ChatCompletionChunk,
|
|
24
25
|
ModelType,
|
|
25
26
|
)
|
|
26
27
|
from camel.utils import (
|
|
27
28
|
BaseTokenCounter,
|
|
28
|
-
|
|
29
|
+
OpenAITokenCounter,
|
|
29
30
|
api_keys_required,
|
|
30
|
-
dependencies_required,
|
|
31
31
|
)
|
|
32
32
|
|
|
33
|
-
if TYPE_CHECKING:
|
|
34
|
-
from google.generativeai.types import ContentsType, GenerateContentResponse
|
|
35
|
-
|
|
36
33
|
|
|
37
34
|
class GeminiModel(BaseModelBackend):
|
|
38
35
|
r"""Gemini API in a unified BaseModelBackend interface.
|
|
39
36
|
|
|
40
37
|
Args:
|
|
41
38
|
model_type (Union[ModelType, str]): Model for which a backend is
|
|
42
|
-
created.
|
|
39
|
+
created, one of Gemini series.
|
|
43
40
|
model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
|
|
44
|
-
that will be fed into:obj:`
|
|
45
|
-
|
|
41
|
+
that will be fed into:obj:`openai.ChatCompletion.create()`. If
|
|
42
|
+
:obj:`None`, :obj:`GeminiConfig().as_dict()` will be used.
|
|
46
43
|
(default: :obj:`None`)
|
|
47
44
|
api_key (Optional[str], optional): The API key for authenticating with
|
|
48
|
-
the
|
|
49
|
-
url (Optional[str], optional): The url to the
|
|
50
|
-
(default: :obj:`
|
|
45
|
+
the Gemini service. (default: :obj:`None`)
|
|
46
|
+
url (Optional[str], optional): The url to the Gemini service.
|
|
47
|
+
(default: :obj:`https://generativelanguage.googleapis.com/v1beta/
|
|
48
|
+
openai/`)
|
|
51
49
|
token_counter (Optional[BaseTokenCounter], optional): Token counter to
|
|
52
|
-
use for the model. If not provided, :obj:`
|
|
53
|
-
be used.
|
|
54
|
-
|
|
55
|
-
Notes:
|
|
56
|
-
Currently :obj:`"stream": True` is not supported with Gemini due to the
|
|
57
|
-
limitation of the current camel design.
|
|
50
|
+
use for the model. If not provided, :obj:`OpenAITokenCounter(
|
|
51
|
+
ModelType.GPT_4O_MINI)` will be used.
|
|
52
|
+
(default: :obj:`None`)
|
|
58
53
|
"""
|
|
59
54
|
|
|
60
|
-
@dependencies_required('google')
|
|
61
55
|
def __init__(
|
|
62
56
|
self,
|
|
63
57
|
model_type: Union[ModelType, str],
|
|
@@ -66,29 +60,45 @@ class GeminiModel(BaseModelBackend):
|
|
|
66
60
|
url: Optional[str] = None,
|
|
67
61
|
token_counter: Optional[BaseTokenCounter] = None,
|
|
68
62
|
) -> None:
|
|
69
|
-
import google.generativeai as genai
|
|
70
|
-
from google.generativeai.types.generation_types import GenerationConfig
|
|
71
|
-
|
|
72
63
|
if model_config_dict is None:
|
|
73
64
|
model_config_dict = GeminiConfig().as_dict()
|
|
74
|
-
|
|
75
|
-
|
|
65
|
+
api_key = api_key or os.environ.get("GEMINI_API_KEY")
|
|
66
|
+
url = url or os.environ.get(
|
|
67
|
+
"GEMINI_API_BASE_URL",
|
|
68
|
+
"https://generativelanguage.googleapis.com/v1beta/openai/",
|
|
69
|
+
)
|
|
76
70
|
super().__init__(
|
|
77
71
|
model_type, model_config_dict, api_key, url, token_counter
|
|
78
72
|
)
|
|
79
|
-
|
|
80
|
-
|
|
73
|
+
self._client = OpenAI(
|
|
74
|
+
timeout=60,
|
|
75
|
+
max_retries=3,
|
|
76
|
+
api_key=self._api_key,
|
|
77
|
+
base_url=self._url,
|
|
78
|
+
)
|
|
81
79
|
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
80
|
+
@api_keys_required("GEMINI_API_KEY")
|
|
81
|
+
def run(
|
|
82
|
+
self,
|
|
83
|
+
messages: List[OpenAIMessage],
|
|
84
|
+
) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
|
|
85
|
+
r"""Runs inference of Gemini chat completion.
|
|
86
|
+
|
|
87
|
+
Args:
|
|
88
|
+
messages (List[OpenAIMessage]): Message list with the chat history
|
|
89
|
+
in OpenAI API format.
|
|
90
|
+
|
|
91
|
+
Returns:
|
|
92
|
+
Union[ChatCompletion, Stream[ChatCompletionChunk]]:
|
|
93
|
+
`ChatCompletion` in the non-stream mode, or
|
|
94
|
+
`Stream[ChatCompletionChunk]` in the stream mode.
|
|
95
|
+
"""
|
|
96
|
+
response = self._client.chat.completions.create(
|
|
97
|
+
messages=messages,
|
|
98
|
+
model=self.model_type,
|
|
99
|
+
**self.model_config_dict,
|
|
90
100
|
)
|
|
91
|
-
|
|
101
|
+
return response
|
|
92
102
|
|
|
93
103
|
@property
|
|
94
104
|
def token_counter(self) -> BaseTokenCounter:
|
|
@@ -99,125 +109,30 @@ class GeminiModel(BaseModelBackend):
|
|
|
99
109
|
tokenization style.
|
|
100
110
|
"""
|
|
101
111
|
if not self._token_counter:
|
|
102
|
-
self._token_counter =
|
|
112
|
+
self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
|
|
103
113
|
return self._token_counter
|
|
104
114
|
|
|
105
|
-
@api_keys_required("GOOGLE_API_KEY")
|
|
106
|
-
def run(
|
|
107
|
-
self,
|
|
108
|
-
messages: List[OpenAIMessage],
|
|
109
|
-
) -> ChatCompletion:
|
|
110
|
-
r"""Runs inference of Gemini model.
|
|
111
|
-
This method can handle multimodal input
|
|
112
|
-
|
|
113
|
-
Args:
|
|
114
|
-
messages: Message list or Message with the chat history
|
|
115
|
-
in OpenAi format.
|
|
116
|
-
|
|
117
|
-
Returns:
|
|
118
|
-
response: A ChatCompletion object formatted for the OpenAI API.
|
|
119
|
-
"""
|
|
120
|
-
response = self._client.generate_content(
|
|
121
|
-
contents=self.to_gemini_req(messages),
|
|
122
|
-
**self.model_config_dict,
|
|
123
|
-
)
|
|
124
|
-
response.resolve()
|
|
125
|
-
return self.to_openai_response(response)
|
|
126
|
-
|
|
127
115
|
def check_model_config(self):
|
|
128
116
|
r"""Check whether the model configuration contains any
|
|
129
117
|
unexpected arguments to Gemini API.
|
|
130
118
|
|
|
131
119
|
Raises:
|
|
132
120
|
ValueError: If the model configuration dictionary contains any
|
|
133
|
-
unexpected arguments to
|
|
121
|
+
unexpected arguments to Gemini API.
|
|
134
122
|
"""
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
)
|
|
123
|
+
for param in self.model_config_dict:
|
|
124
|
+
if param not in Gemini_API_PARAMS:
|
|
125
|
+
raise ValueError(
|
|
126
|
+
f"Unexpected argument `{param}` is "
|
|
127
|
+
"input into Gemini model backend."
|
|
128
|
+
)
|
|
142
129
|
|
|
143
130
|
@property
|
|
144
131
|
def stream(self) -> bool:
|
|
145
|
-
r"""Returns whether the model is in stream mode,
|
|
146
|
-
|
|
132
|
+
r"""Returns whether the model is in stream mode, which sends partial
|
|
133
|
+
results each time.
|
|
147
134
|
|
|
148
135
|
Returns:
|
|
149
136
|
bool: Whether the model is in stream mode.
|
|
150
137
|
"""
|
|
151
138
|
return self.model_config_dict.get('stream', False)
|
|
152
|
-
|
|
153
|
-
def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
|
|
154
|
-
r"""Converts the request from the OpenAI API format to the Gemini API
|
|
155
|
-
request format.
|
|
156
|
-
|
|
157
|
-
Args:
|
|
158
|
-
messages: The request object from the OpenAI API.
|
|
159
|
-
|
|
160
|
-
Returns:
|
|
161
|
-
converted_messages: A list of messages formatted for Gemini API.
|
|
162
|
-
"""
|
|
163
|
-
# role reference
|
|
164
|
-
# https://ai.google.dev/api/python/google/generativeai/protos/Content
|
|
165
|
-
converted_messages = []
|
|
166
|
-
for message in messages:
|
|
167
|
-
role = message.get('role')
|
|
168
|
-
if role == 'assistant':
|
|
169
|
-
role_to_gemini = 'model'
|
|
170
|
-
else:
|
|
171
|
-
role_to_gemini = 'user'
|
|
172
|
-
converted_message = {
|
|
173
|
-
"role": role_to_gemini,
|
|
174
|
-
"parts": message.get("content"),
|
|
175
|
-
}
|
|
176
|
-
converted_messages.append(converted_message)
|
|
177
|
-
return converted_messages
|
|
178
|
-
|
|
179
|
-
def to_openai_response(
|
|
180
|
-
self,
|
|
181
|
-
response: 'GenerateContentResponse',
|
|
182
|
-
) -> ChatCompletion:
|
|
183
|
-
r"""Converts the response from the Gemini API to the OpenAI API
|
|
184
|
-
response format.
|
|
185
|
-
|
|
186
|
-
Args:
|
|
187
|
-
response: The response object returned by the Gemini API
|
|
188
|
-
|
|
189
|
-
Returns:
|
|
190
|
-
openai_response: A ChatCompletion object formatted for
|
|
191
|
-
the OpenAI API.
|
|
192
|
-
"""
|
|
193
|
-
import time
|
|
194
|
-
import uuid
|
|
195
|
-
|
|
196
|
-
openai_response = ChatCompletion(
|
|
197
|
-
id=f"chatcmpl-{uuid.uuid4().hex!s}",
|
|
198
|
-
object="chat.completion",
|
|
199
|
-
created=int(time.time()),
|
|
200
|
-
model=self.model_type,
|
|
201
|
-
choices=[],
|
|
202
|
-
)
|
|
203
|
-
for i, candidate in enumerate(response.candidates):
|
|
204
|
-
content = ""
|
|
205
|
-
if candidate.content and len(candidate.content.parts) > 0:
|
|
206
|
-
content = candidate.content.parts[0].text
|
|
207
|
-
finish_reason = candidate.finish_reason
|
|
208
|
-
finish_reason_mapping = {
|
|
209
|
-
"FinishReason.STOP": "stop",
|
|
210
|
-
"FinishReason.SAFETY": "content_filter",
|
|
211
|
-
"FinishReason.RECITATION": "content_filter",
|
|
212
|
-
"FinishReason.MAX_TOKENS": "length",
|
|
213
|
-
}
|
|
214
|
-
finish_reason = finish_reason_mapping.get(finish_reason, "stop")
|
|
215
|
-
choice = Choice(
|
|
216
|
-
index=i,
|
|
217
|
-
message=ChatCompletionMessage(
|
|
218
|
-
role="assistant", content=content
|
|
219
|
-
),
|
|
220
|
-
finish_reason=finish_reason,
|
|
221
|
-
)
|
|
222
|
-
openai_response.choices.append(choice)
|
|
223
|
-
return openai_response
|
camel/models/groq_model.py
CHANGED
|
@@ -1,16 +1,16 @@
|
|
|
1
|
-
#
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
3
|
# you may not use this file except in compliance with the License.
|
|
4
4
|
# You may obtain a copy of the License at
|
|
5
5
|
#
|
|
6
6
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
7
|
#
|
|
8
8
|
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
10
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
|
-
#
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
import os
|
|
15
15
|
from typing import Any, Dict, List, Optional, Union
|
|
16
16
|
|
camel/models/litellm_model.py
CHANGED
|
@@ -1,16 +1,16 @@
|
|
|
1
|
-
#
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
3
|
# you may not use this file except in compliance with the License.
|
|
4
4
|
# You may obtain a copy of the License at
|
|
5
5
|
#
|
|
6
6
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
7
|
#
|
|
8
8
|
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
10
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
|
-
#
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
from typing import Any, Dict, List, Optional, Union
|
|
15
15
|
|
|
16
16
|
from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
|
camel/models/mistral_model.py
CHANGED
|
@@ -1,16 +1,16 @@
|
|
|
1
|
-
#
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
3
|
# you may not use this file except in compliance with the License.
|
|
4
4
|
# You may obtain a copy of the License at
|
|
5
5
|
#
|
|
6
6
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
7
|
#
|
|
8
8
|
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
10
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
|
-
#
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
import os
|
|
15
15
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
|
16
16
|
|
camel/models/model_factory.py
CHANGED
|
@@ -1,25 +1,28 @@
|
|
|
1
|
-
#
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
3
|
# you may not use this file except in compliance with the License.
|
|
4
4
|
# You may obtain a copy of the License at
|
|
5
5
|
#
|
|
6
6
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
7
|
#
|
|
8
8
|
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
10
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
|
-
#
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
from typing import Dict, Optional, Type, Union
|
|
15
15
|
|
|
16
16
|
from camel.models.anthropic_model import AnthropicModel
|
|
17
17
|
from camel.models.azure_openai_model import AzureOpenAIModel
|
|
18
18
|
from camel.models.base_model import BaseModelBackend
|
|
19
|
+
from camel.models.cohere_model import CohereModel
|
|
20
|
+
from camel.models.deepseek_model import DeepSeekModel
|
|
19
21
|
from camel.models.gemini_model import GeminiModel
|
|
20
22
|
from camel.models.groq_model import GroqModel
|
|
21
23
|
from camel.models.litellm_model import LiteLLMModel
|
|
22
24
|
from camel.models.mistral_model import MistralModel
|
|
25
|
+
from camel.models.nvidia_model import NvidiaModel
|
|
23
26
|
from camel.models.ollama_model import OllamaModel
|
|
24
27
|
from camel.models.openai_compatible_model import OpenAICompatibleModel
|
|
25
28
|
from camel.models.openai_model import OpenAIModel
|
|
@@ -91,6 +94,8 @@ class ModelFactory:
|
|
|
91
94
|
model_class = TogetherAIModel
|
|
92
95
|
elif model_platform.is_litellm:
|
|
93
96
|
model_class = LiteLLMModel
|
|
97
|
+
elif model_platform.is_nvidia:
|
|
98
|
+
model_class = NvidiaModel
|
|
94
99
|
|
|
95
100
|
elif model_platform.is_openai and model_type.is_openai:
|
|
96
101
|
model_class = OpenAIModel
|
|
@@ -108,10 +113,14 @@ class ModelFactory:
|
|
|
108
113
|
model_class = MistralModel
|
|
109
114
|
elif model_platform.is_reka and model_type.is_reka:
|
|
110
115
|
model_class = RekaModel
|
|
116
|
+
elif model_platform.is_cohere and model_type.is_cohere:
|
|
117
|
+
model_class = CohereModel
|
|
111
118
|
elif model_platform.is_yi and model_type.is_yi:
|
|
112
119
|
model_class = YiModel
|
|
113
120
|
elif model_platform.is_qwen and model_type.is_qwen:
|
|
114
121
|
model_class = QwenModel
|
|
122
|
+
elif model_platform.is_deepseek:
|
|
123
|
+
model_class = DeepSeekModel
|
|
115
124
|
elif model_type == ModelType.STUB:
|
|
116
125
|
model_class = StubModel
|
|
117
126
|
|
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
from itertools import cycle
|
|
17
|
+
from random import choice
|
|
18
|
+
from typing import (
|
|
19
|
+
Any,
|
|
20
|
+
Callable,
|
|
21
|
+
Dict,
|
|
22
|
+
List,
|
|
23
|
+
Union,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
from openai import Stream
|
|
27
|
+
|
|
28
|
+
from camel.messages import OpenAIMessage
|
|
29
|
+
from camel.models.base_model import BaseModelBackend
|
|
30
|
+
from camel.types import (
|
|
31
|
+
ChatCompletion,
|
|
32
|
+
ChatCompletionChunk,
|
|
33
|
+
UnifiedModelType,
|
|
34
|
+
)
|
|
35
|
+
from camel.utils import BaseTokenCounter
|
|
36
|
+
|
|
37
|
+
logger = logging.getLogger(__name__)
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class ModelProcessingError(Exception):
|
|
41
|
+
r"""Raised when an error occurs during model processing."""
|
|
42
|
+
|
|
43
|
+
pass
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class ModelManager:
|
|
47
|
+
r"""ModelManager choosing a model from provided list.
|
|
48
|
+
Models are picked according to defined strategy.
|
|
49
|
+
|
|
50
|
+
Args:
|
|
51
|
+
models(Union[BaseModelBackend, List[BaseModelBackend]]):
|
|
52
|
+
model backend or list of model backends
|
|
53
|
+
(e.g., model instances, APIs)
|
|
54
|
+
scheduling_strategy (str): name of function that defines how
|
|
55
|
+
to select the next model. (default: :str:`round_robin`)
|
|
56
|
+
"""
|
|
57
|
+
|
|
58
|
+
def __init__(
|
|
59
|
+
self,
|
|
60
|
+
models: Union[BaseModelBackend, List[BaseModelBackend]],
|
|
61
|
+
scheduling_strategy: str = "round_robin",
|
|
62
|
+
):
|
|
63
|
+
if isinstance(models, list):
|
|
64
|
+
self.models = models
|
|
65
|
+
else:
|
|
66
|
+
self.models = [models]
|
|
67
|
+
self.models_cycle = cycle(self.models)
|
|
68
|
+
self.current_model = self.models[0]
|
|
69
|
+
|
|
70
|
+
# Set the scheduling strategy; default is round-robin
|
|
71
|
+
try:
|
|
72
|
+
self.scheduling_strategy = getattr(self, scheduling_strategy)
|
|
73
|
+
except AttributeError:
|
|
74
|
+
logger.warning(
|
|
75
|
+
f"Provided strategy: {scheduling_strategy} is not implemented."
|
|
76
|
+
f"Using default 'round robin'"
|
|
77
|
+
)
|
|
78
|
+
self.scheduling_strategy = self.round_robin
|
|
79
|
+
|
|
80
|
+
@property
|
|
81
|
+
def model_type(self) -> UnifiedModelType:
|
|
82
|
+
r"""Return type of the current model.
|
|
83
|
+
|
|
84
|
+
Returns:
|
|
85
|
+
Union[ModelType, str]: Current model type.
|
|
86
|
+
"""
|
|
87
|
+
return self.current_model.model_type
|
|
88
|
+
|
|
89
|
+
@property
|
|
90
|
+
def model_config_dict(self) -> Dict[str, Any]:
|
|
91
|
+
r"""Return model_config_dict of the current model.
|
|
92
|
+
|
|
93
|
+
Returns:
|
|
94
|
+
Dict[str, Any]: Config dictionary of the current model.
|
|
95
|
+
"""
|
|
96
|
+
return self.current_model.model_config_dict
|
|
97
|
+
|
|
98
|
+
@model_config_dict.setter
|
|
99
|
+
def model_config_dict(self, model_config_dict: Dict[str, Any]):
|
|
100
|
+
r"""Set model_config_dict to the current model.
|
|
101
|
+
|
|
102
|
+
Args:
|
|
103
|
+
model_config_dict (Dict[str, Any]): Config dictionary to be set at
|
|
104
|
+
current model.
|
|
105
|
+
"""
|
|
106
|
+
self.current_model.model_config_dict = model_config_dict
|
|
107
|
+
|
|
108
|
+
    @property
    def current_model_index(self) -> int:
        r"""Return the index of current model in ``self.models`` list.

        ``list.index`` compares by equality, so the index of the first
        backend equal to ``current_model`` is reported.

        Returns:
            int: Index of current model in given list of models.
        """
        return self.models.index(self.current_model)
|
|
116
|
+
|
|
117
|
+
    @property
    def token_limit(self) -> int:
        r"""Return the maximum token limit for the current model.

        Delegates to the current backend, which resolves the limit either
        from its ``model_config_dict`` or from its default token limit.

        Returns:
            int: The maximum token limit for the given model.
        """
        return self.current_model.token_limit
|
|
128
|
+
|
|
129
|
+
    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Return ``token_counter`` of the current model.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        return self.current_model.token_counter
|
|
138
|
+
|
|
139
|
+
def add_strategy(self, name: str, strategy_fn: Callable):
|
|
140
|
+
r"""Add a scheduling strategy method provided by user in case when none
|
|
141
|
+
of existent strategies fits.
|
|
142
|
+
When custom strategy is provided, it will be set as
|
|
143
|
+
"self.scheduling_strategy" attribute.
|
|
144
|
+
|
|
145
|
+
Args:
|
|
146
|
+
name (str): The name of the strategy.
|
|
147
|
+
strategy_fn (Callable): The scheduling strategy function.
|
|
148
|
+
"""
|
|
149
|
+
if not callable(strategy_fn):
|
|
150
|
+
raise ValueError("strategy_fn must be a callable function.")
|
|
151
|
+
setattr(self, name, strategy_fn.__get__(self))
|
|
152
|
+
self.scheduling_strategy = getattr(self, name)
|
|
153
|
+
logger.info(f"Custom strategy '{name}' added.")
|
|
154
|
+
|
|
155
|
+
    # Strategies
    def round_robin(self) -> BaseModelBackend:
        r"""Return models one by one in simple round-robin fashion.

        Draws from ``self.models_cycle``, an endless :func:`itertools.cycle`
        over ``self.models`` built in ``__init__``.

        Returns:
            BaseModelBackend for processing incoming messages.
        """
        return next(self.models_cycle)
|
|
163
|
+
|
|
164
|
+
    def always_first(self) -> BaseModelBackend:
        r"""Always return the first model from ``self.models``.

        Returns:
            BaseModelBackend for processing incoming messages.
        """
        return self.models[0]
|
|
171
|
+
|
|
172
|
+
    def random_model(self) -> BaseModelBackend:
        r"""Return a random model from ``self.models`` list.

        Uses ``random.choice``, so selection is uniform and not
        reproducible unless the caller seeds the global RNG.

        Returns:
            BaseModelBackend for processing incoming messages.
        """
        return choice(self.models)
|
|
179
|
+
|
|
180
|
+
def run(
|
|
181
|
+
self, messages: List[OpenAIMessage]
|
|
182
|
+
) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
|
|
183
|
+
r"""Process a list of messages by selecting a model based on
|
|
184
|
+
the scheduling strategy.
|
|
185
|
+
Sends the entire list of messages to the selected model,
|
|
186
|
+
and returns a single response.
|
|
187
|
+
|
|
188
|
+
Args:
|
|
189
|
+
messages (List[OpenAIMessage]): Message list with the chat
|
|
190
|
+
history in OpenAI API format.
|
|
191
|
+
|
|
192
|
+
Returns:
|
|
193
|
+
Union[ChatCompletion, Stream[ChatCompletionChunk]]:
|
|
194
|
+
`ChatCompletion` in the non-stream mode, or
|
|
195
|
+
`Stream[ChatCompletionChunk]` in the stream mode.
|
|
196
|
+
"""
|
|
197
|
+
self.current_model = self.scheduling_strategy()
|
|
198
|
+
|
|
199
|
+
# Pass all messages to the selected model and get the response
|
|
200
|
+
try:
|
|
201
|
+
response = self.current_model.run(messages)
|
|
202
|
+
except Exception as exc:
|
|
203
|
+
logger.error(f"Error processing with model: {self.current_model}")
|
|
204
|
+
if self.scheduling_strategy == self.always_first:
|
|
205
|
+
self.scheduling_strategy = self.round_robin
|
|
206
|
+
logger.warning(
|
|
207
|
+
"The scheduling strategy has been changed to 'round_robin'"
|
|
208
|
+
)
|
|
209
|
+
# Skip already used one
|
|
210
|
+
self.current_model = self.scheduling_strategy()
|
|
211
|
+
raise exc
|
|
212
|
+
return response
|
camel/models/nemotron_model.py
CHANGED
|
@@ -1,16 +1,16 @@
|
|
|
1
|
-
#
|
|
2
|
-
# Licensed under the Apache License, Version 2.0 (the
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
3
|
# you may not use this file except in compliance with the License.
|
|
4
4
|
# You may obtain a copy of the License at
|
|
5
5
|
#
|
|
6
6
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
7
|
#
|
|
8
8
|
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
-
# distributed under the License is distributed on an
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
10
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
|
-
#
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
import os
|
|
15
15
|
from typing import List, Optional, Union
|
|
16
16
|
|