camel-ai 0.1.9__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +334 -113
- camel/agents/knowledge_graph_agent.py +4 -6
- camel/bots/__init__.py +34 -0
- camel/bots/discord_app.py +138 -0
- camel/bots/slack/__init__.py +30 -0
- camel/bots/slack/models.py +158 -0
- camel/bots/slack/slack_app.py +255 -0
- camel/bots/telegram_bot.py +82 -0
- camel/configs/__init__.py +1 -2
- camel/configs/anthropic_config.py +2 -5
- camel/configs/base_config.py +6 -6
- camel/configs/gemini_config.py +1 -1
- camel/configs/groq_config.py +2 -3
- camel/configs/ollama_config.py +1 -2
- camel/configs/openai_config.py +2 -23
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +2 -3
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +2 -0
- camel/loaders/chunkr_reader.py +163 -0
- camel/loaders/firecrawl_reader.py +13 -45
- camel/loaders/unstructured_io.py +65 -29
- camel/messages/__init__.py +1 -0
- camel/messages/func_message.py +2 -2
- camel/models/__init__.py +2 -4
- camel/models/anthropic_model.py +32 -26
- camel/models/azure_openai_model.py +39 -36
- camel/models/base_model.py +31 -20
- camel/models/gemini_model.py +37 -29
- camel/models/groq_model.py +29 -23
- camel/models/litellm_model.py +44 -61
- camel/models/mistral_model.py +33 -30
- camel/models/model_factory.py +66 -76
- camel/models/nemotron_model.py +33 -23
- camel/models/ollama_model.py +42 -47
- camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +36 -41
- camel/models/openai_model.py +60 -25
- camel/models/reka_model.py +30 -28
- camel/models/samba_model.py +82 -177
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +37 -43
- camel/models/vllm_model.py +43 -50
- camel/models/zhipuai_model.py +33 -27
- camel/retrievers/auto_retriever.py +28 -10
- camel/retrievers/vector_retriever.py +72 -44
- camel/societies/babyagi_playing.py +6 -3
- camel/societies/role_playing.py +17 -3
- camel/storages/__init__.py +2 -0
- camel/storages/graph_storages/__init__.py +2 -0
- camel/storages/graph_storages/graph_element.py +3 -5
- camel/storages/graph_storages/nebula_graph.py +547 -0
- camel/storages/key_value_storages/json.py +6 -1
- camel/tasks/task.py +11 -4
- camel/tasks/task_prompt.py +4 -0
- camel/toolkits/__init__.py +28 -24
- camel/toolkits/arxiv_toolkit.py +155 -0
- camel/toolkits/ask_news_toolkit.py +653 -0
- camel/toolkits/base.py +2 -3
- camel/toolkits/code_execution.py +6 -7
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/{openai_function.py → function_tool.py} +34 -11
- camel/toolkits/github_toolkit.py +9 -10
- camel/toolkits/google_maps_toolkit.py +7 -14
- camel/toolkits/google_scholar_toolkit.py +146 -0
- camel/toolkits/linkedin_toolkit.py +7 -10
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -8
- camel/toolkits/reddit_toolkit.py +7 -10
- camel/toolkits/retrieval_toolkit.py +5 -9
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -14
- camel/toolkits/twitter_toolkit.py +377 -454
- camel/toolkits/weather_toolkit.py +6 -6
- camel/toolkits/whatsapp_toolkit.py +177 -0
- camel/types/__init__.py +6 -1
- camel/types/enums.py +43 -85
- camel/types/openai_types.py +3 -0
- camel/types/unified_model_type.py +104 -0
- camel/utils/__init__.py +0 -2
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +40 -4
- camel/utils/token_counting.py +38 -214
- camel/workforce/__init__.py +6 -6
- camel/workforce/base.py +9 -5
- camel/workforce/prompts.py +179 -0
- camel/workforce/role_playing_worker.py +181 -0
- camel/workforce/{single_agent_node.py → single_agent_worker.py} +49 -23
- camel/workforce/task_channel.py +7 -8
- camel/workforce/utils.py +20 -50
- camel/workforce/{worker_node.py → worker.py} +15 -12
- camel/workforce/workforce.py +456 -19
- camel_ai-0.2.3.dist-info/LICENSE +201 -0
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/METADATA +40 -65
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/RECORD +98 -86
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/WHEEL +1 -1
- camel/models/open_source_model.py +0 -170
- camel/workforce/manager_node.py +0 -299
- camel/workforce/role_playing_node.py +0 -168
- camel/workforce/workforce_prompt.py +0 -125
camel/models/groq_model.py
CHANGED
@@ -16,7 +16,7 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
-from camel.configs import GROQ_API_PARAMS
+from camel.configs import GROQ_API_PARAMS, GroqConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
@@ -32,42 +32,48 @@ from camel.utils import (
 
 
 class GroqModel(BaseModelBackend):
-    r"""LLM API served by Groq in a unified BaseModelBackend interface.
+    r"""LLM API served by Groq in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`GroqConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the Groq service. (default: :obj:`None`).
+        url (Optional[str], optional): The url to the Groq service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+    """
 
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
-
-
-
-
-
-            api_key (Optional[str]): The API key for authenticating with the
-                Groq service. (default: :obj:`None`).
-            url (Optional[str]): The url to the Groq service. (default:
-                :obj:`"https://api.groq.com/openai/v1"`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter(ModelType.
-                GPT_4O_MINI)` will be used.
-        """
+        if model_config_dict is None:
+            model_config_dict = GroqConfig().as_dict()
+        api_key = api_key or os.environ.get("GROQ_API_KEY")
+        url = url or os.environ.get(
+            "GROQ_API_BASE_URL" or "https://api.groq.com/openai/v1"
+        )
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
-        self._url = url or os.environ.get("GROQ_API_BASE_URL")
-        self._api_key = api_key or os.environ.get("GROQ_API_KEY")
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
             api_key=self._api_key,
-            base_url=self._url
+            base_url=self._url,
         )
-        self._token_counter = token_counter
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -101,7 +107,7 @@ class GroqModel(BaseModelBackend):
         """
         response = self._client.chat.completions.create(
             messages=messages,
-            model=self.model_type
+            model=self.model_type,
             **self.model_config_dict,
         )
 
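In short, every GroqModel constructor argument except model_type is now optional, with the config defaulting to GroqConfig().as_dict() and credentials resolved from the environment. One caveat visible above: the url fallback passes "GROQ_API_BASE_URL" or "https://api.groq.com/openai/v1" as a single argument to os.environ.get, so the literal default URL is never actually consulted. A minimal usage sketch, not taken from the diff (the model name string and placeholder key are illustrative):

import os

from camel.models.groq_model import GroqModel

os.environ.setdefault("GROQ_API_KEY", "gsk-placeholder")  # illustrative key

# model_config_dict now defaults to GroqConfig().as_dict(), and the key/URL
# are read from the environment inside __init__.
model = GroqModel(model_type="llama3-8b-8192")  # illustrative model name
print(model.model_config_dict)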
camel/models/litellm_model.py
CHANGED
@@ -11,49 +11,58 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
-from camel.configs import LITELLM_API_PARAMS
+from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
 from camel.messages import OpenAIMessage
-from camel.
-from camel.
-
-
-
-
+from camel.models import BaseModelBackend
+from camel.types import ChatCompletion, ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    LiteLLMTokenCounter,
+    dependencies_required,
+)
+
+
+class LiteLLMModel(BaseModelBackend):
+    r"""Constructor for LiteLLM backend with OpenAI compatibility.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, such as GPT-3.5-turbo, Claude-2, etc.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`LiteLLMConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the model service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the model service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`LiteLLMTokenCounter` will
+            be used. (default: :obj:`None`)
+    """
 
     # NOTE: Currently stream mode is not supported.
 
+    @dependencies_required('litellm')
     def __init__(
         self,
-        model_type: str,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
+        from litellm import completion
 
-
-
-
-
-
-
-            url (Optional[str]): The url to the model service. (default:
-                :obj:`None`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `LiteLLMTokenCounter` will
-                be used.
-        """
-        self.model_type = model_type
-        self.model_config_dict = model_config_dict
-        self._client = None
-        self._token_counter = token_counter
-        self.check_model_config()
-        self._url = url
-        self._api_key = api_key
+        if model_config_dict is None:
+            model_config_dict = LiteLLMConfig().as_dict()
+
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self.client = completion
 
     def _convert_response_from_litellm_to_openai(
         self, response
@@ -86,26 +95,16 @@ class LiteLLMModel:
         )
 
     @property
-    def
-        if self._client is None:
-            from litellm import completion
-
-            self._client = completion
-        return self._client
-
-    @property
-    def token_counter(self) -> LiteLLMTokenCounter:
+    def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
 
         Returns:
-
+            BaseTokenCounter: The token counter following the model's
                 tokenization style.
         """
         if not self._token_counter:
-            self._token_counter = LiteLLMTokenCounter(
-
-            )
-        return self._token_counter  # type: ignore[return-value]
+            self._token_counter = LiteLLMTokenCounter(self.model_type)
+        return self._token_counter
 
     def run(
         self,
@@ -144,19 +143,3 @@ class LiteLLMModel:
                 f"Unexpected argument `{param}` is "
                 "input into LiteLLM model backend."
             )
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096
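The lazy client property and the local token_limit override are gone: litellm is now imported eagerly behind @dependencies_required('litellm'), and the token limit comes from BaseModelBackend. A minimal sketch of constructing the 0.2.3 backend (assumes litellm is installed; the model name is illustrative):

from camel.models.litellm_model import LiteLLMModel

# model_config_dict falls back to LiteLLMConfig().as_dict() when omitted.
model = LiteLLMModel(model_type="gpt-4o-mini")  # illustrative model name

# The counter is created on first access, bound to the model type.
counter = model.token_counter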
camel/models/mistral_model.py
CHANGED
@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
 if TYPE_CHECKING:
     from mistralai.models import (
@@ -20,7 +20,7 @@ if TYPE_CHECKING:
         Messages,
     )
 
-from camel.configs import MISTRAL_API_PARAMS
+from camel.configs import MISTRAL_API_PARAMS, MistralConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
@@ -28,11 +28,10 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    dependencies_required,
 )
 
 try:
-    import os
-
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import LLMEvent, record
     else:
@@ -42,40 +41,44 @@ except (ImportError, AttributeError):
 
 
 class MistralModel(BaseModelBackend):
-    r"""Mistral API in a unified BaseModelBackend interface.
-
+    r"""Mistral API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of MISTRAL_* series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`Mistral.chat.complete()`.
+            If:obj:`None`, :obj:`MistralConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the mistral service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the mistral service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter` will
+            be used. (default: :obj:`None`)
+    """
+
+    @dependencies_required('mistralai')
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
+        from mistralai import Mistral
 
-
-
-
-
-            api_key (Optional[str]): The API key for authenticating with the
-                mistral service. (default: :obj:`None`)
-            url (Optional[str]): The url to the mistral service.
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter` will be
-                used.
-        """
+        if model_config_dict is None:
+            model_config_dict = MistralConfig().as_dict()
+
+        api_key = api_key or os.environ.get("MISTRAL_API_KEY")
+        url = url or os.environ.get("MISTRAL_API_BASE_URL")
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
-        self._api_key = api_key or os.environ.get("MISTRAL_API_KEY")
-        self._url = url or os.environ.get("MISTRAL_SERVER_URL")
-
-        from mistralai import Mistral
-
         self._client = Mistral(api_key=self._api_key, server_url=self._url)
-        self._token_counter: Optional[BaseTokenCounter] = None
 
     def _to_openai_response(
         self, response: 'ChatCompletionResponse'
@@ -93,7 +96,7 @@ class MistralModel(BaseModelBackend):
                         "name": tool_call.function.name,  # type: ignore[union-attr]
                         "arguments": tool_call.function.arguments,  # type: ignore[union-attr]
                     },
-                    type=tool_call.
+                    type=tool_call.type,  # type: ignore[union-attr]
                 )
                 for tool_call in response.choices[0].message.tool_calls
             ]
@@ -215,7 +218,7 @@ class MistralModel(BaseModelBackend):
 
         response = self._client.chat.complete(
             messages=mistral_messages,
-            model=self.model_type
+            model=self.model_type,
             **self.model_config_dict,
         )
 
@@ -231,7 +234,7 @@ class MistralModel(BaseModelBackend):
             prompt_tokens=openai_response.usage.prompt_tokens,  # type: ignore[union-attr]
             completion=openai_response.choices[0].message.content,
             completion_tokens=openai_response.usage.completion_tokens,  # type: ignore[union-attr]
-            model=self.model_type
+            model=self.model_type,
         )
         record(llm_event)
 
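Note the environment variable rename from MISTRAL_SERVER_URL to MISTRAL_API_BASE_URL. A minimal construction sketch (assumes mistralai is installed; the key and model name are illustrative, not from the diff):

import os

from camel.models.mistral_model import MistralModel

os.environ.setdefault("MISTRAL_API_KEY", "placeholder")  # read inside __init__

# Config defaults to MistralConfig().as_dict(); the Mistral client is built
# eagerly, so the mistralai package must be importable.
model = MistralModel(model_type="mistral-large-latest")  # illustrative name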
camel/models/model_factory.py
CHANGED
@@ -11,7 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import
+from typing import Dict, Optional, Type, Union
 
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.azure_openai_model import AzureOpenAIModel
@@ -21,8 +21,7 @@ from camel.models.groq_model import GroqModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.mistral_model import MistralModel
 from camel.models.ollama_model import OllamaModel
-from camel.models.
-from camel.models.openai_compatibility_model import OpenAICompatibilityModel
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.models.openai_model import OpenAIModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
@@ -30,7 +29,7 @@ from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
 from camel.models.zhipuai_model import ZhipuAIModel
-from camel.types import ModelPlatformType, ModelType
+from camel.types import ModelPlatformType, ModelType, UnifiedModelType
 from camel.utils import BaseTokenCounter
 
 
@@ -45,7 +44,7 @@ class ModelFactory:
     def create(
         model_platform: ModelPlatformType,
         model_type: Union[ModelType, str],
-        model_config_dict: Dict,
+        model_config_dict: Optional[Dict] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
@@ -55,80 +54,71 @@ class ModelFactory:
        Args:
            model_platform (ModelPlatformType): Platform from which the model
                originates.
-            model_type (Union[ModelType, str]): Model for which a
-                created
-            model_config_dict (Dict): A dictionary that will be fed
-                the backend constructor.
-            token_counter (Optional[BaseTokenCounter]): Token
-                for the model. If not provided,
-
-                provide official
-
-
-
-
-
-            ValueError: If there is not backend for the model.
+            model_type (Union[ModelType, str]): Model for which a
+                backend is created. Can be a `str` for open source platforms.
+            model_config_dict (Optional[Dict]): A dictionary that will be fed
+                into the backend constructor. (default: :obj:`None`)
+            token_counter (Optional[BaseTokenCounter], optional): Token
+                counter to use for the model. If not provided,
+                :obj:`OpenAITokenCounter(ModelType.GPT_4O_MINI)`
+                will be used if the model platform didn't provide official
+                token counter. (default: :obj:`None`)
+            api_key (Optional[str], optional): The API key for authenticating
+                with the model service. (default: :obj:`None`)
+            url (Optional[str], optional): The url to the model service.
+                (default: :obj:`None`)
 
        Returns:
            BaseModelBackend: The initialized backend.
+
+        Raises:
+            ValueError: If there is no backend for the model.
        """
-        model_class:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        elif
-
-
-
-
-
-
-
-
-
-
-            model_class = OpenAICompatibilityModel
-        elif model_platform.is_samba:
-            model_class = SambaModel
-        elif model_platform.is_together:
-            model_class = TogetherAIModel
-            return model_class(
-                model_type, model_config_dict, api_key, token_counter
-            )
-        else:
-            raise ValueError(
-                f"Unknown pair of model platform `{model_platform}` "
-                f"and model type `{model_type}`."
-            )
-        else:
-            raise ValueError(f"Invalid model type `{model_type}` provided.")
+        model_class: Optional[Type[BaseModelBackend]] = None
+        model_type = UnifiedModelType(model_type)
+
+        if model_platform.is_ollama:
+            model_class = OllamaModel
+        elif model_platform.is_vllm:
+            model_class = VLLMModel
+        elif model_platform.is_openai_compatible_model:
+            model_class = OpenAICompatibleModel
+        elif model_platform.is_samba:
+            model_class = SambaModel
+        elif model_platform.is_together:
+            model_class = TogetherAIModel
+        elif model_platform.is_litellm:
+            model_class = LiteLLMModel
+
+        elif model_platform.is_openai and model_type.is_openai:
+            model_class = OpenAIModel
+        elif model_platform.is_azure and model_type.is_azure_openai:
+            model_class = AzureOpenAIModel
+        elif model_platform.is_anthropic and model_type.is_anthropic:
+            model_class = AnthropicModel
+        elif model_platform.is_groq and model_type.is_groq:
+            model_class = GroqModel
+        elif model_platform.is_zhipuai and model_type.is_zhipuai:
+            model_class = ZhipuAIModel
+        elif model_platform.is_gemini and model_type.is_gemini:
+            model_class = GeminiModel
+        elif model_platform.is_mistral and model_type.is_mistral:
+            model_class = MistralModel
+        elif model_platform.is_reka and model_type.is_reka:
+            model_class = RekaModel
+        elif model_type == ModelType.STUB:
+            model_class = StubModel
+
+        if model_class is None:
+            raise ValueError(
+                f"Unknown pair of model platform `{model_platform}` "
+                f"and model type `{model_type}`."
+            )
+
        return model_class(
-            model_type,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
        )
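The rewritten factory normalizes model_type through UnifiedModelType, dispatches platform-only backends (Ollama, vLLM, SambaNova, Together, LiteLLM, generic OpenAI-compatible) before the platform-and-type pairs, and passes everything to the backend as keyword arguments. A hedged sketch of the new call (ModelPlatformType.OPENAI is assumed; the diff only shows the is_openai check, not the member name):

import os

from camel.models.model_factory import ModelFactory
from camel.types import ModelPlatformType, ModelType

os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")  # illustrative key

# model_config_dict can now be omitted; each backend fills in its own
# default config.
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
)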
camel/models/nemotron_model.py
CHANGED
@@ -12,11 +12,12 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import List, Optional
+from typing import List, Optional, Union
 
 from openai import OpenAI
 
 from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
 from camel.utils import (
     BaseTokenCounter,
@@ -24,40 +25,38 @@ from camel.utils import (
 )
 
 
-class NemotronModel:
-    r"""Nemotron model API backend with OpenAI compatibility.
+class NemotronModel(BaseModelBackend):
+    r"""Nemotron model API backend with OpenAI compatibility.
 
-
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Nvidia service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Nvidia service.
+            (default: :obj:`https://integrate.api.nvidia.com/v1`)
+
+    Notes:
+        Nemotron model doesn't support additional model config like OpenAI.
+    """
 
     def __init__(
         self,
-        model_type: ModelType,
+        model_type: Union[ModelType, str],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
     ) -> None:
-
-
-
-
-
-            Nvidia service. (default: :obj:`None`)
-            url (Optional[str]): The url to the Nvidia service. (default:
-                :obj:`None`)
-        """
-        self.model_type = model_type
-        self._url = url or os.environ.get("NVIDIA_API_BASE_URL")
-        self._api_key = api_key or os.environ.get("NVIDIA_API_KEY")
-        if not self._url or not self._api_key:
-            raise ValueError(
-                "NVIDIA_API_BASE_URL and NVIDIA_API_KEY should be set."
-            )
+        url = url or os.environ.get(
+            "NVIDIA_API_BASE_URL", "https://integrate.api.nvidia.com/v1"
+        )
+        api_key = api_key or os.environ.get("NVIDIA_API_KEY")
+        super().__init__(model_type, {}, api_key, url)
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
         )
-        self._token_counter: Optional[BaseTokenCounter] = None
 
     @api_keys_required("NVIDIA_API_KEY")
     def run(
@@ -74,6 +73,17 @@ class NemotronModel:
         """
         response = self._client.chat.completions.create(
             messages=messages,
-            model=self.model_type
+            model=self.model_type,
         )
         return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        raise NotImplementedError(
+            "Nemotron model doesn't support token counter."
+        )
+
+    def check_model_config(self):
+        raise NotImplementedError(
+            "Nemotron model doesn't support model config."
+        )