camel-ai 0.1.5.6__py3-none-any.whl → 0.1.5.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +3 -3
- camel/agents/critic_agent.py +1 -1
- camel/agents/deductive_reasoner_agent.py +4 -4
- camel/agents/embodied_agent.py +1 -1
- camel/agents/knowledge_graph_agent.py +2 -2
- camel/agents/role_assignment_agent.py +1 -1
- camel/agents/search_agent.py +4 -5
- camel/agents/task_agent.py +5 -5
- camel/configs/__init__.py +9 -0
- camel/configs/gemini_config.py +15 -14
- camel/configs/groq_config.py +119 -0
- camel/configs/litellm_config.py +1 -1
- camel/configs/mistral_config.py +81 -0
- camel/configs/ollama_config.py +1 -1
- camel/configs/openai_config.py +1 -1
- camel/configs/vllm_config.py +103 -0
- camel/configs/zhipuai_config.py +1 -1
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/mistral_embedding.py +89 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/ipython_interpreter.py +167 -0
- camel/models/__init__.py +8 -0
- camel/models/anthropic_model.py +7 -2
- camel/models/azure_openai_model.py +152 -0
- camel/models/base_model.py +9 -2
- camel/models/gemini_model.py +14 -2
- camel/models/groq_model.py +131 -0
- camel/models/litellm_model.py +26 -4
- camel/models/mistral_model.py +169 -0
- camel/models/model_factory.py +30 -3
- camel/models/ollama_model.py +21 -2
- camel/models/open_source_model.py +11 -3
- camel/models/openai_model.py +7 -2
- camel/models/stub_model.py +4 -4
- camel/models/vllm_model.py +138 -0
- camel/models/zhipuai_model.py +7 -4
- camel/prompts/__init__.py +2 -2
- camel/prompts/task_prompt_template.py +4 -4
- camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
- camel/retrievers/auto_retriever.py +2 -0
- camel/storages/graph_storages/neo4j_graph.py +5 -0
- camel/toolkits/__init__.py +36 -0
- camel/toolkits/base.py +1 -1
- camel/toolkits/code_execution.py +1 -1
- camel/toolkits/github_toolkit.py +3 -2
- camel/toolkits/google_maps_toolkit.py +367 -0
- camel/toolkits/math_toolkit.py +79 -0
- camel/toolkits/open_api_toolkit.py +548 -0
- camel/toolkits/retrieval_toolkit.py +76 -0
- camel/toolkits/search_toolkit.py +326 -0
- camel/toolkits/slack_toolkit.py +308 -0
- camel/toolkits/twitter_toolkit.py +522 -0
- camel/toolkits/weather_toolkit.py +173 -0
- camel/types/enums.py +149 -34
- camel/utils/__init__.py +2 -0
- camel/utils/async_func.py +1 -1
- camel/utils/token_counting.py +148 -40
- {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.5.9.dist-info}/METADATA +42 -3
- camel_ai-0.1.5.9.dist-info/RECORD +165 -0
- camel/functions/__init__.py +0 -51
- camel/functions/google_maps_function.py +0 -335
- camel/functions/math_functions.py +0 -61
- camel/functions/open_api_function.py +0 -508
- camel/functions/retrieval_functions.py +0 -61
- camel/functions/search_functions.py +0 -298
- camel/functions/slack_functions.py +0 -286
- camel/functions/twitter_function.py +0 -479
- camel/functions/weather_functions.py +0 -144
- camel_ai-0.1.5.6.dist-info/RECORD +0 -157
- /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
- /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
- /camel/{functions → toolkits}/openai_function.py +0 -0
- {camel_ai-0.1.5.6.dist-info → camel_ai-0.1.5.9.dist-info}/WHEEL +0 -0
camel/models/groq_model.py ADDED
@@ -0,0 +1,131 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import GROQ_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class GroqModel(BaseModelBackend):
+    r"""LLM API served by Groq in a unified BaseModelBackend interface."""
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        r"""Constructor for Groq backend.
+
+        Args:
+            model_type (str): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary of parameters for
+                the model configuration.
+            api_key (Optional[str]): The API key for authenticating with the
+                Groq service. (default: :obj:`None`).
+            url (Optional[str]): The url to the Groq service. (default:
+                :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_3_5_TURBO)` will be used.
+        """
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._url = url or "https://api.groq.com/openai/v1"
+        self._api_key = api_key or os.environ.get("GROQ_API_KEY")
+        self._client = OpenAI(
+            timeout=60,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+        self._token_counter = token_counter
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        # Make sure you have the access to these open-source model in
+        # HuggingFace
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
+        return self._token_counter
+
+    @api_keys_required("GROQ_API_KEY")
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type.value,
+            **self.model_config_dict,
+        )
+
+        return response
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any unexpected
+        arguments to Groq API. But Groq API does not have any additional
+        arguments to check.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to Groq API.
+        """
+        for param in self.model_config_dict:
+            if param not in GROQ_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into Groq model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model supports streaming. But Groq API does
+        not support streaming.
+        """
+        return False
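Taken as a whole, GroqModel is a thin OpenAI-compatible client pointed at Groq's endpoint. A minimal usage sketch, assuming a valid GROQ_API_KEY in the environment; the enums.py diff (+149 -34) suggests new ModelType members, but the Groq member name used here is a guess:

import os

from camel.models.groq_model import GroqModel
from camel.types import ModelType

os.environ.setdefault("GROQ_API_KEY", "gsk_...")  # placeholder key

model = GroqModel(
    model_type=ModelType.GROQ_LLAMA_3_70B,  # guessed enum member; see enums.py
    model_config_dict={"temperature": 0.0},  # assumed to be in GROQ_API_PARAMS
)
response = model.run([{"role": "user", "content": "Say hello."}])
print(response.choices[0].message.content)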
camel/models/litellm_model.py CHANGED
@@ -16,7 +16,7 @@ from typing import Any, Dict, List, Optional
 from camel.configs import LITELLM_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.types import ChatCompletion
-from camel.utils import LiteLLMTokenCounter
+from camel.utils import BaseTokenCounter, LiteLLMTokenCounter
 
 
 class LiteLLMModel:
@@ -30,6 +30,7 @@ class LiteLLMModel:
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for LiteLLM backend.
 
@@ -42,11 +43,14 @@ class LiteLLMModel:
             model service. (default: :obj:`None`)
             url (Optional[str]): The url to the model service. (default:
                 :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `LiteLLMTokenCounter` will
+                be used.
         """
         self.model_type = model_type
         self.model_config_dict = model_config_dict
         self._client = None
-        self._token_counter
+        self._token_counter = token_counter
         self.check_model_config()
         self._url = url
         self._api_key = api_key
@@ -98,8 +102,10 @@ class LiteLLMModel:
             tokenization style.
         """
         if not self._token_counter:
-            self._token_counter = LiteLLMTokenCounter(
-                self.model_type)
+            self._token_counter = LiteLLMTokenCounter(  # type: ignore[assignment]
+                self.model_type
+            )
+        return self._token_counter  # type: ignore[return-value]
 
     def run(
         self,
@@ -138,3 +144,19 @@ class LiteLLMModel:
                     f"Unexpected argument `{param}` is "
                     "input into LiteLLM model backend."
                 )
+
+    @property
+    def token_limit(self) -> int:
+        """Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
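Note that the new token_limit property consults only model_config_dict; nothing asks LiteLLM for the model's real context window. A sketch of the resulting behavior, using a hypothetical model string and assuming "max_tokens" is accepted by LITELLM_API_PARAMS:

from camel.models.litellm_model import LiteLLMModel

# "gpt-3.5-turbo" is only an illustrative LiteLLM model name.
model = LiteLLMModel("gpt-3.5-turbo", {"max_tokens": 500})
print(model.token_limit)  # -> 500

model = LiteLLMModel("gpt-3.5-turbo", {})
print(model.token_limit)  # prints the warning, then -> 4096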
camel/models/mistral_model.py ADDED
@@ -0,0 +1,169 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+if TYPE_CHECKING:
+    from mistralai.models.chat_completion import ChatCompletionResponse
+
+from camel.configs import MISTRAL_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import ChatCompletion, ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    MistralTokenCounter,
+    api_keys_required,
+)
+
+
+class MistralModel(BaseModelBackend):
+    r"""Mistral API in a unified BaseModelBackend interface."""
+
+    # TODO: Support tool calling.
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        r"""Constructor for Mistral backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created,
+                one of MISTRAL_* series.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into `MistralClient.chat`.
+            api_key (Optional[str]): The API key for authenticating with the
+                mistral service. (default: :obj:`None`)
+            url (Optional[str]): The url to the mistral service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `MistralTokenCounter` will be
+                used.
+        """
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+        self._api_key = api_key or os.environ.get("MISTRAL_API_KEY")
+
+        from mistralai.client import MistralClient
+
+        self._client = MistralClient(api_key=self._api_key)
+        self._token_counter: Optional[BaseTokenCounter] = None
+
+    def _convert_response_from_mistral_to_openai(
+        self, response: 'ChatCompletionResponse'
+    ) -> ChatCompletion:
+        tool_calls = None
+        if response.choices[0].message.tool_calls is not None:
+            tool_calls = [
+                dict(
+                    id=tool_call.id,
+                    function={
+                        "name": tool_call.function.name,
+                        "arguments": tool_call.function.arguments,
+                    },
+                    type=tool_call.type.value,
+                )
+                for tool_call in response.choices[0].message.tool_calls
+            ]
+
+        obj = ChatCompletion.construct(
+            id=response.id,
+            choices=[
+                dict(
+                    index=response.choices[0].index,
+                    message={
+                        "role": response.choices[0].message.role,
+                        "content": response.choices[0].message.content,
+                        "tool_calls": tool_calls,
+                    },
+                    finish_reason=response.choices[0].finish_reason.value
+                    if response.choices[0].finish_reason
+                    else None,
+                )
+            ],
+            created=response.created,
+            model=response.model,
+            object="chat.completion",
+            usage=response.usage,
+        )
+
+        return obj
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = MistralTokenCounter(
+                model_type=self.model_type
+            )
+        return self._token_counter
+
+    @api_keys_required("MISTRAL_API_KEY")
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> ChatCompletion:
+        r"""Runs inference of Mistral chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            ChatCompletion
+        """
+        response = self._client.chat(
+            messages=messages,
+            model=self.model_type.value,
+            **self.model_config_dict,
+        )
+
+        response = self._convert_response_from_mistral_to_openai(response)  # type:ignore[assignment]
+
+        return response  # type:ignore[return-value]
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to Mistral API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to Mistral API.
+        """
+        for param in self.model_config_dict:
+            if param not in MISTRAL_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into Mistral model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time. Mistral doesn't support stream mode.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return False
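Because _convert_response_from_mistral_to_openai rebuilds the reply as a ChatCompletion, callers can treat Mistral exactly like the OpenAI-style backends. A sketch, assuming mistralai is installed, MISTRAL_API_KEY is set, and that ModelType gains MISTRAL_* members (the member name here is a guess):

from camel.models.mistral_model import MistralModel
from camel.types import ModelType

model = MistralModel(
    model_type=ModelType.MISTRAL_LARGE,  # guessed MISTRAL_* member; see enums.py
    model_config_dict={"temperature": 0.2},  # assumed to be in MISTRAL_API_PARAMS
)
completion = model.run([{"role": "user", "content": "Reply with one word."}])
print(completion.choices[0])  # converted to OpenAI's chat.completion shape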
camel/models/model_factory.py CHANGED
@@ -14,15 +14,20 @@
 from typing import Any, Dict, Optional, Union
 
 from camel.models.anthropic_model import AnthropicModel
+from camel.models.azure_openai_model import AzureOpenAIModel
 from camel.models.base_model import BaseModelBackend
 from camel.models.gemini_model import GeminiModel
+from camel.models.groq_model import GroqModel
 from camel.models.litellm_model import LiteLLMModel
+from camel.models.mistral_model import MistralModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.open_source_model import OpenSourceModel
 from camel.models.openai_model import OpenAIModel
 from camel.models.stub_model import StubModel
+from camel.models.vllm_model import VLLMModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType
+from camel.utils import BaseTokenCounter
 
 
 class ModelFactory:
@@ -37,6 +42,7 @@ class ModelFactory:
         model_platform: ModelPlatformType,
         model_type: Union[ModelType, str],
         model_config_dict: Dict,
+        token_counter: Optional[BaseTokenCounter] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
     ) -> BaseModelBackend:
@@ -49,6 +55,10 @@ class ModelFactory:
             created can be a `str` for open source platforms.
             model_config_dict (Dict): A dictionary that will be fed into
                 the backend constructor.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, OpenAITokenCounter(ModelType.
+                GPT_3_5_TURBO) will be used if the model platform didn't
+                provide official token counter.
             api_key (Optional[str]): The API key for authenticating with the
                 model service.
             url (Optional[str]): The url to the model service.
@@ -63,15 +73,23 @@ class ModelFactory:
         if isinstance(model_type, ModelType):
             if model_platform.is_open_source and model_type.is_open_source:
                 model_class = OpenSourceModel
-                return model_class(model_type, model_config_dict, url)
+                return model_class(
+                    model_type, model_config_dict, url, token_counter
+                )
             if model_platform.is_openai and model_type.is_openai:
                 model_class = OpenAIModel
+            elif model_platform.is_azure and model_type.is_azure_openai:
+                model_class = AzureOpenAIModel
             elif model_platform.is_anthropic and model_type.is_anthropic:
                 model_class = AnthropicModel
+            elif model_type.is_groq:
+                model_class = GroqModel
             elif model_platform.is_zhipuai and model_type.is_zhipuai:
                 model_class = ZhipuAIModel
             elif model_platform.is_gemini and model_type.is_gemini:
                 model_class = GeminiModel
+            elif model_platform.is_mistral and model_type.is_mistral:
+                model_class = MistralModel
             elif model_type == ModelType.STUB:
                 model_class = StubModel
             else:
@@ -82,7 +100,14 @@ class ModelFactory:
         elif isinstance(model_type, str):
             if model_platform.is_ollama:
                 model_class = OllamaModel
-                return model_class(model_type, model_config_dict, url)
+                return model_class(
+                    model_type, model_config_dict, url, token_counter
+                )
+            elif model_platform.is_vllm:
+                model_class = VLLMModel
+                return model_class(
+                    model_type, model_config_dict, url, api_key, token_counter
+                )
             elif model_platform.is_litellm:
                 model_class = LiteLLMModel
             else:
@@ -92,4 +117,6 @@ class ModelFactory:
             )
         else:
             raise ValueError(f"Invalid model type `{model_type}` provided.")
-        return model_class(model_type, model_config_dict, api_key, url)
+        return model_class(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
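With token_counter threaded through ModelFactory.create, callers can now override token counting at construction time. A sketch over the OpenAI path; the ModelPlatformType.OPENAI member name is assumed from the is_openai check above, and the override itself is optional:

from camel.models.model_factory import ModelFactory
from camel.types import ModelPlatformType, ModelType
from camel.utils import OpenAITokenCounter

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,  # member name assumed
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict={"temperature": 0.0},
    # Optional: without this, each backend falls back to its own default.
    token_counter=OpenAITokenCounter(ModelType.GPT_3_5_TURBO),
)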
camel/models/ollama_model.py CHANGED
@@ -29,6 +29,7 @@ class OllamaModel:
         model_type: str,
         model_config_dict: Dict[str, Any],
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for Ollama backend with OpenAI compatibility.
 
@@ -40,6 +41,9 @@ class OllamaModel:
             be fed into openai.ChatCompletion.create().
             url (Optional[str]): The url to the model service. (default:
                 :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_3_5_TURBO)` will be used.
         """
         self.model_type = model_type
         self.model_config_dict = model_config_dict
@@ -50,7 +54,7 @@ class OllamaModel:
             base_url=url,
            api_key="ollama",  # required but ignored
         )
-        self._token_counter
+        self._token_counter = token_counter
         self.check_model_config()
 
     @property
@@ -61,7 +65,6 @@ class OllamaModel:
             BaseTokenCounter: The token counter following the model's
                 tokenization style.
         """
-        # NOTE: Use OpenAITokenCounter temporarily
        if not self._token_counter:
             self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
         return self._token_counter
@@ -104,6 +107,22 @@ class OllamaModel:
         )
         return response
 
+    @property
+    def token_limit(self) -> int:
+        """Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
+
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
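The Ollama backend mirrors the LiteLLM changes: an injectable counter plus a config-driven token_limit. A sketch, assuming a local Ollama server and that "max_tokens" passes check_model_config:

from camel.models.ollama_model import OllamaModel

model = OllamaModel(
    model_type="llama3",  # plain string tag, per the constructor signature
    model_config_dict={"max_tokens": 4096},
    url="http://localhost:11434/v1",  # assumed local Ollama endpoint
)
print(model.token_limit)  # -> 4096, read straight from model_config_dict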
camel/models/open_source_model.py CHANGED
@@ -19,7 +19,10 @@ from camel.configs import OPENAI_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
-from camel.utils import OpenSourceTokenCounter
+from camel.utils import (
+    BaseTokenCounter,
+    OpenSourceTokenCounter,
+)
 
 
 class OpenSourceModel(BaseModelBackend):
@@ -33,6 +36,7 @@ class OpenSourceModel(BaseModelBackend):
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for model backends of Open-source models.
 
@@ -43,9 +47,13 @@ class OpenSourceModel(BaseModelBackend):
             api_key (Optional[str]): The API key for authenticating with the
                 model service. (ignored for open-source models)
             url (Optional[str]): The url to the model service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenSourceTokenCounter` will
+                be used.
         """
-        super().__init__(
-            model_type, model_config_dict, api_key, url)
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
 
         # Check whether the input model type is open-source
         if not model_type.is_open_source:
camel/models/openai_model.py CHANGED
@@ -36,6 +36,7 @@ class OpenAIModel(BaseModelBackend):
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for OpenAI backend.
 
@@ -48,8 +49,13 @@ class OpenAIModel(BaseModelBackend):
             OpenAI service. (default: :obj:`None`)
             url (Optional[str]): The url to the OpenAI service. (default:
                 :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter` will
+                be used.
         """
-        super().__init__(model_type, model_config_dict, api_key, url)
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
         self._url = url or os.environ.get("OPENAI_API_BASE_URL")
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
         self._client = OpenAI(
@@ -58,7 +64,6 @@ class OpenAIModel(BaseModelBackend):
             base_url=self._url,
             api_key=self._api_key,
         )
-        self._token_counter: Optional[BaseTokenCounter] = None
 
     @property
     def token_counter(self) -> BaseTokenCounter:
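Since OpenAIModel.__init__ now hands token_counter to the base class instead of unconditionally resetting self._token_counter to None, an injected counter actually survives construction. A sketch of supplying one; count_tokens_from_messages is assumed to be BaseTokenCounter's abstract method:

from typing import List

from camel.messages import OpenAIMessage
from camel.models.openai_model import OpenAIModel
from camel.types import ModelType
from camel.utils import BaseTokenCounter


class FixedRateCounter(BaseTokenCounter):
    """Toy counter assuming roughly 4 characters per token."""

    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
        text = "".join(str(m.get("content", "")) for m in messages)
        return max(1, len(text) // 4)


model = OpenAIModel(
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict={},
    token_counter=FixedRateCounter(),  # no longer overwritten by __init__
)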
camel/models/stub_model.py CHANGED
@@ -55,12 +55,12 @@ class StubModel(BaseModelBackend):
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""All arguments are unused for the dummy model."""
-        super().__init__(
-            model_type, model_config_dict, api_key, url
-        )
-        self._url = url
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
 
     @property
     def token_counter(self) -> BaseTokenCounter: