camel-ai 0.2.3__py3-none-any.whl → 0.2.3a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +69 -93
- camel/agents/knowledge_graph_agent.py +6 -4
- camel/bots/__init__.py +2 -16
- camel/bots/discord_bot.py +206 -0
- camel/configs/__init__.py +2 -1
- camel/configs/anthropic_config.py +5 -2
- camel/configs/base_config.py +6 -6
- camel/configs/groq_config.py +3 -2
- camel/configs/ollama_config.py +2 -1
- camel/configs/openai_config.py +23 -2
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +3 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +0 -2
- camel/loaders/firecrawl_reader.py +3 -3
- camel/loaders/unstructured_io.py +33 -35
- camel/messages/__init__.py +0 -1
- camel/models/__init__.py +4 -2
- camel/models/anthropic_model.py +26 -32
- camel/models/azure_openai_model.py +36 -39
- camel/models/base_model.py +20 -31
- camel/models/gemini_model.py +29 -37
- camel/models/groq_model.py +23 -29
- camel/models/litellm_model.py +61 -44
- camel/models/mistral_model.py +29 -32
- camel/models/model_factory.py +76 -66
- camel/models/nemotron_model.py +23 -33
- camel/models/ollama_model.py +47 -42
- camel/models/open_source_model.py +170 -0
- camel/models/{openai_compatible_model.py → openai_compatibility_model.py} +49 -31
- camel/models/openai_model.py +29 -48
- camel/models/reka_model.py +28 -30
- camel/models/samba_model.py +177 -82
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +43 -37
- camel/models/vllm_model.py +50 -43
- camel/models/zhipuai_model.py +27 -33
- camel/retrievers/auto_retriever.py +10 -28
- camel/retrievers/vector_retriever.py +47 -58
- camel/societies/babyagi_playing.py +3 -6
- camel/societies/role_playing.py +3 -5
- camel/storages/graph_storages/graph_element.py +5 -3
- camel/storages/key_value_storages/json.py +1 -6
- camel/toolkits/__init__.py +7 -20
- camel/toolkits/base.py +3 -2
- camel/toolkits/code_execution.py +7 -6
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/github_toolkit.py +10 -9
- camel/toolkits/google_maps_toolkit.py +7 -7
- camel/toolkits/linkedin_toolkit.py +7 -7
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -5
- camel/toolkits/{function_tool.py → openai_function.py} +11 -34
- camel/toolkits/reddit_toolkit.py +7 -7
- camel/toolkits/retrieval_toolkit.py +5 -5
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -11
- camel/toolkits/twitter_toolkit.py +452 -378
- camel/toolkits/weather_toolkit.py +6 -6
- camel/types/__init__.py +1 -6
- camel/types/enums.py +85 -40
- camel/types/openai_types.py +0 -3
- camel/utils/__init__.py +2 -0
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +3 -32
- camel/utils/token_counting.py +212 -30
- camel/workforce/role_playing_worker.py +1 -1
- camel/workforce/single_agent_worker.py +1 -1
- camel/workforce/task_channel.py +3 -4
- camel/workforce/workforce.py +4 -4
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/METADATA +56 -27
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/RECORD +76 -85
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/WHEEL +1 -1
- camel/bots/discord_app.py +0 -138
- camel/bots/slack/__init__.py +0 -30
- camel/bots/slack/models.py +0 -158
- camel/bots/slack/slack_app.py +0 -255
- camel/loaders/chunkr_reader.py +0 -163
- camel/toolkits/arxiv_toolkit.py +0 -155
- camel/toolkits/ask_news_toolkit.py +0 -653
- camel/toolkits/google_scholar_toolkit.py +0 -146
- camel/toolkits/whatsapp_toolkit.py +0 -177
- camel/types/unified_model_type.py +0 -104
- camel_ai-0.2.3.dist-info/LICENSE +0 -201
camel/models/model_factory.py
CHANGED

@@ -11,7 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Dict, Optional,
+from typing import Any, Dict, Optional, Union

 from camel.models.anthropic_model import AnthropicModel
 from camel.models.azure_openai_model import AzureOpenAIModel
@@ -21,7 +21,8 @@ from camel.models.groq_model import GroqModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.mistral_model import MistralModel
 from camel.models.ollama_model import OllamaModel
-from camel.models.
+from camel.models.open_source_model import OpenSourceModel
+from camel.models.openai_compatibility_model import OpenAICompatibilityModel
 from camel.models.openai_model import OpenAIModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
@@ -29,7 +30,7 @@ from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
 from camel.models.zhipuai_model import ZhipuAIModel
-from camel.types import ModelPlatformType, ModelType
+from camel.types import ModelPlatformType, ModelType
 from camel.utils import BaseTokenCounter


@@ -44,7 +45,7 @@ class ModelFactory:
     def create(
         model_platform: ModelPlatformType,
         model_type: Union[ModelType, str],
-        model_config_dict:
+        model_config_dict: Dict,
         token_counter: Optional[BaseTokenCounter] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
@@ -54,71 +55,80 @@ class ModelFactory:
         Args:
             model_platform (ModelPlatformType): Platform from which the model
                 originates.
-            model_type (Union[ModelType, str]): Model for which a
-            model_config_dict (
-            token_counter (Optional[BaseTokenCounter]
-            [further removed docstring lines truncated in the source]
+            model_type (Union[ModelType, str]): Model for which a backend is
+                created can be a `str` for open source platforms.
+            model_config_dict (Dict): A dictionary that will be fed into
+                the backend constructor.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, OpenAITokenCounter(ModelType.
+                GPT_3_5_TURBO) will be used if the model platform didn't
+                provide official token counter.
+            api_key (Optional[str]): The API key for authenticating with the
+                model service.
+            url (Optional[str]): The url to the model service.
+
+        Raises:
+            ValueError: If there is not backend for the model.

         Returns:
             BaseModelBackend: The initialized backend.
-
-        Raises:
-            ValueError: If there is no backend for the model.
         """
-        model_class:
-        elif
-        [further removed body lines truncated in the source]
+        model_class: Any
+        if isinstance(model_type, ModelType):
+            if model_platform.is_open_source and model_type.is_open_source:
+                model_class = OpenSourceModel
+                return model_class(
+                    model_type, model_config_dict, url, token_counter
+                )
+            if model_platform.is_openai and model_type.is_openai:
+                model_class = OpenAIModel
+            elif model_platform.is_azure and model_type.is_azure_openai:
+                model_class = AzureOpenAIModel
+            elif model_platform.is_anthropic and model_type.is_anthropic:
+                model_class = AnthropicModel
+            elif model_type.is_groq:
+                model_class = GroqModel
+            elif model_platform.is_zhipuai and model_type.is_zhipuai:
+                model_class = ZhipuAIModel
+            elif model_platform.is_gemini and model_type.is_gemini:
+                model_class = GeminiModel
+            elif model_platform.is_mistral and model_type.is_mistral:
+                model_class = MistralModel
+            elif model_platform.is_reka and model_type.is_reka:
+                model_class = RekaModel
+            elif model_type == ModelType.STUB:
+                model_class = StubModel
+            else:
+                raise ValueError(
+                    f"Unknown pair of model platform `{model_platform}` "
+                    f"and model type `{model_type}`."
+                )
+        elif isinstance(model_type, str):
+            if model_platform.is_ollama:
+                model_class = OllamaModel
+                return model_class(
+                    model_type, model_config_dict, url, token_counter
+                )
+            elif model_platform.is_vllm:
+                model_class = VLLMModel
+            elif model_platform.is_litellm:
+                model_class = LiteLLMModel
+            elif model_platform.is_openai_compatibility_model:
+                model_class = OpenAICompatibilityModel
+            elif model_platform.is_samba:
+                model_class = SambaModel
+            elif model_platform.is_together:
+                model_class = TogetherAIModel
+                return model_class(
+                    model_type, model_config_dict, api_key, token_counter
+                )
+            else:
+                raise ValueError(
+                    f"Unknown pair of model platform `{model_platform}` "
+                    f"and model type `{model_type}`."
+                )
+        else:
+            raise ValueError(f"Invalid model type `{model_type}` provided.")
         return model_class(
-            model_type
-            model_config_dict=model_config_dict,
-            api_key=api_key,
-            url=url,
-            token_counter=token_counter,
+            model_type, model_config_dict, api_key, url, token_counter
         )
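Net effect: in 0.2.3a1, `ModelFactory.create` dispatches on whether `model_type` is a `ModelType` enum member or a plain string, and passes positional arguments to the chosen backend. A minimal sketch of a call under those rules; `ModelPlatformType.OPENAI` is an assumption based on other camel releases, not on this diff:

# Hedged sketch of calling the 0.2.3a1 factory; the enum members used
# here are illustrative assumptions.
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict={},  # fed directly into the backend constructor
)
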
camel/models/nemotron_model.py
CHANGED

@@ -12,12 +12,11 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import List, Optional
+from typing import List, Optional

 from openai import OpenAI

 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
 from camel.utils import (
     BaseTokenCounter,
@@ -25,38 +24,40 @@ from camel.utils import (
 )


-class NemotronModel
-    r"""Nemotron model API backend with OpenAI compatibility.
+class NemotronModel:
+    r"""Nemotron model API backend with OpenAI compatibility."""

-        model_type (Union[ModelType, str]): Model for which a backend is
-            created.
-        api_key (Optional[str], optional): The API key for authenticating with
-            the Nvidia service. (default: :obj:`None`)
-        url (Optional[str], optional): The url to the Nvidia service.
-            (default: :obj:`https://integrate.api.nvidia.com/v1`)
-
-    Notes:
-        Nemotron model doesn't support additional model config like OpenAI.
-    """
+    # NOTE: Nemotron model doesn't support additional model config like OpenAI.

     def __init__(
         self,
-        model_type:
+        model_type: ModelType,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
     ) -> None:
-        [removed lines truncated in the source]
+        r"""Constructor for Nvidia backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            api_key (Optional[str]): The API key for authenticating with the
+                Nvidia service. (default: :obj:`None`)
+            url (Optional[str]): The url to the Nvidia service. (default:
+                :obj:`None`)
+        """
+        self.model_type = model_type
+        self._url = url or os.environ.get("NVIDIA_API_BASE_URL")
+        self._api_key = api_key or os.environ.get("NVIDIA_API_KEY")
+        if not self._url or not self._api_key:
+            raise ValueError(
+                "NVIDIA_API_BASE_URL and NVIDIA_API_KEY should be set."
+            )
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
         )
+        self._token_counter: Optional[BaseTokenCounter] = None

     @api_keys_required("NVIDIA_API_KEY")
     def run(
@@ -73,17 +74,6 @@ class NemotronModel(BaseModelBackend):
         """
         response = self._client.chat.completions.create(
             messages=messages,
-            model=self.model_type,
+            model=self.model_type.value,
         )
         return response
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        raise NotImplementedError(
-            "Nemotron model doesn't support token counter."
-        )
-
-    def check_model_config(self):
-        raise NotImplementedError(
-            "Nemotron model doesn't support model config."
-        )
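In 0.2.3a1, `NemotronModel` is a plain class (no `BaseModelBackend` parent) that resolves its endpoint and key from `NVIDIA_API_BASE_URL` and `NVIDIA_API_KEY` when they are not passed explicitly, and raises if either is missing. A hedged usage sketch; the reward-model enum member and key value are illustrative assumptions:

import os

# Both variables are required by the 0.2.3a1 constructor shown above.
os.environ["NVIDIA_API_BASE_URL"] = "https://integrate.api.nvidia.com/v1"
os.environ["NVIDIA_API_KEY"] = "nvapi-..."  # placeholder key

from camel.models.nemotron_model import NemotronModel
from camel.types import ModelType

# ModelType.NEMOTRON_4_REWARD is an illustrative assumption.
model = NemotronModel(model_type=ModelType.NEMOTRON_4_REWARD)
response = model.run([{"role": "user", "content": "Hello"}])
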
camel/models/ollama_model.py
CHANGED

@@ -17,64 +17,54 @@ from typing import Any, Dict, List, Optional, Union

 from openai import OpenAI, Stream

-from camel.configs import OLLAMA_API_PARAMS
+from camel.configs import OLLAMA_API_PARAMS
 from camel.messages import OpenAIMessage
-from camel.
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import BaseTokenCounter, OpenAITokenCounter


-class OllamaModel
-    r"""Ollama service interface.
-
-    Args:
-        model_type (Union[ModelType, str]): Model for which a backend is
-            created.
-        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into:obj:`openai.ChatCompletion.create()`.
-            If:obj:`None`, :obj:`OllamaConfig().as_dict()` will be used.
-            (default: :obj:`None`)
-        api_key (Optional[str], optional): The API key for authenticating with
-            the model service. Ollama doesn't need API key, it would be
-            ignored if set. (default: :obj:`None`)
-        url (Optional[str], optional): The url to the model service.
-            (default: :obj:`None`)
-        token_counter (Optional[BaseTokenCounter], optional): Token counter to
-            use for the model. If not provided, :obj:`OpenAITokenCounter(
-            ModelType.GPT_4O_MINI)` will be used.
-            (default: :obj:`None`)
-
-    References:
-        https://github.com/ollama/ollama/blob/main/docs/openai.md
-    """
+class OllamaModel:
+    r"""Ollama service interface."""

     def __init__(
         self,
-        model_type:
-        model_config_dict:
-        api_key: Optional[str] = None,
+        model_type: str,
+        model_config_dict: Dict[str, Any],
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        [removed lines truncated in the source]
+        r"""Constructor for Ollama backend with OpenAI compatibility.
+
+        # Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
+
+        Args:
+            model_type (str): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into openai.ChatCompletion.create().
+            url (Optional[str]): The url to the model service. (default:
+                :obj:`"http://localhost:11434/v1"`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_4O_MINI)` will be used.
+        """
+        self.model_type = model_type
+        self.model_config_dict = model_config_dict
+        self._url = (
+            url
+            or os.environ.get("OLLAMA_BASE_URL")
+            or "http://localhost:11434/v1"
         )
-        if not
+        if not url and not os.environ.get("OLLAMA_BASE_URL"):
             self._start_server()
         # Use OpenAI client as interface call Ollama
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
-            api_key="Set-but-ignored",  # required but ignored
             base_url=self._url,
+            api_key="ollama",  # required but ignored
         )
+        self._token_counter = token_counter
+        self.check_model_config()

     def _start_server(self) -> None:
         r"""Starts the Ollama server in a subprocess."""
@@ -84,9 +74,8 @@ class OllamaModel(BaseModelBackend):
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
             )
-            self._url = "http://localhost:11434/v1"
             print(
-                f"Ollama server started on
+                f"Ollama server started on http://localhost:11434/v1 "
                 f"for {self.model_type} model."
             )
         except Exception as e:
@@ -142,6 +131,22 @@ class OllamaModel(BaseModelBackend):
         )
         return response

+    @property
+    def token_limit(self) -> int:
+        r"""Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
+
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
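Because the new `token_limit` property reads `max_tokens` out of `model_config_dict` (falling back to 4096 with a warning), configs for the 0.2.3a1 `OllamaModel` should set it explicitly. A hedged sketch; the `"llama3"` model name and the assumption that `temperature`/`max_tokens` pass the `OLLAMA_API_PARAMS` check are illustrative, not taken from this diff:

from camel.models.ollama_model import OllamaModel

# Passing a url skips the automatic `ollama serve` subprocess.
model = OllamaModel(
    model_type="llama3",  # assumed model name
    model_config_dict={"temperature": 0.4, "max_tokens": 4096},
    url="http://localhost:11434/v1",
)
print(model.token_limit)  # 4096, read from model_config_dict
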
camel/models/open_source_model.py
ADDED

@@ -0,0 +1,170 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import OPENAI_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    OpenSourceTokenCounter,
+)
+
+
+class OpenSourceModel(BaseModelBackend):
+    r"""Class for interace with OpenAI-API-compatible servers running
+    open-source models.
+    """
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        r"""Constructor for model backends of Open-source models.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into :obj:`openai.ChatCompletion.create()`.
+            api_key (Optional[str]): The API key for authenticating with the
+                model service. (ignored for open-source models)
+            url (Optional[str]): The url to the model service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenSourceTokenCounter` will
+                be used.
+        """
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+
+        # Check whether the input model type is open-source
+        if not model_type.is_open_source:
+            raise ValueError(
+                f"Model `{model_type}` is not a supported open-source model."
+            )
+
+        # Check whether input model path is empty
+        model_path: Optional[str] = self.model_config_dict.get(
+            "model_path", None
+        )
+        if not model_path:
+            raise ValueError("Path to open-source model is not provided.")
+        self.model_path: str = model_path
+
+        # Check whether the model name matches the model type
+        self.model_name: str = self.model_path.split('/')[-1]
+        if not self.model_type.validate_model_name(self.model_name):
+            raise ValueError(
+                f"Model name `{self.model_name}` does not match model type "
+                f"`{self.model_type.value}`."
+            )
+
+        # Load the server URL and check whether it is None
+        server_url: Optional[str] = url or self.model_config_dict.get(
+            "server_url", None
+        )
+        if not server_url:
+            raise ValueError(
+                "URL to server running open-source LLM is not provided."
+            )
+        self.server_url: str = server_url
+        self._client = OpenAI(
+            base_url=self.server_url,
+            timeout=60,
+            max_retries=3,
+            api_key="fake_key",
+        )
+
+        # Replace `model_config_dict` with only the params to be
+        # passed to OpenAI API
+        self.model_config_dict = self.model_config_dict["api_params"]
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenSourceTokenCounter(
+                self.model_type, self.model_path
+            )
+        return self._token_counter
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI-API-style chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        messages_openai: List[OpenAIMessage] = messages
+        response = self._client.chat.completions.create(
+            messages=messages_openai,
+            model=self.model_name,
+            **self.model_config_dict,
+        )
+        return response
+
+    def check_model_config(self):
+        r"""Check whether the model configuration is valid for open-source
+        model backends.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API, or it does not contain
+                :obj:`model_path` or :obj:`server_url`.
+        """
+        if (
+            "model_path" not in self.model_config_dict
+            or "server_url" not in self.model_config_dict
+        ):
+            raise ValueError(
+                "Invalid configuration for open-source model backend with "
+                ":obj:`model_path` or :obj:`server_url` missing."
+            )
+
+        for param in self.model_config_dict["api_params"]:
+            if param not in OPENAI_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into open-source model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode,
+        which sends partial results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
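The constructor above expects `model_path` and `server_url` at the top level of `model_config_dict` and then narrows the dict to its `api_params` entry before calling the OpenAI-compatible endpoint. A hedged sketch of that config shape; the Llama 2 path, the local server URL, and the `ModelType.LLAMA_2` member are illustrative assumptions:

from camel.models.open_source_model import OpenSourceModel
from camel.types import ModelType

# Only the `api_params` sub-dict reaches openai.ChatCompletion.create();
# ModelType.LLAMA_2 and the model path are assumptions for illustration.
model = OpenSourceModel(
    model_type=ModelType.LLAMA_2,
    model_config_dict={
        "model_path": "meta-llama/Llama-2-7b-chat-hf",
        "server_url": "http://localhost:8000/v1",
        "api_params": {"temperature": 0.7, "stream": False},
    },
)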