camel-ai 0.1.9__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +334 -113
- camel/agents/knowledge_graph_agent.py +4 -6
- camel/bots/__init__.py +34 -0
- camel/bots/discord_app.py +138 -0
- camel/bots/slack/__init__.py +30 -0
- camel/bots/slack/models.py +158 -0
- camel/bots/slack/slack_app.py +255 -0
- camel/bots/telegram_bot.py +82 -0
- camel/configs/__init__.py +1 -2
- camel/configs/anthropic_config.py +2 -5
- camel/configs/base_config.py +6 -6
- camel/configs/gemini_config.py +1 -1
- camel/configs/groq_config.py +2 -3
- camel/configs/ollama_config.py +1 -2
- camel/configs/openai_config.py +2 -23
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +2 -3
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +2 -0
- camel/loaders/chunkr_reader.py +163 -0
- camel/loaders/firecrawl_reader.py +13 -45
- camel/loaders/unstructured_io.py +65 -29
- camel/messages/__init__.py +1 -0
- camel/messages/func_message.py +2 -2
- camel/models/__init__.py +2 -4
- camel/models/anthropic_model.py +32 -26
- camel/models/azure_openai_model.py +39 -36
- camel/models/base_model.py +31 -20
- camel/models/gemini_model.py +37 -29
- camel/models/groq_model.py +29 -23
- camel/models/litellm_model.py +44 -61
- camel/models/mistral_model.py +33 -30
- camel/models/model_factory.py +66 -76
- camel/models/nemotron_model.py +33 -23
- camel/models/ollama_model.py +42 -47
- camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +36 -41
- camel/models/openai_model.py +60 -25
- camel/models/reka_model.py +30 -28
- camel/models/samba_model.py +82 -177
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +37 -43
- camel/models/vllm_model.py +43 -50
- camel/models/zhipuai_model.py +33 -27
- camel/retrievers/auto_retriever.py +28 -10
- camel/retrievers/vector_retriever.py +72 -44
- camel/societies/babyagi_playing.py +6 -3
- camel/societies/role_playing.py +17 -3
- camel/storages/__init__.py +2 -0
- camel/storages/graph_storages/__init__.py +2 -0
- camel/storages/graph_storages/graph_element.py +3 -5
- camel/storages/graph_storages/nebula_graph.py +547 -0
- camel/storages/key_value_storages/json.py +6 -1
- camel/tasks/task.py +11 -4
- camel/tasks/task_prompt.py +4 -0
- camel/toolkits/__init__.py +28 -24
- camel/toolkits/arxiv_toolkit.py +155 -0
- camel/toolkits/ask_news_toolkit.py +653 -0
- camel/toolkits/base.py +2 -3
- camel/toolkits/code_execution.py +6 -7
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/{openai_function.py → function_tool.py} +34 -11
- camel/toolkits/github_toolkit.py +9 -10
- camel/toolkits/google_maps_toolkit.py +7 -14
- camel/toolkits/google_scholar_toolkit.py +146 -0
- camel/toolkits/linkedin_toolkit.py +7 -10
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -8
- camel/toolkits/reddit_toolkit.py +7 -10
- camel/toolkits/retrieval_toolkit.py +5 -9
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -14
- camel/toolkits/twitter_toolkit.py +377 -454
- camel/toolkits/weather_toolkit.py +6 -6
- camel/toolkits/whatsapp_toolkit.py +177 -0
- camel/types/__init__.py +6 -1
- camel/types/enums.py +43 -85
- camel/types/openai_types.py +3 -0
- camel/types/unified_model_type.py +104 -0
- camel/utils/__init__.py +0 -2
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +40 -4
- camel/utils/token_counting.py +38 -214
- camel/workforce/__init__.py +6 -6
- camel/workforce/base.py +9 -5
- camel/workforce/prompts.py +179 -0
- camel/workforce/role_playing_worker.py +181 -0
- camel/workforce/{single_agent_node.py → single_agent_worker.py} +49 -23
- camel/workforce/task_channel.py +7 -8
- camel/workforce/utils.py +20 -50
- camel/workforce/{worker_node.py → worker.py} +15 -12
- camel/workforce/workforce.py +456 -19
- camel_ai-0.2.3.dist-info/LICENSE +201 -0
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/METADATA +40 -65
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/RECORD +98 -86
- {camel_ai-0.1.9.dist-info → camel_ai-0.2.3.dist-info}/WHEEL +1 -1
- camel/models/open_source_model.py +0 -170
- camel/workforce/manager_node.py +0 -299
- camel/workforce/role_playing_node.py +0 -168
- camel/workforce/workforce_prompt.py +0 -125
camel/models/samba_model.py
CHANGED
@@ -22,10 +22,11 @@ from openai import OpenAI, Stream
 
 from camel.configs import (
     SAMBA_CLOUD_API_PARAMS,
-    SAMBA_FAST_API_PARAMS,
     SAMBA_VERSE_API_PARAMS,
+    SambaCloudAPIConfig,
 )
 from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,

@@ -38,48 +39,59 @@ from camel.utils import (
     api_keys_required,
 )
 
-
-
-
+try:
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import LLMEvent, record
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    LLMEvent = None
+
+
+class SambaModel(BaseModelBackend):
+    r"""SambaNova service interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a SambaNova backend
+            is created. Supported models via SambaNova Cloud:
+            `https://community.sambanova.ai/t/supported-models/193`.
+            Supported models via SambaVerse API is listed in
+            `https://sambaverse.sambanova.ai/models`.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`SambaCloudAPIConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the SambaNova service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the SambaNova service.
+            Current support SambaVerse API:
+            :obj:`"https://sambaverse.sambanova.ai/api/predict"` and
+            SambaNova Cloud:
+            :obj:`"https://api.sambanova.ai/v1"` (default: :obj:`https://api.
+            sambanova.ai/v1`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+    """
 
     def __init__(
         self,
-        model_type: str,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
-
-
-
-                created. Supported models via Fast API: `https://sambanova.ai/
-                fast-api?api_ref=128521`. Supported models via SambaVerse API
-                is listed in `https://sambaverse.sambanova.ai/models`.
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into API request.
-            api_key (Optional[str]): The API key for authenticating with the
-                SambaNova service. (default: :obj:`None`)
-            url (Optional[str]): The url to the SambaNova service. Current
-                support SambaNova Fast API: :obj:`"https://fast-api.snova.ai/
-                v1/chat/ completions"`, SambaVerse API: :obj:`"https://
-                sambaverse.sambanova.ai/api/predict"` and SambaNova Cloud:
-                :obj:`"https://api.sambanova.ai/v1"`
-                (default::obj:`"https://fast-api.snova.ai/v1/chat/completions"`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter(ModelType.
-                GPT_4O_MINI)` will be used.
-        """
-        self.model_type = model_type
-        self._api_key = api_key or os.environ.get("SAMBA_API_KEY")
-        self._url = url or os.environ.get(
+        if model_config_dict is None:
+            model_config_dict = SambaCloudAPIConfig().as_dict()
+        api_key = api_key or os.environ.get("SAMBA_API_KEY")
+        url = url or os.environ.get(
             "SAMBA_API_BASE_URL",
-            "https://
+            "https://api.sambanova.ai/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
        )
-        self._token_counter = token_counter
-        self.model_config_dict = model_config_dict
-        self.check_model_config()
 
         if self._url == "https://api.sambanova.ai/v1":
             self._client = OpenAI(

@@ -109,14 +121,7 @@ class SambaModel:
             ValueError: If the model configuration dictionary contains any
                 unexpected arguments to SambaNova API.
         """
-        if self._url == "https://
-            for param in self.model_config_dict:
-                if param not in SAMBA_FAST_API_PARAMS:
-                    raise ValueError(
-                        f"Unexpected argument `{param}` is "
-                        "input into SambaNova Fast API."
-                    )
-        elif self._url == "https://sambaverse.sambanova.ai/api/predict":
+        if self._url == "https://sambaverse.sambanova.ai/api/predict":
             for param in self.model_config_dict:
                 if param not in SAMBA_VERSE_API_PARAMS:
                     raise ValueError(

@@ -159,7 +164,7 @@ class SambaModel:
         else:
             return self._run_non_streaming(messages)
 
-    def _run_streaming(
+    def _run_streaming(
         self, messages: List[OpenAIMessage]
     ) -> Stream[ChatCompletionChunk]:
         r"""Handles streaming inference with SambaNova's API.

@@ -175,48 +180,30 @@ class SambaModel:
 
         Raises:
             RuntimeError: If the HTTP request fails.
+            ValueError: If the API doesn't support stream mode.
         """
-
-        # Handle SambaNova's Fast API
-        if self._url == "https://fast-api.snova.ai/v1/chat/completions":
-            headers = {
-                "Authorization": f"Basic {self._api_key}",
-                "Content-Type": "application/json",
-            }
-
-            data = {
-                "messages": messages,
-                "max_tokens": self.token_limit,
-                "stop": self.model_config_dict.get("stop"),
-                "model": self.model_type,
-                "stream": True,
-                "stream_options": self.model_config_dict.get("stream_options"),
-            }
-
-            try:
-                with httpx.stream(
-                    "POST",
-                    self._url,
-                    headers=headers,
-                    json=data,
-                ) as api_response:
-                    stream = Stream[ChatCompletionChunk](
-                        cast_to=ChatCompletionChunk,
-                        response=api_response,
-                        client=OpenAI(api_key="required_but_not_used"),
-                    )
-                    for chunk in stream:
-                        yield chunk
-            except httpx.HTTPError as e:
-                raise RuntimeError(f"HTTP request failed: {e!s}")
-
         # Handle SambaNova's Cloud API
-
+        if self._url == "https://api.sambanova.ai/v1":
             response = self._client.chat.completions.create(
                 messages=messages,
                 model=self.model_type,
                 **self.model_config_dict,
             )
+
+            # Add AgentOps LLM Event tracking
+            if LLMEvent:
+                llm_event = LLMEvent(
+                    thread_id=response.id,
+                    prompt=" ".join(
+                        [message.get("content") for message in messages]  # type: ignore[misc]
+                    ),
+                    prompt_tokens=response.usage.prompt_tokens,  # type: ignore[union-attr]
+                    completion=response.choices[0].message.content,
+                    completion_tokens=response.usage.completion_tokens,  # type: ignore[union-attr]
+                    model=self.model_type,
+                )
+                record(llm_event)
+
             return response
 
         elif self._url == "https://sambaverse.sambanova.ai/api/predict":

@@ -224,6 +211,7 @@ class SambaModel:
                 "https://sambaverse.sambanova.ai/api/predict doesn't support"
                 " stream mode"
             )
+        raise RuntimeError(f"Unknown URL: {self._url}")
 
     def _run_non_streaming(
         self, messages: List[OpenAIMessage]

@@ -243,51 +231,28 @@ class SambaModel:
             ValueError: If the JSON response cannot be decoded or is missing
                 expected data.
         """
-
-        # Handle SambaNova's Fast API
-        if self._url == "https://fast-api.snova.ai/v1/chat/completions":
-            headers = {
-                "Authorization": f"Basic {self._api_key}",
-                "Content-Type": "application/json",
-            }
-
-            data = {
-                "messages": messages,
-                "max_tokens": self.token_limit,
-                "stop": self.model_config_dict.get("stop"),
-                "model": self.model_type,
-                "stream": True,
-                "stream_options": self.model_config_dict.get("stream_options"),
-            }
-
-            try:
-                with httpx.stream(
-                    "POST",
-                    self._url,
-                    headers=headers,
-                    json=data,
-                ) as api_response:
-                    samba_response = []
-                    for chunk in api_response.iter_text():
-                        if chunk.startswith('data: '):
-                            chunk = chunk[6:]
-                        if '[DONE]' in chunk:
-                            break
-                        json_data = json.loads(chunk)
-                        samba_response.append(json_data)
-                return self._fastapi_to_openai_response(samba_response)
-            except httpx.HTTPError as e:
-                raise RuntimeError(f"HTTP request failed: {e!s}")
-            except json.JSONDecodeError as e:
-                raise ValueError(f"Failed to decode JSON response: {e!s}")
-
         # Handle SambaNova's Cloud API
-
+        if self._url == "https://api.sambanova.ai/v1":
             response = self._client.chat.completions.create(
                 messages=messages,
                 model=self.model_type,
                 **self.model_config_dict,
             )
+
+            # Add AgentOps LLM Event tracking
+            if LLMEvent:
+                llm_event = LLMEvent(
+                    thread_id=response.id,
+                    prompt=" ".join(
+                        [message.get("content") for message in messages]  # type: ignore[misc]
+                    ),
+                    prompt_tokens=response.usage.prompt_tokens,  # type: ignore[union-attr]
+                    completion=response.choices[0].message.content,
+                    completion_tokens=response.usage.completion_tokens,  # type: ignore[union-attr]
+                    model=self.model_type,
+                )
+                record(llm_event)
+
             return response
 
         # Handle SambaNova's Sambaverse API

@@ -370,56 +335,6 @@ class SambaModel:
         except httpx.HTTPStatusError:
             raise RuntimeError(f"HTTP request failed: {raw_text}")
 
-    def _fastapi_to_openai_response(
-        self, samba_response: List[Dict[str, Any]]
-    ) -> ChatCompletion:
-        r"""Converts SambaNova Fast API response chunks into an
-        OpenAI-compatible response.
-
-        Args:
-            samba_response (List[Dict[str, Any]]): A list of dictionaries
-                representing partial responses from the SambaNova Fast API.
-
-        Returns:
-            ChatCompletion: A `ChatCompletion` object constructed from the
-                aggregated response data.
-        """
-
-        # Step 1: Combine the content from each chunk
-        full_content = ""
-        for chunk in samba_response:
-            if chunk['choices']:
-                for choice in chunk['choices']:
-                    delta_content = choice['delta'].get('content', '')
-                    full_content += delta_content
-
-        # Step 2: Create the ChatCompletion object
-        # Extract relevant information from the first chunk
-        first_chunk = samba_response[0]
-
-        choices = [
-            dict(
-                index=0,  # type: ignore[index]
-                message={
-                    "role": 'assistant',
-                    "content": full_content.strip(),
-                },
-                finish_reason=samba_response[-1]['choices'][0]['finish_reason']
-                or None,
-            )
-        ]
-
-        obj = ChatCompletion.construct(
-            id=first_chunk['id'],
-            choices=choices,
-            created=first_chunk['created'],
-            model=first_chunk['model'],
-            object="chat.completion",
-            usage=None,
-        )
-
-        return obj
-
     def _sambaverse_to_openai_response(
         self, samba_response: Dict[str, Any]
     ) -> ChatCompletion:

@@ -469,16 +384,6 @@ class SambaModel:
 
         return obj
 
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict["max_tokens"]
-        return max_tokens
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
camel/models/stub_model.py
CHANGED
@@ -51,8 +51,8 @@ class StubModel(BaseModelBackend):
 
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
camel/models/togetherai_model.py
CHANGED
@@ -17,9 +17,14 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
-from camel.configs import TOGETHERAI_API_PARAMS
+from camel.configs import TOGETHERAI_API_PARAMS, TogetherAIConfig
 from camel.messages import OpenAIMessage
-from camel.
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
 from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,

@@ -27,45 +32,50 @@ from camel.utils import (
 )
 
 
-class TogetherAIModel:
+class TogetherAIModel(BaseModelBackend):
     r"""Constructor for Together AI backend with OpenAI compatibility.
-
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, supported model can be found here:
+            https://docs.together.ai/docs/chat-models
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`TogetherAIConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Together service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Together AI service.
+            If not provided, "https://api.together.xyz/v1" will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
     """
 
     def __init__(
         self,
-        model_type: str,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
-
-
-
-
-
-
-
-
-            url (Optional[str]): The url to the Together AI service. (default:
-                :obj:`"https://api.together.xyz/v1"`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter(ModelType.
-                GPT_4O_MINI)` will be used.
-        """
-        self.model_type = model_type
-        self.model_config_dict = model_config_dict
-        self._token_counter = token_counter
-        self._api_key = api_key or os.environ.get("TOGETHER_API_KEY")
-        self._url = url or os.environ.get("TOGETHER_API_BASE_URL")
+        if model_config_dict is None:
+            model_config_dict = TogetherAIConfig().as_dict()
+        api_key = api_key or os.environ.get("TOGETHER_API_KEY")
+        url = url or os.environ.get(
+            "TOGETHER_API_BASE_URL", "https://api.together.xyz/v1"
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
 
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
             api_key=self._api_key,
-            base_url=self._url
+            base_url=self._url,
         )
 
     @api_keys_required("TOGETHER_API_KEY")

@@ -130,19 +140,3 @@ class TogetherAIModel:
             bool: Whether the model is in stream mode.
         """
         return self.model_config_dict.get('stream', False)
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096
camel/models/vllm_model.py
CHANGED
@@ -17,58 +17,66 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
-from camel.configs import VLLM_API_PARAMS
+from camel.configs import VLLM_API_PARAMS, VLLMConfig
 from camel.messages import OpenAIMessage
-from camel.
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
 from camel.utils import BaseTokenCounter, OpenAITokenCounter
 
 
 # flake8: noqa: E501
-class VLLMModel:
-    r"""vLLM service interface.
+class VLLMModel(BaseModelBackend):
+    r"""vLLM service interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`VLLMConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the model service. vLLM doesn't need API key, it would be ignored
+            if set. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the model service. If not
+            provided, :obj:`"http://localhost:8000/v1"` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+
+    References:
+        https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html
+    """
 
     def __init__(
         self,
-        model_type: str,
-        model_config_dict: Dict[str, Any],
-        url: Optional[str] = None,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
-
-
-
-
-            model_type (str): Model for which a backend is created.
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into openai.ChatCompletion.create().
-            url (Optional[str]): The url to the model service. (default:
-                :obj:`"http://localhost:8000/v1"`)
-            api_key (Optional[str]): The API key for authenticating with the
-                model service.
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter(ModelType.
-                GPT_4O_MINI)` will be used.
-        """
-        self.model_type = model_type
-        self.model_config_dict = model_config_dict
-        self._url = (
-            url
-            or os.environ.get("VLLM_BASE_URL")
-            or "http://localhost:8000/v1"
+        if model_config_dict is None:
+            model_config_dict = VLLMConfig().as_dict()
+        url = url or os.environ.get("VLLM_BASE_URL")
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
         )
-        if not
+        if not self._url:
             self._start_server()
         # Use OpenAI cilent as interface call vLLM
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
+            api_key="Set-but-ignored",  # required but ignored
             base_url=self._url,
-            api_key=api_key,
         )
-        self._token_counter = token_counter
-        self.check_model_config()
 
     def _start_server(self) -> None:
         r"""Starts the vllm server in a subprocess."""

@@ -78,8 +86,9 @@ class VLLMModel:
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
             )
+            self._url = "http://localhost:8000/v1"
             print(
-                f"vllm server started on
+                f"vllm server started on {self._url} "
                 f"for {self.model_type} model."
             )
         except Exception as e:

@@ -135,22 +144,6 @@ class VLLMModel:
         )
         return response
 
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial