camel-ai 0.2.3a1__py3-none-any.whl → 0.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (87)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +93 -69
  3. camel/agents/knowledge_graph_agent.py +4 -6
  4. camel/bots/__init__.py +16 -2
  5. camel/bots/discord_app.py +138 -0
  6. camel/bots/slack/__init__.py +30 -0
  7. camel/bots/slack/models.py +158 -0
  8. camel/bots/slack/slack_app.py +255 -0
  9. camel/configs/__init__.py +1 -2
  10. camel/configs/anthropic_config.py +2 -5
  11. camel/configs/base_config.py +6 -6
  12. camel/configs/groq_config.py +2 -3
  13. camel/configs/ollama_config.py +1 -2
  14. camel/configs/openai_config.py +2 -23
  15. camel/configs/samba_config.py +2 -2
  16. camel/configs/togetherai_config.py +1 -1
  17. camel/configs/vllm_config.py +1 -1
  18. camel/configs/zhipuai_config.py +2 -3
  19. camel/embeddings/openai_embedding.py +2 -2
  20. camel/loaders/__init__.py +2 -0
  21. camel/loaders/chunkr_reader.py +163 -0
  22. camel/loaders/firecrawl_reader.py +3 -3
  23. camel/loaders/unstructured_io.py +35 -33
  24. camel/messages/__init__.py +1 -0
  25. camel/models/__init__.py +2 -4
  26. camel/models/anthropic_model.py +32 -26
  27. camel/models/azure_openai_model.py +39 -36
  28. camel/models/base_model.py +31 -20
  29. camel/models/gemini_model.py +37 -29
  30. camel/models/groq_model.py +29 -23
  31. camel/models/litellm_model.py +44 -61
  32. camel/models/mistral_model.py +32 -29
  33. camel/models/model_factory.py +66 -76
  34. camel/models/nemotron_model.py +33 -23
  35. camel/models/ollama_model.py +42 -47
  36. camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +31 -49
  37. camel/models/openai_model.py +48 -29
  38. camel/models/reka_model.py +30 -28
  39. camel/models/samba_model.py +82 -177
  40. camel/models/stub_model.py +2 -2
  41. camel/models/togetherai_model.py +37 -43
  42. camel/models/vllm_model.py +43 -50
  43. camel/models/zhipuai_model.py +33 -27
  44. camel/retrievers/auto_retriever.py +29 -97
  45. camel/retrievers/vector_retriever.py +58 -47
  46. camel/societies/babyagi_playing.py +6 -3
  47. camel/societies/role_playing.py +5 -3
  48. camel/storages/graph_storages/graph_element.py +2 -2
  49. camel/storages/key_value_storages/json.py +6 -1
  50. camel/toolkits/__init__.py +20 -7
  51. camel/toolkits/arxiv_toolkit.py +155 -0
  52. camel/toolkits/ask_news_toolkit.py +653 -0
  53. camel/toolkits/base.py +2 -3
  54. camel/toolkits/code_execution.py +6 -7
  55. camel/toolkits/dalle_toolkit.py +6 -6
  56. camel/toolkits/{openai_function.py → function_tool.py} +34 -11
  57. camel/toolkits/github_toolkit.py +9 -10
  58. camel/toolkits/google_maps_toolkit.py +7 -7
  59. camel/toolkits/google_scholar_toolkit.py +146 -0
  60. camel/toolkits/linkedin_toolkit.py +7 -7
  61. camel/toolkits/math_toolkit.py +8 -8
  62. camel/toolkits/open_api_toolkit.py +5 -5
  63. camel/toolkits/reddit_toolkit.py +7 -7
  64. camel/toolkits/retrieval_toolkit.py +5 -5
  65. camel/toolkits/search_toolkit.py +9 -9
  66. camel/toolkits/slack_toolkit.py +11 -11
  67. camel/toolkits/twitter_toolkit.py +378 -452
  68. camel/toolkits/weather_toolkit.py +6 -6
  69. camel/toolkits/whatsapp_toolkit.py +177 -0
  70. camel/types/__init__.py +6 -1
  71. camel/types/enums.py +40 -85
  72. camel/types/openai_types.py +3 -0
  73. camel/types/unified_model_type.py +104 -0
  74. camel/utils/__init__.py +0 -2
  75. camel/utils/async_func.py +7 -7
  76. camel/utils/commons.py +32 -3
  77. camel/utils/token_counting.py +30 -212
  78. camel/workforce/role_playing_worker.py +1 -1
  79. camel/workforce/single_agent_worker.py +1 -1
  80. camel/workforce/task_channel.py +4 -3
  81. camel/workforce/workforce.py +4 -4
  82. camel_ai-0.2.4.dist-info/LICENSE +201 -0
  83. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/METADATA +27 -56
  84. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/RECORD +85 -76
  85. {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.4.dist-info}/WHEEL +1 -1
  86. camel/bots/discord_bot.py +0 -206
  87. camel/models/open_source_model.py +0 -170
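
Two renames in this list change public import paths: camel/toolkits/openai_function.py becomes camel/toolkits/function_tool.py, and camel/models/openai_compatibility_model.py becomes camel/models/openai_compatible_model.py. A minimal sketch of the corresponding import updates; the module paths come from the list above, while the exact symbols each module exports should be confirmed against the 0.2.4 sources:

# Old module paths (0.2.3a1), now gone:
#   from camel.toolkits import openai_function
#   from camel.models import openai_compatibility_model

# New module paths (0.2.4):
from camel.toolkits import function_tool
from camel.models import openai_compatible_model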
camel/models/reka_model.py

@@ -11,10 +11,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union

-from camel.configs import REKA_API_PARAMS
+from camel.configs import REKA_API_PARAMS, RekaConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
@@ -22,6 +21,7 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    dependencies_required,
 )

 if TYPE_CHECKING:
@@ -39,40 +39,42 @@ except (ImportError, AttributeError):


 class RekaModel(BaseModelBackend):
-    r"""Reka API in a unified BaseModelBackend interface."""
-
+    r"""Reka API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of REKA_* series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`Reka.chat.create()`. If :obj:`None`,
+            :obj:`RekaConfig().as_dict()` will be used. (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Reka service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Reka service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter` will
+            be used. (default: :obj:`None`)
+    """
+
+    @dependencies_required('reka')
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for Reka backend.
+        from reka.client import Reka

-        Args:
-            model_type (ModelType): Model for which a backend is created,
-                one of REKA_* series.
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into `Reka.chat.create`.
-            api_key (Optional[str]): The API key for authenticating with the
-                Reka service. (default: :obj:`None`)
-            url (Optional[str]): The url to the Reka service.
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter` will be
-                used.
-        """
+        if model_config_dict is None:
+            model_config_dict = RekaConfig().as_dict()
+        api_key = api_key or os.environ.get("REKA_API_KEY")
+        url = url or os.environ.get("REKA_API_BASE_URL")
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
-        self._api_key = api_key or os.environ.get("REKA_API_KEY")
-        self._url = url or os.environ.get("REKA_SERVER_URL")
-
-        from reka.client import Reka
-
         self._client = Reka(api_key=self._api_key, base_url=self._url)
-        self._token_counter: Optional[BaseTokenCounter] = None

     def _convert_reka_to_openai_response(
         self, response: 'ChatResponse'
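
The constructor change makes every argument except model_type optional, and the base-URL variable moves from REKA_SERVER_URL to REKA_API_BASE_URL. A minimal usage sketch, assuming ModelType.REKA_CORE is one of the REKA_* members the docstring refers to and that REKA_API_KEY is exported:

from camel.models.reka_model import RekaModel
from camel.types import ModelType

# model_config_dict may be omitted: RekaConfig().as_dict() fills it in, and
# the key and base URL fall back to REKA_API_KEY / REKA_API_BASE_URL.
# @dependencies_required('reka') raises early if the reka SDK is missing.
model = RekaModel(model_type=ModelType.REKA_CORE)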
@@ -184,7 +186,7 @@ class RekaModel(BaseModelBackend):

         response = self._client.chat.create(
             messages=reka_messages,
-            model=self.model_type.value,
+            model=self.model_type,
             **self.model_config_dict,
         )

@@ -200,7 +202,7 @@ class RekaModel(BaseModelBackend):
                 prompt_tokens=openai_response.usage.input_tokens,  # type: ignore[union-attr]
                 completion=openai_response.choices[0].message.content,
                 completion_tokens=openai_response.usage.output_tokens,  # type: ignore[union-attr]
-                model=self.model_type.value,
+                model=self.model_type,
             )
             record(llm_event)

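This `.value` removal (and the one in the previous hunk) implies that model_type now serializes as a plain string. The file list adds camel/types/unified_model_type.py (+104), which most plausibly supplies a str-backed model type; a purely illustrative sketch of that pattern, not camel's actual implementation:

# Illustrative only: a str subclass passes directly as an API model id,
# which is what lets `model=self.model_type` replace `self.model_type.value`.
class UnifiedModelTypeSketch(str):
    pass

def call_api(model: str) -> str:
    return f"calling {model}"

print(call_api(UnifiedModelTypeSketch("reka-core")))  # prints: calling reka-core
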
camel/models/samba_model.py

@@ -22,10 +22,11 @@ from openai import OpenAI, Stream

 from camel.configs import (
     SAMBA_CLOUD_API_PARAMS,
-    SAMBA_FAST_API_PARAMS,
     SAMBA_VERSE_API_PARAMS,
+    SambaCloudAPIConfig,
 )
 from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -38,48 +39,59 @@ from camel.utils import (
     api_keys_required,
 )

-
-class SambaModel:
-    r"""SambaNova service interface."""
+try:
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import LLMEvent, record
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    LLMEvent = None
+
+
+class SambaModel(BaseModelBackend):
+    r"""SambaNova service interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a SambaNova backend
+            is created. Supported models via SambaNova Cloud:
+            `https://community.sambanova.ai/t/supported-models/193`.
+            Supported models via SambaVerse API is listed in
+            `https://sambaverse.sambanova.ai/models`.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`SambaCloudAPIConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the SambaNova service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the SambaNova service.
+            Current support SambaVerse API:
+            :obj:`"https://sambaverse.sambanova.ai/api/predict"` and
+            SambaNova Cloud:
+            :obj:`"https://api.sambanova.ai/v1"` (default: :obj:`https://api.
+            sambanova.ai/v1`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+    """

     def __init__(
         self,
-        model_type: str,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for SambaNova backend.
-
-        Args:
-            model_type (str): Model for which a SambaNova backend is
-                created. Supported models via Fast API: `https://sambanova.ai/
-                fast-api?api_ref=128521`. Supported models via SambaVerse API
-                is listed in `https://sambaverse.sambanova.ai/models`.
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into API request.
-            api_key (Optional[str]): The API key for authenticating with the
-                SambaNova service. (default: :obj:`None`)
-            url (Optional[str]): The url to the SambaNova service. Current
-                support SambaNova Fast API: :obj:`"https://fast-api.snova.ai/
-                v1/chat/ completions"`, SambaVerse API: :obj:`"https://
-                sambaverse.sambanova.ai/api/predict"` and SambaNova Cloud:
-                :obj:`"https://api.sambanova.ai/v1"`
-                (default::obj:`"https://fast-api.snova.ai/v1/chat/completions"`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter(ModelType.
-                GPT_4O_MINI)` will be used.
-        """
-        self.model_type = model_type
-        self._api_key = api_key or os.environ.get("SAMBA_API_KEY")
-        self._url = url or os.environ.get(
+        if model_config_dict is None:
+            model_config_dict = SambaCloudAPIConfig().as_dict()
+        api_key = api_key or os.environ.get("SAMBA_API_KEY")
+        url = url or os.environ.get(
             "SAMBA_API_BASE_URL",
-            "https://fast-api.snova.ai/v1/chat/completions",
+            "https://api.sambanova.ai/v1",
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
         )
-        self._token_counter = token_counter
-        self.model_config_dict = model_config_dict
-        self.check_model_config()

         if self._url == "https://api.sambanova.ai/v1":
             self._client = OpenAI(
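
The constructor now defaults to SambaNova Cloud rather than the removed Fast API endpoint, and delegates state handling to BaseModelBackend. A minimal usage sketch, with a placeholder model id and SAMBA_API_KEY assumed to be exported:

from camel.models.samba_model import SambaModel

# With no url argument, SAMBA_API_BASE_URL is consulted and then
# "https://api.sambanova.ai/v1" is used; model_config_dict defaults to
# SambaCloudAPIConfig().as_dict().
model = SambaModel(model_type="Meta-Llama-3.1-8B-Instruct")  # placeholder id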
@@ -109,14 +121,7 @@
             ValueError: If the model configuration dictionary contains any
                 unexpected arguments to SambaNova API.
         """
-        if self._url == "https://fast-api.snova.ai/v1/chat/completions":
-            for param in self.model_config_dict:
-                if param not in SAMBA_FAST_API_PARAMS:
-                    raise ValueError(
-                        f"Unexpected argument `{param}` is "
-                        "input into SambaNova Fast API."
-                    )
-        elif self._url == "https://sambaverse.sambanova.ai/api/predict":
+        if self._url == "https://sambaverse.sambanova.ai/api/predict":
             for param in self.model_config_dict:
                 if param not in SAMBA_VERSE_API_PARAMS:
                     raise ValueError(
@@ -159,7 +164,7 @@
         else:
             return self._run_non_streaming(messages)

-    def _run_streaming(  # type: ignore[misc]
+    def _run_streaming(
         self, messages: List[OpenAIMessage]
     ) -> Stream[ChatCompletionChunk]:
         r"""Handles streaming inference with SambaNova's API.
@@ -175,48 +180,30 @@

         Raises:
             RuntimeError: If the HTTP request fails.
+            ValueError: If the API doesn't support stream mode.
         """
-
-        # Handle SambaNova's Fast API
-        if self._url == "https://fast-api.snova.ai/v1/chat/completions":
-            headers = {
-                "Authorization": f"Basic {self._api_key}",
-                "Content-Type": "application/json",
-            }
-
-            data = {
-                "messages": messages,
-                "max_tokens": self.token_limit,
-                "stop": self.model_config_dict.get("stop"),
-                "model": self.model_type,
-                "stream": True,
-                "stream_options": self.model_config_dict.get("stream_options"),
-            }
-
-            try:
-                with httpx.stream(
-                    "POST",
-                    self._url,
-                    headers=headers,
-                    json=data,
-                ) as api_response:
-                    stream = Stream[ChatCompletionChunk](
-                        cast_to=ChatCompletionChunk,
-                        response=api_response,
-                        client=OpenAI(api_key="required_but_not_used"),
-                    )
-                    for chunk in stream:
-                        yield chunk
-            except httpx.HTTPError as e:
-                raise RuntimeError(f"HTTP request failed: {e!s}")
-
         # Handle SambaNova's Cloud API
-        elif self._url == "https://api.sambanova.ai/v1":
+        if self._url == "https://api.sambanova.ai/v1":
             response = self._client.chat.completions.create(
                 messages=messages,
                 model=self.model_type,
                 **self.model_config_dict,
             )
+
+            # Add AgentOps LLM Event tracking
+            if LLMEvent:
+                llm_event = LLMEvent(
+                    thread_id=response.id,
+                    prompt=" ".join(
+                        [message.get("content") for message in messages]  # type: ignore[misc]
+                    ),
+                    prompt_tokens=response.usage.prompt_tokens,  # type: ignore[union-attr]
+                    completion=response.choices[0].message.content,
+                    completion_tokens=response.usage.completion_tokens,  # type: ignore[union-attr]
+                    model=self.model_type,
+                )
+                record(llm_event)
+
             return response

         elif self._url == "https://sambaverse.sambanova.ai/api/predict":
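
The tracking block only fires when LLMEvent was successfully imported, which the new module-level guard ties to AGENTOPS_API_KEY being set at import time. A sketch of opting in, assuming an AgentOps account; the key value is a placeholder:

import os

# Must happen before camel.models is imported, because the
# `from agentops import LLMEvent, record` guard runs at module import.
os.environ["AGENTOPS_API_KEY"] = "<agentops-key>"  # placeholder

from camel.models.samba_model import SambaModel  # noqa: E402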
@@ -224,6 +211,7 @@
                 "https://sambaverse.sambanova.ai/api/predict doesn't support"
                 " stream mode"
             )
+        raise RuntimeError(f"Unknown URL: {self._url}")

     def _run_non_streaming(
         self, messages: List[OpenAIMessage]
@@ -243,51 +231,28 @@
             ValueError: If the JSON response cannot be decoded or is missing
                 expected data.
         """
-
-        # Handle SambaNova's Fast API
-        if self._url == "https://fast-api.snova.ai/v1/chat/completions":
-            headers = {
-                "Authorization": f"Basic {self._api_key}",
-                "Content-Type": "application/json",
-            }
-
-            data = {
-                "messages": messages,
-                "max_tokens": self.token_limit,
-                "stop": self.model_config_dict.get("stop"),
-                "model": self.model_type,
-                "stream": True,
-                "stream_options": self.model_config_dict.get("stream_options"),
-            }
-
-            try:
-                with httpx.stream(
-                    "POST",
-                    self._url,
-                    headers=headers,
-                    json=data,
-                ) as api_response:
-                    samba_response = []
-                    for chunk in api_response.iter_text():
-                        if chunk.startswith('data: '):
-                            chunk = chunk[6:]
-                        if '[DONE]' in chunk:
-                            break
-                        json_data = json.loads(chunk)
-                        samba_response.append(json_data)
-                return self._fastapi_to_openai_response(samba_response)
-            except httpx.HTTPError as e:
-                raise RuntimeError(f"HTTP request failed: {e!s}")
-            except json.JSONDecodeError as e:
-                raise ValueError(f"Failed to decode JSON response: {e!s}")
-
         # Handle SambaNova's Cloud API
-        elif self._url == "https://api.sambanova.ai/v1":
+        if self._url == "https://api.sambanova.ai/v1":
             response = self._client.chat.completions.create(
                 messages=messages,
                 model=self.model_type,
                 **self.model_config_dict,
             )
+
+            # Add AgentOps LLM Event tracking
+            if LLMEvent:
+                llm_event = LLMEvent(
+                    thread_id=response.id,
+                    prompt=" ".join(
+                        [message.get("content") for message in messages]  # type: ignore[misc]
+                    ),
+                    prompt_tokens=response.usage.prompt_tokens,  # type: ignore[union-attr]
+                    completion=response.choices[0].message.content,
+                    completion_tokens=response.usage.completion_tokens,  # type: ignore[union-attr]
+                    model=self.model_type,
+                )
+                record(llm_event)
+
             return response

         # Handle SambaNova's Sambaverse API
@@ -370,56 +335,6 @@
         except httpx.HTTPStatusError:
             raise RuntimeError(f"HTTP request failed: {raw_text}")

-    def _fastapi_to_openai_response(
-        self, samba_response: List[Dict[str, Any]]
-    ) -> ChatCompletion:
-        r"""Converts SambaNova Fast API response chunks into an
-        OpenAI-compatible response.
-
-        Args:
-            samba_response (List[Dict[str, Any]]): A list of dictionaries
-                representing partial responses from the SambaNova Fast API.
-
-        Returns:
-            ChatCompletion: A `ChatCompletion` object constructed from the
-                aggregated response data.
-        """
-
-        # Step 1: Combine the content from each chunk
-        full_content = ""
-        for chunk in samba_response:
-            if chunk['choices']:
-                for choice in chunk['choices']:
-                    delta_content = choice['delta'].get('content', '')
-                    full_content += delta_content
-
-        # Step 2: Create the ChatCompletion object
-        # Extract relevant information from the first chunk
-        first_chunk = samba_response[0]
-
-        choices = [
-            dict(
-                index=0,  # type: ignore[index]
-                message={
-                    "role": 'assistant',
-                    "content": full_content.strip(),
-                },
-                finish_reason=samba_response[-1]['choices'][0]['finish_reason']
-                or None,
-            )
-        ]
-
-        obj = ChatCompletion.construct(
-            id=first_chunk['id'],
-            choices=choices,
-            created=first_chunk['created'],
-            model=first_chunk['model'],
-            object="chat.completion",
-            usage=None,
-        )
-
-        return obj
-
     def _sambaverse_to_openai_response(
         self, samba_response: Dict[str, Any]
     ) -> ChatCompletion:
@@ -469,16 +384,6 @@

         return obj

-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict["max_tokens"]
-        return max_tokens
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
camel/models/stub_model.py

@@ -51,8 +51,8 @@ class StubModel(BaseModelBackend):

     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
camel/models/togetherai_model.py

@@ -17,9 +17,14 @@ from typing import Any, Dict, List, Optional, Union

 from openai import OpenAI, Stream

-from camel.configs import TOGETHERAI_API_PARAMS
+from camel.configs import TOGETHERAI_API_PARAMS, TogetherAIConfig
 from camel.messages import OpenAIMessage
-from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
 from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
@@ -27,45 +32,50 @@ from camel.utils import (
 )


-class TogetherAIModel:
+class TogetherAIModel(BaseModelBackend):
     r"""Constructor for Together AI backend with OpenAI compatibility.
-    TODO: Add function calling support
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, supported model can be found here:
+            https://docs.together.ai/docs/chat-models
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`TogetherAIConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Together service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Together AI service.
+            If not provided, "https://api.together.xyz/v1" will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
     """

     def __init__(
         self,
-        model_type: str,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        r"""Constructor for TogetherAI backend.
-
-        Args:
-            model_type (str): Model for which a backend is created, supported
-                model can be found here: https://docs.together.ai/docs/chat-models
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into openai.ChatCompletion.create().
-            api_key (Optional[str]): The API key for authenticating with the
-                Together service. (default: :obj:`None`)
-            url (Optional[str]): The url to the Together AI service. (default:
-                :obj:`"https://api.together.xyz/v1"`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `OpenAITokenCounter(ModelType.
-                GPT_4O_MINI)` will be used.
-        """
-        self.model_type = model_type
-        self.model_config_dict = model_config_dict
-        self._token_counter = token_counter
-        self._api_key = api_key or os.environ.get("TOGETHER_API_KEY")
-        self._url = url or os.environ.get("TOGETHER_API_BASE_URL")
+        if model_config_dict is None:
+            model_config_dict = TogetherAIConfig().as_dict()
+        api_key = api_key or os.environ.get("TOGETHER_API_KEY")
+        url = url or os.environ.get(
+            "TOGETHER_API_BASE_URL", "https://api.together.xyz/v1"
+        )
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )

         self._client = OpenAI(
             timeout=60,
             max_retries=3,
             api_key=self._api_key,
-            base_url=self._url or "https://api.together.xyz/v1",
+            base_url=self._url,
         )

     @api_keys_required("TOGETHER_API_KEY")
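
As with the Reka and SambaNova backends above, defaults now live in the constructor instead of the client call. A minimal usage sketch with a placeholder Together model id and TOGETHER_API_KEY assumed to be exported:

from camel.models.togetherai_model import TogetherAIModel

# TOGETHER_API_KEY is read from the environment; the base URL falls back to
# TOGETHER_API_BASE_URL and then "https://api.together.xyz/v1".
model = TogetherAIModel(
    model_type="meta-llama/Llama-3-8b-chat-hf",  # placeholder id
)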
@@ -130,19 +140,3 @@
             bool: Whether the model is in stream mode.
         """
         return self.model_config_dict.get('stream', False)
-
-    @property
-    def token_limit(self) -> int:
-        r"""Returns the maximum token limit for the given model.
-
-        Returns:
-            int: The maximum token limit for the given model.
-        """
-        max_tokens = self.model_config_dict.get("max_tokens")
-        if isinstance(max_tokens, int):
-            return max_tokens
-        print(
-            "Must set `max_tokens` as an integer in `model_config_dict` when"
-            " setting up the model. Using 4096 as default value."
-        )
-        return 4096
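
This removal mirrors the one in SambaModel: both classes now extend BaseModelBackend (changed +31 -20 above), so token_limit is presumably resolved by the shared base class instead of an ad-hoc max_tokens lookup with a printed warning. A sketch of the resulting expectation; the exact resolution order lives in camel/models/base_model.py:

from camel.models.samba_model import SambaModel
from camel.models.togetherai_model import TogetherAIModel

# The per-class overrides are gone; the property should now come from the
# shared BaseModelBackend.
for backend_cls in (SambaModel, TogetherAIModel):
    assert hasattr(backend_cls, "token_limit")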