camel-ai 0.1.5.1__py3-none-any.whl → 0.1.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (86)
  1. camel/agents/__init__.py +2 -0
  2. camel/agents/chat_agent.py +237 -52
  3. camel/agents/critic_agent.py +6 -9
  4. camel/agents/deductive_reasoner_agent.py +93 -40
  5. camel/agents/embodied_agent.py +6 -9
  6. camel/agents/knowledge_graph_agent.py +49 -27
  7. camel/agents/role_assignment_agent.py +14 -12
  8. camel/agents/search_agent.py +122 -0
  9. camel/agents/task_agent.py +26 -38
  10. camel/bots/__init__.py +20 -0
  11. camel/bots/discord_bot.py +103 -0
  12. camel/bots/telegram_bot.py +84 -0
  13. camel/configs/__init__.py +3 -0
  14. camel/configs/anthropic_config.py +1 -1
  15. camel/configs/litellm_config.py +113 -0
  16. camel/configs/openai_config.py +14 -0
  17. camel/embeddings/__init__.py +2 -0
  18. camel/embeddings/openai_embedding.py +2 -2
  19. camel/embeddings/sentence_transformers_embeddings.py +6 -5
  20. camel/embeddings/vlm_embedding.py +146 -0
  21. camel/functions/__init__.py +9 -0
  22. camel/functions/open_api_function.py +161 -33
  23. camel/functions/open_api_specs/biztoc/__init__.py +13 -0
  24. camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
  25. camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
  26. camel/functions/open_api_specs/create_qr_code/__init__.py +13 -0
  27. camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
  28. camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
  29. camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
  30. camel/functions/open_api_specs/outschool/__init__.py +13 -0
  31. camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
  32. camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
  33. camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
  34. camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
  35. camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
  36. camel/functions/open_api_specs/security_config.py +21 -0
  37. camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
  38. camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
  39. camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
  40. camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
  41. camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
  42. camel/functions/openai_function.py +3 -1
  43. camel/functions/search_functions.py +104 -171
  44. camel/functions/slack_functions.py +16 -3
  45. camel/human.py +3 -1
  46. camel/loaders/base_io.py +3 -1
  47. camel/loaders/unstructured_io.py +16 -22
  48. camel/messages/base.py +135 -46
  49. camel/models/__init__.py +8 -0
  50. camel/models/anthropic_model.py +24 -16
  51. camel/models/base_model.py +6 -1
  52. camel/models/litellm_model.py +112 -0
  53. camel/models/model_factory.py +44 -16
  54. camel/models/nemotron_model.py +71 -0
  55. camel/models/ollama_model.py +121 -0
  56. camel/models/open_source_model.py +8 -2
  57. camel/models/openai_model.py +14 -5
  58. camel/models/stub_model.py +3 -1
  59. camel/models/zhipuai_model.py +125 -0
  60. camel/prompts/__init__.py +6 -0
  61. camel/prompts/base.py +2 -1
  62. camel/prompts/descripte_video_prompt.py +33 -0
  63. camel/prompts/generate_text_embedding_data.py +79 -0
  64. camel/prompts/task_prompt_template.py +13 -3
  65. camel/retrievers/auto_retriever.py +20 -11
  66. camel/retrievers/base.py +4 -2
  67. camel/retrievers/bm25_retriever.py +2 -1
  68. camel/retrievers/cohere_rerank_retriever.py +2 -1
  69. camel/retrievers/vector_retriever.py +10 -4
  70. camel/societies/babyagi_playing.py +2 -1
  71. camel/societies/role_playing.py +18 -20
  72. camel/storages/graph_storages/base.py +1 -0
  73. camel/storages/graph_storages/neo4j_graph.py +5 -3
  74. camel/storages/vectordb_storages/base.py +2 -1
  75. camel/storages/vectordb_storages/milvus.py +5 -2
  76. camel/toolkits/github_toolkit.py +120 -26
  77. camel/types/__init__.py +5 -2
  78. camel/types/enums.py +95 -4
  79. camel/utils/__init__.py +11 -2
  80. camel/utils/commons.py +78 -4
  81. camel/utils/constants.py +26 -0
  82. camel/utils/token_counting.py +62 -7
  83. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/METADATA +82 -53
  84. camel_ai-0.1.5.3.dist-info/RECORD +151 -0
  85. camel_ai-0.1.5.1.dist-info/RECORD +0 -119
  86. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.3.dist-info}/WHEEL +0 -0
camel/loaders/unstructured_io.py CHANGED
@@ -131,30 +131,22 @@ class UnstructuredIO:
         self,
         input_path: str,
         **kwargs: Any,
-    ) -> Union[Any, List[Any]]:
-        r"""Loads a file or a URL and parses its contents as unstructured data.
+    ) -> List[Element]:
+        r"""Loads a file or a URL and parses its contents into elements.
 
         Args:
             input_path (str): Path to the file or URL to be parsed.
             **kwargs: Extra kwargs passed to the partition function.
 
         Returns:
-            List[Any]: The elements after parsing the file or URL, could be a
-                dict, list, etc., depending on the content. If return_str is
-                True, returns a tuple with a string representation of the
-                elements and the elements themselves.
+            List[Element]: List of elements after parsing the file or URL.
 
         Raises:
-            FileNotFoundError: If the file does not exist
-                at the path specified.
+            FileNotFoundError: If the file does not exist at the path
+                specified.
             Exception: For any other issues during file or URL parsing.
 
         Notes:
-            By default we use the basic "unstructured" library,
-            if you are processing document types beyond the basics,
-            you can install the necessary extras like:
-            `pip install "unstructured[docx,pptx]"` or
-            `pip install "unstructured[all-docs]"`.
             Available document types:
             "csv", "doc", "docx", "epub", "image", "md", "msg", "odt",
             "org", "pdf", "ppt", "pptx", "rtf", "rst", "tsv", "xlsx".
@@ -185,7 +177,9 @@ class UnstructuredIO:
 
         # Check if the file exists
         if not os.path.exists(input_path):
-            raise FileNotFoundError(f"The file {input_path} was not found.")
+            raise FileNotFoundError(
+                f"The file {input_path} was not found."
+            )
 
         # Read the file
         try:
@@ -193,7 +187,9 @@ class UnstructuredIO:
                 elements = partition(file=f, **kwargs)
                 return elements
         except Exception as e:
-            raise Exception("Failed to parse the unstructured file.") from e
+            raise Exception(
+                "Failed to parse the unstructured file."
+            ) from e
 
     def clean_text_data(
         self,
@@ -433,9 +429,8 @@ class UnstructuredIO:
                 els, kw.get('metadata', [])
             ),
             "stage_for_baseplate": baseplate.stage_for_baseplate,
-            "stage_for_datasaur": lambda els, **kw: datasaur.stage_for_datasaur(
-                els, kw.get('entities', [])
-            ),
+            "stage_for_datasaur": lambda els,
+            **kw: datasaur.stage_for_datasaur(els, kw.get('entities', [])),
             "stage_for_label_box": lambda els,
             **kw: label_box.stage_for_label_box(els, **kw),
             "stage_for_label_studio": lambda els,
@@ -450,11 +445,11 @@ class UnstructuredIO:
 
     def chunk_elements(
         self, elements: List[Any], chunk_type: str, **kwargs
-    ) -> List[Any]:
+    ) -> List[Element]:
         r"""Chunks elements by titles.
 
         Args:
-            elements (List[Any]): List of Element objects to be chunked.
+            elements (List[Element]): List of Element objects to be chunked.
             chunk_type (str): Type chunk going to apply. Supported types:
                 'chunk_by_title'.
             **kwargs: Additional keyword arguments for chunking.
@@ -531,8 +526,7 @@ class UnstructuredIO:
         account_name: str,
         num_processes: int = 2,
     ) -> None:
-        """
-        Processes documents from an Azure storage container and stores
+        r"""Processes documents from an Azure storage container and stores
         structured outputs locally.
 
         Args:
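
The UnstructuredIO changes above narrow the parser and chunker return types to List[Element] and reformat the error handling. A minimal usage sketch against the new signatures (the parse_file_or_url method name follows the camel 0.1.5.x API; the file path is a placeholder):

    from camel.loaders import UnstructuredIO

    uio = UnstructuredIO()

    # Parse a local file (or a URL) into a list of unstructured Element objects.
    elements = uio.parse_file_or_url("example_report.pdf")  # placeholder path

    # Chunk the parsed elements by title, the chunk_type named in the docstring.
    chunks = uio.chunk_elements(elements, chunk_type="chunk_by_title")

    for chunk in chunks:
        print(type(chunk).__name__, str(chunk)[:80])
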
camel/messages/base.py CHANGED
@@ -16,6 +16,7 @@ import io
 from dataclasses import dataclass
 from typing import Any, Dict, List, Literal, Optional, Tuple, Union
 
+import numpy as np
 from PIL import Image
 
 from camel.messages import (
@@ -27,10 +28,11 @@ from camel.messages import (
 from camel.prompts import CodePrompt, TextPrompt
 from camel.types import (
     OpenAIBackendRole,
-    OpenAIImageDetailType,
     OpenAIImageType,
+    OpenAIVisionDetailType,
     RoleType,
 )
+from camel.utils import Constants
 
 
 @dataclass
@@ -39,19 +41,29 @@ class BaseMessage:
 
     Args:
         role_name (str): The name of the user or assistant role.
-        role_type (RoleType): The type of role, either
-            :obj:`RoleType.ASSISTANT` or :obj:`RoleType.USER`.
+        role_type (RoleType): The type of role, either :obj:`RoleType.
+            ASSISTANT` or :obj:`RoleType.USER`.
         meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
             for the message.
         content (str): The content of the message.
+        video_bytes (Optional[bytes]): Optional bytes of a video associated
+            with the message. Default is None.
+        image_list (Optional[List[Image.Image]]): Optional list of PIL Image
+            objects associated with the message. Default is None.
+        image_detail (Literal["auto", "low", "high"]): Detail level of the
+            images associated with the message. Default is "auto".
+        video_detail (Literal["auto", "low", "high"]): Detail level of the
+            videos associated with the message. Default is "low".
     """
 
     role_name: str
     role_type: RoleType
     meta_dict: Optional[Dict[str, str]]
     content: str
-    image: Optional[Image.Image] = None
+    video_bytes: Optional[bytes] = None
+    image_list: Optional[List[Image.Image]] = None
     image_detail: Literal["auto", "low", "high"] = "auto"
+    video_detail: Literal["auto", "low", "high"] = "low"
 
     @classmethod
     def make_user_message(
@@ -59,16 +71,24 @@ class BaseMessage:
         role_name: str,
         content: str,
         meta_dict: Optional[Dict[str, str]] = None,
-        image: Optional[Image.Image] = None,
-        image_detail: Union[OpenAIImageDetailType, str] = "auto",
-    ) -> 'BaseMessage':
+        video_bytes: Optional[bytes] = None,
+        image_list: Optional[List[Image.Image]] = None,
+        image_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.AUTO,
+        video_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.LOW,
+    ) -> "BaseMessage":
         return cls(
             role_name,
             RoleType.USER,
             meta_dict,
             content,
-            image,
-            OpenAIImageDetailType(image_detail).value,
+            video_bytes,
+            image_list,
+            OpenAIVisionDetailType(image_detail).value,
+            OpenAIVisionDetailType(video_detail).value,
         )
 
     @classmethod
@@ -77,16 +97,24 @@ class BaseMessage:
         role_name: str,
         content: str,
         meta_dict: Optional[Dict[str, str]] = None,
-        image: Optional[Image.Image] = None,
-        image_detail: Union[OpenAIImageDetailType, str] = "auto",
-    ) -> 'BaseMessage':
+        video_bytes: Optional[bytes] = None,
+        image_list: Optional[List[Image.Image]] = None,
+        image_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.AUTO,
+        video_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.LOW,
+    ) -> "BaseMessage":
         return cls(
             role_name,
             RoleType.ASSISTANT,
             meta_dict,
             content,
-            image,
-            OpenAIImageDetailType(image_detail).value,
+            video_bytes,
+            image_list,
+            OpenAIVisionDetailType(image_detail).value,
+            OpenAIVisionDetailType(video_detail).value,
         )
 
     def create_new_instance(self, content: str) -> "BaseMessage":
@@ -241,46 +269,107 @@ class BaseMessage:
         Returns:
             OpenAIUserMessage: The converted :obj:`OpenAIUserMessage` object.
         """
-        if self.image is None:
-            return {"role": "user", "content": self.content}
-        else:
-            #
-            if self.image.format is None:
-                raise ValueError(
-                    f"Image's `format` is `None`, please "
-                    f"transform the `PIL.Image.Image` to one of "
-                    f"following supported formats, such as "
-                    f"{list(OpenAIImageType)}"
-                )
-
-            image_type: str = self.image.format.lower()
-            if image_type not in OpenAIImageType:
-                raise ValueError(
-                    f"Image type {self.image.format} "
-                    f"is not supported by OpenAI vision model"
-                )
-            with io.BytesIO() as buffer:
-                self.image.save(fp=buffer, format=self.image.format)
-                encoded_image = base64.b64encode(buffer.getvalue()).decode(
-                    "utf-8"
-                )
-            image_prefix = f"data:image/{image_type};base64,"
+        hybird_content: List[Any] = []
+        hybird_content.append(
+            {
+                "type": "text",
+                "text": self.content,
+            }
+        )
 
-            return {
-                "role": "user",
-                "content": [
-                    {
-                        "type": "text",
-                        "text": self.content,
-                    },
+        if self.image_list and len(self.image_list) > 0:
+            for image in self.image_list:
+                if image.format is None:
+                    raise ValueError(
+                        f"Image's `format` is `None`, please "
+                        f"transform the `PIL.Image.Image` to one of "
+                        f"following supported formats, such as "
+                        f"{list(OpenAIImageType)}"
+                    )
+
+                image_type: str = image.format.lower()
+                if image_type not in OpenAIImageType:
+                    raise ValueError(
+                        f"Image type {image.format} "
+                        f"is not supported by OpenAI vision model"
+                    )
+                with io.BytesIO() as buffer:
+                    image.save(fp=buffer, format=image.format)
+                    encoded_image = base64.b64encode(buffer.getvalue()).decode(
+                        "utf-8"
+                    )
+                image_prefix = f"data:image/{image_type};base64,"
+                hybird_content.append(
                     {
                         "type": "image_url",
                         "image_url": {
                             "url": f"{image_prefix}{encoded_image}",
                             "detail": self.image_detail,
                         },
+                    }
+                )
+
+        if self.video_bytes:
+            import imageio.v3 as iio
+
+            base64Frames: List[str] = []
+            frame_count = 0
+            # read video bytes
+            video = iio.imiter(
+                self.video_bytes, plugin=Constants.VIDEO_DEFAULT_PLUG_PYAV
+            )
+
+            for frame in video:
+                frame_count += 1
+                if (
+                    frame_count % Constants.VIDEO_IMAGE_EXTRACTION_INTERVAL
+                    == 0
+                ):
+                    # convert frame to numpy array
+                    frame_array = np.asarray(frame)
+                    frame_image = Image.fromarray(frame_array)
+
+                    # Get the dimensions of the frame
+                    width, height = frame_image.size
+
+                    # resize the frame to the default image size
+                    new_width = Constants.VIDEO_DEFAULT_IMAGE_SIZE
+                    aspect_ratio = width / height
+                    new_height = int(new_width / aspect_ratio)
+                    resized_img = frame_image.resize((new_width, new_height))
+
+                    # encode the image to base64
+                    with io.BytesIO() as buffer:
+                        image_format = OpenAIImageType.JPEG.value
+                        image_format = image_format.upper()
+                        resized_img.save(fp=buffer, format=image_format)
+                        encoded_image = base64.b64encode(
+                            buffer.getvalue()
+                        ).decode("utf-8")
+
+                    base64Frames.append(encoded_image)
+
+            for encoded_image in base64Frames:
+                item = {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": f"data:image/jpeg;base64,{encoded_image}",
+                        "detail": self.video_detail,
                     },
-                ],
+                }
+
+                hybird_content.append(item)
+
+        if len(hybird_content) > 1:
+            return {
+                "role": "user",
+                "content": hybird_content,
+            }
+        # This return just for str message
+        else:
+            return {
+                "role": "user",
+                "content": self.content,
             }
 
     def to_openai_assistant_message(self) -> OpenAIAssistantMessage:
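
With the single image field replaced by video_bytes, image_list, image_detail, and video_detail, a user message can now carry several images and an optional video. A hedged sketch of the extended API (the image path is a placeholder; the conversion method name to_openai_user_message follows the existing camel API):

    from PIL import Image

    from camel.messages import BaseMessage

    # The image's `format` attribute must be set (e.g. PNG or JPEG),
    # otherwise the conversion raises a ValueError, as shown in the hunk above.
    img = Image.open("chart.png")  # placeholder path

    msg = BaseMessage.make_user_message(
        role_name="User",
        content="What does this chart show?",
        image_list=[img],
        image_detail="low",
    )

    # Yields {"role": "user", "content": [text item, image_url item, ...]}.
    openai_msg = msg.to_openai_user_message()
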
camel/models/__init__.py CHANGED
@@ -13,18 +13,26 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_model import AnthropicModel
 from .base_model import BaseModelBackend
+from .litellm_model import LiteLLMModel
 from .model_factory import ModelFactory
+from .nemotron_model import NemotronModel
+from .ollama_model import OllamaModel
 from .open_source_model import OpenSourceModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_model import OpenAIModel
 from .stub_model import StubModel
+from .zhipuai_model import ZhipuAIModel
 
 __all__ = [
     'BaseModelBackend',
     'OpenAIModel',
     'AnthropicModel',
     'StubModel',
+    'ZhipuAIModel',
     'OpenSourceModel',
     'ModelFactory',
+    'LiteLLMModel',
     'OpenAIAudioModels',
+    'NemotronModel',
+    'OllamaModel',
 ]
camel/models/anthropic_model.py CHANGED
@@ -12,15 +12,19 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional
 
-from anthropic import Anthropic
-from anthropic._types import NOT_GIVEN
+from anthropic import NOT_GIVEN, Anthropic
 
 from camel.configs import ANTHROPIC_API_PARAMS
+from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
-from camel.utils import AnthropicTokenCounter, BaseTokenCounter
+from camel.utils import (
+    AnthropicTokenCounter,
+    BaseTokenCounter,
+    model_api_key_required,
+)
 
 
 class AnthropicModel(BaseModelBackend):
@@ -31,20 +35,22 @@ class AnthropicModel(BaseModelBackend):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""Constructor for Anthropic backend.
 
         Args:
             model_type (ModelType): Model for which a backend is created,
-                one of GPT_* series.
+                one of CLAUDE_* series.
             model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into openai.ChatCompletion.create().
+                be fed into Anthropic.messages.create().
             api_key (Optional[str]): The API key for authenticating with the
                 Anthropic service. (default: :obj:`None`)
+            url (Optional[str]): The url to the model service.
         """
-        super().__init__(model_type, model_config_dict)
+        super().__init__(model_type, model_config_dict, api_key, url)
         self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
-        self.client = Anthropic(api_key=self._api_key)
+        self.client = Anthropic(api_key=self._api_key, base_url=url)
         self._token_counter: Optional[BaseTokenCounter] = None
 
     def _convert_response_from_anthropic_to_openai(self, response):
@@ -90,28 +96,29 @@ class AnthropicModel(BaseModelBackend):
         """
         return self.client.count_tokens(prompt)
 
+    @model_api_key_required
     def run(
         self,
-        messages,
+        messages: List[OpenAIMessage],
     ):
         r"""Run inference of Anthropic chat completion.
 
         Args:
-            messages (List[Dict]): Message list with the chat history
+            messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
 
         Returns:
-            Dict[str, Any]: Response in the OpenAI API format.
+            ChatCompletion: Response in the OpenAI API format.
         """
 
         if messages[0]["role"] == "system":
-            sys_msg = messages.pop(0)["content"]
+            sys_msg = str(messages.pop(0)["content"])
         else:
-            sys_msg = NOT_GIVEN
+            sys_msg = NOT_GIVEN  # type: ignore[assignment]
         response = self.client.messages.create(
             model=self.model_type.value,
             system=sys_msg,
-            messages=messages,
+            messages=messages,  # type: ignore[arg-type]
             **self.model_config_dict,
         )
 
@@ -138,8 +145,9 @@ class AnthropicModel(BaseModelBackend):
 
     @property
    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode,
-        which sends partial results each time.
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
         Returns:
             bool: Whether the model is in stream mode.
         """
camel/models/base_model.py CHANGED
@@ -31,6 +31,7 @@ class BaseModelBackend(ABC):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""Constructor for the model backend.
 
@@ -38,12 +39,14 @@
             model_type (ModelType): Model for which a backend is created.
             model_config_dict (Dict[str, Any]): A config dictionary.
             api_key (Optional[str]): The API key for authenticating with the
-                LLM service.
+                model service.
+            url (Optional[str]): The url to the model service.
         """
         self.model_type = model_type
 
         self.model_config_dict = model_config_dict
         self._api_key = api_key
+        self._url = url
         self.check_model_config()
 
     @property
@@ -102,6 +105,7 @@
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
+
         Returns:
             int: The maximum token limit for the given model.
         """
@@ -111,6 +115,7 @@
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode,
         which sends partial results each time.
+
         Returns:
             bool: Whether the model is in stream mode.
         """
camel/models/litellm_model.py ADDED
@@ -0,0 +1,112 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from camel.configs import LITELLM_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.utils import LiteLLMTokenCounter
+
+if TYPE_CHECKING:
+    from litellm.utils import CustomStreamWrapper, ModelResponse
+
+
+class LiteLLMModel:
+    r"""Constructor for LiteLLM backend with OpenAI compatibility."""
+
+    # NOTE: Currently "stream": True is not supported with LiteLLM due to the
+    # limitation of the current camel design.
+
+    def __init__(
+        self, model_type: str, model_config_dict: Dict[str, Any]
+    ) -> None:
+        r"""Constructor for LiteLLM backend.
+
+        Args:
+            model_type (str): Model for which a backend is created,
+                such as GPT-3.5-turbo, Claude-2, etc.
+            model_config_dict (Dict[str, Any]): A dictionary of parameters for
+                the model configuration.
+        """
+        self.model_type = model_type
+        self.model_config_dict = model_config_dict
+        self._client = None
+        self._token_counter: Optional[LiteLLMTokenCounter] = None
+        self.check_model_config()
+
+    @property
+    def client(self):
+        if self._client is None:
+            from litellm import completion
+
+            self._client = completion
+        return self._client
+
+    @property
+    def token_counter(self) -> LiteLLMTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            LiteLLMTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = LiteLLMTokenCounter(self.model_type)
+        return self._token_counter
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union['ModelResponse', 'CustomStreamWrapper']:
+        r"""Runs inference of LiteLLM chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI format.
+
+        Returns:
+            Union[ModelResponse, CustomStreamWrapper]:
+                `ModelResponse` in the non-stream mode, or
+                `CustomStreamWrapper` in the stream mode.
+        """
+        response = self.client(
+            model=self.model_type,
+            messages=messages,
+            **self.model_config_dict,
+        )
+        return response
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any unexpected
+        arguments to LiteLLM API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments.
+        """
+        for param in self.model_config_dict:
+            if param not in LITELLM_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into LiteLLM model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
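
Since LiteLLM resolves the provider from the model name string, driving the new backend directly might look like the following sketch (the model name and temperature value are illustrative; streaming is flagged as unsupported in the class comment above):

    from camel.models import LiteLLMModel

    model = LiteLLMModel(
        model_type="gpt-3.5-turbo",              # any LiteLLM-routable model name
        model_config_dict={"temperature": 0.2},  # keys are checked against LITELLM_API_PARAMS
    )

    response = model.run(
        [{"role": "user", "content": "Summarize the CAMEL project in one line."}]
    )
    # In non-stream mode `run` returns a litellm ModelResponse.
    print(response.choices[0].message.content)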