camel-ai 0.1.5__py3-none-any.whl → 0.1.5.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/agents/__init__.py +2 -0
- camel/agents/chat_agent.py +217 -36
- camel/agents/deductive_reasoner_agent.py +86 -31
- camel/agents/knowledge_graph_agent.py +41 -18
- camel/agents/role_assignment_agent.py +4 -1
- camel/agents/search_agent.py +122 -0
- camel/bots/__init__.py +20 -0
- camel/bots/discord_bot.py +103 -0
- camel/bots/telegram_bot.py +84 -0
- camel/configs/__init__.py +3 -0
- camel/configs/anthropic_config.py +1 -1
- camel/configs/litellm_config.py +113 -0
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/openai_embedding.py +2 -2
- camel/embeddings/sentence_transformers_embeddings.py +6 -5
- camel/embeddings/vlm_embedding.py +146 -0
- camel/functions/__init__.py +9 -0
- camel/functions/open_api_function.py +150 -29
- camel/functions/open_api_specs/biztoc/__init__.py +13 -0
- camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
- camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
- camel/functions/open_api_specs/create_qr_code/__init__.py +13 -0
- camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
- camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
- camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
- camel/functions/open_api_specs/outschool/__init__.py +13 -0
- camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
- camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
- camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
- camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
- camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
- camel/functions/open_api_specs/security_config.py +21 -0
- camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
- camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
- camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
- camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
- camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
- camel/functions/openai_function.py +3 -1
- camel/functions/search_functions.py +104 -171
- camel/functions/slack_functions.py +2 -1
- camel/human.py +3 -1
- camel/loaders/base_io.py +3 -1
- camel/loaders/unstructured_io.py +16 -22
- camel/messages/base.py +135 -46
- camel/models/__init__.py +4 -0
- camel/models/anthropic_model.py +20 -14
- camel/models/base_model.py +2 -0
- camel/models/litellm_model.py +112 -0
- camel/models/model_factory.py +8 -1
- camel/models/open_source_model.py +1 -0
- camel/models/openai_model.py +6 -2
- camel/models/zhipuai_model.py +125 -0
- camel/prompts/__init__.py +2 -0
- camel/prompts/base.py +2 -1
- camel/prompts/descripte_video_prompt.py +33 -0
- camel/prompts/task_prompt_template.py +9 -3
- camel/retrievers/auto_retriever.py +20 -11
- camel/retrievers/base.py +4 -2
- camel/retrievers/bm25_retriever.py +2 -1
- camel/retrievers/cohere_rerank_retriever.py +2 -1
- camel/retrievers/vector_retriever.py +10 -4
- camel/societies/babyagi_playing.py +2 -1
- camel/societies/role_playing.py +2 -1
- camel/storages/graph_storages/base.py +1 -0
- camel/storages/graph_storages/neo4j_graph.py +5 -3
- camel/storages/vectordb_storages/base.py +2 -1
- camel/storages/vectordb_storages/milvus.py +5 -2
- camel/toolkits/github_toolkit.py +120 -26
- camel/types/__init__.py +3 -2
- camel/types/enums.py +25 -1
- camel/utils/__init__.py +11 -2
- camel/utils/commons.py +74 -4
- camel/utils/constants.py +26 -0
- camel/utils/token_counting.py +58 -5
- {camel_ai-0.1.5.dist-info → camel_ai-0.1.5.2.dist-info}/METADATA +29 -13
- camel_ai-0.1.5.2.dist-info/RECORD +148 -0
- camel_ai-0.1.5.dist-info/RECORD +0 -119
- {camel_ai-0.1.5.dist-info → camel_ai-0.1.5.2.dist-info}/WHEEL +0 -0
camel/messages/base.py
CHANGED

```diff
@@ -16,6 +16,7 @@ import io
 from dataclasses import dataclass
 from typing import Any, Dict, List, Literal, Optional, Tuple, Union
 
+import numpy as np
 from PIL import Image
 
 from camel.messages import (
@@ -27,10 +28,11 @@ from camel.messages import (
 from camel.prompts import CodePrompt, TextPrompt
 from camel.types import (
     OpenAIBackendRole,
-    OpenAIImageDetailType,
     OpenAIImageType,
+    OpenAIVisionDetailType,
     RoleType,
 )
+from camel.utils import Constants
 
 
 @dataclass
@@ -39,19 +41,29 @@ class BaseMessage:
 
     Args:
         role_name (str): The name of the user or assistant role.
-        role_type (RoleType): The type of role, either
-
+        role_type (RoleType): The type of role, either :obj:`RoleType.
+            ASSISTANT` or :obj:`RoleType.USER`.
         meta_dict (Optional[Dict[str, str]]): Additional metadata dictionary
             for the message.
         content (str): The content of the message.
+        video_bytes (Optional[bytes]): Optional bytes of a video associated
+            with the message. Default is None.
+        image_list (Optional[List[Image.Image]]): Optional list of PIL Image
+            objects associated with the message. Default is None.
+        image_detail (Literal["auto", "low", "high"]): Detail level of the
+            images associated with the message. Default is "auto".
+        video_detail (Literal["auto", "low", "high"]): Detail level of the
+            videos associated with the message. Default is "low".
     """
 
     role_name: str
     role_type: RoleType
    meta_dict: Optional[Dict[str, str]]
     content: str
-
+    video_bytes: Optional[bytes] = None
+    image_list: Optional[List[Image.Image]] = None
     image_detail: Literal["auto", "low", "high"] = "auto"
+    video_detail: Literal["auto", "low", "high"] = "low"
 
     @classmethod
     def make_user_message(
@@ -59,16 +71,24 @@ class BaseMessage:
         role_name: str,
         content: str,
         meta_dict: Optional[Dict[str, str]] = None,
-
-
-
+        video_bytes: Optional[bytes] = None,
+        image_list: Optional[List[Image.Image]] = None,
+        image_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.AUTO,
+        video_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.LOW,
+    ) -> "BaseMessage":
         return cls(
             role_name,
             RoleType.USER,
             meta_dict,
             content,
-
-
+            video_bytes,
+            image_list,
+            OpenAIVisionDetailType(image_detail).value,
+            OpenAIVisionDetailType(video_detail).value,
         )
 
     @classmethod
@@ -77,16 +97,24 @@ class BaseMessage:
         role_name: str,
         content: str,
         meta_dict: Optional[Dict[str, str]] = None,
-
-
-
+        video_bytes: Optional[bytes] = None,
+        image_list: Optional[List[Image.Image]] = None,
+        image_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.AUTO,
+        video_detail: Union[
+            OpenAIVisionDetailType, str
+        ] = OpenAIVisionDetailType.LOW,
+    ) -> "BaseMessage":
         return cls(
             role_name,
             RoleType.ASSISTANT,
             meta_dict,
             content,
-
-
+            video_bytes,
+            image_list,
+            OpenAIVisionDetailType(image_detail).value,
+            OpenAIVisionDetailType(video_detail).value,
         )
 
     def create_new_instance(self, content: str) -> "BaseMessage":
@@ -241,46 +269,107 @@ class BaseMessage:
         Returns:
             OpenAIUserMessage: The converted :obj:`OpenAIUserMessage` object.
         """
-
-
-
-
-
-
-
-            f"transform the `PIL.Image.Image` to one of "
-            f"following supported formats, such as "
-            f"{list(OpenAIImageType)}"
-        )
-
-        image_type: str = self.image.format.lower()
-        if image_type not in OpenAIImageType:
-            raise ValueError(
-                f"Image type {self.image.format} "
-                f"is not supported by OpenAI vision model"
-            )
-        with io.BytesIO() as buffer:
-            self.image.save(fp=buffer, format=self.image.format)
-            encoded_image = base64.b64encode(buffer.getvalue()).decode(
-                "utf-8"
-            )
-        image_prefix = f"data:image/{image_type};base64,"
+        hybird_content: List[Any] = []
+        hybird_content.append(
+            {
+                "type": "text",
+                "text": self.content,
+            }
+        )
 
-
-
-
-
-
-
-
+        if self.image_list and len(self.image_list) > 0:
+            for image in self.image_list:
+                if image.format is None:
+                    raise ValueError(
+                        f"Image's `format` is `None`, please "
+                        f"transform the `PIL.Image.Image` to one of "
+                        f"following supported formats, such as "
+                        f"{list(OpenAIImageType)}"
+                    )
+
+                image_type: str = image.format.lower()
+                if image_type not in OpenAIImageType:
+                    raise ValueError(
+                        f"Image type {image.format} "
+                        f"is not supported by OpenAI vision model"
+                    )
+                with io.BytesIO() as buffer:
+                    image.save(fp=buffer, format=image.format)
+                    encoded_image = base64.b64encode(buffer.getvalue()).decode(
+                        "utf-8"
+                    )
+                image_prefix = f"data:image/{image_type};base64,"
+                hybird_content.append(
                     {
                         "type": "image_url",
                         "image_url": {
                             "url": f"{image_prefix}{encoded_image}",
                             "detail": self.image_detail,
                         },
+                    }
+                )
+
+        if self.video_bytes:
+            import imageio.v3 as iio
+
+            base64Frames: List[str] = []
+            frame_count = 0
+            # read video bytes
+            video = iio.imiter(
+                self.video_bytes, plugin=Constants.VIDEO_DEFAULT_PLUG_PYAV
+            )
+
+            for frame in video:
+                frame_count += 1
+                if (
+                    frame_count % Constants.VIDEO_IMAGE_EXTRACTION_INTERVAL
+                    == 0
+                ):
+                    # convert frame to numpy array
+                    frame_array = np.asarray(frame)
+                    frame_image = Image.fromarray(frame_array)
+
+                    # Get the dimensions of the frame
+                    width, height = frame_image.size
+
+                    # resize the frame to the default image size
+                    new_width = Constants.VIDEO_DEFAULT_IMAGE_SIZE
+                    aspect_ratio = width / height
+                    new_height = int(new_width / aspect_ratio)
+                    resized_img = frame_image.resize((new_width, new_height))
+
+                    # encode the image to base64
+                    with io.BytesIO() as buffer:
+                        image_format = OpenAIImageType.JPEG.value
+                        image_format = image_format.upper()
+                        resized_img.save(fp=buffer, format=image_format)
+                        encoded_image = base64.b64encode(
+                            buffer.getvalue()
+                        ).decode("utf-8")
+
+                    base64Frames.append(encoded_image)
+
+            for encoded_image in base64Frames:
+                item = {
+                    "type": "image_url",
+                    "image_url": {
+                        "url": f"data:image/jpeg;base64,{encoded_image}",
+                        "detail": self.video_detail,
                     },
-
+                }
+
+                hybird_content.append(item)
+
+        if len(hybird_content) > 1:
+            return {
+                "role": "user",
+                "content": hybird_content,
+            }
+        # This return just for str message
+        else:
+            return {
+                "role": "user",
+                "content": self.content,
            }
 
     def to_openai_assistant_message(self) -> OpenAIAssistantMessage:
```
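The multimodal additions above are easiest to read from the call site. Below is a minimal usage sketch assuming only the signatures visible in the hunks; the file paths are placeholders, and `to_openai_user_message()` is taken to be the method whose body is rewritten above:

```python
from PIL import Image

from camel.messages import BaseMessage

# A PIL image must carry a known `format`, or the conversion raises.
image = Image.open("sample.png")
with open("clip.mp4", "rb") as f:
    video_bytes = f.read()

msg = BaseMessage.make_user_message(
    role_name="User",
    content="Describe what you see.",
    image_list=[image],       # new in 0.1.5.2
    image_detail="low",       # str is coerced via OpenAIVisionDetailType
    video_bytes=video_bytes,  # frames are sampled, resized, base64-encoded
    video_detail="low",
)

# Produces {"role": "user", "content": [text part, image_url parts, ...]},
# or a plain string payload when no images or video are attached.
openai_msg = msg.to_openai_user_message()
```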
camel/models/__init__.py
CHANGED

```diff
@@ -13,18 +13,22 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_model import AnthropicModel
 from .base_model import BaseModelBackend
+from .litellm_model import LiteLLMModel
 from .model_factory import ModelFactory
 from .open_source_model import OpenSourceModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_model import OpenAIModel
 from .stub_model import StubModel
+from .zhipuai_model import ZhipuAIModel
 
 __all__ = [
     'BaseModelBackend',
     'OpenAIModel',
     'AnthropicModel',
     'StubModel',
+    'ZhipuAIModel',
     'OpenSourceModel',
     'ModelFactory',
+    'LiteLLMModel',
     'OpenAIAudioModels',
 ]
```
camel/models/anthropic_model.py
CHANGED

```diff
@@ -12,15 +12,19 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional
 
-from anthropic import Anthropic
-from anthropic._types import NOT_GIVEN
+from anthropic import NOT_GIVEN, Anthropic
 
 from camel.configs import ANTHROPIC_API_PARAMS
+from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
 from camel.types import ChatCompletion, ModelType
-from camel.utils import
+from camel.utils import (
+    AnthropicTokenCounter,
+    BaseTokenCounter,
+    model_api_key_required,
+)
 
 
 class AnthropicModel(BaseModelBackend):
@@ -36,9 +40,9 @@ class AnthropicModel(BaseModelBackend):
 
     Args:
         model_type (ModelType): Model for which a backend is created,
-            one of
+            one of CLAUDE_* series.
         model_config_dict (Dict[str, Any]): A dictionary that will
-            be fed into
+            be fed into Anthropic.messages.create().
         api_key (Optional[str]): The API key for authenticating with the
             Anthropic service. (default: :obj:`None`)
     """
@@ -90,28 +94,29 @@ class AnthropicModel(BaseModelBackend):
         """
         return self.client.count_tokens(prompt)
 
+    @model_api_key_required
     def run(
         self,
-        messages,
+        messages: List[OpenAIMessage],
     ):
         r"""Run inference of Anthropic chat completion.
 
         Args:
-            messages (List[
+            messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
 
         Returns:
-
+            ChatCompletion: Response in the OpenAI API format.
         """
 
         if messages[0]["role"] == "system":
-            sys_msg = messages.pop(0)["content"]
+            sys_msg = str(messages.pop(0)["content"])
         else:
-            sys_msg = NOT_GIVEN
+            sys_msg = NOT_GIVEN  # type: ignore[assignment]
         response = self.client.messages.create(
             model=self.model_type.value,
             system=sys_msg,
-            messages=messages,
+            messages=messages,  # type: ignore[arg-type]
             **self.model_config_dict,
         )
 
@@ -138,8 +143,9 @@ class AnthropicModel(BaseModelBackend):
 
     @property
     def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode,
-
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
         Returns:
             bool: Whether the model is in stream mode.
         """
```
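The behavioral core of the `run()` change is the system-message split. Here is a standalone sketch of just that step, for illustration only (not a drop-in replacement for the method):

```python
from anthropic import NOT_GIVEN  # sentinel meaning "no system prompt"

# An OpenAI-format history: a leading system message, if present, is
# popped off and passed separately as `system=` to messages.create().
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]

if messages[0]["role"] == "system":
    sys_msg = str(messages.pop(0)["content"])  # now coerced to str
else:
    sys_msg = NOT_GIVEN
```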
camel/models/base_model.py
CHANGED

```diff
@@ -102,6 +102,7 @@ class BaseModelBackend(ABC):
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
+
         Returns:
             int: The maximum token limit for the given model.
         """
@@ -111,6 +112,7 @@ class BaseModelBackend(ABC):
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode,
         which sends partial results each time.
+
         Returns:
             bool: Whether the model is in stream mode.
         """
```
camel/models/litellm_model.py
ADDED

```diff
@@ -0,0 +1,112 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from camel.configs import LITELLM_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.utils import LiteLLMTokenCounter
+
+if TYPE_CHECKING:
+    from litellm.utils import CustomStreamWrapper, ModelResponse
+
+
+class LiteLLMModel:
+    r"""Constructor for LiteLLM backend with OpenAI compatibility."""
+
+    # NOTE: Currently "stream": True is not supported with LiteLLM due to the
+    # limitation of the current camel design.
+
+    def __init__(
+        self, model_type: str, model_config_dict: Dict[str, Any]
+    ) -> None:
+        r"""Constructor for LiteLLM backend.
+
+        Args:
+            model_type (str): Model for which a backend is created,
+                such as GPT-3.5-turbo, Claude-2, etc.
+            model_config_dict (Dict[str, Any]): A dictionary of parameters for
+                the model configuration.
+        """
+        self.model_type = model_type
+        self.model_config_dict = model_config_dict
+        self._client = None
+        self._token_counter: Optional[LiteLLMTokenCounter] = None
+        self.check_model_config()
+
+    @property
+    def client(self):
+        if self._client is None:
+            from litellm import completion
+
+            self._client = completion
+        return self._client
+
+    @property
+    def token_counter(self) -> LiteLLMTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            LiteLLMTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = LiteLLMTokenCounter(self.model_type)
+        return self._token_counter
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union['ModelResponse', 'CustomStreamWrapper']:
+        r"""Runs inference of LiteLLM chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI format.
+
+        Returns:
+            Union[ModelResponse, CustomStreamWrapper]:
+                `ModelResponse` in the non-stream mode, or
+                `CustomStreamWrapper` in the stream mode.
+        """
+        response = self.client(
+            model=self.model_type,
+            messages=messages,
+            **self.model_config_dict,
+        )
+        return response
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any unexpected
+        arguments to LiteLLM API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments.
+        """
+        for param in self.model_config_dict:
+            if param not in LITELLM_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into LiteLLM model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
```
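A hedged usage sketch for the new backend; the model string and config values here are illustrative, not package defaults:

```python
from camel.models import LiteLLMModel

# model_type is a plain string routed by litellm; the config dict is
# validated against LITELLM_API_PARAMS in check_model_config().
model = LiteLLMModel(
    model_type="gpt-3.5-turbo",
    model_config_dict={"temperature": 0.2},
)

# run() forwards to litellm.completion and returns a ModelResponse in
# non-stream mode; per the NOTE in the class, "stream": True is not yet
# supported by the current camel design.
response = model.run([{"role": "user", "content": "Say hi in one word."}])
```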
camel/models/model_factory.py
CHANGED

```diff
@@ -18,6 +18,7 @@ from camel.models.base_model import BaseModelBackend
 from camel.models.open_source_model import OpenSourceModel
 from camel.models.openai_model import OpenAIModel
 from camel.models.stub_model import StubModel
+from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelType
 
 
@@ -58,8 +59,14 @@ class ModelFactory:
             model_class = OpenSourceModel
         elif model_type.is_anthropic:
             model_class = AnthropicModel
+        elif model_type.is_zhipuai:
+            model_class = ZhipuAIModel
         else:
             raise ValueError(f"Unknown model type `{model_type}` is input")
 
-
+        if model_type.is_open_source:
+            inst = model_class(model_type, model_config_dict)
+        else:
+            inst = model_class(model_type, model_config_dict, api_key)
+
         return inst
```
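For orientation, a sketch of how the new dispatch is reached, assuming the `create()` signature implied by the variables in the hunk (`model_type`, `model_config_dict`, `api_key`); treat the exact keyword names as assumptions:

```python
from camel.models import ModelFactory
from camel.types import ModelType

# ZhipuAI model types now route to ZhipuAIModel; only open-source backends
# are constructed without an api_key argument.
model = ModelFactory.create(
    ModelType.GPT_3_5_TURBO,
    model_config_dict={},
    api_key=None,  # backends fall back to environment variables
)
```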
camel/models/openai_model.py
CHANGED

```diff
@@ -20,7 +20,11 @@ from camel.configs import OPENAI_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
-from camel.utils import
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    model_api_key_required,
+)
 
 
 class OpenAIModel(BaseModelBackend):
@@ -62,7 +66,7 @@ class OpenAIModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
-    @
+    @model_api_key_required
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/zhipuai_model.py
ADDED

```diff
@@ -0,0 +1,125 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+import os
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import OPENAI_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    model_api_key_required,
+)
+
+
+class ZhipuAIModel(BaseModelBackend):
+    r"""ZhipuAI API in a unified BaseModelBackend interface."""
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Constructor for ZhipuAI backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created,
+                such as GLM_* series.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into openai.ChatCompletion.create().
+            api_key (Optional[str]): The API key for authenticating with the
+                ZhipuAI service. (default: :obj:`None`)
+        """
+        super().__init__(model_type, model_config_dict)
+        self._url = url or os.environ.get("ZHIPUAI_API_BASE_URL")
+        self._api_key = api_key or os.environ.get("ZHIPUAI_API_KEY")
+        self._client = OpenAI(
+            timeout=60,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+        self._token_counter: Optional[BaseTokenCounter] = None
+
+    @model_api_key_required
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        # Use OpenAI cilent as interface call ZhipuAI
+        # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type.value,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            # It's a temporary setting for token counter.
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to OpenAI API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API.
+        """
+        for param in self.model_config_dict:
+            if param not in OPENAI_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into OpenAI model backend."
+                )
+        pass
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
```
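An illustrative sketch for the ZhipuAI backend, which drives ZhipuAI's OpenAI-compatible endpoint through the OpenAI SDK (see the reference link in `run()` above). Credentials fall back to the `ZHIPUAI_API_KEY` / `ZHIPUAI_API_BASE_URL` environment variables; the GLM enum member used here is a hypothetical example of the `GLM_*` series mentioned in the docstring:

```python
from camel.models import ZhipuAIModel
from camel.types import ModelType

model = ZhipuAIModel(
    model_type=ModelType.GLM_4,  # hypothetical GLM_* member (see enums.py diff)
    model_config_dict={"temperature": 0.2},
)

# Same call shape as the OpenAI backend; returns ChatCompletion, or a
# Stream[ChatCompletionChunk] when "stream": True is configured.
response = model.run([{"role": "user", "content": "Hello"}])
```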
camel/prompts/__init__.py
CHANGED

```diff
@@ -14,6 +14,7 @@
 from .ai_society import AISocietyPromptTemplateDict
 from .base import CodePrompt, TextPrompt, TextPromptDict
 from .code import CodePromptTemplateDict
+from .descripte_video_prompt import DescriptionVideoPromptTemplateDict
 from .evaluation import EvaluationPromptTemplateDict
 from .misalignment import MisalignmentPromptTemplateDict
 from .object_recognition import ObjectRecognitionPromptTemplateDict
@@ -37,4 +38,5 @@ __all__ = [
     'PromptTemplateGenerator',
     'SolutionExtractionPromptTemplateDict',
     'ObjectRecognitionPromptTemplateDict',
+    'DescriptionVideoPromptTemplateDict',
 ]
```