camel-ai 0.2.21__py3-none-any.whl → 0.2.23a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/_types.py +41 -0
- camel/agents/_utils.py +188 -0
- camel/agents/chat_agent.py +556 -965
- camel/agents/knowledge_graph_agent.py +7 -1
- camel/agents/multi_hop_generator_agent.py +1 -1
- camel/configs/base_config.py +10 -13
- camel/configs/deepseek_config.py +4 -30
- camel/configs/gemini_config.py +5 -31
- camel/configs/openai_config.py +14 -32
- camel/configs/qwen_config.py +36 -36
- camel/datagen/self_improving_cot.py +79 -1
- camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
- camel/datagen/self_instruct/self_instruct.py +7 -2
- camel/datasets/__init__.py +28 -0
- camel/datasets/base.py +969 -0
- camel/embeddings/openai_embedding.py +10 -1
- camel/environments/__init__.py +16 -0
- camel/environments/base.py +503 -0
- camel/extractors/__init__.py +16 -0
- camel/extractors/base.py +263 -0
- camel/interpreters/docker/Dockerfile +12 -0
- camel/interpreters/docker_interpreter.py +19 -1
- camel/interpreters/subprocess_interpreter.py +42 -17
- camel/loaders/__init__.py +2 -0
- camel/loaders/mineru_extractor.py +250 -0
- camel/memories/agent_memories.py +16 -1
- camel/memories/blocks/chat_history_block.py +10 -2
- camel/memories/blocks/vectordb_block.py +1 -0
- camel/memories/context_creators/score_based.py +20 -3
- camel/memories/records.py +10 -0
- camel/messages/base.py +8 -8
- camel/models/_utils.py +57 -0
- camel/models/aiml_model.py +48 -17
- camel/models/anthropic_model.py +41 -3
- camel/models/azure_openai_model.py +39 -3
- camel/models/base_model.py +132 -4
- camel/models/cohere_model.py +88 -11
- camel/models/deepseek_model.py +107 -63
- camel/models/gemini_model.py +133 -15
- camel/models/groq_model.py +72 -10
- camel/models/internlm_model.py +14 -3
- camel/models/litellm_model.py +9 -2
- camel/models/mistral_model.py +42 -5
- camel/models/model_manager.py +48 -3
- camel/models/moonshot_model.py +33 -4
- camel/models/nemotron_model.py +32 -3
- camel/models/nvidia_model.py +43 -3
- camel/models/ollama_model.py +139 -17
- camel/models/openai_audio_models.py +7 -1
- camel/models/openai_compatible_model.py +37 -3
- camel/models/openai_model.py +158 -46
- camel/models/qwen_model.py +61 -4
- camel/models/reka_model.py +53 -3
- camel/models/samba_model.py +209 -4
- camel/models/sglang_model.py +153 -14
- camel/models/siliconflow_model.py +16 -3
- camel/models/stub_model.py +46 -4
- camel/models/togetherai_model.py +38 -3
- camel/models/vllm_model.py +37 -3
- camel/models/yi_model.py +36 -3
- camel/models/zhipuai_model.py +38 -3
- camel/retrievers/__init__.py +3 -0
- camel/retrievers/hybrid_retrival.py +237 -0
- camel/toolkits/__init__.py +9 -2
- camel/toolkits/arxiv_toolkit.py +2 -1
- camel/toolkits/ask_news_toolkit.py +4 -2
- camel/toolkits/base.py +22 -3
- camel/toolkits/code_execution.py +2 -0
- camel/toolkits/dappier_toolkit.py +2 -1
- camel/toolkits/data_commons_toolkit.py +38 -12
- camel/toolkits/function_tool.py +13 -0
- camel/toolkits/github_toolkit.py +5 -1
- camel/toolkits/google_maps_toolkit.py +2 -1
- camel/toolkits/google_scholar_toolkit.py +2 -0
- camel/toolkits/human_toolkit.py +0 -3
- camel/toolkits/linkedin_toolkit.py +3 -2
- camel/toolkits/meshy_toolkit.py +3 -2
- camel/toolkits/mineru_toolkit.py +178 -0
- camel/toolkits/networkx_toolkit.py +240 -0
- camel/toolkits/notion_toolkit.py +2 -0
- camel/toolkits/openbb_toolkit.py +3 -2
- camel/toolkits/reddit_toolkit.py +11 -3
- camel/toolkits/retrieval_toolkit.py +6 -1
- camel/toolkits/semantic_scholar_toolkit.py +2 -1
- camel/toolkits/stripe_toolkit.py +8 -2
- camel/toolkits/sympy_toolkit.py +44 -1
- camel/toolkits/video_toolkit.py +2 -0
- camel/toolkits/whatsapp_toolkit.py +3 -2
- camel/toolkits/zapier_toolkit.py +191 -0
- camel/types/__init__.py +2 -2
- camel/types/agents/__init__.py +16 -0
- camel/types/agents/tool_calling_record.py +52 -0
- camel/types/enums.py +3 -0
- camel/types/openai_types.py +16 -14
- camel/utils/__init__.py +2 -1
- camel/utils/async_func.py +2 -2
- camel/utils/commons.py +114 -1
- camel/verifiers/__init__.py +23 -0
- camel/verifiers/base.py +340 -0
- camel/verifiers/models.py +82 -0
- camel/verifiers/python_verifier.py +202 -0
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/METADATA +273 -256
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/RECORD +106 -85
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/WHEEL +1 -1
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/LICENSE +0 -0
camel/memories/agent_memories.py
CHANGED

@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

+import warnings
 from typing import List, Optional

 from camel.memories.base import AgentMemory, BaseContextCreator
@@ -49,7 +50,17 @@ class ChatHistoryMemory(AgentMemory):
         self._chat_history_block = ChatHistoryBlock(storage=storage)

     def retrieve(self) -> List[ContextRecord]:
-        return self._chat_history_block.retrieve(self._window_size)
+        records = self._chat_history_block.retrieve(self._window_size)
+        if self._window_size is not None and len(records) == self._window_size:
+            warnings.warn(
+                f"Chat history window size limit ({self._window_size}) "
+                f"reached. Some earlier messages will not be included in "
+                f"the context. Consider increasing window_size if you need "
+                f"a longer context.",
+                UserWarning,
+                stacklevel=2,
+            )
+        return records

     def write_records(self, records: List[MemoryRecord]) -> None:
         self._chat_history_block.write_records(records)
@@ -103,6 +114,10 @@ class VectorDBMemory(AgentMemory):
     def get_context_creator(self) -> BaseContextCreator:
         return self._context_creator

+    def clear(self) -> None:
+        r"""Removes all records from the vector database memory."""
+        self._vectordb_block.clear()
+

 class LongtermAgentMemory(AgentMemory):
     r"""An implementation of the :obj:`AgentMemory` abstract base class for
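
The retrieve() change above only warns; it does not change what is returned. A minimal sketch of how the new warning surfaces (not part of the diff; assumes the ChatHistoryMemory constructor keeps its 0.2.21 signature):

import warnings

from camel.memories import ChatHistoryMemory, ScoreBasedContextCreator
from camel.types import ModelType
from camel.utils import OpenAITokenCounter

memory = ChatHistoryMemory(
    context_creator=ScoreBasedContextCreator(
        token_counter=OpenAITokenCounter(ModelType.GPT_4O_MINI),
        token_limit=1024,
    ),
    window_size=4,  # small window so the limit is easy to hit
)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    memory.retrieve()  # warns once the window holds exactly 4 records
    print([str(w.message) for w in caught])
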
camel/memories/blocks/chat_history_block.py
CHANGED

@@ -85,13 +85,21 @@ class ChatHistoryBlock(MemoryBlock):
             if record.role_at_backend == OpenAIBackendRole.SYSTEM:
                 # System messages are always kept.
                 output_records.append(
-                    ContextRecord(memory_record=record, score=1.0)
+                    ContextRecord(
+                        memory_record=record,
+                        score=1.0,
+                        timestamp=record.timestamp,
+                    )
                 )
             else:
                 # Other messages' score drops down gradually
                 score *= self.keep_rate
                 output_records.append(
-                    ContextRecord(memory_record=record, score=score)
+                    ContextRecord(
+                        memory_record=record,
+                        score=score,
+                        timestamp=record.timestamp,
+                    )
                 )

         output_records.reverse()
camel/memories/blocks/vectordb_block.py
CHANGED

@@ -74,6 +74,7 @@ class VectorDBBlock(MemoryBlock):
             ContextRecord(
                 memory_record=MemoryRecord.from_dict(result.record.payload),
                 score=result.similarity,
+                timestamp=result.record.payload['timestamp'],
             )
             for result in results
             if result.record.payload is not None
camel/memories/context_creators/score_based.py
CHANGED

@@ -15,11 +15,14 @@ from typing import List, Tuple

 from pydantic import BaseModel

+from camel.logger import get_logger
 from camel.memories.base import BaseContextCreator
 from camel.memories.records import ContextRecord
 from camel.messages import OpenAIMessage
 from camel.utils import BaseTokenCounter

+logger = get_logger(__name__)
+

 class _ContextUnit(BaseModel):
     idx: int
@@ -101,18 +104,30 @@ class ScoreBasedContextCreator(BaseContextCreator):
         # If not exceed token limit, simply return
         total_tokens = sum([unit.num_tokens for unit in context_units])
         if total_tokens <= self.token_limit:
+            context_units = sorted(
+                context_units,
+                key=lambda unit: (unit.record.timestamp, unit.record.score),
+            )
             return self._create_output(context_units)

+        # Log warning about token limit being exceeded
+        logger.warning(
+            f"Token limit reached ({total_tokens} > {self.token_limit}). "
+            f"Some messages will be pruned from memory to meet the limit."
+        )
+
         # Sort by score
         context_units = sorted(
-            context_units, key=lambda unit: unit.record.score
+            context_units,
+            key=lambda unit: (unit.record.timestamp, unit.record.score),
         )

         # Remove the least score messages until total token number is smaller
         # than token limit
         truncate_idx = None
         for i, unit in enumerate(context_units):
-            if
+            if i == len(context_units) - 1:
+                # If we reach the end of the list and still exceed the token
                 raise RuntimeError(
                     "Cannot create context: exceed token limit.", total_tokens
                 )
@@ -135,7 +150,9 @@ class ScoreBasedContextCreator(BaseContextCreator):
         for output, specifically a list of OpenAIMessages and an integer
         representing the total token count.
         """
-        context_units = sorted(
+        context_units = sorted(
+            context_units, key=lambda unit: unit.record.timestamp
+        )
         return [
             unit.record.memory_record.to_openai_message()
             for unit in context_units
camel/memories/records.py
CHANGED

@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 from dataclasses import asdict
+from datetime import datetime, timezone
 from typing import Any, ClassVar, Dict
 from uuid import UUID, uuid4

@@ -37,6 +38,7 @@ class MemoryRecord(BaseModel):
         extra_info (Dict[str, str], optional): A dictionary of additional
             key-value pairs that provide more information. If not given, it
             will be an empty `Dict`.
+        timestamp (float, optional): The timestamp when the record was created.
     """

     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -45,6 +47,9 @@ class MemoryRecord(BaseModel):
     role_at_backend: OpenAIBackendRole
     uuid: UUID = Field(default_factory=uuid4)
     extra_info: Dict[str, str] = Field(default_factory=dict)
+    timestamp: float = Field(
+        default_factory=lambda: datetime.now(timezone.utc).timestamp()
+    )

     _MESSAGE_TYPES: ClassVar[dict] = {
         "BaseMessage": BaseMessage,
@@ -67,6 +72,7 @@ class MemoryRecord(BaseModel):
             message=reconstructed_message,
             role_at_backend=record_dict["role_at_backend"],
             extra_info=record_dict["extra_info"],
+            timestamp=record_dict["timestamp"],
         )

     def to_dict(self) -> Dict[str, Any]:
@@ -81,6 +87,7 @@ class MemoryRecord(BaseModel):
             },
             "role_at_backend": self.role_at_backend,
             "extra_info": self.extra_info,
+            "timestamp": self.timestamp,
         }

     def to_openai_message(self) -> OpenAIMessage:
@@ -93,3 +100,6 @@ class ContextRecord(BaseModel):

     memory_record: MemoryRecord
     score: float
+    timestamp: float = Field(
+        default_factory=lambda: datetime.now(timezone.utc).timestamp()
+    )
camel/messages/base.py
CHANGED

@@ -15,7 +15,7 @@ import base64
 import io
 import re
 from dataclasses import dataclass
-from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union
+from typing import Any, Dict, List, Literal, Optional, Tuple, Union

 import numpy as np
 from PIL import Image
@@ -72,7 +72,7 @@ class BaseMessage:
     image_list: Optional[List[Image.Image]] = None
     image_detail: Literal["auto", "low", "high"] = "auto"
     video_detail: Literal["auto", "low", "high"] = "low"
-    parsed: Optional[Union[Type[BaseModel], dict]] = None
+    parsed: Optional[Union[BaseModel, dict]] = None

     @classmethod
     def make_user_message(
@@ -416,8 +416,8 @@ class BaseMessage:
         Returns:
             OpenAIUserMessage: The converted :obj:`OpenAIUserMessage` object.
         """
-
-
+        hybrid_content: List[Any] = []
+        hybrid_content.append(
             {
                 "type": "text",
                 "text": self.content,
@@ -445,7 +445,7 @@ class BaseMessage:
                 "utf-8"
            )
             image_prefix = f"data:image/{image_type};base64,"
-
+            hybrid_content.append(
                 {
                     "type": "image_url",
                     "image_url": {
@@ -504,12 +504,12 @@ class BaseMessage:
                     },
                 }

-
+            hybrid_content.append(item)

-        if len(
+        if len(hybrid_content) > 1:
             return {
                 "role": "user",
-                "content":
+                "content": hybrid_content,
             }
         # This return just for str message
         else:
camel/models/_utils.py
ADDED

@@ -0,0 +1,57 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import textwrap
+from typing import Optional, Type
+
+from pydantic import BaseModel
+
+from camel.messages import OpenAIMessage
+
+
+def try_modify_message_with_format(
+    message: OpenAIMessage,
+    response_format: Optional[Type[BaseModel]],
+) -> None:
+    r"""Modifies the content of the message to include the instruction of using
+    the response format.
+
+    The message will not be modified in the following cases:
+    - response_format is None
+    - message content is not a string
+    - message role is assistant
+
+    Args:
+        response_format (Optional[Type[BaseModel]]): The Pydantic model class.
+        message (OpenAIMessage): The message to be modified.
+    """
+    if response_format is None:
+        return
+
+    if not isinstance(message["content"], str):
+        return
+
+    if message["role"] == "assistant":
+        return
+
+    json_schema = response_format.model_json_schema()
+    updated_prompt = textwrap.dedent(
+        f"""\
+        {message["content"]}
+
+        Please generate a JSON response adhering to the following JSON schema:
+        {json_schema}
+        Make sure the JSON response is valid and matches the EXACT structure defined in the schema. Your result should ONLY be a valid json object, WITHOUT ANY OTHER TEXT OR COMMENTS.
+        """  # noqa: E501
+    )
+    message["content"] = updated_prompt
camel/models/aiml_model.py
CHANGED

@@ -12,12 +12,14 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import AIML_API_PARAMS, AIMLConfig
 from camel.messages import OpenAIMessage
+from camel.models._utils import try_modify_message_with_format
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
     ChatCompletion,
@@ -52,11 +54,7 @@ class AIMLModel(BaseModelBackend):
             (default: :obj:`None`)
     """

-    @api_keys_required(
-        [
-            ("api_key", 'AIML_API_KEY'),
-        ]
-    )
+    @api_keys_required([("api_key", "AIML_API_KEY")])
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -81,12 +79,34 @@ class AIMLModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )

-    def run(
+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        if response_format:
+            # AIML API does not natively support response format
+            try_modify_message_with_format(messages[-1], response_format)
+        return request_config
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of
+        r"""Runs inference of AIML chat completion.

         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
@@ -97,15 +117,26 @@ class AIMLModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-
-
-
-        # Handle special case for tools parameter
-        if model_config.get('tools') is None:
-            model_config['tools'] = []
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )

         response = self._client.chat.completions.create(
-            messages=messages, model=self.model_type, **model_config
+            messages=messages, model=self.model_type, **request_config
+        )
+        return response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+        response = await self._async_client.chat.completions.create(
+            messages=messages, model=self.model_type, **request_config
         )
         return response

@@ -144,4 +175,4 @@ class AIMLModel(BaseModelBackend):
         Returns:
             bool: Whether the model is in stream mode.
         """
-        return self.model_config_dict.get('stream', False)
+        return self.model_config_dict.get("stream", False)
camel/models/anthropic_model.py
CHANGED

@@ -12,7 +12,9 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel

 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
@@ -59,7 +61,7 @@ class AnthropicModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        from anthropic import Anthropic
+        from anthropic import Anthropic, AsyncAnthropic

         if model_config_dict is None:
             model_config_dict = AnthropicConfig().as_dict()
@@ -69,6 +71,9 @@ class AnthropicModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self.client = Anthropic(api_key=self._api_key, base_url=self._url)
+        self.async_client = AsyncAnthropic(
+            api_key=self._api_key, base_url=self._url
+        )

     def _convert_response_from_anthropic_to_openai(self, response):
         # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
@@ -102,9 +107,11 @@ class AnthropicModel(BaseModelBackend):
             self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter

-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ):
         r"""Run inference of Anthropic chat completion.

@@ -133,6 +140,37 @@ class AnthropicModel(BaseModelBackend):

         return response

+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Run inference of Anthropic chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            ChatCompletion: Response in the OpenAI API format.
+        """
+        from anthropic import NOT_GIVEN
+
+        if messages[0]["role"] == "system":
+            sys_msg = str(messages.pop(0)["content"])
+        else:
+            sys_msg = NOT_GIVEN  # type: ignore[assignment]
+        response = await self.async_client.messages.create(
+            model=self.model_type,
+            system=sys_msg,
+            messages=messages,  # type: ignore[arg-type]
+            **self.model_config_dict,
+        )
+
+        # format response to openai format
+        return self._convert_response_from_anthropic_to_openai(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration is valid for anthropic
         model backends.
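
The system-prompt handling that _arun adds is worth calling out: Anthropic's API takes the system prompt as a separate system= argument rather than as a message, so the leading system message is popped from the OpenAI-style list first. Reduced to plain Python:

messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "Hi"},
]
if messages[0]["role"] == "system":
    sys_msg = str(messages.pop(0)["content"])  # lift it out of the list
else:
    sys_msg = None  # the real code uses anthropic's NOT_GIVEN sentinel
print(sys_msg)   # You are terse.
print(messages)  # only the user turn remains
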
camel/models/azure_openai_model.py
CHANGED

@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import AzureOpenAI, Stream
+from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
@@ -95,6 +96,15 @@ class AzureOpenAIModel(BaseModelBackend):
             max_retries=3,
         )

+        self._async_client = AsyncAzureOpenAI(
+            azure_endpoint=str(self._url),
+            azure_deployment=self.azure_deployment_name,
+            api_version=self.api_version,
+            api_key=self._api_key,
+            timeout=180,
+            max_retries=3,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -107,9 +117,11 @@ class AzureOpenAIModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter

-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Azure OpenAI chat completion.

@@ -129,6 +141,30 @@ class AzureOpenAIModel(BaseModelBackend):
         )
         return response

+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Azure OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            **self.model_config_dict,
+        )
+        return response
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Azure OpenAI API.