camel-ai 0.1.5.9__py3-none-any.whl → 0.1.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +246 -33
- camel/agents/critic_agent.py +17 -1
- camel/agents/deductive_reasoner_agent.py +12 -0
- camel/agents/embodied_agent.py +19 -5
- camel/agents/knowledge_graph_agent.py +22 -3
- camel/agents/role_assignment_agent.py +12 -0
- camel/agents/search_agent.py +12 -0
- camel/agents/task_agent.py +15 -0
- camel/configs/__init__.py +2 -9
- camel/configs/anthropic_config.py +5 -6
- camel/configs/base_config.py +50 -4
- camel/configs/gemini_config.py +69 -18
- camel/configs/groq_config.py +6 -20
- camel/configs/litellm_config.py +2 -8
- camel/configs/mistral_config.py +17 -20
- camel/configs/ollama_config.py +6 -8
- camel/configs/openai_config.py +12 -23
- camel/configs/vllm_config.py +7 -8
- camel/configs/zhipuai_config.py +5 -11
- camel/human.py +1 -1
- camel/loaders/__init__.py +2 -0
- camel/loaders/firecrawl_reader.py +213 -0
- camel/memories/agent_memories.py +1 -4
- camel/memories/blocks/chat_history_block.py +6 -2
- camel/memories/blocks/vectordb_block.py +3 -1
- camel/memories/context_creators/score_based.py +6 -6
- camel/memories/records.py +9 -7
- camel/messages/base.py +1 -0
- camel/models/open_source_model.py +2 -2
- camel/prompts/__init__.py +7 -0
- camel/prompts/image_craft.py +34 -0
- camel/prompts/multi_condition_image_craft.py +34 -0
- camel/prompts/task_prompt_template.py +6 -0
- camel/responses/agent_responses.py +4 -3
- camel/retrievers/auto_retriever.py +0 -2
- camel/societies/babyagi_playing.py +6 -4
- camel/societies/role_playing.py +16 -8
- camel/storages/graph_storages/graph_element.py +10 -14
- camel/storages/vectordb_storages/base.py +24 -13
- camel/storages/vectordb_storages/milvus.py +1 -1
- camel/storages/vectordb_storages/qdrant.py +2 -3
- camel/tasks/__init__.py +22 -0
- camel/tasks/task.py +408 -0
- camel/tasks/task_prompt.py +65 -0
- camel/toolkits/__init__.py +3 -0
- camel/toolkits/base.py +3 -1
- camel/toolkits/dalle_toolkit.py +146 -0
- camel/toolkits/github_toolkit.py +16 -32
- camel/toolkits/google_maps_toolkit.py +2 -1
- camel/toolkits/open_api_toolkit.py +1 -2
- camel/toolkits/openai_function.py +2 -7
- camel/types/enums.py +6 -2
- camel/utils/__init__.py +14 -2
- camel/utils/commons.py +167 -2
- camel/utils/constants.py +3 -0
- camel/workforce/__init__.py +23 -0
- camel/workforce/base.py +50 -0
- camel/workforce/manager_node.py +299 -0
- camel/workforce/role_playing_node.py +168 -0
- camel/workforce/single_agent_node.py +77 -0
- camel/workforce/task_channel.py +173 -0
- camel/workforce/utils.py +97 -0
- camel/workforce/worker_node.py +115 -0
- camel/workforce/workforce.py +49 -0
- camel/workforce/workforce_prompt.py +125 -0
- {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/METADATA +5 -2
- {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/RECORD +69 -52
- {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/WHEEL +0 -0
camel/loaders/firecrawl_reader.py  ADDED
@@ -0,0 +1,213 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+import os
+from typing import Any, Dict, Optional
+
+from pydantic import BaseModel
+
+
+class Firecrawl:
+    r"""Firecrawl allows you to turn entire websites into LLM-ready markdown.
+
+    Args:
+        api_key (Optional[str]): API key for authenticating with the Firecrawl
+            API.
+        api_url (Optional[str]): Base URL for the Firecrawl API.
+
+    References:
+        https://docs.firecrawl.dev/introduction
+    """
+
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        api_url: Optional[str] = None,
+    ) -> None:
+        from firecrawl import FirecrawlApp
+
+        self._api_key = api_key or os.environ.get("FIRECRAWL_API_KEY")
+        self._api_url = api_url or os.environ.get("FIRECRAWL_API_URL")
+
+        self.app = FirecrawlApp(api_key=self._api_key, api_url=self._api_url)
+
+    def crawl(
+        self,
+        url: str,
+        params: Optional[Dict[str, Any]] = None,
+        wait_until_done: bool = True,
+        **kwargs: Any,
+    ) -> Any:
+        r"""Crawl a URL and all accessible subpages. Customize the crawl by
+        setting different parameters, and receive the full response or a job
+        ID based on the specified options.
+
+        Args:
+            url (str): The URL to crawl.
+            params (Optional[Dict[str, Any]]): Additional parameters for the
+                crawl request. Defaults to `None`.
+            wait_until_done (bool): Whether to wait until the crawl job is
+                completed. Defaults to `True`.
+            **kwargs (Any): Additional keyword arguments, such as
+                `poll_interval`, `idempotency_key`, etc.
+
+        Returns:
+            Any: The list content of the URL if `wait_until_done` is True;
+                otherwise, a string job ID.
+
+        Raises:
+            RuntimeError: If the crawling process fails.
+        """
+
+        try:
+            crawl_response = self.app.crawl_url(
+                url=url,
+                params=params,
+                **kwargs,
+                wait_until_done=wait_until_done,
+            )
+            return (
+                crawl_response
+                if wait_until_done
+                else crawl_response.get("jobId")
+            )
+        except Exception as e:
+            raise RuntimeError(f"Failed to crawl the URL: {e}")
+
+    def markdown_crawl(self, url: str) -> str:
+        r"""Crawl a URL and all accessible subpages and return the content in
+        Markdown format.
+
+        Args:
+            url (str): The URL to crawl.
+
+        Returns:
+            str: The content of the URL in Markdown format.
+
+        Raises:
+            RuntimeError: If the crawling process fails.
+        """
+
+        try:
+            crawl_result = self.app.crawl_url(url=url)
+            if not isinstance(crawl_result, list):
+                raise ValueError("Unexpected response format")
+            markdown_contents = [
+                result.get('markdown', '') for result in crawl_result
+            ]
+            return '\n'.join(markdown_contents)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to crawl the URL and retrieve markdown: {e}"
+            )
+
+    def check_crawl_job(self, job_id: str) -> Dict:
+        r"""Check the status of a crawl job.
+
+        Args:
+            job_id (str): The ID of the crawl job.
+
+        Returns:
+            Dict: The response including status of the crawl job.
+
+        Raises:
+            RuntimeError: If the check process fails.
+        """
+
+        try:
+            return self.app.check_crawl_status(job_id)
+        except Exception as e:
+            raise RuntimeError(f"Failed to check the crawl job status: {e}")
+
+    def scrape(
+        self,
+        url: str,
+        params: Optional[Dict[str, Any]] = None,
+    ) -> Dict:
+        r"""To scrape a single URL. This function supports advanced scraping
+        by setting different parameters and returns the full scraped data as a
+        dictionary.
+
+        Reference: https://docs.firecrawl.dev/advanced-scraping-guide
+
+        Args:
+            url (str): The URL to read.
+            params (Optional[Dict[str, Any]]): Additional parameters for the
+                scrape request.
+
+        Returns:
+            Dict: The scraped data.
+
+        Raises:
+            RuntimeError: If the scrape process fails.
+        """
+        try:
+            return self.app.scrape_url(url=url, params=params)
+        except Exception as e:
+            raise RuntimeError(f"Failed to scrape the URL: {e}")
+
+    def structured_scrape(self, url: str, output_schema: BaseModel) -> Dict:
+        r"""Use LLM to extract structured data from given URL.
+
+        Args:
+            url (str): The URL to read.
+            output_schema (BaseModel): A pydantic model
+                that includes value types and field descriptions used to
+                generate a structured response by LLM. This schema helps
+                in defining the expected output format.
+
+        Returns:
+            Dict: The content of the URL.
+
+        Raises:
+            RuntimeError: If the scrape process fails.
+        """
+        try:
+            data = self.app.scrape_url(
+                url,
+                {
+                    'extractorOptions': {
+                        "mode": "llm-extraction",
+                        "extractionPrompt": "Based on the information on "
+                        "the page, extract the information from the schema.",
+                        'extractionSchema': output_schema.model_json_schema(),
+                    },
+                    'pageOptions': {'onlyMainContent': True},
+                },
+            )
+            return data.get("llm_extraction", {})
+        except Exception as e:
+            raise RuntimeError(f"Failed to perform structured scrape: {e}")
+
+    def tidy_scrape(self, url: str) -> str:
+        r"""Only return the main content of the page, excluding headers,
+        navigation bars, footers, etc. in Markdown format.
+
+        Args:
+            url (str): The URL to read.
+
+        Returns:
+            str: The markdown content of the URL.
+
+        Raises:
+            RuntimeError: If the scrape process fails.
+        """
+
+        try:
+            scrape_result = self.app.scrape_url(
+                url, {'pageOptions': {'onlyMainContent': True}}
+            )
+            return scrape_result.get("markdown", "")
+        except Exception as e:
+            raise RuntimeError(f"Failed to perform tidy scrape: {e}")
camel/memories/agent_memories.py  CHANGED
@@ -16,10 +16,7 @@ from typing import List, Optional
 
 from camel.memories.base import AgentMemory, BaseContextCreator
 from camel.memories.blocks import ChatHistoryBlock, VectorDBBlock
-from camel.memories.records import (
-    ContextRecord,
-    MemoryRecord,
-)
+from camel.memories.records import ContextRecord, MemoryRecord
 from camel.storages import BaseKeyValueStorage, BaseVectorStorage
 from camel.types import OpenAIBackendRole
 
camel/memories/blocks/chat_history_block.py  CHANGED
@@ -84,11 +84,15 @@ class ChatHistoryBlock(MemoryBlock):
         for record in reversed(chat_records):
             if record.role_at_backend == OpenAIBackendRole.SYSTEM:
                 # System messages are always kept.
-                output_records.append(
+                output_records.append(
+                    ContextRecord(memory_record=record, score=1.0)
+                )
             else:
                 # Other messages' score drops down gradually
                 score *= self.keep_rate
-                output_records.append(
+                output_records.append(
+                    ContextRecord(memory_record=record, score=score)
+                )
 
         output_records.reverse()
         return output_records
camel/memories/blocks/vectordb_block.py  CHANGED
@@ -67,7 +67,9 @@ class VectorDBBlock(MemoryBlock):
             vector database based on similarity to :obj:`current_state`.
         """
         query_vector = self.embedding.embed(keyword)
-        results = self.storage.query(
+        results = self.storage.query(
+            VectorDBQuery(query_vector=query_vector, top_k=limit)
+        )
         return [
             ContextRecord(
                 memory_record=MemoryRecord.from_dict(result.record.payload),
camel/memories/context_creators/score_based.py  CHANGED
@@ -11,17 +11,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from dataclasses import dataclass
 from typing import List, Tuple
 
+from pydantic import BaseModel
+
 from camel.memories.base import BaseContextCreator
 from camel.memories.records import ContextRecord
 from camel.messages import OpenAIMessage
 from camel.utils import BaseTokenCounter
 
 
-
-class _ContextUnit:
+class _ContextUnit(BaseModel):
     idx: int
     record: ContextRecord
     num_tokens: int
@@ -88,9 +88,9 @@ class ScoreBasedContextCreator(BaseContextCreator):
                 uuid_set.add(record.memory_record.uuid)
                 context_units.append(
                     _ContextUnit(
-                        idx,
-                        record,
-                        self.token_counter.count_tokens_from_messages(
+                        idx=idx,
+                        record=record,
+                        num_tokens=self.token_counter.count_tokens_from_messages(
                             [record.memory_record.to_openai_message()]
                         ),
                     )
camel/memories/records.py  CHANGED
@@ -12,16 +12,17 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
-from dataclasses import asdict
+from dataclasses import asdict
 from typing import Any, ClassVar, Dict
 from uuid import UUID, uuid4
 
+from pydantic import BaseModel, ConfigDict, Field
+
 from camel.messages import BaseMessage, FunctionCallingMessage, OpenAIMessage
 from camel.types import OpenAIBackendRole
 
 
-
-class MemoryRecord:
+class MemoryRecord(BaseModel):
     r"""The basic message storing unit in the CAMEL memory system.
 
     Attributes:
@@ -38,10 +39,12 @@ class MemoryRecord:
             will be an empty `Dict`.
     """
 
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
     message: BaseMessage
     role_at_backend: OpenAIBackendRole
-    uuid: UUID =
-    extra_info: Dict[str, str] =
+    uuid: UUID = Field(default_factory=uuid4)
+    extra_info: Dict[str, str] = Field(default_factory=dict)
 
     _MESSAGE_TYPES: ClassVar[dict] = {
         "BaseMessage": BaseMessage,
@@ -85,8 +88,7 @@ class MemoryRecord:
         return self.message.to_openai_message(self.role_at_backend)
 
 
-
-class ContextRecord:
+class ContextRecord(BaseModel):
     r"""The result of memory retrieving."""
 
     memory_record: MemoryRecord
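`MemoryRecord` and `ContextRecord` are now pydantic `BaseModel`s, so `uuid` and `extra_info` pick up their defaults from `Field(default_factory=...)` and records are built with keyword arguments that pydantic validates on creation. A small sketch of the new construction; the `BaseMessage` contents are purely illustrative:

from camel.memories.records import ContextRecord, MemoryRecord
from camel.messages import BaseMessage
from camel.types import OpenAIBackendRole, RoleType

message = BaseMessage(
    role_name="assistant",
    role_type=RoleType.ASSISTANT,
    meta_dict=None,
    content="Hello from CAMEL.",
)

# uuid and extra_info fall back to their default factories (uuid4 / dict).
record = MemoryRecord(
    message=message,
    role_at_backend=OpenAIBackendRole.ASSISTANT,
)

# ContextRecord is a BaseModel as well, so it also takes keyword arguments.
context = ContextRecord(memory_record=record, score=1.0)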
camel/messages/base.py  CHANGED
camel/models/open_source_model.py  CHANGED
@@ -95,7 +95,7 @@ class OpenSourceModel(BaseModelBackend):
 
         # Replace `model_config_dict` with only the params to be
         # passed to OpenAI API
-        self.model_config_dict = self.model_config_dict["api_params"]
+        self.model_config_dict = self.model_config_dict["api_params"]
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -152,7 +152,7 @@ class OpenSourceModel(BaseModelBackend):
                 ":obj:`model_path` or :obj:`server_url` missing."
             )
 
-        for param in self.model_config_dict["api_params"]
+        for param in self.model_config_dict["api_params"]:
             if param not in OPENAI_API_PARAMS:
                 raise ValueError(
                     f"Unexpected argument `{param}` is "
camel/prompts/__init__.py  CHANGED
@@ -18,7 +18,11 @@ from .evaluation import EvaluationPromptTemplateDict
 from .generate_text_embedding_data import (
     GenerateTextEmbeddingDataPromptTemplateDict,
 )
+from .image_craft import ImageCraftPromptTemplateDict
 from .misalignment import MisalignmentPromptTemplateDict
+from .multi_condition_image_craft import (
+    MultiConditionImageCraftPromptTemplateDict,
+)
 from .object_recognition import ObjectRecognitionPromptTemplateDict
 from .prompt_templates import PromptTemplateGenerator
 from .role_description_prompt_template import RoleDescriptionPromptTemplateDict
@@ -42,5 +46,8 @@ __all__ = [
     'SolutionExtractionPromptTemplateDict',
     'GenerateTextEmbeddingDataPromptTemplateDict',
     'ObjectRecognitionPromptTemplateDict',
+    'ImageCraftPromptTemplateDict',
+    'MultiConditionImageCraftPromptTemplateDict',
+    'DescriptionVideoPromptTemplateDict',
     'VideoDescriptionPromptTemplateDict',
 ]
camel/prompts/image_craft.py  ADDED
@@ -0,0 +1,34 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any
+
+from camel.prompts import TextPrompt, TextPromptDict
+from camel.types import RoleType
+
+
+class ImageCraftPromptTemplateDict(TextPromptDict):
+    ASSISTANT_PROMPT = TextPrompt(
+        """You are tasked with creating an original image based on
+        the provided descriptive captions. Use your imagination
+        and artistic skills to visualize and draw the images and
+        explain your thought process."""
+    )
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.update(
+            {
+                RoleType.ASSISTANT: self.ASSISTANT_PROMPT,
+            }
+        )
camel/prompts/multi_condition_image_craft.py  ADDED
@@ -0,0 +1,34 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any
+
+from camel.prompts import TextPrompt, TextPromptDict
+from camel.types import RoleType
+
+
+class MultiConditionImageCraftPromptTemplateDict(TextPromptDict):
+    ASSISTANT_PROMPT = TextPrompt(
+        """You are tasked with creating an image based on
+        the provided text and images conditions. Please use your
+        imagination and artistic capabilities to visualize and
+        draw the images and explain what you are thinking about."""
+    )
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.update(
+            {
+                RoleType.ASSISTANT: self.ASSISTANT_PROMPT,
+            }
+        )
camel/prompts/task_prompt_template.py  CHANGED
@@ -24,7 +24,11 @@ from camel.prompts.evaluation import (
 from camel.prompts.generate_text_embedding_data import (
     GenerateTextEmbeddingDataPromptTemplateDict,
 )
+from camel.prompts.image_craft import ImageCraftPromptTemplateDict
 from camel.prompts.misalignment import MisalignmentPromptTemplateDict
+from camel.prompts.multi_condition_image_craft import (
+    MultiConditionImageCraftPromptTemplateDict,
+)
 from camel.prompts.object_recognition import (
     ObjectRecognitionPromptTemplateDict,
 )
@@ -64,6 +68,8 @@ class TaskPromptTemplateDict(Dict[Any, TextPromptDict]):
                 TaskType.ROLE_DESCRIPTION: RoleDescriptionPromptTemplateDict(),
                 TaskType.OBJECT_RECOGNITION: ObjectRecognitionPromptTemplateDict(),  # noqa: E501
                 TaskType.GENERATE_TEXT_EMBEDDING_DATA: GenerateTextEmbeddingDataPromptTemplateDict(),  # noqa: E501
+                TaskType.IMAGE_CRAFT: ImageCraftPromptTemplateDict(),
+                TaskType.MULTI_CONDITION_IMAGE_CRAFT: MultiConditionImageCraftPromptTemplateDict(),  # noqa: E501
                 TaskType.VIDEO_DESCRIPTION: VideoDescriptionPromptTemplateDict(),  # noqa: E501
             }
         )
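The two new prompt dictionaries are registered under `TaskType.IMAGE_CRAFT` and `TaskType.MULTI_CONDITION_IMAGE_CRAFT` (the `camel/types/enums.py` change in this release presumably adds the matching members). A short lookup sketch, assuming `TaskPromptTemplateDict` is importable from `camel.prompts` as in earlier releases:

from camel.prompts import TaskPromptTemplateDict
from camel.types import RoleType, TaskType

# The registry maps each TaskType to a TextPromptDict keyed by RoleType.
templates = TaskPromptTemplateDict()
assistant_prompt = templates[TaskType.IMAGE_CRAFT][RoleType.ASSISTANT]

print(assistant_prompt)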
camel/responses/agent_responses.py  CHANGED
@@ -11,14 +11,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from dataclasses import dataclass
 from typing import Any, Dict, List
 
+from pydantic import BaseModel, ConfigDict
+
 from camel.messages import BaseMessage
 
 
-
-class ChatAgentResponse:
+class ChatAgentResponse(BaseModel):
     r"""Response of a ChatAgent.
 
     Attributes:
@@ -31,6 +31,7 @@ class ChatAgentResponse:
         info (Dict[str, Any]): Extra information about the chat message.
     """
 
+    model_config = ConfigDict(arbitrary_types_allowed=True)
     msgs: List[BaseMessage]
     terminated: bool
     info: Dict[str, Any]
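Since `ChatAgentResponse` is now a pydantic `BaseModel`, the old positional construction such as `ChatAgentResponse([assistant_msg], terminated, info)` no longer applies, which is why every call site in the `babyagi_playing.py` and `role_playing.py` hunks that follow switches to keyword arguments. A minimal sketch of the new construction, assuming `ChatAgentResponse` is re-exported from `camel.responses`; the message content is illustrative:

from camel.messages import BaseMessage
from camel.responses import ChatAgentResponse
from camel.types import RoleType

msg = BaseMessage(
    role_name="assistant",
    role_type=RoleType.ASSISTANT,
    meta_dict=None,
    content="Task completed.",
)

# Fields must now be passed by keyword; pydantic validates them on creation.
response = ChatAgentResponse(msgs=[msg], terminated=False, info={})
print(response.msgs[0].content, response.terminated)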
camel/societies/babyagi_playing.py  CHANGED
@@ -270,10 +270,12 @@ class BabyAGI:
                     "All tasks are solved"
                 )
             return ChatAgentResponse(
-                [assistant_msg],
+                msgs=[assistant_msg],
+                terminated=terminated,
+                info=assistant_response.info,
             )
         return ChatAgentResponse(
-            [assistant_msg],
-            assistant_response.terminated,
-            assistant_response.info,
+            msgs=[assistant_msg],
+            terminated=assistant_response.terminated,
+            info=assistant_response.info,
         )
camel/societies/role_playing.py  CHANGED
@@ -478,9 +478,11 @@
         user_response = self.user_agent.step(assistant_msg)
         if user_response.terminated or user_response.msgs is None:
             return (
-                ChatAgentResponse([], False, {}),
+                ChatAgentResponse(msgs=[], terminated=False, info={}),
                 ChatAgentResponse(
-                    [],
+                    msgs=[],
+                    terminated=user_response.terminated,
+                    info=user_response.info,
                 ),
             )
         user_msg = self._reduce_message_options(user_response.msgs)
@@ -490,20 +492,26 @@
         if assistant_response.terminated or assistant_response.msgs is None:
             return (
                 ChatAgentResponse(
-                    [],
+                    msgs=[],
+                    terminated=assistant_response.terminated,
+                    info=assistant_response.info,
+                ),
+                ChatAgentResponse(
+                    msgs=[user_msg], terminated=False, info=user_response.info
                 ),
-                ChatAgentResponse([user_msg], False, user_response.info),
             )
         assistant_msg = self._reduce_message_options(assistant_response.msgs)
         self.assistant_agent.record_message(assistant_msg)
 
         return (
             ChatAgentResponse(
-                [assistant_msg],
-                assistant_response.terminated,
-                assistant_response.info,
+                msgs=[assistant_msg],
+                terminated=assistant_response.terminated,
+                info=assistant_response.info,
             ),
             ChatAgentResponse(
-                [user_msg],
+                msgs=[user_msg],
+                terminated=user_response.terminated,
+                info=user_response.info,
             ),
         )
camel/storages/graph_storages/graph_element.py  CHANGED
@@ -13,17 +13,17 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations
 
-from dataclasses import dataclass, field
 from typing import List, Union
 
+from pydantic import BaseModel, ConfigDict, Field
+
 try:
     from unstructured.documents.elements import Element
 except ImportError:
     Element = None
 
 
-
-class Node:
+class Node(BaseModel):
     r"""Represents a node in a graph with associated properties.
 
     Attributes:
@@ -35,11 +35,10 @@ class Node:
 
     id: Union[str, int]
     type: str = "Node"
-    properties: dict =
+    properties: dict = Field(default_factory=dict)
 
 
-
-class Relationship:
+class Relationship(BaseModel):
     r"""Represents a directed relationship between two nodes in a graph.
 
     Attributes:
@@ -53,24 +52,21 @@ class Relationship:
     subj: Node
     obj: Node
     type: str = "Relationship"
-    properties: dict =
+    properties: dict = Field(default_factory=dict)
 
 
-
-class GraphElement:
+class GraphElement(BaseModel):
     r"""A graph element with lists of nodes and relationships.
 
     Attributes:
         nodes (List[Node]): A list of nodes in the graph.
         relationships (List[Relationship]): A list of relationships in the
-
+            graph.
        source (Element): The element from which the graph information is
-
+            derived.
     """
 
-
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)
 
     nodes: List[Node]
     relationships: List[Relationship]