camel-ai 0.2.14__py3-none-any.whl → 0.2.15a0__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +362 -237
- camel/datagen/__init__.py +17 -0
- camel/datagen/o1datagen.py +435 -0
- camel/datahubs/models.py +4 -1
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +4 -1
- camel/retrievers/auto_retriever.py +8 -0
- camel/retrievers/vector_retriever.py +6 -3
- camel/societies/role_playing.py +4 -4
- camel/societies/workforce/workforce.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +30 -7
- camel/types/enums.py +7 -1
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +2 -0
- camel/utils/commons.py +38 -1
- {camel_ai-0.2.14.dist-info → camel_ai-0.2.15a0.dist-info}/METADATA +3 -3
- {camel_ai-0.2.14.dist-info → camel_ai-0.2.15a0.dist-info}/RECORD +19 -17
- {camel_ai-0.2.14.dist-info → camel_ai-0.2.15a0.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.14.dist-info → camel_ai-0.2.15a0.dist-info}/WHEEL +0 -0
camel/datagen/__init__.py ADDED
@@ -0,0 +1,17 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+from .o1datagen import O1DataGenerator
+
+__all__ = ['O1DataGenerator']
camel/datagen/o1datagen.py ADDED
@@ -0,0 +1,435 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import json
+from datetime import datetime
+from typing import Annotated, Dict, Optional, Union
+
+from pydantic import BaseModel, Field, confloat
+
+from camel.agents import ChatAgent
+from camel.logger import get_logger
+
+# Get a logger for this module
+logger = get_logger('o1datagenerator')
+
+
+class AgentResponse(BaseModel):
+    r"""Model for structured agent responses.
+
+    A Pydantic model class that represents structured responses from agents,
+    including a similarity score that measures the quality of the response.
+
+    Args:
+        score (float): A similarity score between 0 and 1 that compares the
+            current answer to the correct answer. Must be within the range
+            [0, 1].
+    """
+
+    score: Annotated[float, confloat(ge=0, le=1)] = Field(
+        ...,
+        description="""Similarity score between 0 and 1
+        comparing current answer to correct answer""",
+    )
+
+
+class VerificationResponse(BaseModel):
+    r"""Model for structured verification responses.
+
+    A Pydantic model class that represents verification results from agents,
+    indicating whether an answer is correct or not.
+
+    Args:
+        is_correct (bool): Boolean indicating if the answer is correct.
+    """
+
+    is_correct: bool = Field(
+        ...,
+        description="Boolean indicating if the answer is correct",
+    )
+
+
+class O1DataGenerator:
+    r"""Class for generating and managing data through chat agent interactions.
+
+    handling the generation of data by a chat agent, managing golden answers,
+    and maintaining a solution tree for correct solution steps.
+
+    Args:
+        chat_agent (Optional[ChatAgent]): Optional single agent
+            for both tasks (legacy mode). (default::obj:`None`)
+        generator_agent (Optional[ChatAgent]): Optional specialized agent for
+            answer generation. (default::obj:`None`)
+        verifier_agent (Optional[ChatAgent]): Optional specialized agent for
+            answer verification. (default::obj:`None`)
+        golden_answers (Dict[str, str]): Dictionary containing pre-defined
+            correct answers for validation and comparison. Required for answer
+            verification.
+        search_limit (int): Maximum number of search iterations allowed.
+            (default::obj:`100`)
+    """
+
+    def __init__(
+        self,
+        chat_agent: Optional[ChatAgent] = None,
+        *,
+        generator_agent: Optional[ChatAgent] = None,
+        verifier_agent: Optional[ChatAgent] = None,
+        golden_answers: Dict[str, str],
+        search_limit: int = 100,
+    ):
+        r"""Initialize the O1DataGenerator.
+
+        This constructor supports both single-agent and dual-agent modes:
+        1. Single-agent mode (legacy): Pass a single chat_agent that will be
+           used for both generation and verification.
+        2. Dual-agent mode: Pass separate generator_agent and verifier_agent
+           for specialized tasks.
+
+        Args:
+            chat_agent (Optional[ChatAgent]): Optional single agent for both
+                tasks (legacy mode). (default::obj:`None`)
+            generator_agent (Optional[ChatAgent]): Optional specialized agent
+                for answer generation. (default::obj:`None`)
+            verifier_agent (Optional[ChatAgent]): Optional specialized agent
+                for answer verification. (default::obj:`None`)
+            golden_answers (Dict[str, str]): Dictionary containing pre-defined
+                correct answers for validation and comparison. Required for
+                answer verification.
+            search_limit (int): Maximum number of search iterations allowed.
+                (default::obj:`100`)
+        """
+        if chat_agent is not None:
+            if generator_agent is not None or verifier_agent is not None:
+                raise ValueError(
+                    "Cannot specify both chat_agent \
+                    and generator/verifier agents"
+                )
+            self.generator_agent = chat_agent
+            self.verifier_agent = chat_agent
+        else:
+            if generator_agent is None or verifier_agent is None:
+                raise ValueError(
+                    "Must specify either chat_agent or both generator and "
+                    "verifier agents"
+                )
+            self.generator_agent = generator_agent
+            self.verifier_agent = verifier_agent
+
+        self.golden_answers = golden_answers
+        self.search_limit = search_limit
+        self.solution_tree: Dict[str, Dict[str, Union[str, int]]] = {}
+        logger.info(
+            "O1DataGenerator initialized with search_limit=%d", search_limit
+        )
+
+    def get_answer(self, question: str, context: str = "") -> str:
+        r"""Get an answer from the chat agent for a given question.
+
+        Args:
+            question (str): The question to ask.
+            context (str): Additional context for the question.
+                (default::obj:`""`)
+
+        Returns:
+            str: The generated answer.
+        """
+        prompt = f"""
+        Please think step by step and solve this problem: {question}
+        Existing content: {context}
+        Requirements:
+        1. Analyze the problem requirements
+        2. List the steps to solve the problem
+        3. Execute the solution process
+        4. Provide the final answer
+        Please explain the thought process of each step in detail.
+        """
+        self.generator_agent.reset()
+        response = self.generator_agent.step(prompt)
+        answer = response.msgs[0].content
+        logger.info("AI thought process:\n%s", answer)
+        return answer
+
+    def verify_answer(self, question: str, answer: str) -> bool:
+        r"""Verify if a generated answer is semantically equivalent to
+        the golden answer for a given question.
+
+        Args:
+            question (str): The question being answered.
+            answer (str): The answer to verify.
+
+        Returns:
+            bool: True if the answer matches the golden answer based on
+                semantic equivalence (meaning the core content and meaning are
+                the same, even if the exact wording differs).
+                False in the following cases:
+                - If the provided question doesn't exist in the golden answers
+                - If the answer's meaning differs from the golden answer
+        """
+        golden_answer = self.golden_answers.get(question)
+        if not golden_answer:
+            raise ValueError(
+                f"No golden answer found for question: {question}"
+            )
+
+        prompt = (
+            f"Question: {question}\n"
+            f"Student Answer: {answer}\n"
+            f"Correct Answer: {golden_answer}\n"
+            "Is the student's answer correct? Please respond with 'true' or "
+            "'false' only."
+        )
+        self.verifier_agent.reset()
+        response = self.verifier_agent.step(
+            prompt, response_format=VerificationResponse
+        )
+        is_correct = response.msgs[0].parsed.is_correct  # type:ignore [union-attr]
+        logger.info("Answer verification result: %s", is_correct)
+        return is_correct
+
+    def monte_carlo_tree_search(
+        self, question: str, partial_solution: str = ""
+    ) -> float:
+        r"""Perform Monte Carlo Tree Search to find the best solution.
+
+        Args:
+            question (str): The question to solve.
+            partial_solution (str): The current partial solution.
+                (default::obj:`""`)
+
+        Returns:
+            float: The similarity score between the current
+                solution and golden answer.
+        """
+        if question not in self.golden_answers:
+            raise ValueError(
+                f"No golden answer found for question: {question}"
+            )
+
+        golden_answer = self.golden_answers[question]
+
+        prompt = (
+            f"Please evaluate this solution and "
+            f"give a score between 0-1:\n"
+            f"Question: {question}\n"
+            f"Solution: {partial_solution}\n"
+            f"Correct answer: {golden_answer}\n"
+            f"Return a JSON object with a single field 'score' containing "
+            f"a float between 0 and 1, like this: {{'score': 0.85}}\n"
+        )
+        self.generator_agent.reset()
+        response = self.generator_agent.step(
+            prompt, response_format=AgentResponse
+        )
+        agent_response = response.msgs[0].parsed.score  # type: ignore [union-attr]
+
+        return agent_response
+
+    def binary_search_error(self, question: str, solution: str) -> int:
+        r"""Use binary search to locate the first error in the solution.
+        This method splits the solution into sentences using both English and
+        Chinese sentence delimiters and performs binary search to find the
+        first error.
+
+        Args:
+            question (str): The question being solved.
+            solution (str): The complete solution to analyze.
+
+        Returns:
+            int: The position of the first error found in the solution.
+                Returns -1 if no errors are found (all sentences are correct).
+        """
+        logger.info("Starting binary search for error location")
+        # Split by both English period and Chinese period
+        sentences = [
+            s.strip()
+            for s in solution.replace('。', '.').split('.')
+            if s.strip()
+        ]
+
+        # First check if the entire solution is correct
+        if self.verify_answer(question, solution):
+            return -1
+
+        left, right = 0, len(sentences)
+        while left < right:
+            mid = (left + right) // 2
+            partial_solution = '. '.join(sentences[:mid]) + '.'
+            logger.info("Checking solution fragment:\n%s", partial_solution)
+            # Verify if the current part is correct
+            is_correct = self.verify_answer(question, partial_solution)
+            if is_correct:
+                left = mid + 1
+            else:
+                right = mid
+        logger.info("First error position found: sentence %d", left)
+        return left
+
+    def solve(self, question: str) -> str:
+        r"""Solve a question using a multi-step approach.
+
+        The solution process follows these steps:
+        1. Try to solve directly - if correct, return the solution
+        2. If not correct, use Monte Carlo Tree Search to find a good solution
+        3. If the solution isn't perfect, use binary search to locate errors
+        4. Generate a new solution based on the correct part
+
+        Args:
+            question (str): The question to solve.
+
+        Returns:
+            str: The best solution found.
+        """
+        # 1. Try direct solution first
+        solution = self.get_answer(question)
+        if self.verify_answer(question, solution):
+            logger.info("Initial solution is correct")
+            return solution
+
+        # 2. If direct solution fails, try Monte Carlo Tree Search
+        # to find a solution with high similarity score
+        best_solution = ""
+        best_score: float = 0.0
+        for i in range(self.search_limit):
+            # Generate new answer
+            current_solution = self.get_answer(question, best_solution)
+
+            # Evaluate solution similarity score
+            prompt = (
+                f"Please evaluate this solution and "
+                f"give a score between 0-1:\n"
+                f"Question: {question}\n"
+                f"Solution: {current_solution}\n"
+                f"Correct answer: {self.golden_answers.get(question, '')}\n"
+                f"Return a JSON object with a single field 'score' containing "
+                f"a float between 0 and 1, like this: {{'score': 0.85}}\n"
+            )
+            self.generator_agent.reset()
+            response = self.generator_agent.step(prompt)
+            try:
+                response = self.generator_agent.step(
+                    prompt, response_format=AgentResponse
+                )
+                agent_response = response.msgs[0].parsed.score  # type: ignore [union-attr]
+                score = agent_response
+
+                # Exit early if we find a very good solution (score > 0.9)
+                if score > 0.9:
+                    logger.info(
+                        "Found excellent solution with score %.2f. "
+                        "Stopping search early.",
+                        score,
+                    )
+                    return current_solution
+
+                if score > best_score:
+                    best_score = score
+                    best_solution = current_solution
+
+                logger.info(
+                    "Current search progress: %d/%d, best score: %.2f",
+                    i + 1,
+                    self.search_limit,
+                    best_score,
+                )
+            except Exception as e:
+                logger.error("Error parsing agent response: %s", str(e))
+                continue
+
+        # 3. If the answer is not completely correct,
+        # use binary search to locate the error
+        error_pos = self.binary_search_error(question, best_solution)
+
+        # If no errors found (error_pos == -1), return the current solution
+        if error_pos == -1:
+            logger.info("No specific errors found in the solution")
+            return best_solution
+
+        # 4. Generate new solution based on correct part
+        correct_part = '. '.join(best_solution.split('. ')[:error_pos]) + '.'
+        final_solution = self.get_answer(question, correct_part)
+        self.solution_tree[question] = {
+            "solution": final_solution,
+            "error_position": error_pos,
+        }
+        return final_solution
+
+    def import_qa_from_json(self, data: Union[str, Dict[str, str]]) -> bool:
+        r"""Import question and answer data from either a JSON file or a
+        dictionary.
+
+        Args:
+            data (Union[str, Dict[str, str]]): Either a path to a JSON file
+                containing QA pairs or a dictionary of question-answer pairs.
+                If a string is provided, it's treated as a file path.
+                The expected format is:
+                {"question1": "answer1",
+                "question2": "answer2",
+                ...}
+
+        Returns:
+            bool: True if import was successful, False otherwise.
+        """
+        try:
+            if isinstance(data, str):
+                logger.info("Loading QA pairs from file: %s", data)
+                with open(data, 'r', encoding='utf-8') as f:
+                    qa_data = json.load(f)
+            else:
+                logger.info("Loading QA pairs from provided dictionary")
+                qa_data = data
+
+            # Validate the data format
+            if not isinstance(qa_data, dict):
+                logger.error("Invalid data format: expected dictionary")
+                return False
+
+            # Update golden answers
+            self.golden_answers.update(qa_data)
+            logger.info("Successfully imported %d QA pairs", len(qa_data))
+            return True
+
+        except Exception as e:
+            logger.error("Error importing QA data: %s", str(e))
+            return False
+
+    def export_solutions(self, filepath: str = 'solutions.json') -> None:
+        r"""Export the solution process and results to a JSON file.
+        Exports the solution tree, golden answers,
+        and export timestamp to a JSON file.
+        The exported data includes:
+        - solutions: The solution tree
+          with intermediate steps
+        - golden_answers: The reference answers used for verification
+        - export_time: ISO format timestamp of the export
+
+        Args:
+            filepath (str, optional): Path where the JSON file will be saved.
+                (default::obj:`'solutions.json'`)
+
+        Returns:
+            None: The method writes to a file and logs the result but does not
+                return any value.
+        """
+        export_data = {
+            "solutions": self.solution_tree,
+            "golden_answers": self.golden_answers,
+            "export_time": datetime.now().isoformat(),
+        }
+        try:
+            with open(filepath, 'w', encoding='utf-8') as f:
+                json.dump(export_data, f, ensure_ascii=False, indent=2)
+            logger.info(f"Solutions exported successfully to {filepath}")
+        except Exception as e:
+            logger.error(f"Error exporting solutions: {e!s}")
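For orientation, here is a minimal usage sketch of the new class in single-agent (legacy) mode, based only on the constructor and methods shown above. The system message, question, and golden answer are illustrative, and running it requires a configured model backend:

    from camel.agents import ChatAgent
    from camel.datagen import O1DataGenerator

    # One agent serves as both generator and verifier (legacy mode).
    agent = ChatAgent("You are a careful math tutor.")  # illustrative system message

    datagen = O1DataGenerator(
        agent,
        golden_answers={"What is 2 + 2?": "4"},
        search_limit=10,
    )

    solution = datagen.solve("What is 2 + 2?")  # direct attempt, then search
    datagen.export_solutions("solutions.json")  # solution tree + golden answers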
camel/datahubs/models.py CHANGED
@@ -19,4 +19,7 @@ from pydantic import BaseModel
 class Record(BaseModel):
     id: Optional[str] = None
     metadata: Optional[Dict[str, Any]] = None
-    content: Dict[str, Any]
+    content: Optional[Dict[str, Any]] = None
+
+    class Config:
+        extra = "allow"  # Allow any additional fields
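The practical effect of making `content` optional and allowing extra fields: a `Record` can now be built from dataset rows whose columns don't match the declared fields. A minimal sketch (the extra field names are illustrative):

    from camel.datahubs.models import Record

    # Unknown columns are kept on the model instead of raising a
    # validation error, and `content` may be omitted entirely.
    rec = Record(id="1", question="What is 2 + 2?", answer="4")
    print(rec.model_dump())  # includes the extra question/answer fields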
camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py CHANGED
@@ -109,7 +109,10 @@ class HermesFunctionFormatter(
         format.
         """
         tool_call_dict = {"name": func_name, "arguments": args}
-        return f"<tool_call>\n{tool_call_dict}\n</tool_call>"
+
+        if content:
+            return f"{content}\n<tool_call>\n{tool_call_dict}\n</tool_call>"
+        return f"<tool_call>\n{tool_call_dict}\n</tool_call>"
 
     def format_tool_response(self, func_name: str, result: Any) -> str:
         r"""Formats a tool response message with the given function name and
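The new branch preserves any assistant prose accompanying a tool call instead of dropping it. A hedged sketch of the behavior; the positional order `(content, func_name, args)` is inferred from the variables used in the hunk, and the import path from the file location:

    from camel.messages.conversion.sharegpt.hermes.hermes_function_formatter import (
        HermesFunctionFormatter,
    )

    formatter = HermesFunctionFormatter()
    msg = formatter.format_tool_call(
        "Let me look that up.", "search", {"query": "camel-ai"}
    )
    # Previously the "Let me look that up." prefix was discarded; now:
    # Let me look that up.
    # <tool_call>
    # {'name': 'search', 'arguments': {'query': 'camel-ai'}}
    # </tool_call>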
camel/retrievers/auto_retriever.py CHANGED
@@ -121,6 +121,14 @@ class AutoRetriever:
 
         collection_name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
 
+        # Ensure the first character is either an underscore or a letter for
+        # Milvus
+        if (
+            self.storage_type == StorageType.MILVUS
+            and not collection_name[0].isalpha()
+        ):
+            collection_name = f"_{collection_name}"
+
         return collection_name
 
     def run_vector_retriever(
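Why this matters: Milvus requires collection names to start with a letter or underscore, while the sanitized name can begin with a digit (for example, from a dated filename). A standalone sketch of the rule; the helper name is hypothetical:

    import re

    def milvus_safe_name(content: str) -> str:
        # Same two steps as above: strip non-alphanumerics and cap at 20
        # chars, then prefix an underscore if the first char is not a letter.
        name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
        if not name[0].isalpha():
            name = f"_{name}"
        return name

    print(milvus_safe_name("2024-report.pdf"))  # _2024reportpdf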
camel/retrievers/vector_retriever.py CHANGED
@@ -161,13 +161,16 @@ class VectorRetriever(BaseRetriever):
         # content path, chunk metadata, and chunk text
         for vector, chunk in zip(batch_vectors, batch_chunks):
             if isinstance(content, str):
-                content_path_info = {"content path": content}
+                content_path_info = {"content path": content[:100]}
             elif isinstance(content, IOBase):
                 content_path_info = {"content path": "From file bytes"}
             elif isinstance(content, Element):
                 content_path_info = {
-                    "content path": content.metadata.file_directory
-
+                    "content path": content.metadata.file_directory[
+                        :100
+                    ]
+                    if content.metadata.file_directory
+                    else ""
                 }
 
             chunk_metadata = {"metadata": chunk.metadata.to_dict()}
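The retriever now stores at most the first 100 characters of the source path (and guards against a missing `file_directory`), keeping chunk metadata small for vector storages with field-size limits. A one-line illustration:

    content = "https://example.com/" + "a" * 200
    content_path_info = {"content path": content[:100]}  # at most 100 chars stored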
camel/societies/role_playing.py CHANGED
@@ -509,8 +509,8 @@ class RolePlaying:
         # step and once in role play), and the model generates only one
         # response when multi-response support is enabled.
         if (
-            'n' in self.user_agent.model_config_dict.keys()
-            and self.user_agent.model_config_dict['n'] > 1
+            'n' in self.user_agent.model_backend.model_config_dict.keys()
+            and self.user_agent.model_backend.model_config_dict['n'] > 1
         ):
             self.user_agent.record_message(user_msg)
 
@@ -532,8 +532,8 @@ class RolePlaying:
         # step and once in role play), and the model generates only one
         # response when multi-response support is enabled.
         if (
-            'n' in self.assistant_agent.model_config_dict.keys()
-            and self.assistant_agent.model_config_dict['n'] > 1
+            'n' in self.assistant_agent.model_backend.model_config_dict.keys()
+            and self.assistant_agent.model_backend.model_config_dict['n'] > 1
         ):
             self.assistant_agent.record_message(assistant_msg)
 
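Both hunks track this release's ChatAgent refactor: `model_config_dict` now lives on the agent's model backend rather than on the agent itself. A runnable sketch of the new attribute path, using stub classes as stand-ins for the real agent:

    class _Backend:  # stand-in for the agent's model backend
        model_config_dict = {'n': 2}

    class _Agent:  # stand-in for ChatAgent
        model_backend = _Backend()

    agent = _Agent()
    cfg = agent.model_backend.model_config_dict
    if 'n' in cfg and cfg['n'] > 1:
        print("multi-response enabled; record the chosen message manually")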
camel/societies/workforce/workforce.py CHANGED
@@ -251,7 +251,7 @@ class Workforce(BaseNode):
             additional_info = "A Workforce node"
         elif isinstance(child, SingleAgentWorker):
             additional_info = "tools: " + (
-                ", ".join(child.worker.
+                ", ".join(child.worker.tool_dict.keys())
             )
         elif isinstance(child, RolePlayingWorker):
             additional_info = "A Role playing node"
@@ -369,7 +369,7 @@ class Workforce(BaseNode):
             model_config_dict=model_config_dict,
         )
 
-        return ChatAgent(worker_sys_msg, model=model, tools=function_list)
+        return ChatAgent(worker_sys_msg, model=model, tools=function_list)  # type: ignore[arg-type]
 
     async def _get_returned_task(self) -> Task:
         r"""Get the task that's published by this node and just get returned
camel/storages/graph_storages/nebula_graph.py CHANGED
@@ -12,9 +12,20 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
+import logging
+import re
 import time
 from typing import TYPE_CHECKING, Any, Dict, List, Tuple
 
+from camel.storages.graph_storages.base import BaseGraphStorage
+from camel.storages.graph_storages.graph_element import (
+    GraphElement,
+)
+from camel.utils.commons import dependencies_required
+
+logger = logging.getLogger(__name__)
+
+
 if TYPE_CHECKING:
     from nebula3.data.ResultSet import (  # type: ignore[import-untyped]
         ResultSet,
@@ -24,11 +35,6 @@ if TYPE_CHECKING:
         Session,
     )
 
-from camel.storages.graph_storages.base import BaseGraphStorage
-from camel.storages.graph_storages.graph_element import (
-    GraphElement,
-)
-from camel.utils.commons import dependencies_required
 
 MAX_RETRIES = 5
 RETRY_DELAY = 3
@@ -178,11 +184,21 @@ class NebulaGraph(BaseGraphStorage):
         """
         nodes = self._extract_nodes(graph_elements)
         for node in nodes:
-            self.add_node(node['id'], node['type'])
+            try:
+                self.add_node(node['id'], node['type'])
+            except Exception as e:
+                logger.warning(f"Failed to add node {node}. Error: {e}")
+                continue
 
         relationships = self._extract_relationships(graph_elements)
         for rel in relationships:
-            self.add_triplet(rel['subj']['id'], rel['obj']['id'], rel['type'])
+            try:
+                self.add_triplet(
+                    rel['subj']['id'], rel['obj']['id'], rel['type']
+                )
+            except Exception as e:
+                logger.warning(f"Failed to add relationship {rel}. Error: {e}")
+                continue
 
     def ensure_edge_type_exists(
         self,
@@ -253,6 +269,9 @@ class NebulaGraph(BaseGraphStorage):
             node_id (str): The ID of the node.
             tag_name (str): The tag name of the node.
         """
+        node_id = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', node_id)
+        tag_name = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', tag_name)
+
         self.ensure_tag_exists(tag_name)
 
         # Insert node without properties
@@ -409,6 +428,10 @@ class NebulaGraph(BaseGraphStorage):
             obj (str): The identifier for the object entity.
             rel (str): The relationship between the subject and object.
         """
+        subj = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', subj)
+        obj = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', obj)
+        rel = re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', rel)
+
         self.ensure_tag_exists(subj)
         self.ensure_tag_exists(obj)
         self.ensure_edge_type_exists(rel)
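The same sanitization pattern is applied wherever user-supplied strings become NebulaGraph identifiers: the character class keeps ASCII alphanumerics plus CJK characters (\u4e00-\u9fa5) and strips everything else, so names with spaces or punctuation become valid vertex IDs, tags, and edge types. A quick illustration:

    import re

    IDENT = r'[^a-zA-Z0-9\u4e00-\u9fa5]'  # local alias for the pattern above
    print(re.sub(IDENT, '', 'CAMEL-AI (agents)'))  # CAMELAIagents
    print(re.sub(IDENT, '', '图 数据库!'))  # 图数据库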
camel/types/enums.py CHANGED
@@ -154,9 +154,15 @@ class ModelType(UnifiedModelType, Enum):
             return self.value
         return "gpt-4o-mini"
 
+    @property
+    def support_native_structured_output(self) -> bool:
+        return self.is_openai
+
     @property
     def support_native_tool_calling(self) -> bool:
-        return any(
+        return any(
+            [self.is_openai, self.is_gemini, self.is_mistral, self.is_qwen]
+        )
 
     @property
     def is_openai(self) -> bool:
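Together with the `UnifiedModelType` default below, every model type now exposes a `support_native_structured_output` flag that is False unless the type is an OpenAI model. A minimal check, assuming an existing ModelType member:

    from camel.types import ModelType

    print(ModelType.GPT_4O_MINI.support_native_structured_output)  # True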
camel/types/unified_model_type.py CHANGED
@@ -113,6 +113,11 @@ class UnifiedModelType(str):
         r"""Returns whether the model is a Qwen model."""
         return True
 
+    @property
+    def support_native_structured_output(self) -> bool:
+        r"""Returns whether the model supports native structured output."""
+        return False
+
     @property
     def support_native_tool_calling(self) -> bool:
         r"""Returns whether the model supports native tool calling."""
camel/utils/__init__.py CHANGED
@@ -21,6 +21,7 @@ from .commons import (
     dependencies_required,
     download_tasks,
     func_string_to_callable,
+    generate_prompt_for_structured_output,
     get_first_int,
     get_prompt_template_key_words,
     get_pydantic_major_version,
@@ -78,4 +79,5 @@ __all__ = [
     "track_agent",
     "handle_http_error",
     "get_pydantic_model",
+    "generate_prompt_for_structured_output",
 ]