camel-ai 0.2.13__py3-none-any.whl → 0.2.15a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +362 -237
- camel/datagen/__init__.py +17 -0
- camel/datagen/o1datagen.py +435 -0
- camel/datahubs/models.py +4 -1
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +4 -1
- camel/models/openai_model.py +23 -11
- camel/retrievers/auto_retriever.py +8 -0
- camel/retrievers/vector_retriever.py +6 -3
- camel/schemas/__init__.py +2 -1
- camel/schemas/base.py +2 -4
- camel/schemas/outlines_converter.py +249 -0
- camel/societies/role_playing.py +4 -4
- camel/societies/workforce/workforce.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +30 -7
- camel/types/enums.py +10 -1
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +2 -0
- camel/utils/commons.py +42 -23
- camel/utils/token_counting.py +1 -0
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15a0.dist-info}/METADATA +6 -6
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15a0.dist-info}/RECORD +24 -21
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15a0.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15a0.dist-info}/WHEEL +0 -0
camel/datagen/__init__.py
ADDED
@@ -0,0 +1,17 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+from .o1datagen import O1DataGenerator
+
+__all__ = ['O1DataGenerator']
camel/datagen/o1datagen.py
ADDED
@@ -0,0 +1,435 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import json
+from datetime import datetime
+from typing import Annotated, Dict, Optional, Union
+
+from pydantic import BaseModel, Field, confloat
+
+from camel.agents import ChatAgent
+from camel.logger import get_logger
+
+# Get a logger for this module
+logger = get_logger('o1datagenerator')
+
+
+class AgentResponse(BaseModel):
+    r"""Model for structured agent responses.
+
+    A Pydantic model class that represents structured responses from agents,
+    including a similarity score that measures the quality of the response.
+
+    Args:
+        score (float): A similarity score between 0 and 1 that compares the
+            current answer to the correct answer. Must be within the range
+            [0, 1].
+    """
+
+    score: Annotated[float, confloat(ge=0, le=1)] = Field(
+        ...,
+        description="""Similarity score between 0 and 1
+            comparing current answer to correct answer""",
+    )
+
+
+class VerificationResponse(BaseModel):
+    r"""Model for structured verification responses.
+
+    A Pydantic model class that represents verification results from agents,
+    indicating whether an answer is correct or not.
+
+    Args:
+        is_correct (bool): Boolean indicating if the answer is correct.
+    """
+
+    is_correct: bool = Field(
+        ...,
+        description="Boolean indicating if the answer is correct",
+    )
+
+
+class O1DataGenerator:
+    r"""Class for generating and managing data through chat agent interactions.
+
+    handling the generation of data by a chat agent, managing golden answers,
+    and maintaining a solution tree for correct solution steps.
+
+    Args:
+        chat_agent (Optional[ChatAgent]): Optional single agent
+            for both tasks (legacy mode). (default::obj:`None`)
+        generator_agent (Optional[ChatAgent]): Optional specialized agent for
+            answer generation. (default::obj:`None`)
+        verifier_agent (Optional[ChatAgent]): Optional specialized agent for
+            answer verification. (default::obj:`None`)
+        golden_answers (Dict[str, str]): Dictionary containing pre-defined
+            correct answers for validation and comparison. Required for answer
+            verification.
+        search_limit (int): Maximum number of search iterations allowed.
+            (default::obj:`100`)
+    """
+
+    def __init__(
+        self,
+        chat_agent: Optional[ChatAgent] = None,
+        *,
+        generator_agent: Optional[ChatAgent] = None,
+        verifier_agent: Optional[ChatAgent] = None,
+        golden_answers: Dict[str, str],
+        search_limit: int = 100,
+    ):
+        r"""Initialize the O1DataGenerator.
+
+        This constructor supports both single-agent and dual-agent modes:
+        1. Single-agent mode (legacy): Pass a single chat_agent that will be
+           used for both generation and verification.
+        2. Dual-agent mode: Pass separate generator_agent and verifier_agent
+           for specialized tasks.
+
+        Args:
+            chat_agent (Optional[ChatAgent]): Optional single agent for both
+                tasks (legacy mode). (default::obj:`None`)
+            generator_agent (Optional[ChatAgent]): Optional specialized agent
+                for answer generation. (default::obj:`None`)
+            verifier_agent (Optional[ChatAgent]): Optional specialized agent
+                for answer verification. (default::obj:`None`)
+            golden_answers (Dict[str, str]): Dictionary containing pre-defined
+                correct answers for validation and comparison. Required for
+                answer verification.
+            search_limit (int): Maximum number of search iterations allowed.
+                (default::obj:`100`)
+        """
+        if chat_agent is not None:
+            if generator_agent is not None or verifier_agent is not None:
+                raise ValueError(
+                    "Cannot specify both chat_agent \
+                    and generator/verifier agents"
+                )
+            self.generator_agent = chat_agent
+            self.verifier_agent = chat_agent
+        else:
+            if generator_agent is None or verifier_agent is None:
+                raise ValueError(
+                    "Must specify either chat_agent or both generator and "
+                    "verifier agents"
+                )
+            self.generator_agent = generator_agent
+            self.verifier_agent = verifier_agent
+
+        self.golden_answers = golden_answers
+        self.search_limit = search_limit
+        self.solution_tree: Dict[str, Dict[str, Union[str, int]]] = {}
+        logger.info(
+            "O1DataGenerator initialized with search_limit=%d", search_limit
+        )
+
+    def get_answer(self, question: str, context: str = "") -> str:
+        r"""Get an answer from the chat agent for a given question.
+
+        Args:
+            question (str): The question to ask.
+            context (str): Additional context for the question.
+                (default::obj:`""`)
+
+        Returns:
+            str: The generated answer.
+        """
+        prompt = f"""
+        Please think step by step and solve this problem: {question}
+        Existing content: {context}
+        Requirements:
+        1. Analyze the problem requirements
+        2. List the steps to solve the problem
+        3. Execute the solution process
+        4. Provide the final answer
+        Please explain the thought process of each step in detail.
+        """
+        self.generator_agent.reset()
+        response = self.generator_agent.step(prompt)
+        answer = response.msgs[0].content
+        logger.info("AI thought process:\n%s", answer)
+        return answer
+
+    def verify_answer(self, question: str, answer: str) -> bool:
+        r"""Verify if a generated answer is semantically equivalent to
+        the golden answer for a given question.
+
+        Args:
+            question (str): The question being answered.
+            answer (str): The answer to verify.
+
+        Returns:
+            bool: True if the answer matches the golden answer based on
+                semantic equivalence (meaning the core content and meaning are
+                the same, even if the exact wording differs).
+                False in the following cases:
+                - If the provided question doesn't exist in the golden answers
+                - If the answer's meaning differs from the golden answer
+        """
+        golden_answer = self.golden_answers.get(question)
+        if not golden_answer:
+            raise ValueError(
+                f"No golden answer found for question: {question}"
+            )
+
+        prompt = (
+            f"Question: {question}\n"
+            f"Student Answer: {answer}\n"
+            f"Correct Answer: {golden_answer}\n"
+            "Is the student's answer correct? Please respond with 'true' or "
+            "'false' only."
+        )
+        self.verifier_agent.reset()
+        response = self.verifier_agent.step(
+            prompt, response_format=VerificationResponse
+        )
+        is_correct = response.msgs[0].parsed.is_correct  # type:ignore [union-attr]
+        logger.info("Answer verification result: %s", is_correct)
+        return is_correct
+
+    def monte_carlo_tree_search(
+        self, question: str, partial_solution: str = ""
+    ) -> float:
+        r"""Perform Monte Carlo Tree Search to find the best solution.
+
+        Args:
+            question (str): The question to solve.
+            partial_solution (str): The current partial solution.
+                (default::obj:`""`)
+
+        Returns:
+            float: The similarity score between the current
+                solution and golden answer.
+        """
+        if question not in self.golden_answers:
+            raise ValueError(
+                f"No golden answer found for question: {question}"
+            )
+
+        golden_answer = self.golden_answers[question]
+
+        prompt = (
+            f"Please evaluate this solution and "
+            f"give a score between 0-1:\n"
+            f"Question: {question}\n"
+            f"Solution: {partial_solution}\n"
+            f"Correct answer: {golden_answer}\n"
+            f"Return a JSON object with a single field 'score' containing "
+            f"a float between 0 and 1, like this: {{'score': 0.85}}\n"
+        )
+        self.generator_agent.reset()
+        response = self.generator_agent.step(
+            prompt, response_format=AgentResponse
+        )
+        agent_response = response.msgs[0].parsed.score  # type: ignore [union-attr]
+
+        return agent_response
+
+    def binary_search_error(self, question: str, solution: str) -> int:
+        r"""Use binary search to locate the first error in the solution.
+        This method splits the solution into sentences using both English and
+        Chinese sentence delimiters and performs binary search to find the
+        first error.
+
+        Args:
+            question (str): The question being solved.
+            solution (str): The complete solution to analyze.
+
+        Returns:
+            int: The position of the first error found in the solution.
+                Returns -1. If no errors are found (all sentences are correct).
+        """
+        logger.info("Starting binary search for error location")
+        # Split by both English period and Chinese period
+        sentences = [
+            s.strip()
+            for s in solution.replace('。', '.').split('.')
+            if s.strip()
+        ]
+
+        # First check if the entire solution is correct
+        if self.verify_answer(question, solution):
+            return -1
+
+        left, right = 0, len(sentences)
+        while left < right:
+            mid = (left + right) // 2
+            partial_solution = '. '.join(sentences[:mid]) + '.'
+            logger.info("Checking solution fragment:\n%s", partial_solution)
+            # Verify if the current part is correct
+            is_correct = self.verify_answer(question, partial_solution)
+            if is_correct:
+                left = mid + 1
+            else:
+                right = mid
+        logger.info("First error position found: sentence %d", left)
+        return left
+
+    def solve(self, question: str) -> str:
+        r"""Solve a question using a multi-step approach.
+
+        The solution process follows these steps:
+        1. Try to solve directly - if correct, return the solution
+        2. If not correct, use Monte Carlo Tree Search to find a good solution
+        3. If the solution isn't perfect, use binary search to locate errors
+        4. Generate a new solution based on the correct part
+
+        Args:
+            question (str): The question to solve.
+
+        Returns:
+            str: The best solution found.
+        """
+        # 1. Try direct solution first
+        solution = self.get_answer(question)
+        if self.verify_answer(question, solution):
+            logger.info("Initial solution is correct")
+            return solution
+
+        # 2. If direct solution fails, try Monte Carlo Tree Search
+        # to find a solution with high similarity score
+        best_solution = ""
+        best_score: float = 0.0
+        for i in range(self.search_limit):
+            # Generate new answer
+            current_solution = self.get_answer(question, best_solution)
+
+            # Evaluate solution similarity score
+            prompt = (
+                f"Please evaluate this solution and "
+                f"give a score between 0-1:\n"
+                f"Question: {question}\n"
+                f"Solution: {current_solution}\n"
+                f"Correct answer: {self.golden_answers.get(question, '')}\n"
+                f"Return a JSON object with a single field 'score' containing "
+                f"a float between 0 and 1, like this: {{'score': 0.85}}\n"
+            )
+            self.generator_agent.reset()
+            response = self.generator_agent.step(prompt)
+            try:
+                response = self.generator_agent.step(
+                    prompt, response_format=AgentResponse
+                )
+                agent_response = response.msgs[0].parsed.score  # type: ignore [union-attr]
+                score = agent_response
+
+                # Exit early if we find a very good solution (score > 0.9)
+                if score > 0.9:
+                    logger.info(
+                        "Found excellent solution with score %.2f. "
+                        "Stopping search early.",
+                        score,
+                    )
+                    return current_solution
+
+                if score > best_score:
+                    best_score = score
+                    best_solution = current_solution
+
+                logger.info(
+                    "Current search progress: %d/%d, best score: %.2f",
+                    i + 1,
+                    self.search_limit,
+                    best_score,
+                )
+            except Exception as e:
+                logger.error("Error parsing agent response: %s", str(e))
+                continue
+
+        # 3. If the answer is not completely correct,
+        # use binary search to locate the error
+        error_pos = self.binary_search_error(question, best_solution)
+
+        # If no errors found (error_pos == -1), return the current solution
+        if error_pos == -1:
+            logger.info("No specific errors found in the solution")
+            return best_solution
+
+        # 4. Generate new solution based on correct part
+        correct_part = '. '.join(best_solution.split('. ')[:error_pos]) + '.'
+        final_solution = self.get_answer(question, correct_part)
+        self.solution_tree[question] = {
+            "solution": final_solution,
+            "error_position": error_pos,
+        }
+        return final_solution
+
+    def import_qa_from_json(self, data: Union[str, Dict[str, str]]) -> bool:
+        r"""Import question and answer data from either a JSON file or a
+        dictionary.
+
+        Args:
+            data (Union[str, Dict[str, str]]): Either a path to a JSON file
+                containing QA pairs or a dictionary of question-answer pairs.
+                If a string is provided, it's treated as a file path.
+                The expected format is:
+                {"question1": "answer1",
+                "question2": "answer2",
+                ...}
+
+        Returns:
+            bool: True if import was successful, False otherwise.
+        """
+        try:
+            if isinstance(data, str):
+                logger.info("Loading QA pairs from file: %s", data)
+                with open(data, 'r', encoding='utf-8') as f:
+                    qa_data = json.load(f)
+            else:
+                logger.info("Loading QA pairs from provided dictionary")
+                qa_data = data
+
+            # Validate the data format
+            if not isinstance(qa_data, dict):
+                logger.error("Invalid data format: expected dictionary")
+                return False
+
+            # Update golden answers
+            self.golden_answers.update(qa_data)
+            logger.info("Successfully imported %d QA pairs", len(qa_data))
+            return True
+
+        except Exception as e:
+            logger.error("Error importing QA data: %s", str(e))
+            return False
+
+    def export_solutions(self, filepath: str = 'solutions.json') -> None:
+        r"""Export the solution process and results to a JSON file.
+        Exports the solution tree, golden answers,
+        and export timestamp to a JSON file.
+        The exported data includes:
+        - solutions: The solution tree
+          with intermediate steps
+        - golden_answers: The reference answers used for verification
+        - export_time: ISO format timestamp of the export
+
+        Args:
+            filepath (str, optional): Path where the JSON file will be saved.
+                (default::obj:`'solutions.json'`)
+
+        Returns:
+            None: The method writes to a file and logs the result but does not
+                return any value.
+        """
+        export_data = {
+            "solutions": self.solution_tree,
+            "golden_answers": self.golden_answers,
+            "export_time": datetime.now().isoformat(),
+        }
+        try:
+            with open(filepath, 'w', encoding='utf-8') as f:
+                json.dump(export_data, f, ensure_ascii=False, indent=2)
+            logger.info(f"Solutions exported successfully to {filepath}")
+        except Exception as e:
+            logger.error(f"Error exporting solutions: {e!s}")
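Taken together, the new module implements a generate-verify-refine loop: get_answer drafts a step-by-step solution, verify_answer checks it against a golden answer, and binary_search_error narrows down the first wrong sentence. A minimal usage sketch, assuming ChatAgent accepts a plain system-message string and with an illustrative QA pair (neither is part of this diff):

from camel.agents import ChatAgent
from camel.datagen import O1DataGenerator

# Dual-agent mode: one agent generates solutions, the other verifies them.
generator = ChatAgent("You are a careful step-by-step problem solver.")
verifier = ChatAgent("You judge whether an answer matches a reference answer.")

data_gen = O1DataGenerator(
    generator_agent=generator,
    verifier_agent=verifier,
    golden_answers={"What is 2 + 2?": "4"},
    search_limit=10,
)

solution = data_gen.solve("What is 2 + 2?")  # verified step-by-step answer
data_gen.export_solutions("solutions.json")  # solution tree + golden answers

Passing a single chat_agent instead selects the legacy single-agent mode; supplying both a chat_agent and generator/verifier agents raises ValueError, as the constructor above shows.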
camel/datahubs/models.py
CHANGED
@@ -19,4 +19,7 @@ from pydantic import BaseModel
 class Record(BaseModel):
     id: Optional[str] = None
     metadata: Optional[Dict[str, Any]] = None
-    content: Dict[str, Any]
+    content: Optional[Dict[str, Any]] = None
+
+    class Config:
+        extra = "allow"  # Allow any additional fields
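With content now optional and extra = "allow", a Record validates rows that carry arbitrary dataset columns. A quick sketch of what the relaxed model accepts (field names are illustrative):

from camel.datahubs.models import Record

Record(id="1", content={"text": "hello"})  # old-style record still works
Record(metadata={"split": "train"})        # content is no longer required
Record(question="Q?", answer="A.")         # extra fields are now kept, not rejected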
camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py
CHANGED
@@ -109,7 +109,10 @@ class HermesFunctionFormatter(
         format.
         """
         tool_call_dict = {"name": func_name, "arguments": args}
-        return f"{content}\n<tool_call>\n{tool_call_dict}\n</tool_call>"
+
+        if content:
+            return f"{content}\n<tool_call>\n{tool_call_dict}\n</tool_call>"
+        return f"<tool_call>\n{tool_call_dict}\n</tool_call>"
 
     def format_tool_response(self, func_name: str, result: Any) -> str:
         r"""Formats a tool response message with the given function name and
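The new guard avoids emitting a dangling leading newline when content is empty; roughly (import path taken from the file list above):

from camel.messages.conversion.sharegpt.hermes.hermes_function_formatter import (
    HermesFunctionFormatter,
)

formatter = HermesFunctionFormatter()

# Empty content: no stray leading newline before the tool call block.
formatter.format_tool_call("", "add", {"a": 1})
# -> "<tool_call>\n{'name': 'add', 'arguments': {'a': 1}}\n</tool_call>"

# Non-empty content: the previous behavior is preserved.
formatter.format_tool_call("Adding now.", "add", {"a": 1})
# -> "Adding now.\n<tool_call>\n{'name': 'add', 'arguments': {'a': 1}}\n</tool_call>"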
camel/models/openai_model.py
CHANGED
@@ -21,6 +21,7 @@ from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
+    NOT_GIVEN,
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
@@ -103,7 +104,11 @@ class OpenAIModel(BaseModelBackend):
         """
         # o1-preview and o1-mini have Beta limitations
         # reference: https://platform.openai.com/docs/guides/reasoning
-        if self.model_type in [ModelType.O1_MINI, ModelType.O1_PREVIEW]:
+        if self.model_type in [
+            ModelType.O1,
+            ModelType.O1_MINI,
+            ModelType.O1_PREVIEW,
+        ]:
             warnings.warn(
                 "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
                 "which has certain limitations, reference: "
@@ -111,22 +116,21 @@
                 UserWarning,
             )
 
-            # Remove system message that is not supported in o1 model.
-            messages = [msg for msg in messages if msg.get("role") != "system"]
-
             # Check and remove unsupported parameters and reset the fixed
             # parameters
-            unsupported_keys = [
+            unsupported_keys = [
+                "temperature",
+                "top_p",
+                "presence_penalty",
+                "frequency_penalty",
+                "logprobs",
+                "top_logprobs",
+                "logit_bias",
+            ]
             for key in unsupported_keys:
                 if key in self.model_config_dict:
                     del self.model_config_dict[key]
 
-            self.model_config_dict["temperature"] = 1.0
-            self.model_config_dict["top_p"] = 1.0
-            self.model_config_dict["n"] = 1
-            self.model_config_dict["presence_penalty"] = 0.0
-            self.model_config_dict["frequency_penalty"] = 0.0
-
         if self.model_config_dict.get("response_format"):
             # stream is not supported in beta.chat.completions.parse
             if "stream" in self.model_config_dict:
@@ -140,6 +144,14 @@
 
             return self._to_chat_completion(response)
 
+        # Removing 'strict': True from the dictionary for
+        # client.chat.completions.create
+        if self.model_config_dict.get('tools') is not NOT_GIVEN:
+            for tool in self.model_config_dict.get('tools', []):
+                function_dict = tool.get('function', {})
+                if 'strict' in function_dict:
+                    del function_dict['strict']
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
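The tools clean-up removes the per-function strict flag before the plain client.chat.completions.create call. In isolation the transformation looks like this (a standalone sketch with made-up tool data, not camel code):

# One entry of the 'tools' list as the model config may carry it.
tool = {
    "type": "function",
    "function": {"name": "add", "strict": True, "parameters": {}},
}

# Same effect as the loop in the diff: drop 'strict' if present.
tool.get("function", {}).pop("strict", None)
assert "strict" not in tool["function"]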
camel/retrievers/auto_retriever.py
CHANGED
@@ -121,6 +121,14 @@ class AutoRetriever:
 
         collection_name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
 
+        # Ensure the first character is either an underscore or a letter for
+        # Milvus
+        if (
+            self.storage_type == StorageType.MILVUS
+            and not collection_name[0].isalpha()
+        ):
+            collection_name = f"_{collection_name}"
+
         return collection_name
 
     def run_vector_retriever(
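Milvus requires collection names to start with a letter or an underscore, so digit-leading names now get an underscore prepended. For example (the real code applies this only when self.storage_type == StorageType.MILVUS):

import re

content = "2024 survey of LLM agents"
collection_name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
# -> '2024surveyofLLMagent'
if not collection_name[0].isalpha():
    collection_name = f"_{collection_name}"
# -> '_2024surveyofLLMagent'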
camel/retrievers/vector_retriever.py
CHANGED
@@ -161,13 +161,16 @@ class VectorRetriever(BaseRetriever):
         # content path, chunk metadata, and chunk text
         for vector, chunk in zip(batch_vectors, batch_chunks):
             if isinstance(content, str):
-                content_path_info = {"content path": content}
+                content_path_info = {"content path": content[:100]}
             elif isinstance(content, IOBase):
                 content_path_info = {"content path": "From file bytes"}
             elif isinstance(content, Element):
                 content_path_info = {
-                    "content path": content.metadata.file_directory
-
+                    "content path": content.metadata.file_directory[
+                        :100
+                    ]
+                    if content.metadata.file_directory
+                    else ""
                 }
 
             chunk_metadata = {"metadata": chunk.metadata.to_dict()}
camel/schemas/__init__.py
CHANGED
@@ -13,5 +13,6 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 from .openai_converter import OpenAISchemaConverter
+from .outlines_converter import OutlinesConverter
 
-__all__ = ["OpenAISchemaConverter"]
+__all__ = ["OpenAISchemaConverter", "OutlinesConverter"]
camel/schemas/base.py
CHANGED
@@ -15,8 +15,6 @@
 from abc import ABC, abstractmethod
 from typing import Any, Dict
 
-from pydantic import BaseModel
-
 
 class BaseConverter(ABC):
     r"""A base class for schema outputs that includes functionality
@@ -30,7 +28,7 @@ class BaseConverter(ABC):
     @abstractmethod
     def convert(
         self, content: str, *args: Any, **kwargs: Dict[str, Any]
-    ) -> BaseModel:
+    ) -> Any:
         r"""Structures the input text into the expected response format.
 
         Args:
@@ -40,6 +38,6 @@ class BaseConverter(ABC):
             prompt (Optional[str], optional): The prompt to be used.
 
         Returns:
-
+            Any: The converted response.
         """
         pass
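Dropping the unused BaseModel import and loosening the return annotation to Any means converters are no longer tied to returning pydantic models. A minimal, hypothetical subclass under the new signature:

from typing import Any, Dict

from camel.schemas.base import BaseConverter


class WordListConverter(BaseConverter):
    r"""Toy converter: structures text into a list of words."""

    def convert(self, content: str, *args: Any, **kwargs: Dict[str, Any]) -> Any:
        return content.split()


WordListConverter().convert("a b c")  # -> ['a', 'b', 'c']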