camel-ai 0.2.31__py3-none-any.whl → 0.2.34__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +0 -52
- camel/agents/knowledge_graph_agent.py +5 -0
- camel/datasets/__init__.py +2 -4
- camel/datasets/base_generator.py +1 -218
- camel/datasets/few_shot_generator.py +261 -0
- camel/datasets/static_dataset.py +54 -2
- camel/models/openai_compatible_model.py +2 -4
- camel/models/sglang_model.py +4 -1
- camel/toolkits/file_write_toolkit.py +24 -2
- camel/toolkits/github_toolkit.py +15 -3
- camel/toolkits/mcp_toolkit.py +5 -1
- camel/types/enums.py +16 -0
- {camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/METADATA +3 -6
- {camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/RECORD +17 -16
- {camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -446,7 +446,6 @@ class ChatAgent(BaseAgent):
         self,
         input_message: Union[BaseMessage, str],
         response_format: Optional[Type[BaseModel]] = None,
-        reason_params: Optional[Dict[str, Any]] = None,
     ) -> ChatAgentResponse:
         r"""Executes a single step in the chat session, generating a response
         to the input message.
@@ -459,13 +458,6 @@ class ChatAgent(BaseAgent):
                 model defining the expected structure of the response. Used to
                 generate a structured response if provided. (default:
                 :obj:`None`)
-            reason_params (Optional[Dict[str, Any]], optional): A dictionary
-                containing the parameters for the reasoning step.
-                Argument `choices` is the number of choices/candidates to
-                consider.
-                Argument `threshold` is the threshold for the probability of
-                the choices.
-                (default: :obj:`None`)
 
         Returns:
             ChatAgentResponse: Contains output messages, a termination status
@@ -478,9 +470,6 @@ class ChatAgent(BaseAgent):
             role_name="User", content=input_message
         )
 
-        # Inject thinking steps
-        input_message = self._update_reasoning(input_message, reason_params)
-
         # Add user input to memory
         self.update_memory(input_message, OpenAIBackendRole.USER)
 
@@ -522,47 +511,6 @@ class ChatAgent(BaseAgent):
             response, tool_call_records, num_tokens, external_tool_call_request
         )
 
-    def _update_reasoning(
-        self,
-        input_message: BaseMessage,
-        reason_params: Optional[Dict[str, Any]] = None,
-    ) -> BaseMessage:
-        r"""Updates the input message to include reasoning instructions and
-        adds human interaction capability.
-
-        Args:
-            input_message (BaseMessage): The message to be updated with
-                reasoning instructions.
-            reason_params (Optional[Dict[str, Any]], optional): Parameters for
-                the reasoning process.
-
-        Returns:
-            BaseMessage: The updated message with reasoning instructions.
-        """
-        if reason_params is None:
-            return input_message
-        choices = reason_params.get("choices", 3)
-        threshold = reason_params.get("threshold", 0.5)
-
-        input_message.content += f"""First, come up with potential {choices}
-choices/candidates.
-Next, assign a probability/credibility between 0 and 1 to each choice
-(make sure they add up to 1).
-Finally, if only one choice has a probability/credibility greater than
-{threshold}, continue with that choice.
-Otherwise, call tool `ask_human_via_console` to ask the user to decide
-which one to continue with, give user the probability/credibility of
-all choices, and the reason for each choice.
-"""
-
-        # Add tools to agent
-        from camel.toolkits.human_toolkit import HumanToolkit
-
-        human_toolkit = HumanToolkit()
-        self.add_tool(human_toolkit.ask_human_via_console)
-
-        return input_message
-
     @property
     def chat_history(self) -> List[OpenAIMessage]:
         openai_messages, _ = self.memory.get_context()
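For callers upgrading from 0.2.31, the practical effect is that `ChatAgent.step()` no longer accepts `reason_params` and no longer auto-registers `ask_human_via_console`. A minimal before/after sketch (the prompt text is a placeholder; it assumes `ChatAgent` still accepts a plain string system message, which this diff does not change):

from camel.agents import ChatAgent

agent = ChatAgent("You are a helpful assistant.")

# 0.2.31 (removed in 0.2.34): the extra keyword injected multi-choice
# reasoning instructions plus a human-in-the-loop tool.
# agent.step("Pick a deployment plan", reason_params={"choices": 3, "threshold": 0.5})

# 0.2.34: only the message and an optional response_format are passed.
response = agent.step("Pick a deployment plan")
print(response.msgs[0].content)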
camel/agents/knowledge_graph_agent.py
CHANGED

@@ -70,6 +70,11 @@ provided Node and Relationship classes.
 Ensure that the extracted data adheres to the structure defined by the classes.
 Output the structured data in a format that can be easily validated against
 the provided code.
+Do not wrap the output in lists or dictionaries, provide the Node and
+Relationship with unique identifiers.
+Strictly follow the format provided in the example output, do not add any
+additional information.
+
 
 Instructions for you:
 Read the provided content thoroughly.
camel/datasets/__init__.py
CHANGED
@@ -11,10 +11,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from .base_generator import (
-
-    FewShotGenerator,
-)
+from .base_generator import BaseGenerator
+from .few_shot_generator import FewShotGenerator
 from .models import DataPoint
 from .static_dataset import StaticDataset
 
camel/datasets/base_generator.py
CHANGED
@@ -15,22 +15,12 @@
 import abc
 import json
 import random
-from datetime import datetime
 from pathlib import Path
-from typing import (
-    List,
-    Union,
-)
+from typing import List, Union
 
-from pydantic import ValidationError
-
-from camel.agents import ChatAgent
 from camel.logger import get_logger
-from camel.verifiers import BaseVerifier
-from camel.verifiers.models import VerifierInput
 
 from .models import DataPoint
-from .static_dataset import StaticDataset
 
 logger = get_logger(__name__)
 
@@ -126,210 +116,3 @@ class BaseGenerator(abc.ABC):
         except IOError as e:
             logger.error(f"Error writing to file {file_path}: {e}")
             raise
-
-
-class FewShotGenerator(BaseGenerator):
-    r"""A generator for creating synthetic datapoints using few-shot learning.
-
-    This class leverages a seed dataset, an agent, and a verifier to generate
-    new synthetic datapoints on demand through few-shot prompting.
-    """
-
-    def __init__(
-        self,
-        seed_dataset: StaticDataset,
-        verifier: BaseVerifier,
-        agent: ChatAgent,
-        seed: int = 42,
-        **kwargs,
-    ):
-        r"""Initialize the few-shot generator.
-
-        Args:
-            seed_dataset (StaticDataset): Validated static dataset to
-                use for examples.
-            verifier (BaseVerifier): Verifier to validate generated content.
-            agent (ChatAgent): Agent to generate new datapoints.
-            seed (int): Random seed for reproducibility. (default: :obj:`42`)
-            **kwargs: Additional generator parameters.
-        """
-        super().__init__(seed=seed, **kwargs)
-        self.seed_dataset = seed_dataset
-        try:
-            self._validate_seed_dataset()
-        except Exception:
-            raise RuntimeError("Seed Data does not follow Datapoint format")
-        self.verifier = verifier
-        self.agent = agent
-
-    # TODO: Validate that seed dataset contains rationale
-    def _validate_seed_dataset(self) -> None:
-        pass
-
-    def _construct_prompt(self, examples: List[DataPoint]) -> str:
-        r"""Construct a prompt for generating new datapoints
-        using a fixed sample of examples from the seed dataset.
-
-        Args:
-            examples (List[DataPoint]): Examples to include in the prompt.
-
-        Returns:
-            str: Formatted prompt with examples.
-        """
-        prompt = (
-            "Generate a new datapoint similar to the following examples:\n\n"
-        )
-        for i, example in enumerate(examples, 1):
-            prompt += f"Example {i}:\n"
-            prompt += f"Question: {example.question}\n"
-            if example.rationale is not None:
-                prompt += f"Rationale: {example.rationale}\n"
-            else:
-                prompt += "Rationale: None\n"
-            prompt += f"Final Answer: {example.final_answer}\n\n"
-        prompt += "New datapoint:"
-        return prompt
-
-    async def generate_new(
-        self,
-        n: int,
-        max_retries: int = 10,
-        num_examples: int = 3,
-        **kwargs,
-    ) -> List[DataPoint]:
-        r"""Generates and validates `n` new datapoints through
-        few-shot prompting, with a retry limit.
-
-        Steps:
-            1. Samples examples from the seed dataset.
-            2. Constructs a prompt using the selected examples.
-            3. Uses an agent to generate a new datapoint,
-               consisting of a question and code to solve the question.
-            4. Executes code using a verifier to get pseudo ground truth.
-            5. Stores valid datapoints in memory.
-
-        Args:
-            n (int): Number of valid datapoints to generate.
-            max_retries (int): Maximum number of retries before stopping.
-                (default: :obj:`10`)
-            num_examples (int): Number of examples to sample from the
-                seed dataset for few shot prompting.
-                (default: :obj:`3`)
-            **kwargs: Additional generation parameters.
-
-        Returns:
-            List[DataPoint]: A list of newly generated valid datapoints.
-
-        Raises:
-            TypeError: If the agent's output is not a dictionary (or does not
-                match the expected format).
-            KeyError: If required keys are missing from the response.
-            AttributeError: If the verifier response lacks attributes.
-            ValidationError: If a datapoint fails schema validation.
-            RuntimeError: If retries are exhausted before `n` valid datapoints
-                are generated.
-
-        Notes:
-            - Retries on validation failures until `n` valid datapoints exist
-              or `max_retries` is reached, whichever comes first.
-            - If retries are exhausted before reaching `n`, a `RuntimeError`
-              is raised.
-            - Metadata includes a timestamp for tracking datapoint creation.
-        """
-        valid_data_points: List[DataPoint] = []
-        retries = 0
-
-        while len(valid_data_points) < n and retries < max_retries:
-            try:
-                examples = [
-                    self.seed_dataset.sample() for _ in range(num_examples)
-                ]
-                prompt = self._construct_prompt(examples)
-
-                try:
-                    agent_output = (
-                        self.agent.step(prompt, response_format=DataPoint)
-                        .msgs[0]
-                        .parsed
-                    )
-                    if not isinstance(agent_output, dict):
-                        raise TypeError("Agent output must be a dictionary")
-                    if "question" not in agent_output:
-                        raise KeyError(
-                            "Missing 'question' in agent"
-                            f"output {agent_output}"
-                        )
-                    if "rationale" not in agent_output:
-                        raise KeyError(
-                            "Missing 'rationale' in agent"
-                            f"output {agent_output}"
-                        )
-                except (TypeError, KeyError) as e:
-                    logger.warning(
-                        f"Agent output issue: {e}, retrying... "
-                        f"({retries + 1}/{max_retries})"
-                    )
-                    retries += 1
-                    continue
-
-                rationale = agent_output.get("rationale")
-
-                if not isinstance(rationale, str):
-                    raise TypeError(f"Rationale {rationale} is not a string.")
-
-                try:
-                    verifier_response = await self.verifier.verify(
-                        VerifierInput(
-                            llm_response=rationale,
-                            ground_truth=None,
-                        )
-                    )
-                    if not verifier_response or not verifier_response.result:
-                        raise ValueError(
-                            "Verifier unsuccessful, response: "
-                            f"{verifier_response}"
-                        )
-                except (ValueError, AttributeError) as e:
-                    logger.warning(
-                        f"Verifier issue: {e}, "
-                        f"retrying... ({retries + 1}/{max_retries})"
-                    )
-                    retries += 1
-                    continue
-
-                try:
-                    new_datapoint = DataPoint(
-                        question=agent_output["question"],
-                        rationale=rationale,
-                        final_answer=verifier_response.result,
-                        metadata={
-                            "synthetic": str(True),
-                            "created": datetime.now().isoformat(),
-                            "generator": "few_shot",
-                        },
-                    )
-                except ValidationError as e:
-                    logger.warning(
-                        f"Datapoint validation failed: {e}, "
-                        f"retrying... ({retries + 1}/{max_retries})"
-                    )
-                    retries += 1
-                    continue
-
-                valid_data_points.append(new_datapoint)
-
-            except Exception as e:
-                logger.warning(
-                    f"Unexpected error: {e}, retrying..."
-                    f" ({retries + 1}/{max_retries})"
-                )
-                retries += 1
-
-        if len(valid_data_points) < n:
-            raise RuntimeError(
-                f"Failed to generate {n} valid datapoints "
-                f"after {max_retries} retries."
-            )
-
-        self._data.extend(valid_data_points)
-        return valid_data_points
camel/datasets/few_shot_generator.py
ADDED

@@ -0,0 +1,261 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import asyncio
+from datetime import datetime
+from typing import List
+
+from pydantic import ValidationError
+
+from camel.agents import ChatAgent
+from camel.logger import get_logger
+from camel.models.base_model import BaseModelBackend
+from camel.verifiers import BaseVerifier
+from camel.verifiers.models import VerifierInput
+
+from .base_generator import BaseGenerator
+from .models import DataPoint
+from .static_dataset import StaticDataset
+
+logger = get_logger(__name__)
+
+SYSTEM_PROMPT = """**You are an advanced data generation assistant.**
+Your goal is to generate high-quality synthetic data points based on
+provided examples. Your output must be well-structured,
+logically sound, and formatted correctly.
+
+**Instructions:**
+1. **Follow the Structure**
+   Each data point must include:
+   - **Question**: A clear, well-formed query.
+   - **Rationale**: A step-by-step, executable reasoning process ending
+     with `print(final_answer)`.
+   - **Final Answer**: The correct, concise result.
+
+2. **Ensure Logical Consistency**
+   - The `rationale` must be code that runs correctly.
+   - The `final_answer` should match the printed output.
+
+3. **Output Format (Strict)**
+```
+Question: [Generated question]
+Rationale: [Code that solves the question, ending in a print statement,
+outputting the answer.]
+Final Answer: [The Final Answer]
+
+**Now, generate a new data point based on the given examples.**
+"""
+
+
+class FewShotGenerator(BaseGenerator):
+    r"""A generator for creating synthetic datapoints using few-shot learning.
+
+    This class leverages a seed dataset, an agent, and a verifier to generate
+    new synthetic datapoints on demand through few-shot prompting.
+    """
+
+    def __init__(
+        self,
+        seed_dataset: StaticDataset,
+        verifier: BaseVerifier,
+        model: BaseModelBackend,
+        seed: int = 42,
+        **kwargs,
+    ):
+        r"""Initialize the few-shot generator.
+
+        Args:
+            seed_dataset (StaticDataset): Validated static dataset to
+                use for examples.
+            verifier (BaseVerifier): Verifier to validate generated content.
+            model (BaseModelBackend): The underlying LLM that the generating
+                agent will be initiated with.
+            seed (int): Random seed for reproducibility. (default: :obj:`42`)
+            **kwargs: Additional generator parameters.
+        """
+        super().__init__(seed=seed, **kwargs)
+        self.seed_dataset = seed_dataset
+        try:
+            self._validate_seed_dataset()
+        except Exception:
+            raise RuntimeError("Seed Data does not follow Datapoint format")
+        self.verifier = verifier
+        self.agent = ChatAgent(system_message=SYSTEM_PROMPT, model=model)
+
+    # TODO: Validate that seed dataset contains rationale
+    def _validate_seed_dataset(self) -> None:
+        pass
+
+    def _construct_prompt(self, examples: List[DataPoint]) -> str:
+        r"""Construct a prompt for generating new datapoints
+        using a fixed sample of examples from the seed dataset.
+
+        Args:
+            examples (List[DataPoint]): Examples to include in the prompt.
+
+        Returns:
+            str: Formatted prompt with examples.
+        """
+        prompt = (
+            "Generate a new datapoint similar to the following examples:\n\n"
+        )
+        for i, example in enumerate(examples, 1):
+            prompt += f"Example {i}:\n"
+            prompt += f"Question: {example.question}\n"
+            if example.rationale is not None:
+                prompt += f"Rationale: {example.rationale}\n"
+            else:
+                prompt += "Rationale: None\n"
+            prompt += f"Final Answer: {example.final_answer}\n\n"
+        prompt += "New datapoint:"
+        return prompt
+
+    async def generate_new(
+        self,
+        n: int,
+        max_retries: int = 10,
+        num_examples: int = 3,
+        **kwargs,
+    ) -> List[DataPoint]:
+        r"""Generates and validates `n` new datapoints through
+        few-shot prompting, with a retry limit.
+
+        Steps:
+            1. Samples examples from the seed dataset.
+            2. Constructs a prompt using the selected examples.
+            3. Uses an agent to generate a new datapoint,
+               consisting of a question and code to solve the question.
+            4. Executes code using a verifier to get pseudo ground truth.
+            5. Stores valid datapoints in memory.
+
+        Args:
+            n (int): Number of valid datapoints to generate.
+            max_retries (int): Maximum number of retries before stopping.
+                (default: :obj:`10`)
+            num_examples (int): Number of examples to sample from the
+                seed dataset for few shot prompting.
+                (default: :obj:`3`)
+            **kwargs: Additional generation parameters.
+
+        Returns:
+            List[DataPoint]: A list of newly generated valid datapoints.
+
+        Raises:
+            TypeError: If the agent's output is not a dictionary (or does not
+                match the expected format).
+            KeyError: If required keys are missing from the response.
+            AttributeError: If the verifier response lacks attributes.
+            ValidationError: If a datapoint fails schema validation.
+            RuntimeError: If retries are exhausted before `n` valid datapoints
+                are generated.
+
+        Notes:
+            - Retries on validation failures until `n` valid datapoints exist
+              or `max_retries` is reached, whichever comes first.
+            - If retries are exhausted before reaching `n`, a `RuntimeError`
+              is raised.
+            - Metadata includes a timestamp for tracking datapoint creation.
+        """
+        valid_data_points: List[DataPoint] = []
+        retries = 0
+
+        while len(valid_data_points) < n and retries < max_retries:
+            try:
+                examples = [
+                    self.seed_dataset.sample() for _ in range(num_examples)
+                ]
+                prompt = self._construct_prompt(examples)
+
+                try:
+                    agent_output = (
+                        self.agent.step(prompt, response_format=DataPoint)
+                        .msgs[0]
+                        .parsed
+                    )
+
+                    assert isinstance(agent_output, DataPoint)
+
+                    self.agent.reset()
+
+                except (TypeError, KeyError) as e:
+                    logger.warning(
+                        f"Agent output issue: {e}, retrying... "
+                        f"({retries + 1}/{max_retries})"
+                    )
+                    retries += 1
+                    continue
+
+                rationale = agent_output.rationale
+
+                if not isinstance(rationale, str):
+                    raise TypeError(f"Rationale {rationale} is not a string.")
+
+                try:
+                    verifier_response = await self.verifier.verify(
+                        VerifierInput(
+                            llm_response=rationale,
+                            ground_truth=None,
+                        )
+                    )
+                    if not verifier_response or not verifier_response.result:
+                        raise ValueError(
+                            "Verifier unsuccessful, response: "
+                            f"{verifier_response}"
+                        )
+                except (ValueError, AttributeError) as e:
+                    logger.warning(
+                        f"Verifier issue: {e}, "
+                        f"retrying... ({retries + 1}/{max_retries})"
+                    )
+                    retries += 1
+                    continue
+
+                try:
+                    new_datapoint = DataPoint(
+                        question=agent_output.question,
+                        rationale=rationale,
+                        final_answer=verifier_response.result,
+                        metadata={
+                            "synthetic": str(True),
+                            "created": datetime.now().isoformat(),
+                            "generator": "few_shot",
+                        },
+                    )
+                except ValidationError as e:
+                    logger.warning(
+                        f"Datapoint validation failed: {e}, "
+                        f"retrying... ({retries + 1}/{max_retries})"
+                    )
+                    retries += 1
+                    continue
+
+                valid_data_points.append(new_datapoint)
+
+            except Exception as e:
+                logger.warning(
+                    f"Unexpected error: {e}, retrying..."
+                    f" ({retries + 1}/{max_retries})"
+                )
+                retries += 1
+
+        if len(valid_data_points) < n:
+            raise RuntimeError(
+                f"Failed to generate {n} valid datapoints "
+                f"after {max_retries} retries."
+            )
+
+        # Thread-safe way to extend the data list
+        async with asyncio.Lock():
+            self._data.extend(valid_data_points)
+        return valid_data_points
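Taken together, the dataset changes mean `FewShotGenerator` now builds its own `ChatAgent` from a model backend instead of accepting a pre-built agent. A rough usage sketch, assuming an OpenAI backend and a local seed file (the file name is a placeholder, constructor defaults for `PythonVerifier` are assumed, and depending on the verifier an explicit async setup step may be required before `verify()` is usable):

import asyncio
from pathlib import Path

from camel.datasets import FewShotGenerator, StaticDataset
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType
from camel.verifiers.python_verifier import PythonVerifier


async def main() -> None:
    # Seed examples with question/rationale/final_answer fields.
    seed = StaticDataset(Path("seed_math.jsonl"))
    verifier = PythonVerifier()  # executes the generated rationale code
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O_MINI,
    )
    generator = FewShotGenerator(
        seed_dataset=seed, verifier=verifier, model=model
    )
    new_points = await generator.generate_new(n=5, num_examples=3)
    for dp in new_points:
        print(dp.question, "->", dp.final_answer)


asyncio.run(main())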
camel/datasets/static_dataset.py
CHANGED
@@ -60,7 +60,7 @@ class StaticDataset(Dataset):
             Input data, which can be one of the following:
             - A Hugging Face Dataset (:obj:`HFDataset`).
             - A PyTorch Dataset (:obj:`torch.utils.data.Dataset`).
-            - A :obj:`Path` object representing a JSON file.
+            - A :obj:`Path` object representing a JSON or JSONL file.
             - A list of dictionaries with :obj:`DataPoint`-compatible
               fields.
         seed (int): Random seed for reproducibility.
@@ -112,6 +112,7 @@ class StaticDataset(Dataset):
 
         Raises:
             TypeError: If the input data type is unsupported.
+            ValueError: If the Path has an unsupported file extension.
         """
 
         if isinstance(data, HFDataset):
@@ -119,7 +120,16 @@ class StaticDataset(Dataset):
         elif isinstance(data, Dataset):
             raw_data = self._init_from_pytorch_dataset(data)
         elif isinstance(data, Path):
-
+            if data.suffix == ".jsonl":
+                raw_data = self._init_from_jsonl_path(data)
+            elif data.suffix == ".json":
+                raw_data = self._init_from_json_path(data)
+            else:
+                raise ValueError(
+                    f"Unsupported file extension: {data.suffix}."
+                    " Please enter a .json or .jsonl object."
+                )
+
         elif isinstance(data, list):
             raw_data = self._init_from_list(data)
         else:
@@ -322,6 +332,48 @@ class StaticDataset(Dataset):
             )
         return loaded_data
 
+    def _init_from_jsonl_path(self, data: Path) -> List[Dict[str, Any]]:
+        r"""Load and parse a dataset from a JSONL file.
+
+        Args:
+            data (Path): Path to the JSONL file.
+
+        Returns:
+            List[Dict[str, Any]]: A list of datapoint dictionaries.
+
+        Raises:
+            FileNotFoundError: If the specified JSONL file does not exist.
+            ValueError: If a line in the file contains invalid JSON or
+                is not a dictionary.
+        """
+        if not data.exists():
+            raise FileNotFoundError(f"JSONL file not found: {data}")
+
+        raw_data = []
+        logger.debug(f"Loading JSONL from {data}")
+        with data.open('r', encoding='utf-8') as f:
+            for line_number, line in enumerate(f, start=1):
+                line = line.strip()
+                if not line:
+                    continue  # Skip blank lines if any exist.
+                try:
+                    record = json.loads(line)
+                except json.JSONDecodeError as e:
+                    raise ValueError(
+                        f"Invalid JSON on line {line_number} in file "
+                        f"{data}: {e}"
+                    )
+                raw_data.append(record)
+        logger.info(f"Successfully loaded {len(raw_data)} items from {data}")
+
+        for i, item in enumerate(raw_data):
+            if not isinstance(item, dict):
+                raise ValueError(
+                    f"Expected a dictionary at record {i+1} (line {i+1}), "
+                    f"got {type(item).__name__}"
+                )
+        return raw_data
+
     def _init_from_list(
         self, data: List[Dict[str, Any]]
     ) -> List[Dict[str, Any]]:
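In practice the new code path means a `.jsonl` seed file can be handed directly to `StaticDataset`. A small sketch (the file name and its contents are placeholders):

from pathlib import Path

from camel.datasets import StaticDataset

# math_seed.jsonl contains one JSON object per line, e.g.:
# {"question": "What is 2 + 2?", "rationale": "print(2 + 2)", "final_answer": "4"}
dataset = StaticDataset(Path("math_seed.jsonl"))
print(dataset.sample().question)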
camel/models/openai_compatible_model.py
CHANGED

@@ -56,10 +56,8 @@ class OpenAICompatibleModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
-
-        )
-        self.url = url or os.environ.get("OPENAI_COMPATIBILITY_API_BASE_URL")
+        api_key = api_key or os.environ.get("OPENAI_COMPATIBILITY_API_KEY")
+        url = url or os.environ.get("OPENAI_COMPATIBILITY_API_BASE_URL")
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
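The net effect is that the environment-variable fallbacks are now resolved before being passed to the base class, so both the key and the base URL reach `super().__init__()`. A hedged configuration sketch (model name, key, and URL are placeholders, and the exact constructor defaults may differ):

import os

from camel.models import OpenAICompatibleModel

os.environ["OPENAI_COMPATIBILITY_API_KEY"] = "sk-placeholder"
os.environ["OPENAI_COMPATIBILITY_API_BASE_URL"] = "http://localhost:8000/v1"

# Both values may be omitted from the constructor and picked up
# from the environment instead.
model = OpenAICompatibleModel(
    model_type="my-local-model",
    model_config_dict={},
)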
camel/models/sglang_model.py
CHANGED
@@ -324,7 +324,10 @@ def _kill_process_tree(
 
         # Sometime processes cannot be killed with SIGKILL
         # so we send an additional signal to kill them.
-
+        if hasattr(signal, "SIGQUIT"):
+            itself.send_signal(signal.SIGQUIT)
+        else:
+            itself.send_signal(signal.SIGTERM)
     except psutil.NoSuchProcess:
         pass
 
camel/toolkits/file_write_toolkit.py
CHANGED

@@ -13,6 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 
+import re
 from datetime import datetime
 from pathlib import Path
 from typing import List, Optional, Union
@@ -69,17 +70,22 @@ class FileWriteToolkit(BaseToolkit):
         r"""Convert the given string path to a Path object.
 
         If the provided path is not absolute, it is made relative to the
-        default output directory.
+        default output directory. The filename part is sanitized to replace
+        spaces and special characters with underscores, ensuring safe usage
+        in downstream processing.
 
         Args:
             file_path (str): The file path to resolve.
 
         Returns:
-            Path: A fully resolved (absolute) Path object.
+            Path: A fully resolved (absolute) and sanitized Path object.
         """
         path_obj = Path(file_path)
         if not path_obj.is_absolute():
             path_obj = self.output_dir / path_obj
+
+        sanitized_filename = self._sanitize_filename(path_obj.name)
+        path_obj = path_obj.parent / sanitized_filename
         return path_obj.resolve()
 
     def _write_text_file(
@@ -369,3 +375,19 @@ class FileWriteToolkit(BaseToolkit):
         return [
             FunctionTool(self.write_to_file),
         ]
+
+    def _sanitize_filename(self, filename: str) -> str:
+        r"""Sanitize a filename by replacing any character that is not
+        alphanumeric, a dot (.), hyphen (-), or underscore (_) with an
+        underscore (_).
+
+        Args:
+            filename (str): The original filename which may contain spaces or
+                special characters.
+
+        Returns:
+            str: The sanitized filename with disallowed characters replaced by
+                underscores.
+        """
+        safe = re.sub(r'[^\w\-.]', '_', filename)
+        return safe
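A quick illustration of the sanitization rule added above (a standalone re-implementation of the same regex for demonstration; the toolkit applies it to the filename component only):

import re


def sanitize(filename: str) -> str:
    # Replace anything that is not [A-Za-z0-9_], '-', or '.' with '_'.
    return re.sub(r'[^\w\-.]', '_', filename)


print(sanitize("weekly report (draft 2).md"))  # -> weekly_report__draft_2_.md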
camel/toolkits/github_toolkit.py
CHANGED
@@ -110,9 +110,21 @@ class GithubToolkit(BaseToolkit):
             successfully or not.
         """
         sb = self.repo.get_branch(self.repo.default_branch)
-
-
-
+        from github import GithubException
+
+        try:
+            self.repo.create_git_ref(
+                ref=f"refs/heads/{branch_name}", sha=sb.commit.sha
+            )
+        except GithubException as e:
+            if e.message == "Reference already exists":
+                # agent might have pushed the branch separately.
+                logger.warning(
+                    f"Branch {branch_name} already exists. "
+                    "Continuing with the existing branch."
+                )
+            else:
+                raise
 
         file = self.repo.get_contents(file_path)
 
camel/toolkits/mcp_toolkit.py
CHANGED
@@ -102,8 +102,12 @@ class _MCPServer(BaseToolkit):
                 sse_client(self.command_or_url)
             )
         else:
+            command = self.command_or_url
+            if os.name == "nt" and command.lower() == "npx":
+                command = "npx.cmd"
+
             server_parameters = StdioServerParameters(
-                command=
+                command=command, args=self.args, env=self.env
             )
             (
                 read_stream,
camel/types/enums.py
CHANGED
@@ -217,6 +217,14 @@ class ModelType(UnifiedModelType, Enum):
     def __new__(cls, value) -> "ModelType":
         return cast("ModelType", UnifiedModelType.__new__(cls, value))
 
+    @classmethod
+    def from_name(cls, name):
+        r"""Returns the ModelType enum value from a string."""
+        for model_type in cls:
+            if model_type.value == name:
+                return model_type
+        raise ValueError(f"Unknown ModelType name: {name}")
+
     @property
     def value_for_tiktoken(self) -> str:
         if self.is_openai:
@@ -895,6 +903,14 @@ class ModelPlatformType(Enum):
     AIML = "aiml"
     VOLCANO = "volcano"
 
+    @classmethod
+    def from_name(cls, name):
+        r"""Returns the ModelPlatformType enum value from a string."""
+        for model_platfrom_type in cls:
+            if model_platfrom_type.value == name:
+                return model_platfrom_type
+        raise ValueError(f"Unknown ModelPlatformType name: {name}")
+
     @property
     def is_openai(self) -> bool:
         r"""Returns whether this platform is openai."""
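A brief sketch of the new lookup helpers: they resolve enum members from their string values and raise `ValueError` for unknown names (the example values assume the existing "openai" and "gpt-4o-mini" enum entries):

from camel.types import ModelPlatformType, ModelType

platform = ModelPlatformType.from_name("openai")  # ModelPlatformType.OPENAI
model = ModelType.from_name("gpt-4o-mini")        # ModelType.GPT_4O_MINI

try:
    ModelType.from_name("not-a-model")
except ValueError as exc:
    print(exc)  # Unknown ModelType name: not-a-model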
{camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.
+Version: 0.2.34
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel
@@ -11,14 +11,11 @@ License-File: LICENSE
 Keywords: ai-societies,artificial-intelligence,communicative-ai,cooperative-ai,deep-learning,large-language-models,multi-agent-systems,natural-language-processing
 Requires-Python: <3.13,>=3.10
 Requires-Dist: colorama<0.5,>=0.4.6
-Requires-Dist: curl-cffi==0.6.2
 Requires-Dist: docstring-parser<0.16,>=0.15
-Requires-Dist: eval-type-backport==0.2.0
 Requires-Dist: httpx<1.0.0dev,>=0.28.0
 Requires-Dist: jsonschema<5,>=4
 Requires-Dist: numpy~=1.26
 Requires-Dist: openai<2,>=1.59.7
-Requires-Dist: protobuf<6,>=5
 Requires-Dist: psutil<6,>=5.9.8
 Requires-Dist: pydantic<2.10,>=1.9
 Requires-Dist: pyyaml<7,>=6.0.2
@@ -461,7 +458,7 @@ Explore different types of agents, their roles, and their applications.
 
 ### Seeking Help
 
-Please
+Please reach out to us on [CAMEL discord](https://discord.camel-ai.org/) if you encounter any issue set up CAMEL.
 
 <br>
 
@@ -535,7 +532,7 @@ We believe that studying these agents on a large scale offers valuable insights
 
 <br>
 
-##
+## Synthetic Datasets
 
 ### 1. Utilize Various LLMs as Backends
 
{camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-camel/__init__.py,sha256=
+camel/__init__.py,sha256=pnGCl1B_v9rwhoHltxtP1DUKL_fHxZqwilp-r4r6mFY,912
 camel/generators.py,sha256=JRqj9_m1PF4qT6UtybzTQ-KBT9MJQt18OAAYvQ_fr2o,13844
 camel/human.py,sha256=9X09UmxI2JqQnhrFfnZ3B9EzFmVfdSWQcjLWTIXKXe0,4962
 camel/logger.py,sha256=rZVeOVYuQ9RYJ5Tqyv0usqy0g4zaVEq4qSfZ9nd2640,5755
@@ -7,11 +7,11 @@ camel/agents/__init__.py,sha256=LcS4m8s97-yADfznvcaAdUe9W0E9h3m6zrSc9H6m9so,1545
 camel/agents/_types.py,sha256=GGpZ9FGq_SGla_Vz-YcYW7KMQzwE8lfM4Ga0QaGzKxk,1423
 camel/agents/_utils.py,sha256=0O9YjIin-ZSEux2fQ2NPuB2YIxXsPyd9Ws1Kll6Vxhw,6248
 camel/agents/base.py,sha256=c4bJYL3G3Z41SaFdMPMn8ZjLdFiFaVOFO6EQIfuCVR8,1124
-camel/agents/chat_agent.py,sha256=
+camel/agents/chat_agent.py,sha256=19JMd5Rz06I5m5X36MsXshrmjB5QoJfR8oN9bfgMlWw,45888
 camel/agents/critic_agent.py,sha256=qFVlHlQo0CVgmPWfWYLT8_oP_KyzCLFsQw_nN_vu5Bs,7487
 camel/agents/deductive_reasoner_agent.py,sha256=6BZGaq1hR6hKJuQtOfoYQnk_AkZpw_Mr7mUy2MspQgs,13540
 camel/agents/embodied_agent.py,sha256=XBxBu5ZMmSJ4B2U3Z7SMwvLlgp6yNpaBe8HNQmY9CZA,7536
-camel/agents/knowledge_graph_agent.py,sha256=
+camel/agents/knowledge_graph_agent.py,sha256=7Tchhyvm1s8tQ3at7iGKZt70xWZllRXu2vwUFR37p10,9681
 camel/agents/multi_hop_generator_agent.py,sha256=aYsZNsEFHxIq8_wDN8lZRkvRbfhlOYGBKezWr87y8Bs,4325
 camel/agents/programmed_agent_instruction.py,sha256=99fLe41che3X6wPpNPJXRwl4If6EoQqQVWIoT3DKE1s,7124
 camel/agents/role_assignment_agent.py,sha256=8bkTc14XToFHkP-ZOef5KP0P4hTlCDv0eNsDZPYuukA,5088
@@ -82,10 +82,11 @@ camel/datahubs/__init__.py,sha256=1a8fRuzgirO2pHtPnuisZ76iF_AN9GxMFq9gwFKWE5I,90
 camel/datahubs/base.py,sha256=4QKWiJaeL5ReQpyTAbOtzHs-2CzAYbVyoMngYwdpZGU,4357
 camel/datahubs/huggingface.py,sha256=LgRruML4XnwHrm_jMB-aB-Ha-M9ErRrA7YmiL6saGis,14929
 camel/datahubs/models.py,sha256=tGb9OP_aomIhnwc0VapJjTg9PmyV_QCp5to9sABXF0Y,978
-camel/datasets/__init__.py,sha256=
-camel/datasets/base_generator.py,sha256=
+camel/datasets/__init__.py,sha256=KnAYddFU8Yoi9mLusyn7p9yIbkv7gAxZ_o-q9U1DDb4,963
+camel/datasets/base_generator.py,sha256=twZFYoaxl8zZaSfxO__R0M1_8xacdxVXZGSg8xy-ooM,3987
+camel/datasets/few_shot_generator.py,sha256=KpV5ZPHuXxtB2xb0IwHw0hZgAs4-choxx5oyHhdXpmM,9837
 camel/datasets/models.py,sha256=H0ksOfkwiPFjVr9xHMYbVoj8YTTWaLI2GYiWqesmiVs,2228
-camel/datasets/static_dataset.py,sha256=
+camel/datasets/static_dataset.py,sha256=GrszkO6-gKZV8ljIN4k5Y4A1TT8_lB7hJ7SbEEi4zkw,14243
 camel/embeddings/__init__.py,sha256=YKCFO_YVY-x4A4uWmRuoIEtltrilBmC17DkCcK4zSj8,1263
 camel/embeddings/base.py,sha256=mxqFkWh2AfbxuVKPOqVx16fCznmuSh9QXGjaEeZHvoY,2190
 camel/embeddings/jina_embedding.py,sha256=6aakojtsJ6KLp3nqYLhEOtoFm2shoXlRzxb1YYN_uwo,6623
@@ -160,12 +161,12 @@ camel/models/nemotron_model.py,sha256=jJrW8tpTlEJDT1FjflB9krhgEQhD5KBeLmyUIcZvWP
 camel/models/nvidia_model.py,sha256=lqp1iPwVDq6zSQ9B0SyBZ48Z3J5WbXwPshwlhj1ogZ8,6711
 camel/models/ollama_model.py,sha256=byJ0YbMlilEFRKJZIot-MPUcojwMHLIaBES0a1SURtg,10604
 camel/models/openai_audio_models.py,sha256=fYpxFvxT8p93KVb5BYODTuI5wdNXV9pu_bvxfARgVYk,13193
-camel/models/openai_compatible_model.py,sha256=
+camel/models/openai_compatible_model.py,sha256=fy9OSvkCM4YQhsFBBZ6D8lIiaHmCKu8_i26VlIdWwW0,8134
 camel/models/openai_model.py,sha256=CbfD9yVtAltyqdFpjnLXncFnmaGPDZq8JhJDaSfG0pc,10186
 camel/models/qwen_model.py,sha256=_LeeB0yrXRMI-gZOEEOHg0bWNOJpuQHf2G7u40--3r8,7064
 camel/models/reka_model.py,sha256=15DscZf3lbqsIzm6kzjzDrhblBt1_0xlphT4isuQMu0,10146
 camel/models/samba_model.py,sha256=t8b9TA1iVlOUizYSn5NDw4RZWjURqsyd4mkisDXef_s,22558
-camel/models/sglang_model.py,sha256=
+camel/models/sglang_model.py,sha256=6w7lW86EM50g23c5s7da93j6R_eaPGwSsS0s362bOb0,13898
 camel/models/siliconflow_model.py,sha256=c5vk4zAhZVf8pDF1uh-iSa_v8d0QoPLuIN27EemdMGE,5659
 camel/models/stub_model.py,sha256=dygYoxemnWWaxEX21L8QyKe-c75ti2CK9HnTuyHL5vs,5160
 camel/models/togetherai_model.py,sha256=-YwZV1S1bkrX8jGguQI5dbtIHVuqhv96MoAcl33ptPo,6657
@@ -271,16 +272,16 @@ camel/toolkits/dalle_toolkit.py,sha256=Usmw3JiJErLQgWSB1qKq_bOACNwbUTQPFc_EsVzTr
 camel/toolkits/dappier_toolkit.py,sha256=_69IAmXE2QSbwGxnSEycaV2XrrkiM5wKI6heM7-4MfU,8175
 camel/toolkits/data_commons_toolkit.py,sha256=VmDipqHabDdYVCzhuoaPE832i76yXt-uom7p5ObH1w0,14121
 camel/toolkits/excel_toolkit.py,sha256=7ihj4vAmbWA1RFNQb0b5h86HY0cFYLlgX5h6laGCM-E,5908
-camel/toolkits/file_write_toolkit.py,sha256
+camel/toolkits/file_write_toolkit.py,sha256=tz42coCt05WidhzxkMKjjvsoFPV2GQ1bCGqOGUtBRnY,14304
 camel/toolkits/function_tool.py,sha256=9I-7HHGf5TzdQDJce9xyz1tfrGZr5Os5iAopMK4p0XA,30325
-camel/toolkits/github_toolkit.py,sha256=
+camel/toolkits/github_toolkit.py,sha256=5trpfbztCgTVFI6UTWGR2ZnhrE8PKPAb3gIaLgMCrTs,12165
 camel/toolkits/google_maps_toolkit.py,sha256=WTnkURpGri9KcY5OwV7AJJHOzmpu5RNmYE1QCVqvwWM,12023
 camel/toolkits/google_scholar_toolkit.py,sha256=pRFr-GZeGaYARuzbEhg3aDKyzWwAfj02YVp1Y5WOGTQ,7515
 camel/toolkits/human_toolkit.py,sha256=9CjB1flGXIx7mzkIliDjcwXATUvZNdrRCKWyEgR9EJc,1791
 camel/toolkits/image_analysis_toolkit.py,sha256=dpvT8n49s8B8AhJ8aFdy4OONb8E8r_Cwxpx-ByFruy8,7209
 camel/toolkits/linkedin_toolkit.py,sha256=5ZSMG01RXjibJ2CtB1vLlQ4B-rv4sqf_2cUZC78WTE8,8041
 camel/toolkits/math_toolkit.py,sha256=5yVF0bKuwkZIV01uICd3TOfktXlTERjKt4DrFyz_oaE,3639
-camel/toolkits/mcp_toolkit.py,sha256=
+camel/toolkits/mcp_toolkit.py,sha256=j4twcLhZiEQCAEH0N3eQ_RLqDd59ObH93gyZMes3c84,17787
 camel/toolkits/meshy_toolkit.py,sha256=Fd6sQV2JtduxyvHxCBA0_zl2OCgJRAlvDEe58hX8gRg,6463
 camel/toolkits/mineru_toolkit.py,sha256=vRX9LholLNkpbJ6axfEN4pTG85aWb0PDmlVy3rAAXhg,6868
 camel/toolkits/networkx_toolkit.py,sha256=Zdnk5zmM_xzyoQ0qH0YRu8HY1Y0Uojg69sg1VVBvPcQ,8523
@@ -329,7 +330,7 @@ camel/toolkits/open_api_specs/web_scraper/openapi.yaml,sha256=u_WalQ01e8W1D27VnZ
 camel/toolkits/open_api_specs/web_scraper/paths/__init__.py,sha256=OKCZrQCDwaWtXIN_2rA9FSqEvgpQRieRoHh7Ek6N16A,702
 camel/toolkits/open_api_specs/web_scraper/paths/scraper.py,sha256=aWy1_ppV4NVVEZfnbN3tu9XA9yAPAC9bRStJ5JuXMRU,1117
 camel/types/__init__.py,sha256=VLWhAt857IFct3XepY5BNOIhyhDhfmODTezr9jhO_TI,2251
-camel/types/enums.py,sha256=
+camel/types/enums.py,sha256=ZZjClKeJB-ggpAmyyus713mks1C6aSxgx9hLwbULQ_8,35045
 camel/types/openai_types.py,sha256=8ZFzLe-zGmKNPfuVZFzxlxAX98lGf18gtrPhOgMmzus,2104
 camel/types/unified_model_type.py,sha256=GP5GYtA3RfvLsqnk1c4UcOaRKMFhjDgZrLr0ln6JFw8,4253
 camel/types/agents/__init__.py,sha256=cbvVkogPoZgcwZrgxLH6EtpGXk0kavF79nOic0Dc1vg,786
@@ -345,7 +346,7 @@ camel/verifiers/__init__.py,sha256=p6UEyvaOlwUQaFACGB4C07fL1xSnpTouElt19YRuneQ,9
 camel/verifiers/base.py,sha256=efWZV9g58IHzJ24U4zr109y34CaAi8tV9WZPMCzP3YI,12017
 camel/verifiers/models.py,sha256=hC6m_YxEX-mqi_tkCNZHZWLBWf04ZTyv5vfKR-BEyU4,2818
 camel/verifiers/python_verifier.py,sha256=bj-UGxeJTZzxVVa3a8IEQ1lNOpSaaW3JdGnUEoPeQD0,7519
-camel_ai-0.2.
-camel_ai-0.2.
-camel_ai-0.2.
-camel_ai-0.2.
+camel_ai-0.2.34.dist-info/METADATA,sha256=weUlWcKtcLp4-K7qbg4mnibSh-ZlgvsnYPF6euq0clI,37943
+camel_ai-0.2.34.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+camel_ai-0.2.34.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
+camel_ai-0.2.34.dist-info/RECORD,,
{camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/WHEEL
File without changes

{camel_ai-0.2.31.dist-info → camel_ai-0.2.34.dist-info}/licenses/LICENSE
File without changes
|