camel-ai 0.2.38__py3-none-any.whl → 0.2.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +4 -0
- camel/agents/repo_agent.py +2 -2
- camel/benchmarks/apibank.py +1 -1
- camel/benchmarks/apibench.py +1 -1
- camel/configs/__init__.py +3 -0
- camel/configs/modelscope_config.py +59 -0
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/__init__.py +2 -0
- camel/datasets/base_generator.py +22 -9
- camel/datasets/few_shot_generator.py +2 -3
- camel/datasets/self_instruct_generator.py +415 -0
- camel/embeddings/openai_compatible_embedding.py +13 -5
- camel/environments/models.py +1 -1
- camel/environments/single_step.py +155 -89
- camel/interpreters/docker_interpreter.py +1 -1
- camel/interpreters/internal_python_interpreter.py +1 -1
- camel/loaders/unstructured_io.py +2 -1
- camel/memories/blocks/chat_history_block.py +1 -1
- camel/memories/context_creators/score_based.py +2 -2
- camel/models/__init__.py +2 -0
- camel/models/model_factory.py +119 -0
- camel/models/modelscope_model.py +208 -0
- camel/models/openai_audio_models.py +2 -2
- camel/models/openai_model.py +49 -2
- camel/models/togetherai_model.py +2 -2
- camel/models/vllm_model.py +1 -1
- camel/models/zhipuai_model.py +2 -2
- camel/retrievers/vector_retriever.py +1 -1
- camel/storages/graph_storages/neo4j_graph.py +1 -1
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/qdrant.py +2 -2
- camel/tasks/task.py +2 -2
- camel/toolkits/__init__.py +4 -1
- camel/toolkits/arxiv_toolkit.py +2 -1
- camel/toolkits/ask_news_toolkit.py +11 -3
- camel/toolkits/audio_analysis_toolkit.py +2 -0
- camel/toolkits/base.py +3 -0
- camel/toolkits/code_execution.py +3 -1
- camel/toolkits/dappier_toolkit.py +2 -1
- camel/toolkits/data_commons_toolkit.py +2 -0
- camel/toolkits/excel_toolkit.py +2 -0
- camel/toolkits/file_write_toolkit.py +2 -0
- camel/toolkits/github_toolkit.py +6 -4
- camel/toolkits/google_scholar_toolkit.py +2 -0
- camel/toolkits/human_toolkit.py +17 -1
- camel/toolkits/image_analysis_toolkit.py +2 -0
- camel/toolkits/linkedin_toolkit.py +2 -1
- camel/toolkits/math_toolkit.py +2 -0
- camel/toolkits/mcp_toolkit.py +42 -52
- camel/toolkits/meshy_toolkit.py +20 -2
- camel/toolkits/networkx_toolkit.py +2 -0
- camel/toolkits/notion_toolkit.py +7 -0
- camel/toolkits/openbb_toolkit.py +2 -1
- camel/toolkits/pubmed_toolkit.py +2 -0
- camel/toolkits/reddit_toolkit.py +2 -1
- camel/toolkits/retrieval_toolkit.py +2 -1
- camel/toolkits/search_toolkit.py +2 -1
- camel/toolkits/semantic_scholar_toolkit.py +2 -0
- camel/toolkits/slack_toolkit.py +2 -0
- camel/toolkits/stripe_toolkit.py +2 -1
- camel/toolkits/sympy_toolkit.py +2 -0
- camel/toolkits/terminal_toolkit.py +2 -0
- camel/toolkits/twitter_toolkit.py +2 -1
- camel/toolkits/video_analysis_toolkit.py +2 -1
- camel/toolkits/video_download_toolkit.py +2 -1
- camel/toolkits/weather_toolkit.py +2 -0
- camel/toolkits/whatsapp_toolkit.py +2 -1
- camel/toolkits/zapier_toolkit.py +2 -1
- camel/types/enums.py +65 -0
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +2 -0
- camel/utils/chunker/code_chunker.py +10 -10
- camel/utils/chunker/uio_chunker.py +4 -3
- camel/utils/commons.py +50 -30
- camel/utils/constants.py +2 -2
- camel/utils/mcp.py +79 -0
- camel/verifiers/__init__.py +2 -0
- camel/verifiers/base.py +15 -15
- camel/verifiers/math_verifier.py +182 -0
- camel/verifiers/python_verifier.py +18 -26
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.40.dist-info}/METADATA +3 -1
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.40.dist-info}/RECORD +86 -81
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.40.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.38.dist-info → camel_ai-0.2.40.dist-info}/licenses/LICENSE +0 -0
camel/embeddings/openai_compatible_embedding.py
CHANGED

@@ -30,6 +30,9 @@ class OpenAICompatibleEmbedding(BaseEmbedding[str]):
         model_type (str): The model type to be used for text embeddings.
         api_key (str): The API key for authenticating with the model service.
         url (str): The url to the model service.
+        output_dim (Optional[int]): The dimensionality of the embedding
+            vectors. If None, it will be determined during the first
+            embedding call.
     """

     @api_keys_required(
@@ -43,9 +46,10 @@ class OpenAICompatibleEmbedding(BaseEmbedding[str]):
         model_type: str,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        output_dim: Optional[int] = None,
     ) -> None:
         self.model_type = model_type
-        self.output_dim: Optional[int] =
+        self.output_dim: Optional[int] = output_dim

         self._api_key = api_key or os.environ.get(
             "OPENAI_COMPATIBILITY_API_KEY"
@@ -87,10 +91,14 @@ class OpenAICompatibleEmbedding(BaseEmbedding[str]):

         Returns:
             int: The dimensionality of the embedding for the current model.
+
+        Raises:
+            ValueError: If the embedding dimension cannot be determined.
         """
         if self.output_dim is None:
-
-
-
-            )
+            self.embed_list(["test"])
+
+            if self.output_dim is None:
+                raise ValueError("Failed to determine embedding dimension")
+
         return self.output_dim
camel/environments/models.py
CHANGED

@@ -33,7 +33,7 @@ class Action(BaseModel):
         generated (UTC).
     """

-    index:
+    index: int = Field(default=0, description="...")

     llm_response: str = Field(description="Generated response from the LLM")
     metadata: Dict[str, Any] = Field(
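Because `index` now defaults to `0`, single-state callers can build an `Action` from just the model output. A small sketch, assuming `Action` is importable from `camel.environments.models`:

from camel.environments.models import Action

# index defaults to 0, so this is sufficient for batch_size == 1.
action = Action(llm_response="The answer is 42.")
assert action.index == 0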
camel/environments/single_step.py
CHANGED

@@ -19,6 +19,7 @@ from camel.datasets import BaseGenerator, DataPoint, StaticDataset
 from camel.logger import get_logger
 from camel.verifiers.base import (
     BaseVerifier,
+    VerificationOutcome,
     VerificationResult,
 )

@@ -51,7 +52,7 @@ class SingleStepEnv:
         question="Episode ended. This is just a placeholder."
     )

-    ACCURACY_REWARD =
+    ACCURACY_REWARD = 1

     def __init__(
         self,
@@ -206,37 +207,66 @@ class SingleStepEnv:
             return observations[0] if batch_size == 1 else observations

         elif isinstance(self.dataset, BaseGenerator):
-
-
-
+            self._states = [
+                await self.dataset.async_sample() for _ in range(batch_size)
+            ]
+            self.current_batch_size = batch_size
+            self._states_done = [False] * batch_size
+
+            observations = [
+                Observation(question=sample.question, context={}, metadata={})
+                for sample in self._states
+            ]
+
+            return observations[0] if batch_size == 1 else observations

         else:
             raise TypeError(f"Unsupported dataset type: {type(self.dataset)}")

     async def step(
-        self, action: Union[Action, List[Action]]
+        self, action: Union[Action, List[Action], str]
     ) -> Union[
         Tuple[Observation, float, bool, Dict[str, Any]],
         List[Tuple[Observation, float, bool, Dict[str, Any]]],
     ]:
-        r"""
-
+        r"""Execute one interaction step in the environment using the
+        proposed solution.
+
+        This method processes the agent's response(s) to the current
+        observation(s), verifies the correctness of the responses using
+        the verifier, computes rewards, and returns the resulting
+        state transition(s).
+
+        The environment is strictly single-step. Once an action is
+        submitted for a state, that state is marked as done, and
+        the observation will not change.

         Args:
-            action
-
-
-
+            action (Union[Action, List[Action], str]):
+                The action(s) taken by the agent,
+                which should contain the response(s)
+                to the observation(s). Can be:
+                - A single `Action` object (for batch size 1),
+                - A list of `Action` objects (for batched evaluation),
+                - A raw string (only allowed when batch size is 1).

         Returns:
-            Union[
-
+            Union[Tuple[Observation, float, bool, Dict[str, Any]], List[...]]:
+                A tuple or list of tuples containing:
+                - `Observation`: Placeholder indicating episode end.
+                - `float`: The reward for the response.
+                - `bool`: Whether the episode is done
+                  (always `True` in this case).
+                - `dict`: Additional info including the proposed solution,
+                  verification result, and original data point.

         Raises:
-            RuntimeError: If environment
-
-
+            RuntimeError: If the environment has not been set up,
+                or if `reset()` has not been called.
+            ValueError: If invalid action format, duplicate indices,
+                or out-of-bounds indices are detected.
         """
+
         if not self._is_setup:
             raise RuntimeError("Environment not set up. Call setup() first.")
         if self._batch_done():
@@ -246,64 +276,10 @@ class SingleStepEnv:
         if not self._states:
             raise RuntimeError("No current observation. Call reset() first.")

-
-
-
-            if len(action) != 1 or not isinstance(action[0], Action):
-                raise ValueError(
-                    "For batch_size=1, expect a single Action or a "
-                    "list containing exactly one Action"
-                )
-            elif not isinstance(action, Action):
-                raise ValueError(
-                    "For batch_size=1, expect a single Action or a "
-                    "list containing exactly one Action"
-                )
-            if isinstance(action, Action):
-                actions = [action]
-            else:
-                actions = action
-            if actions[0].index is None:
-                actions[0].index = 0
-            if actions[0].index != 0:
-                raise ValueError("For batch_size=1, index must be None or 0")
-
-        else:  # batch_size >= 2
-            if isinstance(action, Action):
-                if action.index is None:
-                    raise ValueError(
-                        "For batch_size>=2, each Action must have an index"
-                    )
-                if not isinstance(action.index, int):
-                    raise ValueError("Index must be an integer")
-                actions = [action]
-            elif isinstance(action, list):
-                if not action:  # Empty list
-                    raise ValueError("Action list cannot be empty")
-                actions = action
-                for act in actions:
-                    if not isinstance(act, Action):
-                        raise ValueError(
-                            "All elements in list must be Action objects"
-                        )
-                    if act.index is None:
-                        raise ValueError(
-                            "For batch_size>=2, each Action must have an index"
-                        )
-                    if not isinstance(act.index, int):
-                        raise ValueError("Index must be an integer")
-            else:
-                raise ValueError(
-                    "For batch_size>=2, expect an Action or list of Actions"
-                )
+        actions = self._normalize_actions(action)
+
+        indices = [a.index for a in actions]

-        # Validate indices
-        indices: List[int] = []
-        for act in actions:
-            assert act.index is not None
-            indices.append(act.index)
-        if len(set(indices)) != len(indices):
-            raise ValueError("Duplicate state indices in actions.")
         for idx in indices:
             if idx < 0 or idx >= len(self._states):
                 raise ValueError(f"Invalid state index {idx}.")
@@ -322,22 +298,30 @@ class SingleStepEnv:
         for idx in indices:
             ground_truths.append(self._states[idx].final_answer)

-
-
-
-
-
+        try:
+            verification_results = await self.verifier.verify_batch(
+                solutions=proposed_solutions,
+                reference_answers=ground_truths,  # type: ignore [arg-type]
+                raise_on_error=True,
+            )
+        except Exception as e:
+            logger.error(f"Verification failed: {e}")
+            # Return failed verification results with status=FAILURE
+            verification_results = [
+                VerificationResult(
+                    result="",
+                    status=VerificationOutcome.FAILURE,
+                    error_message=f"Verification error: {e}",
+                )
+                for _ in range(len(proposed_solutions))
+            ]

         total_rewards, rewards_dicts = await self._compute_reward_batch(
             proposed_solutions, verification_results
         )
-
-
-
-        for i, action in enumerate(actions):
-            assert action.index is not None
-            idx = action.index
-            step_result = StepResult(
+        # Create and return step results in batch
+        step_results = [
+            StepResult(
                 observation=self.PLACEHOLDER_OBS,
                 reward=total_rewards[i],
                 rewards_dict=rewards_dicts[i],
@@ -345,14 +329,96 @@ class SingleStepEnv:
                 info={
                     "proposed_solution": proposed_solutions[i],
                     "verification_result": verification_results[i],
-                    "state": self._states[
+                    "state": self._states[indices[i]],
                 },
-            )
-
+            ).as_tuple()
+            for i in range(len(actions))
+        ]
+        for _, idx in enumerate(indices):
             self._states_done[idx] = True

         return step_results[0] if len(step_results) == 1 else step_results

+    def _normalize_actions(
+        self, action: Union[Action, List[Action], str]
+    ) -> List[Action]:
+        r"""Normalize the user-provided action(s) into a validated list
+        of `Action` objects.
+
+        This method handles flexibility in input format by converting
+        raw strings (only allowed when batch size is 1) and ensuring
+        all necessary structure and integrity checks on actions
+        (e.g., index bounds, duplicates).
+
+        Args:
+            action (Union[Action, List[Action], str]):
+                The raw input action(s) provided by the agent. Can be:
+                - A single `Action` object.
+                - A list of `Action` objects.
+                - A raw string (if `batch_size == 1`), auto-wrapped
+                  in an `Action`.
+
+        Returns:
+            List[Action]: A list of validated `Action` instances
+                ready for evaluation.
+
+        Raises:
+            ValueError: If:
+                - Action indices are invalid or duplicated,
+                - Action list is empty,
+                - Index mismatches expected values
+                  (e.g., 0 for batch size 1),
+                - Wrong structure is used
+                  (e.g., string used with batch size > 1).
+            TypeError: If the action is of an unsupported type.
+        """
+
+        if isinstance(action, str):
+            if self.current_batch_size != 1:
+                raise ValueError(
+                    "String input for action is only allowed"
+                    " when batch_size == 1"
+                )
+            logger.warning("Auto-converting from str to Action", stacklevel=2)
+            action = Action(index=0, llm_response=action)
+
+        if isinstance(action, Action):
+            actions = [action]
+        elif isinstance(action, list):
+            if not action:
+                raise ValueError("Action list cannot be empty")
+            if not all(isinstance(a, Action) for a in action):
+                raise ValueError(
+                    "All elements in the list must be Action objects"
+                )
+            actions = action
+        else:
+            raise TypeError("Action must be a str, Action, or list of Actions")
+
+        if self.current_batch_size == 1 and len(actions) != 1:
+            raise ValueError(
+                "For batch_size=1, expect a single Action or a "
+                "list containing exactly one Action"
+            )
+
+        # Validate indices
+        for a in actions:
+            if not isinstance(a.index, int):
+                raise ValueError(
+                    f"Action index must be an integer, got {a.index}"
+                )
+            if self.current_batch_size == 1:
+                if a.index != 0:
+                    raise ValueError(
+                        "For batch_size=1, Action index must be 0"
+                    )
+
+        indices = [a.index for a in actions]
+        if len(set(indices)) != len(indices):
+            raise ValueError("Duplicate state indices in actions.")
+
+        return actions
+
     async def _compute_reward_batch(
         self,
         proposed_solutions: List[str],
@@ -426,7 +492,7 @@ class SingleStepEnv:
         return all(self._states_done)

     def _batch_started(self) -> bool:
-        r"""Check if
+        r"""Check if the batch processing has started.

         Returns:
             bool: True if at least one state is marked as done, False
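Taken together, the `step()` changes mean a bare string is now accepted as an action when the batch size is 1; it is logged and auto-wrapped into an `Action` with index 0 by `_normalize_actions()`. A rough usage sketch (dataset and verifier construction omitted; `reset()`'s exact signature is not shown in this diff):

# Assuming `env` is an already-constructed SingleStepEnv.
await env.setup()
obs = await env.reset()  # returns a single Observation when batch_size == 1

# Previously an Action object was required; a raw string now works for batch_size == 1.
next_obs, reward, done, info = await env.step("The final answer is 7.")
assert done  # single-step: the state is marked done after one action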
camel/interpreters/docker_interpreter.py
CHANGED

@@ -185,7 +185,7 @@ class DockerInterpreter(BaseInterpreter):
         code: str,
         code_type: str,
     ) -> str:
-        r"""Executes the given code in the
+        r"""Executes the given code in the container attached to the
         interpreter, and captures the stdout and stderr streams.

         Args:
camel/interpreters/internal_python_interpreter.py
CHANGED

@@ -421,7 +421,7 @@ class InternalPythonInterpreter(BaseInterpreter):
         result = None
         if not isinstance(if_statement.test, ast.Compare):
             raise InterpreterError(
-                "Only
+                "Only Compare expr supported in if statement, get"
                 f" {if_statement.test.__class__.__name__}"
             )
         if self._execute_condition(if_statement.test):
camel/loaders/unstructured_io.py
CHANGED

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import traceback
 import uuid
 import warnings
 from typing import (
@@ -151,7 +152,7 @@ class UnstructuredIO:
             elements = partition(file=f, **kwargs)
             return elements
         except Exception:
-            warnings.warn(
+            warnings.warn(traceback.format_exc())
             return None

     @staticmethod
camel/memories/blocks/chat_history_block.py
CHANGED

@@ -38,7 +38,7 @@ class ChatHistoryBlock(MemoryBlock):
         keep_rate (float, optional): In historical messages, the score of the
             last message is 1.0, and with each step taken backward, the score
             of the message is multiplied by the `keep_rate`. Higher `keep_rate`
-            leads to high
+            leads to high possibility to keep history messages during context
             creation.
     """
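For intuition on `keep_rate`: scores decay geometrically going backwards through history, so a message k steps before the last one is scored keep_rate ** k. A quick illustration:

# Illustrative only: scores of historical messages under keep_rate = 0.9.
keep_rate = 0.9
scores = [keep_rate**k for k in range(4)]  # most recent message first
# -> [1.0, 0.9, 0.81, 0.729] (approximately)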
camel/memories/context_creators/score_based.py
CHANGED

@@ -168,8 +168,8 @@ class ScoreBasedContextCreator(BaseContextCreator):
         # 6. Output Assembly
         # ======================

-        #
-        # are empty, raise an error
+        # In case system message is the only message in memory when sorted
+        # units are empty, raise an error
         if system_unit and len(remaining_units) == 0 and len(records) > 1:
             raise RuntimeError(
                 "System message and current message exceeds token limit ",
camel/models/__init__.py
CHANGED

@@ -26,6 +26,7 @@ from .litellm_model import LiteLLMModel
 from .mistral_model import MistralModel
 from .model_factory import ModelFactory
 from .model_manager import ModelManager, ModelProcessingError
+from .modelscope_model import ModelScopeModel
 from .moonshot_model import MoonshotModel
 from .nemotron_model import NemotronModel
 from .nvidia_model import NvidiaModel
@@ -77,6 +78,7 @@ __all__ = [
     'DeepSeekModel',
     'FishAudioModel',
     'InternLMModel',
+    'ModelScopeModel',
     'MoonshotModel',
     'AIMLModel',
     'BaseAudioModel',
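With `ModelScopeModel` exported and wired into `ModelFactory`, a ModelScope-served model can be created like any other backend. A hedged sketch: the `MODELSCOPE` platform member is assumed to be among the enum values added to `camel/types/enums.py` in this release, and the model id is illustrative only:

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Platform member and model id are assumptions for illustration.
model = ModelFactory.create(
    model_platform=ModelPlatformType.MODELSCOPE,
    model_type="Qwen/Qwen2.5-72B-Instruct",
    model_config_dict={"temperature": 0.0},
)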
camel/models/model_factory.py
CHANGED

@@ -11,8 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import json
 from typing import Dict, Optional, Type, Union

+import yaml
+
 from camel.models.aiml_model import AIMLModel
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.azure_openai_model import AzureOpenAIModel
@@ -24,6 +27,7 @@ from camel.models.groq_model import GroqModel
 from camel.models.internlm_model import InternLMModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.mistral_model import MistralModel
+from camel.models.modelscope_model import ModelScopeModel
 from camel.models.moonshot_model import MoonshotModel
 from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
@@ -145,6 +149,8 @@ class ModelFactory:
             model_class = InternLMModel
         elif model_platform.is_moonshot and model_type.is_moonshot:
             model_class = MoonshotModel
+        elif model_platform.is_modelscope:
+            model_class = ModelScopeModel
         elif model_type == ModelType.STUB:
             model_class = StubModel

@@ -162,3 +168,116 @@ class ModelFactory:
             token_counter=token_counter,
             timeout=timeout,
         )
+
+    @classmethod
+    def __parse_model_platform(
+        cls, model_platform_str: str
+    ) -> ModelPlatformType:
+        r"""Parses a string and returns the corresponding ModelPlatformType
+        enum.
+
+        Args:
+            model_platform_str (str): The platform name as a string. Can be in
+                the form "ModelPlatformType.<NAME>" or simply "<NAME>".
+
+        Returns:
+            ModelPlatformType: The matching enum value.
+
+        Raises:
+            ValueError: If the platform name is not a valid member of
+                ModelPlatformType.
+        """
+
+        try:
+            if model_platform_str.startswith("ModelPlatformType."):
+                platform_name = model_platform_str.split('.')[-1]
+            else:
+                platform_name = model_platform_str.upper()
+
+            if platform_name not in ModelPlatformType.__members__:
+                raise ValueError(
+                    f"Invalid model platform: {platform_name}. "
+                    f"Valid options: "
+                    f"{', '.join(ModelPlatformType.__members__.keys())}"
+                )
+
+            return ModelPlatformType[platform_name]
+
+        except KeyError:
+            raise KeyError(f"Invalid model platform: {model_platform_str}")
+
+    @classmethod
+    def __load_yaml(cls, filepath: str) -> Dict:
+        r"""Loads and parses a YAML file into a dictionary.
+
+        Args:
+            filepath (str): Path to the YAML configuration file.
+
+        Returns:
+            Dict: The parsed YAML content as a dictionary.
+        """
+        with open(filepath, 'r') as file:
+            config = yaml.safe_load(file)
+
+        return config
+
+    @classmethod
+    def __load_json(cls, filepath: str) -> Dict:
+        r"""Loads and parses a JSON file into a dictionary.
+
+        Args:
+            filepath (str): Path to the JSON configuration file.
+
+        Returns:
+            Dict: The parsed JSON content as a dictionary.
+        """
+        with open(filepath, 'r') as file:
+            config = json.load(file)
+
+        return config
+
+    @classmethod
+    def create_from_yaml(cls, filepath: str) -> BaseModelBackend:
+        r"""Creates and returns a model base backend instance
+        from a YAML configuration file.
+
+        Args:
+            filepath (str): Path to the YAML file containing model
+                configuration.
+
+        Returns:
+            BaseModelBackend: An instance of the model backend based on the
+                configuration.
+        """
+
+        config = cls.__load_yaml(filepath)
+        config["model_platform"] = cls.__parse_model_platform(
+            config["model_platform"]
+        )
+
+        model = ModelFactory.create(**config)
+
+        return model
+
+    @classmethod
+    def create_from_json(cls, filepath: str) -> BaseModelBackend:
+        r"""Creates and returns a base model backend instance
+        from a JSON configuration file.
+
+        Args:
+            filepath (str): Path to the JSON file containing model
+                configuration.
+
+        Returns:
+            BaseModelBackend: An instance of the model backend based on the
+                configuration.
+        """
+
+        config = cls.__load_json(filepath)
+        config["model_platform"] = cls.__parse_model_platform(
+            config["model_platform"]
+        )
+
+        model = ModelFactory.create(**config)
+
+        return model