camel-ai 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (76)
  1. camel/__init__.py +6 -1
  2. camel/agents/chat_agent.py +87 -6
  3. camel/agents/deductive_reasoner_agent.py +4 -1
  4. camel/benchmarks/__init__.py +18 -0
  5. camel/benchmarks/base.py +152 -0
  6. camel/benchmarks/gaia.py +478 -0
  7. camel/configs/__init__.py +6 -0
  8. camel/configs/mistral_config.py +0 -3
  9. camel/configs/nvidia_config.py +70 -0
  10. camel/configs/ollama_config.py +4 -2
  11. camel/configs/sglang_config.py +71 -0
  12. camel/configs/vllm_config.py +10 -1
  13. camel/data_collector/__init__.py +19 -0
  14. camel/data_collector/alpaca_collector.py +127 -0
  15. camel/data_collector/base.py +211 -0
  16. camel/data_collector/sharegpt_collector.py +205 -0
  17. camel/datahubs/__init__.py +23 -0
  18. camel/datahubs/base.py +136 -0
  19. camel/datahubs/huggingface.py +433 -0
  20. camel/datahubs/models.py +22 -0
  21. camel/embeddings/vlm_embedding.py +4 -1
  22. camel/interpreters/__init__.py +2 -0
  23. camel/interpreters/docker_interpreter.py +7 -2
  24. camel/interpreters/e2b_interpreter.py +136 -0
  25. camel/interpreters/subprocess_interpreter.py +7 -2
  26. camel/loaders/__init__.py +3 -1
  27. camel/loaders/base_io.py +41 -41
  28. camel/loaders/firecrawl_reader.py +0 -3
  29. camel/logger.py +112 -0
  30. camel/messages/__init__.py +3 -1
  31. camel/messages/base.py +10 -7
  32. camel/messages/conversion/__init__.py +3 -1
  33. camel/messages/conversion/alpaca.py +122 -0
  34. camel/models/__init__.py +7 -0
  35. camel/models/anthropic_model.py +14 -4
  36. camel/models/base_model.py +28 -0
  37. camel/models/groq_model.py +1 -1
  38. camel/models/model_factory.py +6 -0
  39. camel/models/model_manager.py +212 -0
  40. camel/models/nvidia_model.py +141 -0
  41. camel/models/ollama_model.py +12 -0
  42. camel/models/openai_model.py +0 -25
  43. camel/models/reward/__init__.py +22 -0
  44. camel/models/reward/base_reward_model.py +58 -0
  45. camel/models/reward/evaluator.py +63 -0
  46. camel/models/reward/nemotron_model.py +112 -0
  47. camel/models/sglang_model.py +225 -0
  48. camel/models/vllm_model.py +1 -1
  49. camel/personas/persona_hub.py +2 -2
  50. camel/retrievers/vector_retriever.py +22 -5
  51. camel/schemas/openai_converter.py +2 -2
  52. camel/societies/babyagi_playing.py +4 -1
  53. camel/societies/workforce/role_playing_worker.py +2 -2
  54. camel/societies/workforce/single_agent_worker.py +2 -2
  55. camel/societies/workforce/workforce.py +3 -3
  56. camel/storages/object_storages/amazon_s3.py +2 -2
  57. camel/storages/object_storages/azure_blob.py +2 -2
  58. camel/storages/object_storages/google_cloud.py +2 -2
  59. camel/toolkits/__init__.py +5 -0
  60. camel/toolkits/code_execution.py +42 -4
  61. camel/toolkits/function_tool.py +41 -0
  62. camel/toolkits/human_toolkit.py +1 -0
  63. camel/toolkits/math_toolkit.py +47 -16
  64. camel/toolkits/meshy_toolkit.py +185 -0
  65. camel/toolkits/search_toolkit.py +154 -2
  66. camel/toolkits/stripe_toolkit.py +273 -0
  67. camel/toolkits/twitter_toolkit.py +3 -0
  68. camel/types/__init__.py +2 -0
  69. camel/types/enums.py +68 -10
  70. camel/utils/commons.py +22 -5
  71. camel/utils/token_counting.py +26 -11
  72. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/METADATA +13 -6
  73. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/RECORD +76 -51
  74. /camel/messages/conversion/{models.py → conversation_models.py} +0 -0
  75. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/LICENSE +0 -0
  76. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/WHEEL +0 -0
camel/datahubs/base.py ADDED
@@ -0,0 +1,136 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ from abc import ABC, abstractmethod
+ from typing import Any, List
+
+ from camel.datahubs.models import Record
+
+
+ class BaseDatasetManager(ABC):
+     r"""Abstract base class for dataset managers."""
+
+     @abstractmethod
+     def create_dataset(self, name: str, **kwargs: Any) -> str:
+         r"""Creates a new dataset.
+
+         Args:
+             name (str): The name of the dataset.
+             kwargs (Any): Additional keyword arguments.
+
+         Returns:
+             str: The URL of the created dataset.
+         """
+         pass
+
+     @abstractmethod
+     def list_datasets(
+         self, username: str, limit: int = 100, **kwargs: Any
+     ) -> List[str]:
+         r"""Lists all datasets for the current user.
+
+         Args:
+             username (str): The username of the user whose datasets to list.
+             limit (int): The maximum number of datasets to list.
+                 (default: :obj:`100`)
+             kwargs (Any): Additional keyword arguments.
+
+         Returns:
+             List[str]: A list of dataset ids.
+         """
+         pass
+
+     @abstractmethod
+     def delete_dataset(self, dataset_name: str, **kwargs: Any) -> None:
+         r"""Deletes a dataset.
+
+         Args:
+             dataset_name (str): The name of the dataset to delete.
+             kwargs (Any): Additional keyword arguments.
+         """
+         pass
+
+     @abstractmethod
+     def add_records(
+         self,
+         dataset_name: str,
+         records: List[Record],
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> None:
+         r"""Adds records to a dataset.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             records (List[Record]): A list of records to add to the dataset.
+             filepath (str): The path to the file containing the records.
+                 (default: :obj:`"records/records.json"`)
+             kwargs (Any): Additional keyword arguments.
+         """
+         pass
+
+     @abstractmethod
+     def update_records(
+         self,
+         dataset_name: str,
+         records: List[Record],
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> None:
+         r"""Updates records in a dataset.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             records (List[Record]): A list of records to update in the
+                 dataset.
+             filepath (str): The path to the file containing the records.
+                 (default: :obj:`"records/records.json"`)
+             kwargs (Any): Additional keyword arguments.
+         """
+         pass
+
+     @abstractmethod
+     def list_records(
+         self,
+         dataset_name: str,
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> List[Record]:
+         r"""Lists records in a dataset.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             filepath (str): The path to the file containing the records.
+                 (default: :obj:`"records/records.json"`)
+             kwargs (Any): Additional keyword arguments.
+         """
+         pass
+
+     # New method for record deletion
+     @abstractmethod
+     def delete_record(
+         self,
+         dataset_name: str,
+         record_id: str,
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> None:
+         r"""Deletes a record from the dataset.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             record_id (str): The ID of the record to delete.
+             filepath (str): The path to the file containing the records.
+                 (default: :obj:`"records/records.json"`)
+             kwargs (Any): Additional keyword arguments.
+         """
+         pass
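
To make the abstract contract above concrete, here is a minimal in-memory implementation sketched against these exact signatures. It is illustrative only and not part of the package; the class name InMemoryDatasetManager and the memory:// URL scheme are hypothetical.

# Illustrative sketch only: a toy in-memory manager, not shipped with camel-ai.
from typing import Any, Dict, List

from camel.datahubs.base import BaseDatasetManager
from camel.datahubs.models import Record


class InMemoryDatasetManager(BaseDatasetManager):
    def __init__(self) -> None:
        # Maps dataset name -> list of records.
        self._datasets: Dict[str, List[Record]] = {}

    def create_dataset(self, name: str, **kwargs: Any) -> str:
        self._datasets.setdefault(name, [])
        return f"memory://{name}"  # hypothetical URL scheme

    def list_datasets(self, username: str, limit: int = 100, **kwargs: Any) -> List[str]:
        return list(self._datasets)[:limit]

    def delete_dataset(self, dataset_name: str, **kwargs: Any) -> None:
        self._datasets.pop(dataset_name, None)

    def add_records(self, dataset_name: str, records: List[Record],
                    filepath: str = "records/records.json", **kwargs: Any) -> None:
        if self._datasets.get(dataset_name):
            raise ValueError("Records already exist; use `update_records`.")
        self._datasets[dataset_name] = list(records)

    def update_records(self, dataset_name: str, records: List[Record],
                       filepath: str = "records/records.json", **kwargs: Any) -> None:
        # Merge by record id, new records winning, mirroring the HF manager below.
        merged = {r.id: r for r in self._datasets.get(dataset_name, [])}
        merged.update({r.id: r for r in records})
        self._datasets[dataset_name] = list(merged.values())

    def list_records(self, dataset_name: str,
                     filepath: str = "records/records.json", **kwargs: Any) -> List[Record]:
        return list(self._datasets.get(dataset_name, []))

    def delete_record(self, dataset_name: str, record_id: str,
                      filepath: str = "records/records.json", **kwargs: Any) -> None:
        self._datasets[dataset_name] = [
            r for r in self._datasets.get(dataset_name, []) if r.id != record_id
        ]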
camel/datahubs/huggingface.py ADDED
@@ -0,0 +1,433 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import json
+ import os
+ import tempfile
+ from typing import Any, List, Optional
+
+ from camel.datahubs.base import BaseDatasetManager
+ from camel.datahubs.models import Record
+ from camel.logger import get_logger
+ from camel.types import HuggingFaceRepoType
+ from camel.utils import api_keys_required, dependencies_required
+
+ logger = get_logger(__name__)
+
+
+ class HuggingFaceDatasetManager(BaseDatasetManager):
+     r"""A dataset manager for Hugging Face datasets. This class provides
+     methods to create, add, update, delete, and list records in a dataset on
+     the Hugging Face Hub.
+
+     Args:
+         token (str): The Hugging Face API token. If not provided, the token
+             will be read from the environment variable `HUGGING_FACE_TOKEN`.
+     """
+
+     @api_keys_required("HUGGING_FACE_TOKEN")
+     @dependencies_required('huggingface_hub')
+     def __init__(self, token: Optional[str] = None):
+         from huggingface_hub import HfApi
+
+         self._api_key = token or os.getenv("HUGGING_FACE_TOKEN")
+         self.api = HfApi(token=self._api_key)
+
+     def create_dataset_card(
+         self,
+         dataset_name: str,
+         description: str,
+         license: Optional[str] = None,
+         version: Optional[str] = None,
+         tags: Optional[List[str]] = None,
+         authors: Optional[List[str]] = None,
+         size_category: Optional[List[str]] = None,
+         language: Optional[List[str]] = None,
+         task_categories: Optional[List[str]] = None,
+         content: Optional[str] = None,
+     ) -> None:
+         r"""Creates and uploads a dataset card to the Hugging Face Hub in
+         YAML format.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             description (str): A description of the dataset.
+             license (str): The license of the dataset. (default: :obj:`None`)
+             version (str): The version of the dataset. (default: :obj:`None`)
+             tags (list): A list of tags for the dataset.
+                 (default: :obj:`None`)
+             authors (list): A list of authors of the dataset.
+                 (default: :obj:`None`)
+             size_category (list): A size category for the dataset.
+                 (default: :obj:`None`)
+             language (list): A list of languages the dataset is in.
+                 (default: :obj:`None`)
+             task_categories (list): A list of task categories.
+                 (default: :obj:`None`)
+             content (str): Custom markdown content that the user wants to add
+                 to the dataset card. (default: :obj:`None`)
+         """
+         import yaml
+
+         metadata = {
+             "license": license,
+             "authors": authors,
+             "task_categories": task_categories,
+             "language": language,
+             "tags": tags,
+             "pretty_name": dataset_name,
+             "size_categories": size_category,
+             "version": version,
+             "description": description,
+         }
+
+         # Remove keys with None values
+         metadata = {k: v for k, v in metadata.items() if v}
+
+         card_content = (
+             "---\n"
+             + yaml.dump(metadata, default_flow_style=False, allow_unicode=True)
+             + "\n---"
+         )
+
+         if content:
+             card_content += f"\n\n# Additional Information\n{content}\n"
+
+         self._upload_file(
+             file_content=card_content,
+             dataset_name=dataset_name,
+             filepath="README.md",
+             file_type="md",
+         )
+
+     def create_dataset(
+         self, name: str, private: bool = False, **kwargs: Any
+     ) -> str:
+         r"""Creates a new dataset on the Hugging Face Hub.
+
+         Args:
+             name (str): The name of the dataset.
+             private (bool): Whether the dataset should be private.
+                 (default: :obj:`False`)
+             kwargs (Any): Additional keyword arguments.
+
+         Returns:
+             str: The URL of the created dataset.
+         """
+         from huggingface_hub.errors import RepositoryNotFoundError
+
+         try:
+             self.api.repo_info(
+                 repo_id=name,
+                 repo_type=HuggingFaceRepoType.DATASET.value,
+                 **kwargs,
+             )
+         except RepositoryNotFoundError:
+             self.api.create_repo(
+                 repo_id=name,
+                 repo_type=HuggingFaceRepoType.DATASET.value,
+                 private=private,
+             )
+
+         return f"https://huggingface.co/datasets/{name}"
+
+     def list_datasets(
+         self, username: str, limit: int = 100, **kwargs: Any
+     ) -> List[str]:
+         r"""Lists all datasets for the current user.
+
+         Args:
+             username (str): The username of the user whose datasets to list.
+             limit (int): The maximum number of datasets to list.
+                 (default: :obj:`100`)
+             kwargs (Any): Additional keyword arguments.
+
+         Returns:
+             List[str]: A list of dataset ids.
+         """
+         try:
+             return [
+                 dataset.id
+                 for dataset in self.api.list_datasets(
+                     author=username, limit=limit, **kwargs
+                 )
+             ]
+         except Exception as e:
+             logger.error(f"Error listing datasets: {e}")
+             return []
+
+     def delete_dataset(self, dataset_name: str, **kwargs: Any) -> None:
+         r"""Deletes a dataset from the Hugging Face Hub.
+
+         Args:
+             dataset_name (str): The name of the dataset to delete.
+             kwargs (Any): Additional keyword arguments.
+         """
+         try:
+             self.api.delete_repo(
+                 repo_id=dataset_name,
+                 repo_type=HuggingFaceRepoType.DATASET.value,
+                 **kwargs,
+             )
+             logger.info(f"Dataset '{dataset_name}' deleted successfully.")
+         except Exception as e:
+             logger.error(f"Error deleting dataset '{dataset_name}': {e}")
+             raise
+
+     def add_records(
+         self,
+         dataset_name: str,
+         records: List[Record],
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> None:
+         r"""Adds records to a dataset on the Hugging Face Hub.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             records (List[Record]): A list of records to add to the dataset.
+             filepath (str): The path to the file containing the records.
+             kwargs (Any): Additional keyword arguments.
+
+         Raises:
+             ValueError: If the dataset already has a records file.
+         """
+         existing_records = self._download_records(
+             dataset_name=dataset_name, filepath=filepath, **kwargs
+         )
+
+         if existing_records:
+             raise ValueError(
+                 f"Dataset '{filepath}' already exists. "
+                 f"Use `update_records` to modify."
+             )
+
+         self._upload_records(
+             records=records,
+             dataset_name=dataset_name,
+             filepath=filepath,
+             **kwargs,
+         )
+
+     def update_records(
+         self,
+         dataset_name: str,
+         records: List[Record],
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> None:
+         r"""Updates records in a dataset on the Hugging Face Hub.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             records (List[Record]): A list of records to update in the
+                 dataset.
+             filepath (str): The path to the file containing the records.
+             kwargs (Any): Additional keyword arguments.
+
+         Raises:
+             ValueError: If the dataset does not have an existing file to
+                 update records in.
+         """
+         existing_records = self._download_records(
+             dataset_name=dataset_name, filepath=filepath, **kwargs
+         )
+
+         if not existing_records:
+             logger.warning(
+                 f"Dataset '{dataset_name}' does not have existing "
+                 "records. Adding new records."
+             )
+             self._upload_records(
+                 records=records,
+                 dataset_name=dataset_name,
+                 filepath=filepath,
+                 **kwargs,
+             )
+             return
+
+         old_dict = {record.id: record for record in existing_records}
+         new_dict = {record.id: record for record in records}
+         merged_dict = old_dict.copy()
+         merged_dict.update(new_dict)
+
+         self._upload_records(
+             records=list(merged_dict.values()),
+             dataset_name=dataset_name,
+             filepath=filepath,
+             **kwargs,
+         )
+
+     def delete_record(
+         self,
+         dataset_name: str,
+         record_id: str,
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> None:
+         r"""Deletes a record from the dataset.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             record_id (str): The ID of the record to delete.
+             filepath (str): The path to the file containing the records.
+             kwargs (Any): Additional keyword arguments.
+
+         Raises:
+             ValueError: If the dataset does not have an existing file to
+                 delete records from.
+         """
+         existing_records = self._download_records(
+             dataset_name=dataset_name, filepath=filepath, **kwargs
+         )
+
+         if not existing_records:
+             raise ValueError(
+                 f"Dataset '{dataset_name}' does not have an existing file to "
+                 f"delete records from."
+             )
+
+         filtered_records = [
+             record for record in existing_records if record.id != record_id
+         ]
+
+         self._upload_records(
+             records=filtered_records,
+             dataset_name=dataset_name,
+             filepath=filepath,
+             **kwargs,
+         )
+
+     def list_records(
+         self,
+         dataset_name: str,
+         filepath: str = "records/records.json",
+         **kwargs: Any,
+     ) -> List[Record]:
+         r"""Lists all records in a dataset.
+
+         Args:
+             dataset_name (str): The name of the dataset.
+             filepath (str): The path to the file containing the records.
+             kwargs (Any): Additional keyword arguments.
+
+         Returns:
+             List[Record]: A list of records in the dataset.
+         """
+         return self._download_records(
+             dataset_name=dataset_name, filepath=filepath, **kwargs
+         )
+
+     def _download_records(
+         self, dataset_name: str, filepath: str, **kwargs: Any
+     ) -> List[Record]:
+         from huggingface_hub import hf_hub_download
+         from huggingface_hub.errors import EntryNotFoundError
+
+         try:
+             downloaded_file_path = hf_hub_download(
+                 repo_id=dataset_name,
+                 filename=filepath,
+                 repo_type=HuggingFaceRepoType.DATASET.value,
+                 token=self._api_key,
+                 **kwargs,
+             )
+
+             with open(downloaded_file_path, "r") as f:
+                 records_data = json.load(f)
+
+             return [Record(**record) for record in records_data]
+         except EntryNotFoundError:
+             logger.info(f"No records found for dataset '{dataset_name}'.")
+             return []
+         except Exception as e:
+             logger.error(f"Error downloading or processing records: {e}")
+             raise e
+
+     def _upload_records(
+         self,
+         records: List[Record],
+         dataset_name: str,
+         filepath: str,
+         **kwargs: Any,
+     ):
+         with tempfile.NamedTemporaryFile(
+             delete=False, mode="w", newline="", encoding="utf-8"
+         ) as f:
+             json.dump([record.model_dump() for record in records], f)
+             temp_file_path = f.name
+
+         try:
+             self.api.upload_file(
+                 path_or_fileobj=temp_file_path,
+                 path_in_repo=filepath,
+                 repo_id=dataset_name,
+                 repo_type=HuggingFaceRepoType.DATASET.value,
+                 **kwargs,
+             )
+         except Exception as e:
+             logger.error(f"Error uploading records file: {e}")
+             raise
+         finally:
+             if os.path.exists(temp_file_path):
+                 os.remove(temp_file_path)
+
+     def _upload_file(
+         self,
+         file_content: str,
+         dataset_name: str,
+         filepath: str,
+         file_type: str = "json",
+         **kwargs: Any,
+     ):
+         with tempfile.NamedTemporaryFile(
+             mode="w", delete=False, suffix=f".{file_type}"
+         ) as f:
+             if file_type == "json":
+                 if isinstance(file_content, str):
+                     try:
+                         json_content = json.loads(file_content)
+                     except json.JSONDecodeError:
+                         raise ValueError(
+                             "Invalid JSON string provided for file_content."
+                         )
+                 else:
+                     try:
+                         json.dumps(file_content)
+                         json_content = file_content
+                     except (TypeError, ValueError):
+                         raise ValueError(
+                             "file_content is not JSON serializable."
+                         )
+
+                 json.dump(json_content, f)
+             elif file_type == "md" or file_type == "txt":
+                 f.write(file_content)
+             else:
+                 raise ValueError(f"Unsupported file type: {file_type}")
+
+             temp_file_path = f.name
+
+         try:
+             self.api.upload_file(
+                 path_or_fileobj=temp_file_path,
+                 path_in_repo=filepath,
+                 repo_id=dataset_name,
+                 repo_type=HuggingFaceRepoType.DATASET.value,
+                 **kwargs,
+             )
+             logger.info(f"File uploaded successfully: {filepath}")
+         except Exception as e:
+             logger.error(f"Error uploading file: {e}")
+             raise
+
+         if os.path.exists(temp_file_path):
+             os.remove(temp_file_path)
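
Taken together, a typical round trip with this manager looks like the sketch below. It is illustrative only: the repo id "username/demo-dataset" is a placeholder, and HUGGING_FACE_TOKEN must be set in the environment.

from camel.datahubs.huggingface import HuggingFaceDatasetManager
from camel.datahubs.models import Record

# Reads HUGGING_FACE_TOKEN from the environment if no token is passed.
manager = HuggingFaceDatasetManager()

# "username/demo-dataset" is a placeholder repo id.
url = manager.create_dataset("username/demo-dataset")
print(url)  # https://huggingface.co/datasets/username/demo-dataset

manager.add_records(
    "username/demo-dataset",
    [
        Record(id="1", content={"input": "What is CAMEL?", "output": "A multi-agent framework."}),
        Record(id="2", content={"input": "2 + 2", "output": "4"}),
    ],
)

for record in manager.list_records("username/demo-dataset"):
    print(record.id, record.content)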
camel/datahubs/models.py ADDED
@@ -0,0 +1,22 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ from typing import Any, Dict, Optional
+
+ from pydantic import BaseModel
+
+
+ class Record(BaseModel):
+     id: Optional[str] = None
+     metadata: Optional[Dict[str, Any]] = None
+     content: Dict[str, Any]
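
Since Record is a plain pydantic model, construction validates the fields shown above; for example (illustrative):

from camel.datahubs.models import Record

# `content` is the only required field; `id` and `metadata` are optional.
record = Record(content={"question": "hi", "answer": "hello"}, metadata={"source": "manual"})
print(record.model_dump())  # the dict form serialized by _upload_records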
camel/embeddings/vlm_embedding.py CHANGED
@@ -16,6 +16,9 @@ from typing import Any, List, Optional, Union
  from PIL import Image

  from camel.embeddings import BaseEmbedding
+ from camel.logger import get_logger
+
+ logger = get_logger(__name__)


  class VisionLanguageEmbedding(BaseEmbedding[Union[str, Image.Image]]):
@@ -60,7 +63,7 @@ class VisionLanguageEmbedding(BaseEmbedding[Union[str, Image.Image]]):
              "interpolate_pos_encoding",
          ]
      except Exception:
-         print("Warning: not typically processor and model structure")
+         logger.warning("not typically processor and model structure")
          pass
      self.dim: Optional[int] = None

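This hunk, like several below, replaces a bare print with the camel.logger module introduced in this release (camel/logger.py in the file list above). Based only on the calls visible in this diff, usage follows the stdlib logging pattern:

from camel.logger import get_logger

# One module-level logger per file, as the diffed modules do.
logger = get_logger(__name__)
logger.warning("not typically processor and model structure")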
camel/interpreters/__init__.py CHANGED
@@ -14,6 +14,7 @@

  from .base import BaseInterpreter
  from .docker_interpreter import DockerInterpreter
+ from .e2b_interpreter import E2BInterpreter
  from .internal_python_interpreter import InternalPythonInterpreter
  from .interpreter_error import InterpreterError
  from .ipython_interpreter import JupyterKernelInterpreter
@@ -26,4 +27,5 @@ __all__ = [
      'SubprocessInterpreter',
      'DockerInterpreter',
      'JupyterKernelInterpreter',
+     'E2BInterpreter',
  ]
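
With this re-export, the new interpreter is importable at the package level; actually constructing it presumably requires the e2b dependency and credentials, which this diff does not show:

from camel.interpreters import E2BInterpreter  # newly exported in 0.2.12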
camel/interpreters/docker_interpreter.py CHANGED
@@ -23,11 +23,14 @@ from colorama import Fore

  from camel.interpreters.base import BaseInterpreter
  from camel.interpreters.interpreter_error import InterpreterError
+ from camel.logger import get_logger
  from camel.utils import is_docker_running

  if TYPE_CHECKING:
      from docker.models.containers import Container

+ logger = get_logger(__name__)
+

  class DockerInterpreter(BaseInterpreter):
      r"""A class for executing code files or code strings in a docker container.
@@ -187,8 +190,10 @@ class DockerInterpreter(BaseInterpreter):

      # Print code for security checking
      if self.require_confirm:
-         print(f"The following {code_type} code will run in container:")
-         print(Fore.CYAN + code + Fore.RESET)
+         logger.info(
+             f"The following {code_type} code will run on your "
+             f"computer: {code}"
+         )
          while True:
              choice = input("Running code? [Y/n]:").lower()
              if choice in ["y", "yes", "ye", ""]: