camel-ai 0.1.5.3__py3-none-any.whl → 0.1.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (51) hide show
  1. camel/__init__.py +1 -1
  2. camel/agents/knowledge_graph_agent.py +4 -1
  3. camel/configs/__init__.py +6 -0
  4. camel/configs/litellm_config.py +8 -18
  5. camel/configs/ollama_config.py +85 -0
  6. camel/configs/zhipuai_config.py +78 -0
  7. camel/embeddings/base.py +10 -9
  8. camel/embeddings/openai_embedding.py +27 -14
  9. camel/embeddings/sentence_transformers_embeddings.py +28 -14
  10. camel/functions/search_functions.py +5 -14
  11. camel/functions/slack_functions.py +5 -7
  12. camel/functions/twitter_function.py +3 -8
  13. camel/functions/weather_functions.py +3 -8
  14. camel/interpreters/__init__.py +2 -0
  15. camel/interpreters/docker_interpreter.py +235 -0
  16. camel/loaders/__init__.py +2 -0
  17. camel/loaders/base_io.py +5 -9
  18. camel/loaders/jina_url_reader.py +99 -0
  19. camel/loaders/unstructured_io.py +4 -6
  20. camel/models/anthropic_model.py +6 -4
  21. camel/models/litellm_model.py +49 -21
  22. camel/models/model_factory.py +1 -0
  23. camel/models/nemotron_model.py +14 -6
  24. camel/models/ollama_model.py +11 -17
  25. camel/models/openai_audio_models.py +10 -2
  26. camel/models/openai_model.py +4 -3
  27. camel/models/zhipuai_model.py +12 -6
  28. camel/retrievers/auto_retriever.py +2 -2
  29. camel/retrievers/bm25_retriever.py +3 -8
  30. camel/retrievers/cohere_rerank_retriever.py +3 -5
  31. camel/storages/__init__.py +2 -0
  32. camel/storages/graph_storages/graph_element.py +9 -1
  33. camel/storages/graph_storages/neo4j_graph.py +3 -7
  34. camel/storages/key_value_storages/__init__.py +2 -0
  35. camel/storages/key_value_storages/redis.py +169 -0
  36. camel/storages/vectordb_storages/milvus.py +3 -7
  37. camel/storages/vectordb_storages/qdrant.py +3 -7
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/code_execution.py +69 -0
  40. camel/toolkits/github_toolkit.py +5 -9
  41. camel/types/enums.py +49 -20
  42. camel/utils/__init__.py +2 -2
  43. camel/utils/async_func.py +42 -0
  44. camel/utils/commons.py +31 -49
  45. camel/utils/token_counting.py +40 -1
  46. {camel_ai-0.1.5.3.dist-info → camel_ai-0.1.5.5.dist-info}/METADATA +16 -8
  47. {camel_ai-0.1.5.3.dist-info → camel_ai-0.1.5.5.dist-info}/RECORD +48 -44
  48. camel/bots/__init__.py +0 -20
  49. camel/bots/discord_bot.py +0 -103
  50. camel/bots/telegram_bot.py +0 -84
  51. {camel_ai-0.1.5.3.dist-info → camel_ai-0.1.5.5.dist-info}/WHEEL +0 -0
@@ -18,8 +18,8 @@ from datetime import datetime, timedelta
18
18
  from typing import List, Optional
19
19
 
20
20
  from camel.functions import OpenAIFunction
21
-
22
- from .base import BaseToolkit
21
+ from camel.toolkits.base import BaseToolkit
22
+ from camel.utils import dependencies_required
23
23
 
24
24
 
25
25
  @dataclass
@@ -130,6 +130,7 @@ class GithubToolkit(BaseToolkit):
130
130
  `get_github_access_token` method.
131
131
  """
132
132
 
133
+ @dependencies_required('github')
133
134
  def __init__(
134
135
  self, repo_name: str, access_token: Optional[str] = None
135
136
  ) -> None:
@@ -144,13 +145,8 @@ class GithubToolkit(BaseToolkit):
144
145
  if access_token is None:
145
146
  access_token = self.get_github_access_token()
146
147
 
147
- try:
148
- from github import Auth, Github
149
- except ImportError:
150
- raise ImportError(
151
- "Please install `github` first. You can install it by running "
152
- "`pip install pygithub`."
153
- )
148
+ from github import Auth, Github
149
+
154
150
  self.github = Github(auth=Auth.Token(access_token))
155
151
  self.repo = self.github.get_repo(repo_name)
156
152
 
camel/types/enums.py CHANGED
@@ -30,15 +30,19 @@ class ModelType(Enum):
30
30
  GPT_4_TURBO = "gpt-4-turbo"
31
31
  GPT_4O = "gpt-4o"
32
32
  GLM_4 = "glm-4"
33
+ GLM_4_OPEN_SOURCE = "glm-4-open-source"
33
34
  GLM_4V = 'glm-4v'
34
35
  GLM_3_TURBO = "glm-3-turbo"
35
36
 
36
37
  STUB = "stub"
37
38
 
38
39
  LLAMA_2 = "llama-2"
40
+ LLAMA_3 = "llama-3"
39
41
  VICUNA = "vicuna"
40
42
  VICUNA_16K = "vicuna-16k"
41
43
 
44
+ QWEN_2 = "qwen-2"
45
+
42
46
  # Legacy anthropic models
43
47
  # NOTE: anthropic legacy models only Claude 2.1 has system prompt support
44
48
  CLAUDE_2_1 = "claude-2.1"
@@ -49,6 +53,7 @@ class ModelType(Enum):
49
53
  CLAUDE_3_OPUS = "claude-3-opus-20240229"
50
54
  CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
51
55
  CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
56
+ CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620"
52
57
 
53
58
  # Nvidia models
54
59
  NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"
@@ -86,6 +91,9 @@ class ModelType(Enum):
86
91
  r"""Returns whether this type of models is open-source."""
87
92
  return self in {
88
93
  ModelType.LLAMA_2,
94
+ ModelType.LLAMA_3,
95
+ ModelType.QWEN_2,
96
+ ModelType.GLM_4_OPEN_SOURCE,
89
97
  ModelType.VICUNA,
90
98
  ModelType.VICUNA_16K,
91
99
  }
@@ -104,6 +112,7 @@ class ModelType(Enum):
104
112
  ModelType.CLAUDE_3_OPUS,
105
113
  ModelType.CLAUDE_3_SONNET,
106
114
  ModelType.CLAUDE_3_HAIKU,
115
+ ModelType.CLAUDE_3_5_SONNET,
107
116
  }
108
117
 
109
118
  @property
@@ -133,7 +142,7 @@ class ModelType(Enum):
133
142
  return 128000
134
143
  elif self is ModelType.GPT_4O:
135
144
  return 128000
136
- elif self == ModelType.GLM_4:
145
+ elif self == ModelType.GLM_4_OPEN_SOURCE:
137
146
  return 8192
138
147
  elif self == ModelType.GLM_3_TURBO:
139
148
  return 8192
@@ -143,6 +152,12 @@ class ModelType(Enum):
143
152
  return 4096
144
153
  elif self is ModelType.LLAMA_2:
145
154
  return 4096
155
+ elif self is ModelType.LLAMA_3:
156
+ return 8192
157
+ elif self is ModelType.QWEN_2:
158
+ return 128000
159
+ elif self is ModelType.GLM_4:
160
+ return 8192
146
161
  elif self is ModelType.VICUNA:
147
162
  # reference: https://lmsys.org/blog/2023-03-30-vicuna/
148
163
  return 2048
@@ -155,6 +170,7 @@ class ModelType(Enum):
155
170
  ModelType.CLAUDE_3_OPUS,
156
171
  ModelType.CLAUDE_3_SONNET,
157
172
  ModelType.CLAUDE_3_HAIKU,
173
+ ModelType.CLAUDE_3_5_SONNET,
158
174
  }:
159
175
  return 200_000
160
176
  elif self is ModelType.NEMOTRON_4_REWARD:
@@ -181,40 +197,46 @@ class ModelType(Enum):
181
197
  self.value in model_name.lower()
182
198
  or "llama2" in model_name.lower()
183
199
  )
200
+ elif self is ModelType.LLAMA_3:
201
+ return (
202
+ self.value in model_name.lower()
203
+ or "llama3" in model_name.lower()
204
+ )
205
+ elif self is ModelType.QWEN_2:
206
+ return (
207
+ self.value in model_name.lower()
208
+ or "qwen2" in model_name.lower()
209
+ )
210
+ elif self is ModelType.GLM_4_OPEN_SOURCE:
211
+ return (
212
+ 'glm-4' in model_name.lower() or "glm4" in model_name.lower()
213
+ )
184
214
  else:
185
215
  return self.value in model_name.lower()
186
216
 
187
217
 
188
218
  class EmbeddingModelType(Enum):
189
- ADA_2 = "text-embedding-ada-002"
190
- ADA_1 = "text-embedding-ada-001"
191
- BABBAGE_1 = "text-embedding-babbage-001"
192
- CURIE_1 = "text-embedding-curie-001"
193
- DAVINCI_1 = "text-embedding-davinci-001"
219
+ TEXT_EMBEDDING_ADA_2 = "text-embedding-ada-002"
220
+ TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
221
+ TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"
194
222
 
195
223
  @property
196
224
  def is_openai(self) -> bool:
197
225
  r"""Returns whether this type of models is an OpenAI-released model."""
198
226
  return self in {
199
- EmbeddingModelType.ADA_2,
200
- EmbeddingModelType.ADA_1,
201
- EmbeddingModelType.BABBAGE_1,
202
- EmbeddingModelType.CURIE_1,
203
- EmbeddingModelType.DAVINCI_1,
227
+ EmbeddingModelType.TEXT_EMBEDDING_ADA_2,
228
+ EmbeddingModelType.TEXT_EMBEDDING_3_SMALL,
229
+ EmbeddingModelType.TEXT_EMBEDDING_3_LARGE,
204
230
  }
205
231
 
206
232
  @property
207
233
  def output_dim(self) -> int:
208
- if self is EmbeddingModelType.ADA_2:
234
+ if self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
209
235
  return 1536
210
- elif self is EmbeddingModelType.ADA_1:
211
- return 1024
212
- elif self is EmbeddingModelType.BABBAGE_1:
213
- return 2048
214
- elif self is EmbeddingModelType.CURIE_1:
215
- return 4096
216
- elif self is EmbeddingModelType.DAVINCI_1:
217
- return 12288
236
+ elif self is EmbeddingModelType.TEXT_EMBEDDING_3_SMALL:
237
+ return 1536
238
+ elif self is EmbeddingModelType.TEXT_EMBEDDING_3_LARGE:
239
+ return 3072
218
240
  else:
219
241
  raise ValueError(f"Unknown model type {self}.")
220
242
 
@@ -379,3 +401,10 @@ class VoiceType(Enum):
379
401
  VoiceType.NOVA,
380
402
  VoiceType.SHIMMER,
381
403
  }
404
+
405
+
406
+ class JinaReturnFormat(Enum):
407
+ DEFAULT = None
408
+ MARKDOWN = "markdown"
409
+ HTML = "html"
410
+ TEXT = "text"
camel/utils/__init__.py CHANGED
@@ -23,7 +23,7 @@ from .commons import (
23
23
  get_prompt_template_key_words,
24
24
  get_system_information,
25
25
  get_task_list,
26
- model_api_key_required,
26
+ is_docker_running,
27
27
  print_text_animated,
28
28
  text_extract_from_web,
29
29
  to_pascal,
@@ -39,7 +39,6 @@ from .token_counting import (
39
39
  )
40
40
 
41
41
  __all__ = [
42
- 'model_api_key_required',
43
42
  'print_text_animated',
44
43
  'get_prompt_template_key_words',
45
44
  'get_first_int',
@@ -60,4 +59,5 @@ __all__ = [
60
59
  'create_chunks',
61
60
  'dependencies_required',
62
61
  'api_keys_required',
62
+ 'is_docker_running',
63
63
  ]
@@ -0,0 +1,42 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ import asyncio
15
+ from copy import deepcopy
16
+
17
+ from camel.functions.openai_function import OpenAIFunction
18
+
19
+
20
+ def sync_funcs_to_async(funcs: list[OpenAIFunction]) -> list[OpenAIFunction]:
21
+ r"""Convert a list of Python synchronous functions to Python
22
+ asynchronous functions.
23
+
24
+ Args:
25
+ funcs (list[OpenAIFunction]): List of Python synchronous
26
+ functions in the :obj:`OpenAIFunction` format.
27
+
28
+ Returns:
29
+ list[OpenAIFunction]: List of Python asynchronous functions
30
+ in the :obj:`OpenAIFunction` format.
31
+ """
32
+ async_funcs = []
33
+ for func in funcs:
34
+ sync_func = func.func
35
+
36
+ def async_callable(*args, **kwargs):
37
+ return asyncio.to_thread(sync_func, *args, **kwargs) # noqa: B023
38
+
39
+ async_funcs.append(
40
+ OpenAIFunction(async_callable, deepcopy(func.openai_tool_schema))
41
+ )
42
+ return async_funcs
camel/utils/commons.py CHANGED
@@ -16,6 +16,7 @@ import os
16
16
  import platform
17
17
  import re
18
18
  import socket
19
+ import subprocess
19
20
  import time
20
21
  import zipfile
21
22
  from functools import wraps
@@ -30,48 +31,6 @@ from camel.types import TaskType
30
31
  F = TypeVar('F', bound=Callable[..., Any])
31
32
 
32
33
 
33
- def model_api_key_required(func: F) -> F:
34
- r"""Decorator that checks if the API key is available either as an
35
- environment variable or passed directly for a model.
36
-
37
- Args:
38
- func (callable): The function to be wrapped.
39
-
40
- Returns:
41
- callable: The decorated function.
42
-
43
- Raises:
44
- ValueError: If the API key is not found, either as an environment
45
- variable or directly passed.
46
-
47
- Note:
48
- Supported model type: `OpenAI` and `Anthropic`.
49
- """
50
-
51
- @wraps(func)
52
- def wrapper(self, *args, **kwargs):
53
- if self.model_type.is_openai:
54
- if not self._api_key and 'OPENAI_API_KEY' not in os.environ:
55
- raise ValueError('OpenAI API key not found.')
56
- return func(self, *args, **kwargs)
57
- elif self.model_type.is_zhipuai:
58
- if 'ZHIPUAI_API_KEY' not in os.environ:
59
- raise ValueError('ZhiPuAI API key not found.')
60
- return func(self, *args, **kwargs)
61
- elif self.model_type.is_anthropic:
62
- if not self._api_key and 'ANTHROPIC_API_KEY' not in os.environ:
63
- raise ValueError('Anthropic API key not found.')
64
- return func(self, *args, **kwargs)
65
- elif self.model_type.is_nvidia:
66
- if not self._api_key and 'NVIDIA_API_KEY' not in os.environ:
67
- raise ValueError('NVIDIA API key not found.')
68
- return func(self, *args, **kwargs)
69
- else:
70
- raise ValueError('Unsupported model type.')
71
-
72
- return cast(F, wrapper)
73
-
74
-
75
34
  def print_text_animated(text, delay: float = 0.02, end: str = ""):
76
35
  r"""Prints the given text with an animated effect.
77
36
 
@@ -260,7 +219,7 @@ def is_module_available(module_name: str) -> bool:
260
219
 
261
220
  def api_keys_required(*required_keys: str) -> Callable[[F], F]:
262
221
  r"""A decorator to check if the required API keys are
263
- present in the environment variables.
222
+ presented in the environment variables or as an instance attribute.
264
223
 
265
224
  Args:
266
225
  required_keys (str): The required API keys to be checked.
@@ -271,7 +230,7 @@ def api_keys_required(*required_keys: str) -> Callable[[F], F]:
271
230
 
272
231
  Raises:
273
232
  ValueError: If any of the required API keys are missing in the
274
- environment variables.
233
+ environment variables and the instance attribute.
275
234
 
276
235
  Example:
277
236
  ::
@@ -283,13 +242,18 @@ def api_keys_required(*required_keys: str) -> Callable[[F], F]:
283
242
 
284
243
  def decorator(func: F) -> F:
285
244
  @wraps(func)
286
- def wrapper(*args: Any, **kwargs: Any) -> Any:
287
- missing_keys = [k for k in required_keys if k not in os.environ]
288
- if missing_keys:
245
+ def wrapper(self, *args: Any, **kwargs: Any) -> Any:
246
+ missing_environment_keys = [
247
+ k for k in required_keys if k not in os.environ
248
+ ]
249
+ if (
250
+ not getattr(self, '_api_key', None)
251
+ and missing_environment_keys
252
+ ):
289
253
  raise ValueError(
290
- f"Missing API keys: {', '.join(missing_keys)}"
254
+ f"Missing API keys: {', '.join(missing_environment_keys)}"
291
255
  )
292
- return func(*args, **kwargs)
256
+ return func(self, *args, **kwargs)
293
257
 
294
258
  return cast(F, wrapper)
295
259
 
@@ -400,3 +364,21 @@ def create_chunks(text: str, n: int) -> List[str]:
400
364
  chunks.append(text[i:j])
401
365
  i = j
402
366
  return chunks
367
+
368
+
369
+ def is_docker_running() -> bool:
370
+ r"""Check if the Docker daemon is running.
371
+
372
+ Returns:
373
+ bool: True if the Docker daemon is running, False otherwise.
374
+ """
375
+ try:
376
+ result = subprocess.run(
377
+ ["docker", "info"],
378
+ check=True,
379
+ stdout=subprocess.PIPE,
380
+ stderr=subprocess.PIPE,
381
+ )
382
+ return result.returncode == 0
383
+ except (subprocess.CalledProcessError, FileNotFoundError):
384
+ return False
@@ -51,7 +51,7 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
51
51
  system_message = messages[0]["content"]
52
52
 
53
53
  ret: str
54
- if model == ModelType.LLAMA_2:
54
+ if model == ModelType.LLAMA_2 or model == ModelType.LLAMA_3:
55
55
  # reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
56
56
  seps = [" ", " </s><s>"]
57
57
  role_map = {"user": "[INST]", "assistant": "[/INST]"}
@@ -93,6 +93,45 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
93
93
  else:
94
94
  ret += role + ":"
95
95
  return ret
96
+ elif model == ModelType.GLM_4_OPEN_SOURCE:
97
+ system_prompt = f"[gMASK]<sop><|system|>\n{system_message}"
98
+ ret = system_prompt
99
+ for msg in messages[1:]:
100
+ role = msg["role"]
101
+ content = msg["content"]
102
+ if not isinstance(content, str):
103
+ raise ValueError(
104
+ "Currently multimodal context is not "
105
+ "supported by the token counter."
106
+ )
107
+ if content:
108
+ ret += "<|" + role + "|>" + "\n" + content
109
+ else:
110
+ ret += "<|" + role + "|>" + "\n"
111
+ return ret
112
+ elif model == ModelType.QWEN_2:
113
+ system_prompt = f"<|im_start|>system\n{system_message}<|im_end|>"
114
+ ret = system_prompt + "\n"
115
+ for msg in messages[1:]:
116
+ role = msg["role"]
117
+ content = msg["content"]
118
+ if not isinstance(content, str):
119
+ raise ValueError(
120
+ "Currently multimodal context is not "
121
+ "supported by the token counter."
122
+ )
123
+ if content:
124
+ ret += (
125
+ '<|im_start|>'
126
+ + role
127
+ + '\n'
128
+ + content
129
+ + '<|im_end|>'
130
+ + '\n'
131
+ )
132
+ else:
133
+ ret += '<|im_start|>' + role + '\n'
134
+ return ret
96
135
  else:
97
136
  raise ValueError(f"Invalid model type: {model}")
98
137
 
@@ -1,12 +1,12 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: camel-ai
3
- Version: 0.1.5.3
3
+ Version: 0.1.5.5
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Home-page: https://www.camel-ai.org/
6
6
  License: Apache-2.0
7
7
  Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
8
8
  Author: CAMEL-AI.org
9
- Requires-Python: >=3.8.1,<3.12
9
+ Requires-Python: >=3.9.0,<3.12
10
10
  Classifier: License :: OSI Approved :: Apache Software License
11
11
  Classifier: Programming Language :: Python :: 3
12
12
  Classifier: Programming Language :: Python :: 3.9
@@ -16,6 +16,7 @@ Provides-Extra: all
16
16
  Provides-Extra: encoders
17
17
  Provides-Extra: graph-storages
18
18
  Provides-Extra: huggingface-agent
19
+ Provides-Extra: kv-stroages
19
20
  Provides-Extra: model-platforms
20
21
  Provides-Extra: retrievers
21
22
  Provides-Extra: test
@@ -23,7 +24,7 @@ Provides-Extra: tools
23
24
  Provides-Extra: vector-databases
24
25
  Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
25
26
  Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
26
- Requires-Dist: anthropic (>=0.28.0,<0.29.0)
27
+ Requires-Dist: anthropic (>=0.29.0,<0.30.0)
27
28
  Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
28
29
  Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
29
30
  Requires-Dist: colorama (>=0,<1)
@@ -31,13 +32,15 @@ Requires-Dist: curl_cffi (==0.6.2)
31
32
  Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
32
33
  Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
33
34
  Requires-Dist: discord.py (>=2.3.2,<3.0.0) ; extra == "tools" or extra == "all"
35
+ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
34
36
  Requires-Dist: docstring-parser (>=0.15,<0.16)
35
37
  Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
36
38
  Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
37
39
  Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
38
- Requires-Dist: imageio (>=2.34.1,<3.0.0) ; extra == "tools" or extra == "all"
40
+ Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
39
41
  Requires-Dist: jsonschema (>=4,<5)
40
42
  Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
43
+ Requires-Dist: milvus-lite (>=2.4.0,<=2.4.7)
41
44
  Requires-Dist: mock (>=5,<6) ; extra == "test"
42
45
  Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
43
46
  Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -59,13 +62,14 @@ Requires-Dist: pytest (>=7,<8) ; extra == "test"
59
62
  Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
60
63
  Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
61
64
  Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
65
+ Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
62
66
  Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
63
- Requires-Dist: sentence-transformers (>=2.2.2,<3.0.0) ; extra == "encoders" or extra == "all"
67
+ Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
64
68
  Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
65
69
  Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
66
70
  Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
67
71
  Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
68
- Requires-Dist: torch (>=1,<2) ; extra == "huggingface-agent" or extra == "all"
72
+ Requires-Dist: torch (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
69
73
  Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
70
74
  Requires-Dist: unstructured[all-docs] (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
71
75
  Requires-Dist: wikipedia (>=1,<2) ; extra == "tools" or extra == "all"
@@ -180,13 +184,13 @@ exit
180
184
  Install `CAMEL` from source with conda and pip:
181
185
  ```sh
182
186
  # Create a conda virtual environment
183
- conda create --name camel python=3.10
187
+ conda create --name camel python=3.9
184
188
 
185
189
  # Activate CAMEL conda environment
186
190
  conda activate camel
187
191
 
188
192
  # Clone github repo
189
- git clone -b v0.1.5.3 https://github.com/camel-ai/camel.git
193
+ git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
190
194
 
191
195
  # Change directory into project directory
192
196
  cd camel
@@ -198,6 +202,10 @@ pip install -e .
198
202
  pip install -e .[all] # (Optional)
199
203
  ```
200
204
 
205
+ ### From Docker
206
+
207
+ Detailed guidance can be found [here](https://github.com/camel-ai/camel/blob/master/.container/README.md)
208
+
201
209
  ## Documentation
202
210
 
203
211
  [CAMEL package documentation pages](https://camel-ai.github.io/camel/).