camel-ai 0.1.5.5__py3-none-any.whl → 0.1.5.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +3 -3
- camel/agents/critic_agent.py +1 -1
- camel/agents/deductive_reasoner_agent.py +4 -4
- camel/agents/embodied_agent.py +1 -1
- camel/agents/knowledge_graph_agent.py +13 -17
- camel/agents/role_assignment_agent.py +1 -1
- camel/agents/search_agent.py +4 -5
- camel/agents/task_agent.py +5 -6
- camel/configs/__init__.py +15 -0
- camel/configs/gemini_config.py +98 -0
- camel/configs/groq_config.py +119 -0
- camel/configs/litellm_config.py +1 -1
- camel/configs/mistral_config.py +81 -0
- camel/configs/ollama_config.py +1 -1
- camel/configs/openai_config.py +1 -1
- camel/configs/vllm_config.py +103 -0
- camel/configs/zhipuai_config.py +1 -1
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/mistral_embedding.py +89 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/ipython_interpreter.py +167 -0
- camel/models/__init__.py +10 -0
- camel/models/anthropic_model.py +7 -2
- camel/models/azure_openai_model.py +152 -0
- camel/models/base_model.py +9 -2
- camel/models/gemini_model.py +215 -0
- camel/models/groq_model.py +131 -0
- camel/models/litellm_model.py +26 -4
- camel/models/mistral_model.py +169 -0
- camel/models/model_factory.py +33 -5
- camel/models/ollama_model.py +21 -2
- camel/models/open_source_model.py +11 -3
- camel/models/openai_model.py +7 -2
- camel/models/stub_model.py +4 -4
- camel/models/vllm_model.py +138 -0
- camel/models/zhipuai_model.py +7 -4
- camel/prompts/__init__.py +2 -2
- camel/prompts/task_prompt_template.py +4 -4
- camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
- camel/retrievers/auto_retriever.py +2 -0
- camel/storages/graph_storages/neo4j_graph.py +5 -0
- camel/toolkits/__init__.py +36 -0
- camel/toolkits/base.py +1 -1
- camel/toolkits/code_execution.py +1 -1
- camel/toolkits/github_toolkit.py +3 -2
- camel/toolkits/google_maps_toolkit.py +367 -0
- camel/toolkits/math_toolkit.py +79 -0
- camel/toolkits/open_api_toolkit.py +548 -0
- camel/toolkits/retrieval_toolkit.py +76 -0
- camel/toolkits/search_toolkit.py +326 -0
- camel/toolkits/slack_toolkit.py +308 -0
- camel/toolkits/twitter_toolkit.py +522 -0
- camel/toolkits/weather_toolkit.py +173 -0
- camel/types/enums.py +163 -30
- camel/utils/__init__.py +4 -0
- camel/utils/async_func.py +1 -1
- camel/utils/token_counting.py +182 -40
- {camel_ai-0.1.5.5.dist-info → camel_ai-0.1.5.9.dist-info}/METADATA +43 -3
- camel_ai-0.1.5.9.dist-info/RECORD +165 -0
- camel/functions/__init__.py +0 -51
- camel/functions/google_maps_function.py +0 -335
- camel/functions/math_functions.py +0 -61
- camel/functions/open_api_function.py +0 -508
- camel/functions/retrieval_functions.py +0 -61
- camel/functions/search_functions.py +0 -298
- camel/functions/slack_functions.py +0 -286
- camel/functions/twitter_function.py +0 -479
- camel/functions/weather_functions.py +0 -144
- camel_ai-0.1.5.5.dist-info/RECORD +0 -155
- /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
- /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
- /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
- /camel/{functions → toolkits}/openai_function.py +0 -0
- {camel_ai-0.1.5.5.dist-info → camel_ai-0.1.5.9.dist-info}/WHEEL +0 -0
camel/models/ollama_model.py
CHANGED

@@ -29,6 +29,7 @@ class OllamaModel:
         model_type: str,
         model_config_dict: Dict[str, Any],
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for Ollama backend with OpenAI compatibility.

@@ -40,6 +41,9 @@ class OllamaModel:
                 be fed into openai.ChatCompletion.create().
             url (Optional[str]): The url to the model service. (default:
                 :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_3_5_TURBO)` will be used.
         """
         self.model_type = model_type
         self.model_config_dict = model_config_dict
@@ -50,7 +54,7 @@ class OllamaModel:
             base_url=url,
             api_key="ollama",  # required but ignored
         )
-        self._token_counter
+        self._token_counter = token_counter
         self.check_model_config()

     @property
@@ -61,7 +65,6 @@ class OllamaModel:
             BaseTokenCounter: The token counter following the model's
                 tokenization style.
         """
-        # NOTE: Use OpenAITokenCounter temporarily
         if not self._token_counter:
             self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
         return self._token_counter
@@ -104,6 +107,22 @@ class OllamaModel:
         )
         return response

+    @property
+    def token_limit(self) -> int:
+        """Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
+
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
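The practical effect of this change (repeated across the model backends below) is that callers may inject their own token counter instead of relying on the hard-coded GPT-3.5-Turbo fallback. A minimal usage sketch against the constructor shown above (the model name, URL, and config values are illustrative, not taken from the package):

    from camel.models.ollama_model import OllamaModel
    from camel.types import ModelType
    from camel.utils import OpenAITokenCounter

    model = OllamaModel(
        model_type="llama3",  # illustrative: any model your Ollama server hosts
        model_config_dict={"temperature": 0.4, "max_tokens": 4096},
        url="http://localhost:11434/v1",
        # Count tokens with a GPT-4 tokenizer instead of the default fallback.
        token_counter=OpenAITokenCounter(ModelType.GPT_4),
    )
    print(model.token_limit)  # 4096, read from model_config_dict["max_tokens"]

If token_counter is omitted, the token_counter property still falls back to OpenAITokenCounter(ModelType.GPT_3_5_TURBO), as before.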
camel/models/open_source_model.py
CHANGED

@@ -19,7 +19,10 @@ from camel.configs import OPENAI_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
-from camel.utils import
+from camel.utils import (
+    BaseTokenCounter,
+    OpenSourceTokenCounter,
+)


 class OpenSourceModel(BaseModelBackend):
@@ -33,6 +36,7 @@ class OpenSourceModel(BaseModelBackend):
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for model backends of Open-source models.

@@ -43,9 +47,13 @@ class OpenSourceModel(BaseModelBackend):
             api_key (Optional[str]): The API key for authenticating with the
                 model service. (ignored for open-source models)
             url (Optional[str]): The url to the model service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenSourceTokenCounter` will
+                be used.
         """
-        super().__init__(
-
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )

         # Check whether the input model type is open-source
         if not model_type.is_open_source:
camel/models/openai_model.py
CHANGED

@@ -36,6 +36,7 @@ class OpenAIModel(BaseModelBackend):
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for OpenAI backend.

@@ -48,8 +49,13 @@ class OpenAIModel(BaseModelBackend):
                 OpenAI service. (default: :obj:`None`)
             url (Optional[str]): The url to the OpenAI service. (default:
                 :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter` will
+                be used.
         """
-        super().__init__(
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
         self._url = url or os.environ.get("OPENAI_API_BASE_URL")
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
         self._client = OpenAI(
@@ -58,7 +64,6 @@ class OpenAIModel(BaseModelBackend):
             base_url=self._url,
             api_key=self._api_key,
         )
-        self._token_counter: Optional[BaseTokenCounter] = None

     @property
     def token_counter(self) -> BaseTokenCounter:
camel/models/stub_model.py
CHANGED

@@ -55,12 +55,12 @@ class StubModel(BaseModelBackend):
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""All arguments are unused for the dummy model."""
-        super().__init__(
-
-
-        self._url = url
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )

     @property
     def token_counter(self) -> BaseTokenCounter:
camel/models/vllm_model.py
ADDED

@@ -0,0 +1,138 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import VLLM_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
+from camel.utils import BaseTokenCounter, OpenAITokenCounter
+
+
+# flake8: noqa: E501
+class VLLMModel:
+    r"""vLLM service interface."""
+
+    def __init__(
+        self,
+        model_type: str,
+        model_config_dict: Dict[str, Any],
+        url: Optional[str] = None,
+        api_key: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        r"""Constructor for vLLM backend with OpenAI compatibility.
+
+        # Reference: https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html
+
+        Args:
+            model_type (str): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into openai.ChatCompletion.create().
+            url (Optional[str]): The url to the model service. (default:
+                :obj:`None`)
+            api_key (Optional[str]): The API key for authenticating with the
+                model service.
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_3_5_TURBO)` will be used.
+        """
+        self.model_type = model_type
+        self.model_config_dict = model_config_dict
+        # Use OpenAI cilent as interface call vLLM
+        self._client = OpenAI(
+            timeout=60,
+            max_retries=3,
+            base_url=url,
+            api_key=api_key,
+        )
+        self._token_counter = token_counter
+        self.check_model_config()
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to vLLM API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API.
+        """
+        for param in self.model_config_dict:
+            if param not in VLLM_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into vLLM model backend."
+                )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
+    @property
+    def token_limit(self) -> int:
+        """Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
camel/models/zhipuai_model.py
CHANGED

@@ -37,6 +37,7 @@ class ZhipuAIModel(BaseModelBackend):
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
         r"""Constructor for ZhipuAI backend.

@@ -49,8 +50,13 @@ class ZhipuAIModel(BaseModelBackend):
                 ZhipuAI service. (default: :obj:`None`)
             url (Optional[str]): The url to the ZhipuAI service. (default:
                 :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_3_5_TURBO)` will be used.
         """
-        super().__init__(
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
         self._url = url or os.environ.get("ZHIPUAI_API_BASE_URL")
         self._api_key = api_key or os.environ.get("ZHIPUAI_API_KEY")
         if not self._url or not self._api_key:
@@ -63,7 +69,6 @@ class ZhipuAIModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
-        self._token_counter: Optional[BaseTokenCounter] = None

     @api_keys_required("ZHIPUAI_API_KEY")
     def run(
@@ -100,7 +105,6 @@ class ZhipuAIModel(BaseModelBackend):
         """

         if not self._token_counter:
-            # It's a temporary setting for token counter.
             self._token_counter = OpenAITokenCounter(ModelType.GPT_3_5_TURBO)
         return self._token_counter

@@ -118,7 +122,6 @@ class ZhipuAIModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into ZhipuAI model backend."
                 )
-        pass

     @property
     def stream(self) -> bool:
camel/prompts/__init__.py
CHANGED

@@ -14,7 +14,6 @@
 from .ai_society import AISocietyPromptTemplateDict
 from .base import CodePrompt, TextPrompt, TextPromptDict
 from .code import CodePromptTemplateDict
-from .descripte_video_prompt import DescriptionVideoPromptTemplateDict
 from .evaluation import EvaluationPromptTemplateDict
 from .generate_text_embedding_data import (
     GenerateTextEmbeddingDataPromptTemplateDict,
@@ -26,6 +25,7 @@ from .role_description_prompt_template import RoleDescriptionPromptTemplateDict
 from .solution_extraction import SolutionExtractionPromptTemplateDict
 from .task_prompt_template import TaskPromptTemplateDict
 from .translation import TranslationPromptTemplateDict
+from .video_description_prompt import VideoDescriptionPromptTemplateDict

 __all__ = [
     'TextPrompt',
@@ -42,5 +42,5 @@ __all__ = [
     'SolutionExtractionPromptTemplateDict',
     'GenerateTextEmbeddingDataPromptTemplateDict',
     'ObjectRecognitionPromptTemplateDict',
-    '
+    'VideoDescriptionPromptTemplateDict',
 ]
camel/prompts/task_prompt_template.py
CHANGED

@@ -18,9 +18,6 @@ from camel.prompts.ai_society import (
     TextPromptDict,
 )
 from camel.prompts.code import CodePromptTemplateDict
-from camel.prompts.descripte_video_prompt import (
-    DescriptionVideoPromptTemplateDict,
-)
 from camel.prompts.evaluation import (
     EvaluationPromptTemplateDict,
 )
@@ -38,6 +35,9 @@ from camel.prompts.solution_extraction import (
     SolutionExtractionPromptTemplateDict,
 )
 from camel.prompts.translation import TranslationPromptTemplateDict
+from camel.prompts.video_description_prompt import (
+    VideoDescriptionPromptTemplateDict,
+)
 from camel.types import TaskType


@@ -64,6 +64,6 @@ class TaskPromptTemplateDict(Dict[Any, TextPromptDict]):
             TaskType.ROLE_DESCRIPTION: RoleDescriptionPromptTemplateDict(),
             TaskType.OBJECT_RECOGNITION: ObjectRecognitionPromptTemplateDict(),  # noqa: E501
             TaskType.GENERATE_TEXT_EMBEDDING_DATA: GenerateTextEmbeddingDataPromptTemplateDict(),  # noqa: E501
-            TaskType.VIDEO_DESCRIPTION:
+            TaskType.VIDEO_DESCRIPTION: VideoDescriptionPromptTemplateDict(),  # noqa: E501
         }
     )
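After the rename, the template is reached through the task dictionary exactly as the hunk above shows. A brief sketch (assuming the exports shown in the camel/prompts/__init__.py diff):

    from camel.prompts import (
        TaskPromptTemplateDict,
        VideoDescriptionPromptTemplateDict,
    )
    from camel.types import TaskType

    templates = TaskPromptTemplateDict()
    video_templates = templates[TaskType.VIDEO_DESCRIPTION]
    assert isinstance(video_templates, VideoDescriptionPromptTemplateDict)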
camel/prompts/{descripte_video_prompt.py → video_description_prompt.py}
RENAMED

@@ -18,7 +18,7 @@ from camel.types import RoleType


 # flake8: noqa :E501
-class
+class VideoDescriptionPromptTemplateDict(TextPromptDict):
     ASSISTANT_PROMPT = TextPrompt(
         """You are a master of video analysis.
 Please provide a shot description of the content of the current video."""
camel/retrievers/auto_retriever.py
CHANGED

@@ -283,12 +283,14 @@ class AutoRetriever:
             vr = VectorRetriever(
                 storage=vector_storage_instance,
                 similarity_threshold=similarity_threshold,
+                embedding_model=self.embedding_model,
             )
             vr.process(content_input_path)
         else:
             vr = VectorRetriever(
                 storage=vector_storage_instance,
                 similarity_threshold=similarity_threshold,
+                embedding_model=self.embedding_model,
             )
         # Retrieve info by given query from the vector storage
         retrieved_info = vr.query(query, top_k)
camel/storages/graph_storages/neo4j_graph.py
CHANGED

@@ -12,6 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import logging
+import os
 from hashlib import md5
 from typing import Any, Dict, List, Optional

@@ -95,6 +96,10 @@ class Neo4jGraph(BaseGraphStorage):
         r"""Create a new Neo4j graph instance."""
         import neo4j

+        url = os.environ.get("NEO4J_URI") or url
+        username = os.environ.get("NEO4J_USERNAME") or username
+        password = os.environ.get("NEO4J_PASSWORD") or password
+
         self.driver = neo4j.GraphDatabase.driver(
             url, auth=(username, password)
         )
camel/toolkits/__init__.py
CHANGED

@@ -11,13 +11,49 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# ruff: noqa: I001
+from .openai_function import (
+    OpenAIFunction,
+    get_openai_function_schema,
+    get_openai_tool_schema,
+)
+from .open_api_specs.security_config import openapi_security_config
+
+from .google_maps_toolkit import MAP_FUNCS, GoogleMapsToolkit
+from .math_toolkit import MATH_FUNCS, MathToolkit
+from .open_api_toolkit import OPENAPI_FUNCS, OpenAPIToolkit
+from .retrieval_toolkit import RETRIEVAL_FUNCS, RetrievalToolkit
+from .search_toolkit import SEARCH_FUNCS, SearchToolkit
+from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
+from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
+from .slack_toolkit import SLACK_FUNCS, SlackToolkit

 from .base import BaseToolkit
 from .code_execution import CodeExecutionToolkit
 from .github_toolkit import GithubToolkit

 __all__ = [
+    'OpenAIFunction',
+    'get_openai_function_schema',
+    'get_openai_tool_schema',
+    'openapi_security_config',
+    'MATH_FUNCS',
+    'MAP_FUNCS',
+    'OPENAPI_FUNCS',
+    'RETRIEVAL_FUNCS',
+    'SEARCH_FUNCS',
+    'TWITTER_FUNCS',
+    'WEATHER_FUNCS',
+    'SLACK_FUNCS',
     'BaseToolkit',
     'GithubToolkit',
+    'MathToolkit',
+    'GoogleMapsToolkit',
+    'SearchToolkit',
+    'SlackToolkit',
+    'TwitterToolkit',
+    'WeatherToolkit',
+    'RetrievalToolkit',
+    'OpenAPIToolkit',
     'CodeExecutionToolkit',
 ]
camel/toolkits/base.py
CHANGED
camel/toolkits/code_execution.py
CHANGED

@@ -13,8 +13,8 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from typing import List, Literal

-from camel.functions import OpenAIFunction
 from camel.interpreters import InternalPythonInterpreter
+from camel.toolkits import OpenAIFunction

 from .base import BaseToolkit

camel/toolkits/github_toolkit.py
CHANGED

@@ -17,10 +17,11 @@ from dataclasses import dataclass
 from datetime import datetime, timedelta
 from typing import List, Optional

-from camel.functions import OpenAIFunction
-from camel.toolkits.base import BaseToolkit
 from camel.utils import dependencies_required

+from .base import BaseToolkit
+from .openai_function import OpenAIFunction
+

 @dataclass
 class GithubIssue: