camel-ai 0.1.5.7__py3-none-any.whl → 0.1.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +2 -2
- camel/agents/critic_agent.py +1 -1
- camel/agents/deductive_reasoner_agent.py +4 -4
- camel/agents/embodied_agent.py +1 -1
- camel/agents/knowledge_graph_agent.py +2 -2
- camel/agents/role_assignment_agent.py +1 -1
- camel/agents/search_agent.py +4 -5
- camel/agents/task_agent.py +5 -5
- camel/configs/__init__.py +9 -0
- camel/configs/groq_config.py +119 -0
- camel/configs/mistral_config.py +81 -0
- camel/configs/ollama_config.py +1 -1
- camel/configs/vllm_config.py +103 -0
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/mistral_embedding.py +89 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/ipython_interpreter.py +167 -0
- camel/models/__init__.py +8 -0
- camel/models/anthropic_model.py +7 -2
- camel/models/azure_openai_model.py +152 -0
- camel/models/base_model.py +5 -1
- camel/models/gemini_model.py +14 -2
- camel/models/groq_model.py +131 -0
- camel/models/litellm_model.py +10 -4
- camel/models/mistral_model.py +169 -0
- camel/models/model_factory.py +30 -3
- camel/models/ollama_model.py +5 -2
- camel/models/open_source_model.py +11 -3
- camel/models/openai_model.py +7 -2
- camel/models/stub_model.py +4 -4
- camel/models/vllm_model.py +138 -0
- camel/models/zhipuai_model.py +7 -3
- camel/prompts/__init__.py +2 -2
- camel/prompts/task_prompt_template.py +4 -4
- camel/prompts/{descripte_video_prompt.py → video_description_prompt.py} +1 -1
- camel/retrievers/auto_retriever.py +2 -2
- camel/storages/graph_storages/neo4j_graph.py +5 -0
- camel/types/enums.py +152 -35
- camel/utils/__init__.py +2 -0
- camel/utils/token_counting.py +148 -40
- {camel_ai-0.1.5.7.dist-info → camel_ai-0.1.6.0.dist-info}/METADATA +42 -3
- {camel_ai-0.1.5.7.dist-info → camel_ai-0.1.6.0.dist-info}/RECORD +44 -35
- {camel_ai-0.1.5.7.dist-info → camel_ai-0.1.6.0.dist-info}/WHEEL +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -82,7 +82,7 @@ class ChatAgent(BaseAgent):
         system_message (BaseMessage): The system message for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         api_key (str, optional): The API key for authenticating with the
             LLM service. Only OpenAI and Anthropic model supported (default:
             :obj:`None`)
@@ -127,7 +127,7 @@ class ChatAgent(BaseAgent):
             if model is not None
             else ModelFactory.create(
                 model_platform=ModelPlatformType.OPENAI,
-                model_type=ModelType.
+                model_type=ModelType.GPT_4O_MINI,
                 model_config_dict=ChatGPTConfig().__dict__,
                 api_key=self._api_key,
             )
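For orientation, a minimal sketch of what the new default wiring amounts to when a ChatAgent is constructed (not part of the diff; the role name and message content are illustrative, and `make_assistant_message` is assumed from camel's public API):

from camel.agents import ChatAgent
from camel.configs import ChatGPTConfig
from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Explicit equivalent of the new default backend built above when no
# model is passed: GPT_4O_MINI on the OpenAI platform.
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
    model_config_dict=ChatGPTConfig().__dict__,
)

# Illustrative system message; omitting `model` would yield the same backend.
sys_msg = BaseMessage.make_assistant_message(
    role_name="Assistant",
    content="You are a helpful assistant.",
)
agent = ChatAgent(system_message=sys_msg, model=model)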
camel/agents/critic_agent.py
CHANGED
@@ -33,7 +33,7 @@ class CriticAgent(ChatAgent):
             agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`6`)
camel/agents/deductive_reasoner_agent.py
CHANGED
@@ -35,7 +35,7 @@ class DeductiveReasonerAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
     """
 
     def __init__(
@@ -126,7 +126,7 @@ $B$.
 - Direct Path Analysis: What are the immediate and direct conditions
 required to move from $A$ to $B$?
 - Intermediate States: Are there states between $A$ and $B$ that must be
-
+traversed or can be used to make the transition smoother or more
 efficient? If yes, what is the content?
 - Constraints & Limitations: Identify potential barriers or restrictions
 in moving from $A$ to $B$. These can be external (e.g., environmental
@@ -244,7 +244,7 @@ square brackets)
         print(f"Message content:\n{msg.content}")
 
         # Extract the conditions from the message
-
+        conditions_dict = {
             f"condition {i}": cdt.replace("<", "")
             .replace(">", "")
             .strip()
@@ -281,7 +281,7 @@ square brackets)
         conditions_and_quality_json: Dict[
             str, Union[List[str], Dict[str, str]]
         ] = {}
-        conditions_and_quality_json["conditions"] =
+        conditions_and_quality_json["conditions"] = conditions_dict
         conditions_and_quality_json["labels"] = labels
         conditions_and_quality_json["evaluate_quality"] = quality
 
camel/agents/embodied_agent.py
CHANGED
@@ -35,7 +35,7 @@ class EmbodiedAgent(ChatAgent):
         system_message (BaseMessage): The system message for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`None`)
camel/agents/knowledge_graph_agent.py
CHANGED
@@ -115,14 +115,14 @@ class KnowledgeGraphAgent(ChatAgent):
         Args:
             model (BaseModelBackend, optional): The model backend to use for
                 generating responses. (default: :obj:`OpenAIModel` with
-                `
+                `GPT_4O_MINI`)
         """
         system_message = BaseMessage(
             role_name="Graphify",
             role_type=RoleType.ASSISTANT,
             meta_dict=None,
             content="Your mission is to transform unstructured content "
-            "
+            "into structured graph data. Extract nodes and relationships with "
             "precision, and let the connections unfold. Your graphs will "
             "illuminate the hidden connections within the chaos of "
             "information.",
camel/agents/role_assignment_agent.py
CHANGED
@@ -27,7 +27,7 @@ class RoleAssignmentAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
 
     Attributes:
         role_assignment_prompt (TextPrompt): A prompt for the agent to generate
camel/agents/search_agent.py
CHANGED
@@ -26,10 +26,9 @@ class SearchAgent(ChatAgent):
     relevance of an answer.
 
     Args:
-
-
-
-        (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_4O_MINI`)
     """
 
     def __init__(
@@ -76,7 +75,7 @@ class SearchAgent(ChatAgent):
         result = self.step(user_msg).msg.content
         results += result + "\n"
 
-        # Final
+        # Final summarization
         final_prompt = TextPrompt(
             '''Here are some summarized texts which split from one text. Using
             the information to answer the question. If can't find the answer,
camel/agents/task_agent.py
CHANGED
@@ -32,7 +32,7 @@ class TaskSpecifyAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         task_type (TaskType, optional): The type of task for which to generate
             a prompt. (default: :obj:`TaskType.AI_SOCIETY`)
         task_specify_prompt (Union[str, TextPrompt], optional): The prompt for
@@ -126,7 +126,7 @@ class TaskPlannerAgent(ChatAgent):
     Args:
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
     """
@@ -201,7 +201,7 @@ class TaskCreationAgent(ChatAgent):
             perform the task.
         model (BaseModelBackend, optional): The LLM backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous
@@ -233,7 +233,7 @@ The result must be a numbered list in the format:
 #. Third Task
 
 You can only give me up to {max_task_num} tasks at a time. \
-Each task
+Each task should be concise, concrete and doable for a {role_name}.
 You should make task plan and not ask me questions.
 If you think no new tasks are needed right now, write "No tasks to add."
 Now start to give me new tasks one by one. No more than three tasks.
@@ -312,7 +312,7 @@ class TaskPrioritizationAgent(ChatAgent):
             perform the task.
         model (BaseModelBackend, optional): The LLM backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         output_language (str, optional): The language to be output by the
             agent. (default: :obj:`None`)
         message_window_size (int, optional): The maximum number of previous
camel/configs/__init__.py
CHANGED
@@ -17,13 +17,16 @@ from .gemini_config import (
     Gemini_API_PARAMS,
     GeminiConfig,
 )
+from .groq_config import GROQ_API_PARAMS, GroqConfig
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import (
     OPENAI_API_PARAMS,
     ChatGPTConfig,
     OpenSourceConfig,
 )
+from .vllm_config import VLLM_API_PARAMS, VLLMConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
 __all__ = [
@@ -32,6 +35,8 @@ __all__ = [
     'OPENAI_API_PARAMS',
     'AnthropicConfig',
     'ANTHROPIC_API_PARAMS',
+    'GROQ_API_PARAMS',
+    'GroqConfig',
     'OpenSourceConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
@@ -41,4 +46,8 @@ __all__ = [
     'ZHIPUAI_API_PARAMS',
     'GeminiConfig',
     'Gemini_API_PARAMS',
+    'VLLMConfig',
+    'VLLM_API_PARAMS',
+    'MistralConfig',
+    'MISTRAL_API_PARAMS',
 ]
camel/configs/groq_config.py
ADDED
@@ -0,0 +1,119 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, Optional, Sequence
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+if TYPE_CHECKING:
+    from camel.toolkits import OpenAIFunction
+
+
+@dataclass(frozen=True)
+class GroqConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility.
+
+    Reference: https://console.groq.com/docs/openai
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`1`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`""`)
+        tools (list[OpenAIFunction], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: float = 0.2  # openai default: 1.0
+    top_p: float = 1.0
+    n: int = 1
+    stream: bool = False
+    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+    max_tokens: int | NotGiven = NOT_GIVEN
+    presence_penalty: float = 0.0
+    response_format: dict | NotGiven = NOT_GIVEN
+    frequency_penalty: float = 0.0
+    user: str = ""
+    tools: Optional[list[OpenAIFunction]] = None
+    tool_choice: Optional[dict[str, str] | str] = "none"
+
+    def __post_init__(self):
+        if self.tools is not None:
+            object.__setattr__(
+                self,
+                'tools',
+                [tool.get_openai_tool_schema() for tool in self.tools],
+            )
+
+
+GROQ_API_PARAMS = {param for param in asdict(GroqConfig()).keys()}
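A minimal usage sketch (not part of the diff), exercising only what the file above defines: `GroqConfig` is a frozen dataclass, and `GROQ_API_PARAMS` is derived from its fields via `asdict`, so the two stay in sync by construction.

from dataclasses import asdict

from camel.configs import GROQ_API_PARAMS, GroqConfig

# Frozen dataclass: values are fixed at construction time.
config = GroqConfig(temperature=0.0, max_tokens=256)

# asdict(config) is the shape a backend receives as model_config_dict.
assert set(asdict(config)) == GROQ_API_PARAMS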
camel/configs/mistral_config.py
ADDED
@@ -0,0 +1,81 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, Dict, Optional, Union
+
+from camel.configs.base_config import BaseConfig
+
+if TYPE_CHECKING:
+    from mistralai.models.chat_completion import (
+        ResponseFormat,
+    )
+
+    from camel.toolkits import OpenAIFunction
+
+
+@dataclass(frozen=True)
+class MistralConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Mistral API.
+
+    reference: https://github.com/mistralai/client-python/blob/9d238f88c41689821d7b08570f13b43426f97fd6/src/mistralai/client.py#L195
+
+    Args:
+        temperature (Optional[float], optional): the temperature to use for
+            sampling, e.g. 0.5.
+        max_tokens (Optional[int], optional): the maximum number of tokens to
+            generate, e.g. 100. Defaults to None.
+        top_p (Optional[float], optional): the cumulative probability of
+            tokens to generate, e.g. 0.9. Defaults to None.
+        random_seed (Optional[int], optional): the random seed to use for
+            sampling, e.g. 42. Defaults to None.
+        safe_mode (bool, optional): deprecated, use safe_prompt instead.
+            Defaults to False.
+        safe_prompt (bool, optional): whether to use safe prompt, e.g. true.
+            Defaults to False.
+        response_format (Union[Dict[str, str], ResponseFormat], optional):
+            format of the response.
+        tools (Optional[list[OpenAIFunction]], optional): a list of tools to
+            use.
+        tool_choice (str, optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"any"` means the
+            model must call one or more tools. :obj:`"auto"` is the default
+            value.
+    """
+
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = None
+    top_p: Optional[float] = None
+    random_seed: Optional[int] = None
+    safe_mode: bool = False
+    safe_prompt: bool = False
+    response_format: Optional[Union[Dict[str, str], ResponseFormat]] = None
+    tools: Optional[list[OpenAIFunction]] = None
+    tool_choice: Optional[str] = "auto"
+
+    def __post_init__(self):
+        if self.tools is not None:
+            object.__setattr__(
+                self,
+                'tools',
+                [tool.get_openai_tool_schema() for tool in self.tools],
+            )
+
+
+MISTRAL_API_PARAMS = {param for param in asdict(MistralConfig()).keys()}
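The same pattern applies to the Mistral config above (sketch, not part of the diff): sampling knobs default to None or the documented default, and the accepted parameter set is again derived from the dataclass fields.

from dataclasses import asdict

from camel.configs import MISTRAL_API_PARAMS, MistralConfig

# Only explicitly set fields deviate from the documented defaults.
config = MistralConfig(temperature=0.5, random_seed=42)
assert set(asdict(config)) == MISTRAL_API_PARAMS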
camel/configs/ollama_config.py
CHANGED
@@ -24,7 +24,7 @@ from camel.configs.base_config import BaseConfig
 @dataclass(frozen=True)
 class OllamaConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using OpenAI
-    compatibility
+    compatibility.
 
     Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
 
camel/configs/vllm_config.py
ADDED
@@ -0,0 +1,103 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass, field
+from typing import Sequence
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+
+# flake8: noqa: E501
+@dataclass(frozen=True)
+class VLLMConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    OpenAI API.
+
+    Reference: https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`1`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+        logit_bias (dict, optional): Modify the likelihood of specified tokens
+            appearing in the completion. Accepts a json object that maps tokens
+            (specified by their token ID in the tokenizer) to an associated
+            bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
+            is added to the logits generated by the model prior to sampling.
+            The exact effect will vary per model, but values between :obj:`-1`
+            and :obj:`1` should decrease or increase likelihood of selection;
+            values like :obj:`-100` or :obj:`100` should result in a ban or
+            exclusive selection of the relevant token. (default: :obj:`{}`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`""`)
+    """
+
+    temperature: float = 0.2  # openai default: 1.0
+    top_p: float = 1.0
+    n: int = 1
+    stream: bool = False
+    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+    max_tokens: int | NotGiven = NOT_GIVEN
+    presence_penalty: float = 0.0
+    response_format: dict | NotGiven = NOT_GIVEN
+    frequency_penalty: float = 0.0
+    logit_bias: dict = field(default_factory=dict)
+    user: str = ""
+
+
+VLLM_API_PARAMS = {param for param in asdict(VLLMConfig()).keys()}
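One design detail worth noting in the vLLM config above: `logit_bias` uses `field(default_factory=dict)`, so each instance gets its own dict rather than a shared mutable class default. A short sketch (the token ID and bias value are illustrative):

from dataclasses import asdict

from camel.configs import VLLM_API_PARAMS, VLLMConfig

a = VLLMConfig(logit_bias={12345: -100})  # illustrative token ID / bias
b = VLLMConfig()
assert a.logit_bias is not b.logit_bias  # no shared default dict
assert set(asdict(b)) == VLLM_API_PARAMS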
camel/embeddings/__init__.py
CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .base import BaseEmbedding
+from .mistral_embedding import MistralEmbedding
 from .openai_embedding import OpenAIEmbedding
 from .sentence_transformers_embeddings import SentenceTransformerEncoder
 from .vlm_embedding import VisionLanguageEmbedding
@@ -21,4 +22,5 @@ __all__ = [
     "OpenAIEmbedding",
     "SentenceTransformerEncoder",
     "VisionLanguageEmbedding",
+    "MistralEmbedding",
 ]
camel/embeddings/mistral_embedding.py
ADDED
@@ -0,0 +1,89 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+import os
+from typing import Any
+
+from camel.embeddings.base import BaseEmbedding
+from camel.types import EmbeddingModelType
+from camel.utils import api_keys_required
+
+
+class MistralEmbedding(BaseEmbedding[str]):
+    r"""Provides text embedding functionalities using Mistral's models.
+
+    Args:
+        model_type (EmbeddingModelType, optional): The model type to be
+            used for text embeddings.
+            (default: :obj:`MISTRAL_EMBED`)
+        api_key (str, optional): The API key for authenticating with the
+            Mistral service. (default: :obj:`None`)
+        dimensions (int, optional): The text embedding output dimensions.
+            (default: :obj:`None`)
+
+    Raises:
+        RuntimeError: If an unsupported model type is specified.
+    """
+
+    def __init__(
+        self,
+        model_type: EmbeddingModelType = (EmbeddingModelType.MISTRAL_EMBED),
+        api_key: str | None = None,
+        dimensions: int | None = None,
+    ) -> None:
+        from mistralai.client import MistralClient
+
+        if not model_type.is_mistral:
+            raise ValueError("Invalid Mistral embedding model type.")
+        self.model_type = model_type
+        if dimensions is None:
+            self.output_dim = model_type.output_dim
+        else:
+            assert isinstance(dimensions, int)
+            self.output_dim = dimensions
+        self._api_key = api_key or os.environ.get("MISTRAL_API_KEY")
+        self._client = MistralClient(api_key=self._api_key)
+
+    @api_keys_required("MISTRAL_API_KEY")
+    def embed_list(
+        self,
+        objs: list[str],
+        **kwargs: Any,
+    ) -> list[list[float]]:
+        r"""Generates embeddings for the given texts.
+
+        Args:
+            objs (list[str]): The texts for which to generate the embeddings.
+            **kwargs (Any): Extra kwargs passed to the embedding API.
+
+        Returns:
+            list[list[float]]: A list that represents the generated embedding
+                as a list of floating-point numbers.
+        """
+        # TODO: count tokens
+        response = self._client.embeddings(
+            input=objs,
+            model=self.model_type.value,
+            **kwargs,
+        )
+        return [data.embedding for data in response.data]
+
+    def get_output_dim(self) -> int:
+        r"""Returns the output dimension of the embeddings.
+
+        Returns:
+            int: The dimensionality of the embedding for the current model.
+        """
+        return self.output_dim
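A usage sketch for the new embedding class, assuming a valid MISTRAL_API_KEY in the environment (the query text is illustrative, and the call hits the live Mistral API):

from camel.embeddings import MistralEmbedding

# Defaults to EmbeddingModelType.MISTRAL_EMBED; MISTRAL_API_KEY is
# enforced by the @api_keys_required decorator on embed_list().
embedding = MistralEmbedding()
vectors = embedding.embed_list(["What is deductive reasoning?"])
assert len(vectors[0]) == embedding.get_output_dim()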
camel/interpreters/__init__.py
CHANGED
@@ -16,6 +16,7 @@ from .base import BaseInterpreter
 from .docker_interpreter import DockerInterpreter
 from .internal_python_interpreter import InternalPythonInterpreter
 from .interpreter_error import InterpreterError
+from .ipython_interpreter import JupyterKernelInterpreter
 from .subprocess_interpreter import SubprocessInterpreter
 
 __all__ = [
@@ -24,4 +25,5 @@ __all__ = [
     'InternalPythonInterpreter',
     'SubprocessInterpreter',
     'DockerInterpreter',
+    'JupyterKernelInterpreter',
 ]
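The interpreter implementation itself (camel/interpreters/ipython_interpreter.py, +167 lines) is collapsed in this diff, so the following is only a sketch: the constructor argument is an assumption modeled on the sibling interpreters, and only the shared `BaseInterpreter.run(code, code_type)` interface is taken for granted.

from camel.interpreters import JupyterKernelInterpreter

# `require_confirm` is assumed here, not confirmed by this diff.
interpreter = JupyterKernelInterpreter(require_confirm=False)

# BaseInterpreter subclasses expose run(code, code_type).
print(interpreter.run("print(1 + 1)", code_type="python"))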