camel-ai 0.2.13__py3-none-any.whl → 0.2.15__py3-none-any.whl
This diff compares publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +362 -237
- camel/benchmarks/__init__.py +11 -1
- camel/benchmarks/apibank.py +560 -0
- camel/benchmarks/apibench.py +496 -0
- camel/benchmarks/gaia.py +2 -2
- camel/benchmarks/nexus.py +518 -0
- camel/datagen/__init__.py +21 -0
- camel/datagen/cotdatagen.py +448 -0
- camel/datagen/self_instruct/__init__.py +36 -0
- camel/datagen/self_instruct/filter/__init__.py +34 -0
- camel/datagen/self_instruct/filter/filter_function.py +208 -0
- camel/datagen/self_instruct/filter/filter_registry.py +56 -0
- camel/datagen/self_instruct/filter/instruction_filter.py +76 -0
- camel/datagen/self_instruct/self_instruct.py +393 -0
- camel/datagen/self_instruct/templates.py +384 -0
- camel/datahubs/huggingface.py +12 -2
- camel/datahubs/models.py +4 -2
- camel/embeddings/mistral_embedding.py +5 -1
- camel/embeddings/openai_compatible_embedding.py +6 -1
- camel/embeddings/openai_embedding.py +5 -1
- camel/interpreters/e2b_interpreter.py +5 -1
- camel/loaders/apify_reader.py +5 -1
- camel/loaders/chunkr_reader.py +5 -1
- camel/loaders/firecrawl_reader.py +0 -30
- camel/logger.py +11 -5
- camel/messages/conversion/sharegpt/hermes/hermes_function_formatter.py +4 -1
- camel/models/anthropic_model.py +5 -1
- camel/models/azure_openai_model.py +1 -2
- camel/models/cohere_model.py +5 -1
- camel/models/deepseek_model.py +5 -1
- camel/models/gemini_model.py +5 -1
- camel/models/groq_model.py +5 -1
- camel/models/mistral_model.py +5 -1
- camel/models/nemotron_model.py +5 -1
- camel/models/nvidia_model.py +5 -1
- camel/models/openai_model.py +28 -12
- camel/models/qwen_model.py +5 -1
- camel/models/reka_model.py +5 -1
- camel/models/reward/nemotron_model.py +5 -1
- camel/models/samba_model.py +5 -1
- camel/models/togetherai_model.py +5 -1
- camel/models/yi_model.py +5 -1
- camel/models/zhipuai_model.py +5 -1
- camel/retrievers/auto_retriever.py +8 -0
- camel/retrievers/vector_retriever.py +6 -3
- camel/schemas/__init__.py +2 -1
- camel/schemas/base.py +2 -4
- camel/schemas/openai_converter.py +5 -1
- camel/schemas/outlines_converter.py +249 -0
- camel/societies/role_playing.py +4 -4
- camel/societies/workforce/workforce.py +2 -2
- camel/storages/graph_storages/nebula_graph.py +119 -27
- camel/storages/graph_storages/neo4j_graph.py +138 -0
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/arxiv_toolkit.py +20 -3
- camel/toolkits/function_tool.py +61 -61
- camel/toolkits/meshy_toolkit.py +5 -1
- camel/toolkits/notion_toolkit.py +1 -1
- camel/toolkits/openbb_toolkit.py +869 -0
- camel/toolkits/search_toolkit.py +91 -5
- camel/toolkits/stripe_toolkit.py +5 -1
- camel/toolkits/twitter_toolkit.py +24 -16
- camel/types/enums.py +10 -1
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +4 -0
- camel/utils/commons.py +146 -42
- camel/utils/token_counting.py +1 -0
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/METADATA +18 -7
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/RECORD +72 -58
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.13.dist-info → camel_ai-0.2.15.dist-info}/WHEEL +0 -0
camel/models/cohere_model.py
CHANGED
```diff
@@ -43,6 +43,11 @@ except (ImportError, AttributeError):
 class CohereModel(BaseModelBackend):
     r"""Cohere API in a unified BaseModelBackend interface."""
 
+    @api_keys_required(
+        [
+            ("api_key", 'COHERE_API_KEY'),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -210,7 +215,6 @@ class CohereModel(BaseModelBackend):
         )
         return self._token_counter
 
-    @api_keys_required("COHERE_API_KEY")
     def run(self, messages: List[OpenAIMessage]) -> ChatCompletion:
         r"""Runs inference of Cohere chat completion.
 
```
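This relocation of `@api_keys_required` from `run()` to `__init__`, with the argument changed from a bare environment-variable name to a list of `(parameter_name, env_var_name)` tuples, repeats across every model backend in this diff. The tuple form lets an explicitly passed constructor argument satisfy the check, not just the environment variable. A minimal sketch of the implied contract, assuming the decorator validates each pair before invoking the wrapped callable (the real implementation lives in `camel/utils/commons.py`, whose diff is listed above, and may differ in detail):

```python
import functools
import os
from typing import Callable, List, Optional, Tuple


def api_keys_required(
    param_env_pairs: List[Tuple[Optional[str], str]],
) -> Callable:
    """Illustrative re-implementation, not the library's own code."""

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            missing = [
                env_var
                for param, env_var in param_env_pairs
                # A pair is satisfied by an explicit keyword argument
                # (when a parameter name is given) or by the env var.
                if not (
                    (param is not None and kwargs.get(param) is not None)
                    or os.environ.get(env_var)
                )
            ]
            if missing:
                raise ValueError(f"Missing API key(s): {', '.join(missing)}")
            return func(*args, **kwargs)

        return wrapper

    return decorator
```

Decorating `__init__` also means a missing key fails fast at construction time instead of surfacing on the first inference call.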
camel/models/deepseek_model.py
CHANGED
```diff
@@ -50,6 +50,11 @@ class DeepSeekModel(BaseModelBackend):
     https://api-docs.deepseek.com/
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "DEEPSEEK_API_KEY"),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -90,7 +95,6 @@ class DeepSeekModel(BaseModelBackend):
         )
         return self._token_counter
 
-    @api_keys_required("DEEPSEEK_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/gemini_model.py
CHANGED
```diff
@@ -52,6 +52,11 @@ class GeminiModel(BaseModelBackend):
         (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", 'GEMINI_API_KEY'),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -77,7 +82,6 @@ class GeminiModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    @api_keys_required("GEMINI_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/groq_model.py
CHANGED
```diff
@@ -51,6 +51,11 @@ class GroqModel(BaseModelBackend):
         (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "GROQ_API_KEY"),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -89,7 +94,6 @@ class GroqModel(BaseModelBackend):
         self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    @api_keys_required("GROQ_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/mistral_model.py
CHANGED
```diff
@@ -59,6 +59,11 @@ class MistralModel(BaseModelBackend):
         be used. (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "MISTRAL_API_KEY"),
+        ]
+    )
     @dependencies_required('mistralai')
     def __init__(
         self,
@@ -200,7 +205,6 @@ class MistralModel(BaseModelBackend):
         )
         return self._token_counter
 
-    @api_keys_required("MISTRAL_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/nemotron_model.py
CHANGED
```diff
@@ -40,6 +40,11 @@ class NemotronModel(BaseModelBackend):
     Nemotron model doesn't support additional model config like OpenAI.
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "NVIDIA_API_KEY"),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -58,7 +63,6 @@ class NemotronModel(BaseModelBackend):
             api_key=self._api_key,
         )
 
-    @api_keys_required("NVIDIA_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/nvidia_model.py
CHANGED
```diff
@@ -48,6 +48,11 @@ class NvidiaModel(BaseModelBackend):
         (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "NVIDIA_API_KEY"),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -72,7 +77,6 @@ class NvidiaModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    @api_keys_required("NVIDIA_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/openai_model.py
CHANGED
```diff
@@ -21,6 +21,7 @@ from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
+    NOT_GIVEN,
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
@@ -51,6 +52,11 @@ class OpenAIModel(BaseModelBackend):
         be used. (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "OPENAI_API_KEY"),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -85,7 +91,6 @@ class OpenAIModel(BaseModelBackend):
         self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
-    @api_keys_required("OPENAI_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
@@ -103,7 +108,11 @@ class OpenAIModel(BaseModelBackend):
         """
         # o1-preview and o1-mini have Beta limitations
         # reference: https://platform.openai.com/docs/guides/reasoning
-        if self.model_type in [
+        if self.model_type in [
+            ModelType.O1,
+            ModelType.O1_MINI,
+            ModelType.O1_PREVIEW,
+        ]:
             warnings.warn(
                 "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
                 "which has certain limitations, reference: "
@@ -111,22 +120,21 @@ class OpenAIModel(BaseModelBackend):
                 UserWarning,
             )
 
-            # Remove system message that is not supported in o1 model.
-            messages = [msg for msg in messages if msg.get("role") != "system"]
-
             # Check and remove unsupported parameters and reset the fixed
             # parameters
-            unsupported_keys = [
+            unsupported_keys = [
+                "temperature",
+                "top_p",
+                "presence_penalty",
+                "frequency_penalty",
+                "logprobs",
+                "top_logprobs",
+                "logit_bias",
+            ]
             for key in unsupported_keys:
                 if key in self.model_config_dict:
                     del self.model_config_dict[key]
 
-            self.model_config_dict["temperature"] = 1.0
-            self.model_config_dict["top_p"] = 1.0
-            self.model_config_dict["n"] = 1
-            self.model_config_dict["presence_penalty"] = 0.0
-            self.model_config_dict["frequency_penalty"] = 0.0
-
         if self.model_config_dict.get("response_format"):
             # stream is not supported in beta.chat.completions.parse
             if "stream" in self.model_config_dict:
@@ -140,6 +148,14 @@ class OpenAIModel(BaseModelBackend):
 
             return self._to_chat_completion(response)
 
+        # Removing 'strict': True from the dictionary for
+        # client.chat.completions.create
+        if self.model_config_dict.get('tools') is not NOT_GIVEN:
+            for tool in self.model_config_dict.get('tools', []):
+                function_dict = tool.get('function', {})
+                if 'strict' in function_dict:
+                    del function_dict['strict']
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
```
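Besides the decorator move, this file's diff makes three behavioral changes for O1-class models: `ModelType.O1` is now recognized alongside `O1_MINI` and `O1_PREVIEW`, system messages are no longer stripped from the request, and the block that force-reset `temperature`, `top_p`, `n`, and the penalty parameters is gone. It also strips `strict` from tool schemas before calling `client.chat.completions.create`, per the comment in the new code. A standalone sketch of that sanitizing step, on a hypothetical config dict shaped like `model_config_dict`:

```python
from openai import NOT_GIVEN  # SDK sentinel for "parameter not supplied"

# Hypothetical config for illustration; mirrors model_config_dict above.
model_config_dict = {
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # made-up tool name
                "parameters": {"type": "object", "properties": {}},
                "strict": True,
            },
        }
    ],
}

# Drop 'strict' from each tool schema before the plain
# chat.completions.create call, as the new code above does.
if model_config_dict.get("tools") is not NOT_GIVEN:
    for tool in model_config_dict.get("tools", []):
        tool.get("function", {}).pop("strict", None)

print(model_config_dict["tools"][0]["function"])  # no 'strict' key left
```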
camel/models/qwen_model.py
CHANGED
```diff
@@ -52,6 +52,11 @@ class QwenModel(BaseModelBackend):
         (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "QWEN_API_KEY"),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -77,7 +82,6 @@ class QwenModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    @api_keys_required("QWEN_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/reka_model.py
CHANGED
```diff
@@ -56,6 +56,11 @@ class RekaModel(BaseModelBackend):
         be used. (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "REKA_API_KEY"),
+        ]
+    )
     @dependencies_required('reka')
     def __init__(
         self,
@@ -168,7 +173,6 @@ class RekaModel(BaseModelBackend):
         )
         return self._token_counter
 
-    @api_keys_required("REKA_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/reward/nemotron_model.py
CHANGED
```diff
@@ -53,7 +53,11 @@ class NemotronRewardModel(BaseRewardModel):
             api_key=self.api_key,
         )
 
-    @api_keys_required(
+    @api_keys_required(
+        [
+            (None, "NVIDIA_API_KEY"),
+        ]
+    )
     def evaluate(self, messages: List[Dict[str, str]]) -> Dict[str, float]:
         r"""Evaluate the messages using the Nemotron model.
 
```
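Note the `(None, "NVIDIA_API_KEY")` pair here: with no parameter name given, only the environment variable can satisfy the check. Reusing the sketch decorator from earlier, usage would look like this (the function body is a placeholder):

```python
@api_keys_required([(None, "NVIDIA_API_KEY")])  # env-var-only check
def evaluate(messages):
    ...  # would call the NVIDIA-hosted reward endpoint here
```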
camel/models/samba_model.py
CHANGED
```diff
@@ -74,6 +74,11 @@ class SambaModel(BaseModelBackend):
         ModelType.GPT_4O_MINI)` will be used.
     """
 
+    @api_keys_required(
+        [
+            ("api_key", 'SAMBA_API_KEY'),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -143,7 +148,6 @@ class SambaModel(BaseModelBackend):
             " SambaNova service"
         )
 
-    @api_keys_required("SAMBA_API_KEY")
     def run(  # type: ignore[misc]
         self, messages: List[OpenAIMessage]
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
```
camel/models/togetherai_model.py
CHANGED
```diff
@@ -53,6 +53,11 @@ class TogetherAIModel(BaseModelBackend):
         ModelType.GPT_4O_MINI)` will be used.
     """
 
+    @api_keys_required(
+        [
+            ("api_key", 'TOGETHER_API_KEY'),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -78,7 +83,6 @@ class TogetherAIModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    @api_keys_required("TOGETHER_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/yi_model.py
CHANGED
```diff
@@ -52,6 +52,11 @@ class YiModel(BaseModelBackend):
         (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", 'YI_API_KEY'),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -76,7 +81,6 @@ class YiModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    @api_keys_required("YI_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/models/zhipuai_model.py
CHANGED
```diff
@@ -52,6 +52,11 @@ class ZhipuAIModel(BaseModelBackend):
         (default: :obj:`None`)
     """
 
+    @api_keys_required(
+        [
+            ("api_key", 'ZHIPUAI_API_KEY'),
+        ]
+    )
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -76,7 +81,6 @@ class ZhipuAIModel(BaseModelBackend):
             base_url=self._url,
        )
 
-    @api_keys_required("ZHIPUAI_API_KEY")
     def run(
         self,
         messages: List[OpenAIMessage],
```
camel/retrievers/auto_retriever.py
CHANGED
```diff
@@ -121,6 +121,14 @@ class AutoRetriever:
 
         collection_name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
 
+        # Ensure the first character is either an underscore or a letter for
+        # Milvus
+        if (
+            self.storage_type == StorageType.MILVUS
+            and not collection_name[0].isalpha()
+        ):
+            collection_name = f"_{collection_name}"
+
         return collection_name
 
     def run_vector_retriever(
```
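The new guard reflects a Milvus naming rule: collection names must begin with a letter or an underscore. The whole sanitization, shown standalone (the function name is ours, not the package's):

```python
import re


def to_milvus_collection_name(content: str) -> str:
    # Strip non-alphanumerics and cap at 20 chars, as the diff does.
    name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
    # Prefix an underscore when the name starts with a non-letter;
    # the empty-string guard is ours, added for safety.
    if name and not name[0].isalpha():
        name = f"_{name}"
    return name


print(to_milvus_collection_name("2024/report-v1.pdf"))  # _2024reportv1pdf
```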
camel/retrievers/vector_retriever.py
CHANGED
```diff
@@ -161,13 +161,16 @@ class VectorRetriever(BaseRetriever):
         # content path, chunk metadata, and chunk text
         for vector, chunk in zip(batch_vectors, batch_chunks):
             if isinstance(content, str):
-                content_path_info = {"content path": content}
+                content_path_info = {"content path": content[:100]}
             elif isinstance(content, IOBase):
                 content_path_info = {"content path": "From file bytes"}
             elif isinstance(content, Element):
                 content_path_info = {
-                    "content path": content.metadata.file_directory
-
+                    "content path": content.metadata.file_directory[
+                        :100
+                    ]
+                    if content.metadata.file_directory
+                    else ""
                 }
 
             chunk_metadata = {"metadata": chunk.metadata.to_dict()}
```
camel/schemas/__init__.py
CHANGED
```diff
@@ -13,5 +13,6 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 from .openai_converter import OpenAISchemaConverter
+from .outlines_converter import OutlinesConverter
 
-__all__ = ["OpenAISchemaConverter"]
+__all__ = ["OpenAISchemaConverter", "OutlinesConverter"]
```
camel/schemas/base.py
CHANGED
```diff
@@ -15,8 +15,6 @@
 from abc import ABC, abstractmethod
 from typing import Any, Dict
 
-from pydantic import BaseModel
-
 
 class BaseConverter(ABC):
     r"""A base class for schema outputs that includes functionality
@@ -30,7 +28,7 @@ class BaseConverter(ABC):
     @abstractmethod
     def convert(
         self, content: str, *args: Any, **kwargs: Dict[str, Any]
-    ) ->
+    ) -> Any:
         r"""Structures the input text into the expected response format.
 
         Args:
@@ -40,6 +38,6 @@ class BaseConverter(ABC):
             prompt (Optional[str], optional): The prompt to be used.
 
         Returns:
-
+            Any: The converted response.
         """
         pass
```
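With the `pydantic` import removed and `convert` now annotated `-> Any`, converter subclasses are no longer tied to `BaseModel` outputs. A hypothetical subclass under the relaxed contract (`JsonConverter` is illustrative, not part of the package):

```python
import json
from typing import Any, Dict

from camel.schemas.base import BaseConverter  # module path per the diff


class JsonConverter(BaseConverter):
    """Hypothetical converter returning a plain dict instead of a model."""

    def convert(
        self, content: str, *args: Any, **kwargs: Dict[str, Any]
    ) -> Any:
        return json.loads(content)


print(JsonConverter().convert('{"a": 1}'))  # {'a': 1}
```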
camel/schemas/openai_converter.py
CHANGED
```diff
@@ -53,6 +53,11 @@ class OpenAISchemaConverter(BaseConverter):
 
     """
 
+    @api_keys_required(
+        [
+            ("api_key", "OPENAI_API_KEY"),
+        ]
+    )
     def __init__(
         self,
         model_type: ModelType = ModelType.GPT_4O_MINI,
@@ -69,7 +74,6 @@ class OpenAISchemaConverter(BaseConverter):
         )._client
         super().__init__()
 
-    @api_keys_required("OPENAI_API_KEY")
     def convert(  # type: ignore[override]
         self,
         content: str,
```