camel-ai 0.2.36__py3-none-any.whl → 0.2.38__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/__init__.py +2 -0
- camel/agents/repo_agent.py +579 -0
- camel/configs/aiml_config.py +20 -19
- camel/configs/anthropic_config.py +25 -27
- camel/configs/cohere_config.py +11 -10
- camel/configs/deepseek_config.py +16 -16
- camel/configs/gemini_config.py +8 -8
- camel/configs/groq_config.py +18 -19
- camel/configs/internlm_config.py +8 -8
- camel/configs/litellm_config.py +26 -24
- camel/configs/mistral_config.py +8 -8
- camel/configs/moonshot_config.py +11 -11
- camel/configs/nvidia_config.py +13 -13
- camel/configs/ollama_config.py +14 -15
- camel/configs/openai_config.py +3 -3
- camel/configs/openrouter_config.py +9 -9
- camel/configs/qwen_config.py +8 -8
- camel/configs/reka_config.py +12 -11
- camel/configs/samba_config.py +14 -14
- camel/configs/sglang_config.py +15 -16
- camel/configs/siliconflow_config.py +18 -17
- camel/configs/togetherai_config.py +18 -19
- camel/configs/vllm_config.py +18 -19
- camel/configs/yi_config.py +7 -8
- camel/configs/zhipuai_config.py +8 -9
- camel/datagen/evol_instruct/__init__.py +20 -0
- camel/datagen/evol_instruct/evol_instruct.py +424 -0
- camel/datagen/evol_instruct/scorer.py +166 -0
- camel/datagen/evol_instruct/templates.py +268 -0
- camel/datasets/static_dataset.py +25 -23
- camel/environments/models.py +10 -1
- camel/environments/single_step.py +296 -136
- camel/extractors/__init__.py +16 -1
- camel/interpreters/docker_interpreter.py +1 -1
- camel/interpreters/e2b_interpreter.py +1 -1
- camel/interpreters/subprocess_interpreter.py +1 -1
- camel/loaders/__init__.py +2 -2
- camel/loaders/{panda_reader.py → pandas_reader.py} +61 -30
- camel/memories/context_creators/score_based.py +198 -67
- camel/models/aiml_model.py +9 -3
- camel/models/anthropic_model.py +11 -3
- camel/models/azure_openai_model.py +9 -3
- camel/models/base_audio_model.py +6 -0
- camel/models/base_model.py +4 -0
- camel/models/deepseek_model.py +9 -3
- camel/models/gemini_model.py +9 -3
- camel/models/groq_model.py +9 -3
- camel/models/internlm_model.py +8 -2
- camel/models/model_factory.py +4 -0
- camel/models/moonshot_model.py +8 -2
- camel/models/nemotron_model.py +9 -3
- camel/models/nvidia_model.py +9 -3
- camel/models/ollama_model.py +9 -3
- camel/models/openai_audio_models.py +5 -3
- camel/models/openai_compatible_model.py +9 -3
- camel/models/openai_model.py +9 -3
- camel/models/openrouter_model.py +9 -3
- camel/models/qwen_model.py +9 -3
- camel/models/samba_model.py +9 -3
- camel/models/sglang_model.py +11 -4
- camel/models/siliconflow_model.py +8 -2
- camel/models/stub_model.py +2 -1
- camel/models/togetherai_model.py +9 -3
- camel/models/vllm_model.py +9 -3
- camel/models/yi_model.py +9 -3
- camel/models/zhipuai_model.py +9 -3
- camel/retrievers/auto_retriever.py +14 -0
- camel/storages/__init__.py +2 -0
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/tidb.py +332 -0
- camel/toolkits/__init__.py +7 -0
- camel/toolkits/browser_toolkit.py +84 -61
- camel/toolkits/openai_agent_toolkit.py +131 -0
- camel/toolkits/searxng_toolkit.py +207 -0
- camel/toolkits/thinking_toolkit.py +230 -0
- camel/types/enums.py +4 -0
- camel/utils/chunker/code_chunker.py +9 -15
- camel/verifiers/base.py +28 -5
- camel/verifiers/python_verifier.py +321 -68
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/METADATA +103 -8
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/RECORD +84 -75
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.36.dist-info → camel_ai-0.2.38.dist-info}/licenses/LICENSE +0 -0
camel/loaders/{panda_reader.py → pandas_reader.py}
RENAMED

@@ -11,13 +11,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-import os
 from functools import wraps
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
 
-import pandas as pd
-
 if TYPE_CHECKING:
     from pandas import DataFrame
     from pandasai import SmartDataframe
@@ -50,25 +47,18 @@ def check_suffix(valid_suffixs: List[str]) -> Callable:
     return decorator
 
 
-class PandaReader:
+class PandasReader:
     def __init__(self, config: Optional[Dict[str, Any]] = None) -> None:
-        r"""Initializes the PandaReader class.
+        r"""Initializes the PandasReader class.
 
         Args:
            config (Optional[Dict[str, Any]], optional): The configuration
                dictionary that can include LLM API settings for LLM-based
-                processing. If not provided, …
-                …
-                the config dictionary. (default: :obj:`None`)
+                processing. If not provided, no LLM will be configured by
+                default. You can customize the LLM configuration by providing
+                a 'llm' key in the config dictionary. (default: :obj:`None`)
        """
-        from pandasai.llm import OpenAI  # type: ignore[import-untyped]
-
        self.config = config or {}
-        if "llm" not in self.config:
-            self.config["llm"] = OpenAI(
-                api_token=os.getenv("OPENAI_API_KEY"),
-            )
 
        self.__LOADER = {
            ".csv": self.read_csv,
@@ -91,8 +81,13 @@ class PandaReader:
        data: Union["DataFrame", str],
        *args: Any,
        **kwargs: Dict[str, Any],
-    ) -> "SmartDataframe":
-        r"""Loads a file or DataFrame and returns a SmartDataframe object.
+    ) -> Union["DataFrame", "SmartDataframe"]:
+        r"""Loads a file or DataFrame and returns a DataFrame or
+        SmartDataframe object.
+
+        If an LLM is configured in the config dictionary, a SmartDataframe
+        will be returned, otherwise a regular pandas DataFrame will be
+        returned.
 
        args:
            data (Union[DataFrame, str]): The data to load.
@@ -100,24 +95,32 @@ class PandaReader:
            **kwargs (Dict[str, Any]): Additional keyword arguments.
 
        Returns:
-            SmartDataframe: The SmartDataframe object.
+            Union[DataFrame, SmartDataframe]: The DataFrame or SmartDataframe
+                object.
        """
        from pandas import DataFrame
-        from pandasai import SmartDataframe
 
+        # Load the data into a pandas DataFrame
        if isinstance(data, DataFrame):
-            return SmartDataframe(data, config=self.config)
-        file_path = str(data)
-        path = Path(file_path)
-        if not file_path.startswith("http") and not path.exists():
-            raise FileNotFoundError(f"File {file_path} not found")
-        if path.suffix in self.__LOADER:
-            return SmartDataframe(
-                self.__LOADER[path.suffix](file_path, *args, **kwargs),  # type: ignore[operator]
-                config=self.config,
-            )
+            df = data
        else:
-            raise ValueError(f"Unsupported file format: {path.suffix}")
+            file_path = str(data)
+            path = Path(file_path)
+            if not file_path.startswith("http") and not path.exists():
+                raise FileNotFoundError(f"File {file_path} not found")
+            if path.suffix in self.__LOADER:
+                df = self.__LOADER[path.suffix](file_path, *args, **kwargs)  # type: ignore[operator]
+            else:
+                raise ValueError(f"Unsupported file format: {path.suffix}")
+
+        # If an LLM is configured, return a SmartDataframe, otherwise return a
+        # regular DataFrame
+        if "llm" in self.config:
+            from pandasai import SmartDataframe
+
+            return SmartDataframe(df, config=self.config)
+        else:
+            return df
 
    @check_suffix([".csv"])
    def read_csv(
@@ -133,6 +136,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_csv(file_path, *args, **kwargs)
 
    @check_suffix([".xlsx", ".xls"])
@@ -149,6 +154,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_excel(file_path, *args, **kwargs)
 
    @check_suffix([".json"])
@@ -165,6 +172,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_json(file_path, *args, **kwargs)
 
    @check_suffix([".parquet"])
@@ -181,6 +190,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_parquet(file_path, *args, **kwargs)
 
    def read_sql(self, *args: Any, **kwargs: Dict[str, Any]) -> "DataFrame":
@@ -193,6 +204,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_sql(*args, **kwargs)
 
    def read_table(
@@ -208,6 +221,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_table(file_path, *args, **kwargs)
 
    def read_clipboard(
@@ -222,6 +237,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_clipboard(*args, **kwargs)
 
    @check_suffix([".html"])
@@ -238,6 +255,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_html(file_path, *args, **kwargs)
 
    @check_suffix([".feather"])
@@ -254,6 +273,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_feather(file_path, *args, **kwargs)
 
    @check_suffix([".dta"])
@@ -270,6 +291,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_stata(file_path, *args, **kwargs)
 
    @check_suffix([".sas"])
@@ -286,6 +309,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_sas(file_path, *args, **kwargs)
 
    @check_suffix([".pkl"])
@@ -302,6 +327,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_pickle(file_path, *args, **kwargs)
 
    @check_suffix([".h5"])
@@ -318,6 +345,8 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_hdf(file_path, *args, **kwargs)
 
    @check_suffix([".orc"])
@@ -334,4 +363,6 @@ class PandaReader:
        Returns:
            DataFrame: The DataFrame object.
        """
+        import pandas as pd
+
        return pd.read_orc(file_path, *args, **kwargs)
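The practical effect of this change is that `load()` no longer requires an OpenAI key for plain file loading. Below is a minimal usage sketch (not from the package docs; it assumes `PandasReader` is re-exported from `camel.loaders` per the `__init__.py` change above, and that a local `data.csv` exists):

```python
from camel.loaders import PandasReader

# Without an "llm" key in the config, load() now returns a plain pandas
# DataFrame; no OpenAI key is required for simple file loading.
reader = PandasReader()
df = reader.load("data.csv")

# With an "llm" key, load() wraps the result in a pandasai SmartDataframe.
# The API token is a placeholder; any pandasai-supported LLM should work.
from pandasai.llm import OpenAI

smart_reader = PandasReader(config={"llm": OpenAI(api_token="sk-...")})
smart_df = smart_reader.load("data.csv")
```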
camel/memories/context_creators/score_based.py
CHANGED

@@ -11,7 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from typing import List, Tuple
+from typing import List, Optional, Tuple
 
 from pydantic import BaseModel
 
@@ -19,6 +19,7 @@ from camel.logger import get_logger
 from camel.memories.base import BaseContextCreator
 from camel.memories.records import ContextRecord
 from camel.messages import OpenAIMessage
+from camel.types.enums import OpenAIBackendRole
 from camel.utils import BaseTokenCounter
 
 logger = get_logger(__name__)
@@ -64,96 +65,226 @@ class ScoreBasedContextCreator(BaseContextCreator):
        self,
        records: List[ContextRecord],
    ) -> Tuple[List[OpenAIMessage], int]:
-        r"""…
+        r"""Constructs conversation context from chat history while respecting
        token limits.
 
-        …
-        score messages…
+        Key strategies:
+        1. System message is always prioritized and preserved
+        2. Truncation removes low-score messages first
+        3. Final output maintains chronological order and in history memory,
+           the score of each message decreases according to keep_rate. The
+           newer the message, the higher the score.
 
        Args:
-            records (List[ContextRecord]): …
+            records (List[ContextRecord]): List of context records with scores
+                and timestamps.
 
        Returns:
-            Tuple[List[OpenAIMessage], int]: …
+            Tuple[List[OpenAIMessage], int]:
+                - Ordered list of OpenAI messages
+                - Total token count of the final context
 
        Raises:
-            RuntimeError: If … exceeding the token limit.
+            RuntimeError: If system message alone exceeds token limit
        """
-        # …
+        # ======================
+        # 1. System Message Handling
+        # ======================
+        system_unit, regular_units = self._extract_system_message(records)
+        system_tokens = system_unit.num_tokens if system_unit else 0
+
+        # Check early if system message alone exceeds token limit
+        if system_tokens > self.token_limit:
+            raise RuntimeError(
+                f"System message alone exceeds token limit"
+                f": {system_tokens} > {self.token_limit}",
+                system_tokens,
+            )
+
+        # ======================
+        # 2. Deduplication & Initial Processing
+        # ======================
+        seen_uuids = set()
+        if system_unit:
+            seen_uuids.add(system_unit.record.memory_record.uuid)
+
+        # Process non-system messages with deduplication
        for idx, record in enumerate(records):
-            if record.memory_record.uuid …
-            …
+            if record.memory_record.uuid in seen_uuids:
+                continue
+            seen_uuids.add(record.memory_record.uuid)
+
+            token_count = self.token_counter.count_tokens_from_messages(
+                [record.memory_record.to_openai_message()]
+            )
+            regular_units.append(
+                _ContextUnit(
+                    idx=idx,
+                    record=record,
+                    num_tokens=token_count,
                )
+            )
 
-        # …
+        # ======================
+        # 3. Token Calculation
+        # ======================
+        total_tokens = system_tokens + sum(u.num_tokens for u in regular_units)
 
-        # …
+        # ======================
+        # 4. Early Return if Within Limit
+        # ======================
        if total_tokens <= self.token_limit:
-            …
-                key=lambda unit: (unit.record.timestamp, unit.record.score),
+            sorted_units = sorted(
+                regular_units, key=self._conversation_sort_key
            )
-            return self.…
+            return self._assemble_output(sorted_units, system_unit)
 
-        # …
+        # ======================
+        # 5. Truncation Logic
+        # ======================
        logger.warning(
-            f"…
-            f"…
+            f"Context truncation required "
+            f"({total_tokens} > {self.token_limit}), "
+            f"pruning low-score messages."
        )
 
-        # Sort …
-            key=…
+        # Sort for truncation: high scores first, older messages first at same
+        # score
+        sorted_for_truncation = sorted(
+            regular_units, key=self._truncation_sort_key
        )
 
-        # …
+        # Reverse to process from lowest score (end of sorted list)
+        remaining_units = []
+        current_total = system_tokens
+
+        for unit in sorted_for_truncation:
+            potential_total = current_total + unit.num_tokens
+            if potential_total <= self.token_limit:
+                remaining_units.append(unit)
+                current_total = potential_total
+
+        # ======================
+        # 6. Output Assembly
+        # ======================
+
+        # Incase system message is the only message in memory when sorted units
+        # are empty, raise an error
+        if system_unit and len(remaining_units) == 0 and len(records) > 1:
            raise RuntimeError(
-                "…
+                "System message and current message exceeds token limit ",
+                total_tokens,
            )
-        return self._create_output(context_units[truncate_idx + 1 :])
 
-        …
+        # Sort remaining units chronologically
+        final_units = sorted(remaining_units, key=self._conversation_sort_key)
+        return self._assemble_output(final_units, system_unit)
+
+    def _extract_system_message(
+        self, records: List[ContextRecord]
+    ) -> Tuple[Optional[_ContextUnit], List[_ContextUnit]]:
+        r"""Extracts the system message from records and validates it.
+
+        Args:
+            records (List[ContextRecord]): List of context records
+                representing conversation history.
 
-        …
+        Returns:
+            Tuple[Optional[_ContextUnit], List[_ContextUnit]]: containing:
+                - The system message as a `_ContextUnit`, if valid; otherwise,
+                  `None`.
+                - An empty list, serving as the initial container for regular
+                  messages.
        """
-        …
+        if not records:
+            return None, []
+
+        first_record = records[0]
+        if (
+            first_record.memory_record.role_at_backend
+            != OpenAIBackendRole.SYSTEM
+        ):
+            return None, []
+
+        message = first_record.memory_record.to_openai_message()
+        tokens = self.token_counter.count_tokens_from_messages([message])
+        system_message_unit = _ContextUnit(
+            idx=0,
+            record=first_record,
+            num_tokens=tokens,
        )
-        return [
-        …
+        return system_message_unit, []
+
+    def _truncation_sort_key(self, unit: _ContextUnit) -> Tuple[float, float]:
+        r"""Defines the sorting key for the truncation phase.
+
+        Sorting priority:
+        - Primary: Sort by score in descending order (higher scores first).
+        - Secondary: Sort by timestamp in ascending order (older messages
+          first when scores are equal).
+
+        Args:
+            unit (_ContextUnit): A `_ContextUnit` representing a conversation
+                record.
+
+        Returns:
+            Tuple[float, float]:
+                - Negative score for descending order sorting.
+                - Timestamp for ascending order sorting.
+        """
+        return (-unit.record.score, unit.record.timestamp)
+
+    def _conversation_sort_key(
+        self, unit: _ContextUnit
+    ) -> Tuple[float, float]:
+        r"""Defines the sorting key for assembling the final output.
+
+        Sorting priority:
+        - Primary: Sort by timestamp in ascending order (chronological order).
+        - Secondary: Sort by score in descending order (higher scores first
+          when timestamps are equal).
+
+        Args:
+            unit (_ContextUnit): A `_ContextUnit` representing a conversation
+                record.
+
+        Returns:
+            Tuple[float, float]:
+                - Timestamp for chronological sorting.
+                - Negative score for descending order sorting.
+        """
+        return (unit.record.timestamp, -unit.record.score)
+
+    def _assemble_output(
+        self,
+        context_units: List[_ContextUnit],
+        system_unit: Optional[_ContextUnit],
+    ) -> Tuple[List[OpenAIMessage], int]:
+        r"""Assembles final message list with proper ordering and token count.
+
+        Args:
+            context_units (List[_ContextUnit]): Sorted list of regular message
+                units.
+            system_unit (Optional[_ContextUnit]): System message unit (if
+                present).
+
+        Returns:
+            Tuple[List[OpenAIMessage], int]: Tuple of (ordered messages, total
+                tokens)
+        """
+        messages = []
+        total_tokens = 0
+
+        # Add system message first if present
+        if system_unit:
+            messages.append(
+                system_unit.record.memory_record.to_openai_message()
+            )
+            total_tokens += system_unit.num_tokens
+
+        # Add sorted regular messages
+        for unit in context_units:
+            messages.append(unit.record.memory_record.to_openai_message())
+            total_tokens += unit.num_tokens
+
+        return messages, total_tokens
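The two sort keys are the core of this rewrite: one order decides what to drop, another reassembles what survives. A standalone sketch of that interplay (illustrative only; plain dicts stand in for `_ContextUnit` records with `score` and `timestamp` fields):

```python
# Stand-in records: (score, timestamp) pairs as used by the sort keys.
units = [
    {"msg": "oldest, low score", "score": 0.2, "timestamp": 1.0},
    {"msg": "middle, high score", "score": 0.9, "timestamp": 2.0},
    {"msg": "newest, mid score", "score": 0.5, "timestamp": 3.0},
]

# Truncation phase, key (-score, timestamp): high scores first, so the
# low-score "oldest" unit is the first to fall off when the budget ends.
by_truncation = sorted(units, key=lambda u: (-u["score"], u["timestamp"]))
kept = by_truncation[:2]  # pretend only two messages fit the token limit

# Assembly phase, key (timestamp, -score): survivors back in chat order.
final = sorted(kept, key=lambda u: (u["timestamp"], -u["score"]))
print([u["msg"] for u in final])
# ['middle, high score', 'newest, mid score']
```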
camel/models/aiml_model.py
CHANGED
@@ -52,6 +52,10 @@ class AIMLModel(BaseModelBackend):
            use for the model. If not provided, :obj:`OpenAITokenCounter(
            ModelType.GPT_4O_MINI)` will be used.
            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
    """
 
    @api_keys_required([("api_key", "AIML_API_KEY")])
@@ -62,6 +66,7 @@
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
    ) -> None:
        if model_config_dict is None:
            model_config_dict = AIMLConfig().as_dict()
@@ -70,17 +75,18 @@
            "AIML_API_BASE_URL",
            "https://api.aimlapi.com/v1",
        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
        )
        self._client = OpenAI(
-            timeout=…
+            timeout=self._timeout,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
        )
        self._async_client = AsyncOpenAI(
-            timeout=…
+            timeout=self._timeout,
            max_retries=3,
            api_key=self._api_key,
            base_url=self._url,
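The same one-line fallback is repeated in every backend constructor touched by this release. A self-contained sketch of the resolution order it implements (explicit argument, then the MODEL_TIMEOUT environment variable, then a 180-second default):

```python
import os
from typing import Optional


def resolve_timeout(timeout: Optional[float] = None) -> float:
    # Mirrors the line added to each backend constructor.
    return timeout or float(os.environ.get("MODEL_TIMEOUT", 180))


os.environ.pop("MODEL_TIMEOUT", None)
assert resolve_timeout() == 180.0     # default
os.environ["MODEL_TIMEOUT"] = "60"
assert resolve_timeout() == 60.0      # environment variable fallback
assert resolve_timeout(30.0) == 30.0  # explicit argument wins
```

Because the fallback uses `or`, a falsy value such as `timeout=0` would also fall through to the environment variable or the default.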
camel/models/anthropic_model.py
CHANGED
@@ -45,6 +45,10 @@ class AnthropicModel(BaseModelBackend):
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`AnthropicTokenCounter`
            will be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
    """
 
    @api_keys_required(
@@ -60,6 +64,7 @@ class AnthropicModel(BaseModelBackend):
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
    ) -> None:
        from openai import AsyncOpenAI, OpenAI
 
@@ -71,13 +76,16 @@ class AnthropicModel(BaseModelBackend):
            or os.environ.get("ANTHROPIC_API_BASE_URL")
            or "https://api.anthropic.com/v1/"
        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self.client = OpenAI(
+            base_url=self._url, api_key=self._api_key, timeout=self._timeout
        )
-        self.client = OpenAI(base_url=self._url, api_key=self._api_key)
 
        self.async_client = AsyncOpenAI(
-            api_key=self._api_key, base_url=self._url
+            api_key=self._api_key, base_url=self._url, timeout=self._timeout
        )
 
    @property

camel/models/azure_openai_model.py
CHANGED

@@ -49,6 +49,10 @@ class AzureOpenAIModel(BaseModelBackend):
        token_counter (Optional[BaseTokenCounter], optional): Token counter to
            use for the model. If not provided, :obj:`OpenAITokenCounter`
            will be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
 
    References:
        https://learn.microsoft.com/en-us/azure/ai-services/openai/
@@ -60,6 +64,7 @@
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
+        timeout: Optional[float] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        api_version: Optional[str] = None,
        azure_deployment_name: Optional[str] = None,
@@ -68,8 +73,9 @@
            model_config_dict = ChatGPTConfig().as_dict()
        api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
        url = url or os.environ.get("AZURE_OPENAI_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
        )
 
        self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
@@ -92,7 +98,7 @@
            azure_deployment=self.azure_deployment_name,
            api_version=self.api_version,
            api_key=self._api_key,
-            timeout=…
+            timeout=self._timeout,
            max_retries=3,
        )
 
@@ -101,7 +107,7 @@
            azure_deployment=self.azure_deployment_name,
            api_version=self.api_version,
            api_key=self._api_key,
-            timeout=…
+            timeout=self._timeout,
            max_retries=3,
        )
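Taken together, these constructor changes let callers pin a per-model timeout directly. A hedged usage sketch following the AzureOpenAIModel signature in the diff above (the key, endpoint, API version, and deployment name are placeholders, and the import paths are assumptions based on the package layout):

```python
import os

from camel.models import AzureOpenAIModel
from camel.types import ModelType

os.environ["AZURE_OPENAI_API_KEY"] = "<your-key>"        # placeholder
os.environ["AZURE_OPENAI_BASE_URL"] = "<your-endpoint>"  # placeholder

model = AzureOpenAIModel(
    model_type=ModelType.GPT_4O_MINI,
    timeout=60.0,  # overrides MODEL_TIMEOUT and the 180-second default
    api_version="2024-06-01",           # placeholder
    azure_deployment_name="my-gpt-4o",  # placeholder
)
```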