camel-ai 0.2.73a4__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +2217 -519
- camel/agents/mcp_agent.py +30 -27
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datasets/base_generator.py +39 -10
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +3 -12
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/chunkr_reader.py +9 -0
- camel/memories/agent_memories.py +48 -4
- camel/memories/base.py +26 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/context_creators/score_based.py +25 -384
- camel/memories/records.py +88 -8
- camel/messages/base.py +153 -34
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +6 -19
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +114 -89
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +32 -14
- camel/models/cohere_model.py +1 -16
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +1 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +36 -18
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +1 -16
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +105 -24
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +62 -41
- camel/models/openai_model.py +62 -57
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/__init__.py +2 -0
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +146 -66
- camel/societies/workforce/role_playing_worker.py +15 -11
- camel/societies/workforce/single_agent_worker.py +302 -65
- camel/societies/workforce/structured_output_handler.py +30 -18
- camel/societies/workforce/task_channel.py +163 -27
- camel/societies/workforce/utils.py +107 -13
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +1949 -579
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +168 -145
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +4 -3
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/base.py +6 -4
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/dappier_toolkit.py +5 -1
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
- camel/toolkits/excel_toolkit.py +1 -1
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +430 -36
- camel/toolkits/function_tool.py +13 -3
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +12 -31
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +15 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +77 -8
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +884 -88
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +959 -89
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +9 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +281 -213
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +23 -3
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +72 -7
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +582 -132
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +158 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +55 -8
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +43 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +321 -8
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +10 -4
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +45 -4
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +151 -53
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +366 -71
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_integration.py +18 -13
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +19 -10
- camel/toolkits/notion_mcp_toolkit.py +16 -26
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +8 -49
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/search_toolkit.py +264 -91
- camel/toolkits/slack_toolkit.py +64 -10
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +17 -11
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/enums.py +274 -7
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +15 -0
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/token_counting.py +43 -20
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +223 -83
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +170 -141
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1550
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/toolkits/context_summarizer_toolkit.py
@@ -0,0 +1,684 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import glob
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, List, Optional
+
+from camel.logger import get_logger
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+from camel.utils.context_utils import ContextUtility
+
+if TYPE_CHECKING:
+    from camel.agents import ChatAgent
+    from camel.memories.records import MemoryRecord
+
+logger = get_logger(__name__)
+
+
+class ContextSummarizerToolkit(BaseToolkit):
+    r"""A toolkit that provides intelligent context summarization and
+    management for agents.
+
+    This toolkit enables agents to compress conversation context through
+    intelligent summarization, save conversation history to markdown files,
+    and search through past conversations. It handles all context management
+    needs in a single toolkit.
+
+    Key features:
+    - Intelligent context compression with over-compression prevention
+    - Markdown file storage with session management
+    - Simple text-based search through conversation history
+    - Configurable summarization prompts
+    - Context loading and saving capabilities
+    """
+
+    def __init__(
+        self,
+        agent: "ChatAgent",
+        working_directory: Optional[str] = None,
+        timeout: Optional[float] = None,
+        summary_prompt_template: Optional[str] = None,
+    ):
+        r"""Initialize the ContextSummarizerToolkit.
+
+        Args:
+            agent (ChatAgent): The agent that is using the toolkit.
+                This is required to access the agent's memory.
+            working_directory (str, optional): The directory path where notes
+                will be stored. If not provided, a default directory will be
+                used.
+            timeout (Optional[float]): The timeout for the toolkit.
+            summary_prompt_template (Optional[str]): Custom prompt template
+                for summarization. If None, a default task-focused template
+                is used. Users can customize this for different use cases.
+        """
+        super().__init__(timeout=timeout)
+
+        self.agent = agent
+        self.working_directory_param = working_directory
+        self.summary_prompt_template = summary_prompt_template
+
+        # compression tracking to prevent over-compression
+        self.compressed_message_uuids: set = set()
+        self.compression_count = 0
+        self.existing_summary: Optional[str] = ""
+
+        # Create a separate agent for summarization without tools to avoid
+        # circular calls
+        from camel.agents import ChatAgent
+
+        self.summary_agent = ChatAgent(
+            system_message="You are a helpful assistant that creates concise "
+            "summaries of conversations.",
+            model=self.agent.model_backend,
+            agent_id=f"{self.agent.agent_id}_summarizer",
+        )
+
+        # Setup storage and file management using ContextUtility
+        self._setup_storage(working_directory)
+
+    def _setup_storage(self, working_directory: Optional[str]) -> None:
+        r"""Initialize storage paths and create session-specific directories
+        using ContextUtility for file management."""
+        # Determine the base directory for context compression
+        if working_directory:
+            base_dir = working_directory
+        else:
+            camel_workdir = os.environ.get("CAMEL_WORKDIR")
+            if camel_workdir:
+                base_dir = str(Path(camel_workdir) / "context_compression")
+            else:
+                base_dir = "context_compression"
+
+        # Initialize ContextUtility with the base directory
+        self.context_util = ContextUtility(working_directory=base_dir)
+
+        # Store references for compatibility
+        self.working_directory = self.context_util.get_working_directory()
+        self.session_id = self.context_util.get_session_id()
+
+        # File names
+        self.summary_filename = "agent_memory_summary"
+        self.history_filename = "agent_memory_history"
+
+    # ========= CORE COMPRESSION METHODS =========
+
+    def _summarize_messages(
+        self,
+        memory_records: List["MemoryRecord"],
+    ) -> str:
+        r"""Generate a summary of the conversation context.
+
+        Args:
+            memory_records (List["MemoryRecord"]): A list of memory records to
+                summarize.
+
+        Returns:
+            str: The summary of the conversation context.
+        """
+        if not memory_records:
+            logger.warning(
+                "No memory records provided. Returning existing summary."
+            )
+            return self.existing_summary or ""
+
+        # check for over-compression prevention
+        record_uuids = {record.uuid for record in memory_records}
+        already_compressed = record_uuids.intersection(
+            self.compressed_message_uuids
+        )
+
+        if already_compressed:
+            logger.warning(
+                f"Preventing over-compression: {len(already_compressed)} "
+                f"records have already been compressed. Returning existing "
+                f"summary."
+            )
+            return self.existing_summary or ""
+
+        try:
+            # 1. reset summary agent state for clean summarization
+            self.summary_agent.reset()
+
+            # 2. format the conversation
+            conversation_text = self._format_conversation(memory_records)
+
+            # 3. create the summary prompt
+            summary_prompt = self._create_summary_prompt(conversation_text)
+
+            # 4. generate summary using the agent
+            response = self.summary_agent.step(summary_prompt)
+
+            # 5. extract the summary from response and store
+            summary_content = response.msgs[-1].content.strip()
+            self.existing_summary = summary_content
+
+            # 6. mark these records as compressed to prevent re-compression
+            record_uuids = {record.uuid for record in memory_records}
+            self.compressed_message_uuids.update(record_uuids)
+            self.compression_count += 1
+
+            logger.info(
+                f"Successfully generated summary for {len(memory_records)} "
+                f"messages. Compression count: {self.compression_count}"
+            )
+            return summary_content
+
+        except Exception as e:
+            logger.error(f"Error generating summary: {e}")
+            return self.existing_summary or ""
+
+    def _save_summary(self, summary: str) -> str:
+        r"""Persist conversation summary to markdown file with metadata
+        including timestamp and session information.
+
+        Args:
+            summary (str): The summary text to save.
+
+        Returns:
+            str: "success" or error message starting with "Error:".
+        """
+        try:
+            # prepare metadata for unified markdown saving
+            timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+            metadata = {
+                'Save Time': timestamp,
+                'Session ID': self.session_id,
+            }
+
+            # use ContextUtility's unified markdown saving
+            return self.context_util.save_markdown_file(
+                filename=self.summary_filename,
+                content=f"## Summary\n\n{summary}\n",
+                title=f"Conversation Summary: {self.session_id}",
+                metadata=metadata,
+            )
+        except Exception as e:
+            logger.error(f"Error saving summary: {e}")
+            return f"Error saving summary: {e}"
+
+    def _save_history(self, memory_records: List["MemoryRecord"]) -> str:
+        r"""Export complete conversation transcript as formatted markdown
+        with message roles, agent IDs, and content structure preserved.
+
+        Args:
+            memory_records (List["MemoryRecord"]): The list of memory records
+                to save.
+
+        Returns:
+            str: "success" or error message starting with "Error:".
+        """
+        try:
+            # prepare metadata for markdown saving
+            timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+            metadata = {
+                'Save Time': timestamp,
+                'Total Messages': len(memory_records),
+                'Session ID': self.session_id,
+            }
+
+            # format the transcript for markdown
+            transcript_content = "## Full Transcript\n\n"
+            for i, record in enumerate(memory_records):
+                message = record.message
+                role = getattr(message, "role_name", "unknown")
+                content = getattr(message, "content", str(message))
+                agent_id = record.agent_id or "unknown"
+                role_at_backend = (
+                    record.role_at_backend.value
+                    if hasattr(record.role_at_backend, 'value')
+                    else str(record.role_at_backend)
+                )
+
+                transcript_content += f"### Message {i + 1} - {role}\n"
+                transcript_content += f"**Agent ID:** {agent_id} \n"
+                transcript_content += (
+                    f"**Backend Role:** {role_at_backend} \n"
+                )
+                transcript_content += f"**Content:**\n```\n{content}\n```\n\n"
+                transcript_content += "---\n\n"
+
+            # use ContextUtility's markdown saving
+            return self.context_util.save_markdown_file(
+                filename=self.history_filename,
+                content=transcript_content,
+                title=f"Conversation History: {self.session_id}",
+                metadata=metadata,
+            )
+        except Exception as e:
+            logger.error(f"Error saving history: {e}")
+            return f"Error saving history: {e}"
+
+    def _compress_and_save(self, memory_records: List["MemoryRecord"]) -> str:
+        r"""Complete compression pipeline: summarize and save both history and
+        summary.
+
+        Args:
+            memory_records (List["MemoryRecord"]): The memory records to
+                compress and save.
+
+        Returns:
+            str: The generated summary text.
+        """
+        try:
+            # generate summary
+            summary = self._summarize_messages(memory_records)
+
+            # save both history and summary
+            history_result = self._save_history(memory_records)
+            summary_result = self._save_summary(summary)
+
+            if "Error" not in history_result and "Error" not in summary_result:
+                return summary
+            else:
+                error_msg = (
+                    f"Compression partially failed. History: {history_result},"
+                    f" Summary: {summary_result}"
+                )
+                logger.error(error_msg)
+                raise Exception(error_msg)
+
+        except Exception as e:
+            logger.error(f"Error in compress_and_save: {e}")
+            return self.existing_summary or ""
+
+    # ========= FILE MANAGEMENT METHODS =========
+
+    def _load_summary(self) -> str:
+        r"""Retrieve previously saved conversation summary from disk
+        for context restoration or continuation.
+
+        Returns:
+            str: The summary content, or empty string if not found.
+        """
+        return self.context_util.load_markdown_file(self.summary_filename)
+
+    def _load_history(self) -> str:
+        r"""Retrieve complete conversation transcript from saved markdown
+        file including all message details and formatting.
+
+        Returns:
+            str: The history content, or empty string if not found.
+        """
+        return self.context_util.load_markdown_file(self.history_filename)
+
+    # ========= PROMPT GENERATION METHODS =========
+
+    def _format_conversation(
+        self,
+        memory_records: List["MemoryRecord"],
+    ) -> str:
+        r"""Convert memory records into human-readable conversation format
+        with role names and message content for summarization processing.
+
+        Args:
+            memory_records (List["MemoryRecord"]): A list of memory records to
+                format.
+
+        Returns:
+            str: The formatted conversation.
+        """
+        return self.context_util.format_memory_as_conversation(memory_records)
+
+    def _create_summary_prompt(
+        self,
+        conversation_text: str,
+    ) -> str:
+        r"""Construct detailed summarization prompt with instructions
+        for extracting key information, goals, and progress from conversation.
+
+        Args:
+            conversation_text (str): The formatted conversation to summarize.
+
+        Returns:
+            str: The complete prompt for summarization.
+        """
+
+        # use custom template if provided, otherwise use default
+        if self.summary_prompt_template:
+            base_prompt = self.summary_prompt_template
+        else:
+            base_prompt = """The following is a conversation history of a \
+large language model agent.
+Analyze it and extract the key information from it. The information will be
+passed on to a new agent that will use it to understand the problem and \
+continue working on it without having to start from scratch. Focus on:
+- User's main goal (e.g. "The user wants my help with data analysis of \
+customer sales data.")
+- Key information about the user and their preferences (e.g. "The user is a \
+student who prefers concise bullet-point responses.")
+- Tasks that were accomplished (e.g. "I found the top 10 customers by total \
+sales amounts, wrote a Python script to...")
+- Tools and methods that were used **if tool/function calls have been made** \
+(e.g. "I used CodeExecutionToolkit to execute a Python script to analyze the \
+data.")
+- Important discoveries or solutions found (e.g. "I found there are duplicate \
+entries in the customer name column, which must be taken care of before \
+proceeding with the analysis.")
+- Technical approaches that worked **if the task is technical** (e.g. "Using \
+Pandas + matplotlib seem to yield the best responses for the user's \
+queries.)
+Return only there summary with no preamble or extra words"""
+
+        # if we want to extend an existing summary
+        if self.existing_summary:
+            base_prompt += f"""
+
+Existing summary from before:
+{self.existing_summary}
+
+Provide an updated summary that incorporates both the previous work and the \
+new conversation."""
+
+        prompt = f"""{base_prompt}
+
+Conversation:
+{conversation_text}
+
+Summary:"""
+        return prompt
+
+    # ========= PUBLIC TOOL INTERFACE METHODS =========
+
+    def summarize_full_conversation_history(self) -> str:
+        r"""Save the conversation history and generate an intelligent summary.
+
+        This function should be used when the memory becomes cluttered with too
+        many unrelated conversations or information that might be irrelevant to
+        the core task. It will generate a summary and save both the summary
+        and full conversation history to markdown files. Then it clears the
+        memory and replaces it with the summary for a context refresh. The
+        conversation must flow naturally from the summary.
+
+        Returns:
+            str: Success message with brief summary, or error message.
+        """
+        try:
+            # Get memory records using ContextUtility
+            memory_records = self.context_util.get_agent_memory_records(
+                self.agent
+            )
+            message_count = len(memory_records)
+
+            if message_count == 0:
+                return "No conversation history found to save."
+
+            # Use compression service directly to avoid tool calling loops
+            summary = self._compress_and_save(memory_records)
+
+            # empty memory and replace it with the summary
+            self._refresh_context_with_summary(summary)
+
+            logger.info(
+                f"Context compression completed - {message_count} "
+                f"messages processed"
+            )
+
+            return (
+                "Full context summarized, summary added as user message, "
+                "and full history removed."
+            )
+
+        except Exception as e:
+            error_msg = f"Failed to save conversation memory: {e}"
+            logger.error(error_msg)
+            return error_msg
+
+    def _refresh_context_with_summary(self, summary: str) -> bool:
+        r"""Empty the agent's memory and replace it with a summary
+        of the conversation history.
+
+        Args:
+            summary (str): The summary of the conversation history.
+
+        Returns:
+            bool: True if the context was refreshed successfully, False
+                otherwise.
+        """
+        try:
+            # clear the memory
+            self.agent.clear_memory()
+
+            # add summary as context as a USER message
+            if summary and summary.strip():
+                from camel.messages import BaseMessage
+                from camel.types import OpenAIBackendRole
+
+                summary_message = BaseMessage.make_user_message(
+                    role_name="User",
+                    content=f"[Context Summary from Previous "
+                    f"Conversation]\n\n{summary}",
+                )
+                self.agent.update_memory(
+                    summary_message, OpenAIBackendRole.USER
+                )
+                return True
+            return False
+
+        except Exception as e:
+            logger.error(
+                f"Failed to empty memory and replace it with summary: {e}"
+            )
+            return False
+
+    def get_conversation_memory_info(self) -> str:
+        r"""Get information about the current conversation memory state
+        and saved files. The information includes:
+        - Number of messages in memory
+        - Save directory
+        - If summary and history files exist and how many
+          characters they have
+
+        Returns:
+            str: Information about current memory and saved files.
+        """
+        try:
+            # Current memory info using ContextUtility
+            memory_records = self.context_util.get_agent_memory_records(
+                self.agent
+            )
+            current_count = len(memory_records)
+
+            info_msg = f"Current messages in memory: {current_count}\n"
+            info_msg += f"Save directory: {self.working_directory}\n"
+
+            # Check if saved files exist
+            try:
+                summary_content = self._load_summary()
+                history_content = self._load_history()
+
+                if summary_content.strip():
+                    info_msg += (
+                        f"Summary file: Available ({len(summary_content)} "
+                        f"chars)\n"
+                    )
+                else:
+                    info_msg += "Summary file: Not found\n"
+
+                if history_content.strip():
+                    info_msg += (
+                        f"History file: Available ({len(history_content)} "
+                        f"chars)\n"
+                    )
+                else:
+                    info_msg += "History file: Not found\n"
+
+            except Exception:
+                info_msg += "Saved files: Unable to check\n"
+
+            # Add search capability status
+            info_msg += "Text search: Enabled (lightweight file-based)\n"
+
+            # Count available session histories
+            base_dir = self.working_directory.parent
+            session_pattern = str(
+                base_dir / "session_*" / f"{self.history_filename}.md"
+            )
+            session_count = len(glob.glob(session_pattern))
+            info_msg += f"Searchable sessions: {session_count}\n"
+
+            return info_msg
+
+        except Exception as e:
+            error_msg = f"Failed to get memory info: {e}"
+            logger.error(error_msg)
+            return error_msg
+
+    def search_full_conversation_history(
+        self,
+        keywords: List[str],
+        top_k: int = 4,
+    ) -> str:
+        r"""Search the conversation history using keyword matching. This is
+        used when information is missing from the summary and the current
+        conversation, and can potentially be found in the full conversation
+        history before it was summarized.
+
+        Searches through the current session's history.md file to find the
+        top messages that contain the most keywords.
+
+        Args:
+            keywords (List[str]): List of keywords to search for. The
+                keywords must be explicitly related to the information
+                the user is looking for, and not general terms that
+                might be found about any topic. For example, if the user
+                is searching for the price of the flight to "Paris"
+                which was discussed previously, the keywords should be
+                ["Paris", "price", "flight", "$", "costs"].
+            top_k (int): The number of results to return (default 4).
+
+        Returns:
+            str: The search results or error message.
+        """
+        try:
+            # Only search current session history
+            current_history = (
+                self.working_directory / f"{self.history_filename}.md"
+            )
+            if not current_history.exists():
+                return "No history file found in current session."
+
+            logger.info("Searching through current session history")
+
+            # Perform keyword-based search directly
+            search_results = self.context_util.search_in_file(
+                current_history, keywords, top_k
+            )
+
+            if search_results and search_results.strip():
+                keywords_str = ", ".join(keywords)
+                formatted_results = (
+                    f"Found relevant conversation excerpts for keywords: "
+                    f"'{keywords_str}'\n\n"
+                    f"--- Search Results ---\n"
+                    f"{search_results}\n"
+                    f"--- End Results ---\n\n"
+                    f"Note: Results are ordered by keyword match count."
+                )
+                return formatted_results
+            else:
+                keywords_str = ", ".join(keywords)
+                return (
+                    f"No relevant conversations found for keywords: "
+                    f"'{keywords_str}'. "
+                    f"Try different keywords."
+                )
+
+        except Exception as e:
+            error_msg = f"Failed to search conversation history: {e}"
+            logger.error(error_msg)
+            return error_msg
+
+    def should_compress_context(
+        self, message_limit: int = 40, token_limit: Optional[int] = None
+    ) -> bool:
+        r"""Check if context should be compressed based on limits.
+
+        Args:
+            message_limit (int): Maximum number of messages before compression.
+            token_limit (Optional[int]): Maximum number of tokens before
+                compression.
+
+        Returns:
+            bool: True if context should be compressed.
+        """
+        try:
+            # check token limit first (more efficient)
+            if token_limit:
+                _, token_count = self.agent.memory.get_context()
+                if token_count > token_limit:
+                    return True
+
+            # check message limit
+            memory_records = self.context_util.get_agent_memory_records(
+                self.agent
+            )
+            if len(memory_records) > message_limit:
+                return True
+
+            return False
+
+        except Exception as e:
+            logger.error(
+                f"Error checking if context should be compressed: {e}"
+            )
+            return False
+
+    # ========= UTILITY METHODS =========
+
+    def reset(self) -> None:
+        r"""Clear all compression state including stored summaries,
+        compressed message tracking, and compression counters."""
+        self.existing_summary = None
+        self.compressed_message_uuids.clear()
+        self.compression_count = 0
+        logger.info(
+            "Context summarizer toolkit reset - previous summary and "
+            "compression tracking cleared"
+        )
+
+    def get_current_summary(self) -> Optional[str]:
+        r"""Retrieve the in-memory summary without triggering new
+        summarization or file operations.
+
+        Returns:
+            Optional[str]: The current summary, or None if no summary exists.
+        """
+        return self.existing_summary
+
+    def set_summary(self, summary: str) -> None:
+        r"""Override the current in-memory summary with provided content
+        without affecting saved files or compression tracking.
+
+        Args:
+            summary (str): The summary to store.
+        """
+        self.existing_summary = summary
+        logger.info("Summary manually set")
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Get the tools for the ContextSummarizerToolkit.
+
+        Returns:
+            List[FunctionTool]: The list of tools.
+        """
+        return [
+            FunctionTool(self.summarize_full_conversation_history),
+            FunctionTool(self.search_full_conversation_history),
+            FunctionTool(self.get_conversation_memory_info),
+        ]
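
For orientation, the following is a minimal usage sketch of the new ContextSummarizerToolkit, assembled from the constructor and public methods in the hunk above; it is not an example shipped with the package. The ChatAgent setup and the threshold values are assumptions, and the import path simply mirrors the new module's location.

from camel.agents import ChatAgent
from camel.toolkits.context_summarizer_toolkit import ContextSummarizerToolkit

# Agent whose memory will be compressed; model/config details are assumed defaults.
agent = ChatAgent(system_message="You are a helpful research assistant.")
toolkit = ContextSummarizerToolkit(
    agent=agent,
    working_directory="./context_compression",  # optional; defaults via CAMEL_WORKDIR
)

# ... run several agent.step(...) turns ...

# Compress once the memory grows past the chosen (assumed) limits.
if toolkit.should_compress_context(message_limit=40, token_limit=8000):
    # Saves the full history and a summary to markdown, then clears the
    # agent's memory and re-injects the summary as a user message.
    print(toolkit.summarize_full_conversation_history())

# Recover a detail that was summarized away via keyword search over the
# saved per-session history file.
print(toolkit.search_full_conversation_history(["Paris", "flight", "price"], top_k=4))
print(toolkit.get_conversation_memory_info())

Since get_tools() wraps the three public methods as FunctionTool instances, the same toolkit can instead be handed to the agent as callable tools so the model decides when to summarize.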
@@ -16,7 +16,11 @@ from typing import Dict, List, Literal, Optional, Union
 
 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
-from camel.utils import
+from camel.utils import (
+    MCPServer,
+    api_keys_required,
+    dependencies_required,
+)
 
 
 @MCPServer()