rossum-agent 1.0.0rc0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rossum_agent/__init__.py +9 -0
- rossum_agent/agent/__init__.py +32 -0
- rossum_agent/agent/core.py +932 -0
- rossum_agent/agent/memory.py +176 -0
- rossum_agent/agent/models.py +160 -0
- rossum_agent/agent/request_classifier.py +152 -0
- rossum_agent/agent/skills.py +132 -0
- rossum_agent/agent/types.py +5 -0
- rossum_agent/agent_logging.py +56 -0
- rossum_agent/api/__init__.py +1 -0
- rossum_agent/api/cli.py +51 -0
- rossum_agent/api/dependencies.py +190 -0
- rossum_agent/api/main.py +180 -0
- rossum_agent/api/models/__init__.py +1 -0
- rossum_agent/api/models/schemas.py +301 -0
- rossum_agent/api/routes/__init__.py +1 -0
- rossum_agent/api/routes/chats.py +95 -0
- rossum_agent/api/routes/files.py +113 -0
- rossum_agent/api/routes/health.py +44 -0
- rossum_agent/api/routes/messages.py +218 -0
- rossum_agent/api/services/__init__.py +1 -0
- rossum_agent/api/services/agent_service.py +451 -0
- rossum_agent/api/services/chat_service.py +197 -0
- rossum_agent/api/services/file_service.py +65 -0
- rossum_agent/assets/Primary_light_logo.png +0 -0
- rossum_agent/bedrock_client.py +64 -0
- rossum_agent/prompts/__init__.py +27 -0
- rossum_agent/prompts/base_prompt.py +80 -0
- rossum_agent/prompts/system_prompt.py +24 -0
- rossum_agent/py.typed +0 -0
- rossum_agent/redis_storage.py +482 -0
- rossum_agent/rossum_mcp_integration.py +123 -0
- rossum_agent/skills/hook-debugging.md +31 -0
- rossum_agent/skills/organization-setup.md +60 -0
- rossum_agent/skills/rossum-deployment.md +102 -0
- rossum_agent/skills/schema-patching.md +61 -0
- rossum_agent/skills/schema-pruning.md +23 -0
- rossum_agent/skills/ui-settings.md +45 -0
- rossum_agent/streamlit_app/__init__.py +1 -0
- rossum_agent/streamlit_app/app.py +646 -0
- rossum_agent/streamlit_app/beep_sound.py +36 -0
- rossum_agent/streamlit_app/cli.py +17 -0
- rossum_agent/streamlit_app/render_modules.py +123 -0
- rossum_agent/streamlit_app/response_formatting.py +305 -0
- rossum_agent/tools/__init__.py +214 -0
- rossum_agent/tools/core.py +173 -0
- rossum_agent/tools/deploy.py +404 -0
- rossum_agent/tools/dynamic_tools.py +365 -0
- rossum_agent/tools/file_tools.py +62 -0
- rossum_agent/tools/formula.py +187 -0
- rossum_agent/tools/skills.py +31 -0
- rossum_agent/tools/spawn_mcp.py +227 -0
- rossum_agent/tools/subagents/__init__.py +31 -0
- rossum_agent/tools/subagents/base.py +303 -0
- rossum_agent/tools/subagents/hook_debug.py +591 -0
- rossum_agent/tools/subagents/knowledge_base.py +305 -0
- rossum_agent/tools/subagents/mcp_helpers.py +47 -0
- rossum_agent/tools/subagents/schema_patching.py +471 -0
- rossum_agent/url_context.py +167 -0
- rossum_agent/user_detection.py +100 -0
- rossum_agent/utils.py +128 -0
- rossum_agent-1.0.0rc0.dist-info/METADATA +311 -0
- rossum_agent-1.0.0rc0.dist-info/RECORD +67 -0
- rossum_agent-1.0.0rc0.dist-info/WHEEL +5 -0
- rossum_agent-1.0.0rc0.dist-info/entry_points.txt +3 -0
- rossum_agent-1.0.0rc0.dist-info/licenses/LICENSE +21 -0
- rossum_agent-1.0.0rc0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""CLI entry point for Rossum Agent Streamlit application."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def main() -> None:
    """Launch the Rossum Agent Streamlit application.

    Runs ``streamlit run app.py`` in a child process using the current
    interpreter and forwards any extra CLI arguments, then exits with the
    child's return code.
    """
    streamlit_target = Path(__file__).parent / "app.py"
    command = [sys.executable, "-m", "streamlit", "run", str(streamlit_target)]
    # Pass through any user-supplied arguments (e.g. --server.port).
    command.extend(sys.argv[1:])
    sys.exit(subprocess.call(command))
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# Allow direct execution of this module (e.g. `python cli.py`).
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
"""Streamlit UI rendering modules for the Rossum Agent app."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import html
|
|
6
|
+
import re
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from typing import TYPE_CHECKING
|
|
9
|
+
|
|
10
|
+
import streamlit as st
|
|
11
|
+
import streamlit.components.v1 as components
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from rossum_agent.redis_storage import RedisStorage
|
|
15
|
+
|
|
16
|
+
# Matches fenced ```mermaid ... ``` blocks; DOTALL lets diagrams span lines.
# The single capture group yields the diagram source when used with .split().
MERMAID_BLOCK_PATTERN = re.compile(r"```mermaid\s*(.*?)```", re.DOTALL)

# HTML scaffold for rendering one mermaid diagram via a CDN-loaded script.
# Doubled braces escape str.format; {code} receives the escaped diagram source.
MERMAID_HTML_TEMPLATE = """
<script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
<script>mermaid.initialize({{startOnLoad: true, theme: 'default'}});</script>
<div class="mermaid" style="background: white; padding: 10px; border-radius: 5px;">
{code}
</div>
"""
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def render_mermaid_html(code: str, height: int = 400) -> None:
    """Render a mermaid diagram inside an embedded HTML component.

    The diagram source is HTML-escaped before being injected into the
    template so it cannot break out of the containing markup.
    """
    sanitized = html.escape(code.strip())
    rendered = MERMAID_HTML_TEMPLATE.format(code=sanitized)
    components.html(rendered, height=height, scrolling=True)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def render_markdown_with_mermaid(content: str) -> None:
    """Render markdown content with mermaid diagram support.

    Splits the content on ```mermaid``` fences; captured diagram sources are
    rendered as HTML components while everything else goes to st.markdown.
    """
    for index, segment in enumerate(MERMAID_BLOCK_PATTERN.split(content)):
        if not segment.strip():
            continue
        # re.split with one capture group alternates text / diagram source,
        # so odd indices are always the mermaid bodies.
        if index % 2 == 1:
            render_mermaid_html(segment)
        else:
            st.markdown(segment)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _render_chat_buttons(chats: list[dict], current_chat_id: str) -> None:
    """Render one sidebar button per chat; clicking switches to that chat."""
    for chat in chats:
        is_current = chat["chat_id"] == current_chat_id
        prefix = "📌 " if is_current else "💬 "
        # Truncate long first messages so buttons stay on one line.
        chat_title = (
            chat["first_message"][:40] + "..."
            if len(chat["first_message"]) > 40
            else chat["first_message"]
        )

        if st.button(
            f"{prefix}{chat_title}",
            key=f"chat_{chat['chat_id']}",
            use_container_width=True,
            disabled=is_current,
        ):
            st.query_params["chat_id"] = chat["chat_id"]
            st.rerun()


def render_chat_history(redis_storage: RedisStorage, current_chat_id: str, user_id: str | None = None) -> None:
    """Render the chat history section in the sidebar.

    Args:
        redis_storage: Redis storage instance for retrieving chat history
        current_chat_id: The currently active chat ID
        user_id: Optional user ID for filtering chat history
    """
    st.markdown("---")
    st.subheader("Chat History")

    if not redis_storage.is_connected():
        st.warning("Redis not connected - chat history unavailable")
        return

    all_chats = redis_storage.list_all_chats(user_id)
    if not all_chats:
        st.info("No chat history yet")
        return

    # Group chats by age; anything older than 30 days is not shown.
    # NOTE(review): uses naive local time on both sides, consistent with the
    # stored timestamps being produced by time.time() — confirm in RedisStorage.
    now = datetime.now()
    today_chats = []
    last_30_days_chats = []

    for chat in all_chats:
        chat_date = datetime.fromtimestamp(chat["timestamp"])
        days_ago = (now - chat_date).days
        if days_ago == 0:
            today_chats.append(chat)
        elif days_ago <= 30:
            last_30_days_chats.append(chat)

    if today_chats:
        st.markdown("**Today**")
        _render_chat_buttons(today_chats, current_chat_id)

    if last_30_days_chats:
        with st.expander("**Previous 30 days**", expanded=False):
            _render_chat_buttons(last_30_days_chats, current_chat_id)
|
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
"""Response formatting module for the Rossum Agent Streamlit application.
|
|
2
|
+
|
|
3
|
+
This module handles the formatting and display of agent responses,
|
|
4
|
+
including tool calls, tool results, and final answers.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import ast
|
|
10
|
+
import contextlib
|
|
11
|
+
import dataclasses
|
|
12
|
+
import json
|
|
13
|
+
import pathlib
|
|
14
|
+
from typing import TYPE_CHECKING, Any, Protocol
|
|
15
|
+
|
|
16
|
+
if TYPE_CHECKING:
|
|
17
|
+
from streamlit.delta_generator import DeltaGenerator
|
|
18
|
+
|
|
19
|
+
from rossum_agent.agent import AgentStep
|
|
20
|
+
from rossum_agent.tools.core import SubAgentProgress
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class OutputRenderer(Protocol):
    """Structural interface for anything that can render markdown (e.g. a Streamlit placeholder)."""

    def markdown(self, body: str, *, unsafe_allow_html: bool = False) -> None: ...
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def get_display_tool_name(tool_call_name: str, tool_arguments: dict[str, Any] | None = None) -> str:
|
|
28
|
+
"""Get display name for a tool, expanding call_on_connection to show the actual MCP tool.
|
|
29
|
+
|
|
30
|
+
For call_on_connection, returns 'call_on_connection[connection_id.tool_name]' format.
|
|
31
|
+
For other tools, returns the original name.
|
|
32
|
+
"""
|
|
33
|
+
if tool_call_name == "call_on_connection" and tool_arguments:
|
|
34
|
+
connection_id = tool_arguments.get("connection_id", "")
|
|
35
|
+
inner_tool = tool_arguments.get("tool_name", "")
|
|
36
|
+
if connection_id and inner_tool:
|
|
37
|
+
return f"call_on_connection[{connection_id}.{inner_tool}]"
|
|
38
|
+
return tool_call_name
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def parse_and_format_final_answer(answer: str) -> str:
    """Parse and format final answer if it's a dictionary.

    Tries strict JSON first, then Python-literal syntax; dict payloads are
    expanded into markdown via FinalResponse, anything else is returned
    stripped but otherwise untouched.
    """
    stripped = answer.strip()

    with contextlib.suppress(json.JSONDecodeError, ValueError):
        parsed = json.loads(stripped)
        if isinstance(parsed, dict):
            return FinalResponse(parsed).get_formatted_response()

    # Fall back to Python-literal syntax (single quotes, tuples, etc.).
    with contextlib.suppress(ValueError, SyntaxError):
        parsed = ast.literal_eval(stripped)
        if isinstance(parsed, dict):
            return FinalResponse(parsed).get_formatted_response()

    return stripped
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@dataclasses.dataclass
class FinalResponse:
    """Formats a structured (dict) final answer into markdown sections."""

    data: dict[str, Any]

    def __post_init__(self) -> None:
        # Accumulated markdown output lines.
        self.lines: list[str] = []
        # Keys already rendered, so generic rendering skips them.
        self.processed_keys: set[str] = set()
        # Guards against rebuilding (and duplicating) output on repeat calls.
        self._called: bool = False

    def get_formatted_response(self) -> str:
        """Format dictionary response."""
        if not self._called:
            if "status" in self.data:
                self.add_status()
            if "summary" in self.data:
                self.add_summary()
            self.add_generated_files()
            self.add_generic_items()
            self._called = True
        return "\n".join(self.lines)

    def add_status(self) -> None:
        # Only the literal "success" gets a check mark; anything else is a cross.
        emoji = "✅" if self.data["status"] == "success" else "❌"
        self.lines.append(f"### {emoji} Status: {self.data['status'].title()}\n")
        self.processed_keys.add("status")

    def add_summary(self) -> None:
        self.lines.extend(["### 📝 Summary", self.data["summary"], ""])
        self.processed_keys.add("summary")

    def add_generated_files(self) -> None:
        # Render every unprocessed list whose key mentions files/generated output.
        for key, value in self.data.items():
            if key in self.processed_keys or not isinstance(value, list):
                continue
            lowered = key.lower()
            if "generated" not in lowered and "files" not in lowered:
                continue
            self.lines.append(f"### 📁 {key.replace('_', ' ').title()}")
            for item in value:
                if isinstance(item, str):
                    # Show only the basename for path-like strings.
                    file_name = pathlib.Path(item).name if "/" in item or "\\" in item else item
                    self.lines.append(f"- `{file_name}`")
                else:
                    self.lines.append(f"- {item}")
            self.lines.append("")
            self.processed_keys.add(key)

    def add_generic_items(self) -> None:
        # Catch-all: render every remaining key as a section or inline pair.
        for key, value in self.data.items():
            if key in self.processed_keys:
                continue

            formatted_key = key.replace("_", " ").title()

            if isinstance(value, dict):
                self.lines.append(f"### {formatted_key}")
                for sub_key, sub_value in value.items():
                    self.lines.append(f"- **{sub_key.replace('_', ' ').title()}:** {sub_value}")
                self.lines.append("")
            elif isinstance(value, list):
                self.lines.append(f"### {formatted_key}")
                self.lines.extend(f"- {item}" for item in value)
                self.lines.append("")
            else:
                self.lines.append(f"**{formatted_key}:** {value}")
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
@dataclasses.dataclass
class ChatResponse:
    """Handles formatting and display of agent responses.

    This class processes AgentStep objects from the new Claude-based agent
    and renders them appropriately in the Streamlit UI. Supports streaming
    updates where thinking is displayed progressively.
    """

    # The user prompt that produced this response (kept for context; not rendered here).
    prompt: str
    # Render target; anything with a .markdown(body, unsafe_allow_html=...) method.
    output_placeholder: OutputRenderer | DeltaGenerator

    def __post_init__(self) -> None:
        # Last non-streaming step processed (the step that "completed").
        self.result: AgentStep | None = None
        # Markdown for steps that have finished, in order.
        self.completed_steps_markdown: list[str] = []
        # Markdown for the step currently streaming (rebuilt on every update).
        self.current_step_markdown: str = ""
        # Formatted final answer once a final step arrives, else None.
        self.final_answer_text: str | None = None
        # Step number currently being streamed; used to detect step boundaries.
        self._current_step_num: int = 0
        # Running token/tool/step totals across all completed steps.
        self.total_input_tokens: int = 0
        self.total_output_tokens: int = 0
        self.total_tool_calls: int = 0
        self.total_steps: int = 0

    def process_step(self, step: AgentStep) -> None:
        """Process and display an agent step.

        Args:
            step: An AgentStep from the agent's execution.
        """
        if step.is_streaming:
            self._process_streaming_step(step)
        else:
            self._process_completed_step(step)

        # Re-render the full transcript after every update, streaming or not.
        self._render_display(step)

        # Totals are accumulated only for completed (non-streaming) steps so
        # that streaming partials of the same step are not double-counted.
        if not step.is_streaming:
            self.result = step
            self.total_input_tokens += step.input_tokens
            self.total_output_tokens += step.output_tokens
            self.total_tool_calls += len(step.tool_calls)
            self.total_steps += 1

    def _process_streaming_step(self, step: AgentStep) -> None:
        """Process a streaming step (partial thinking, text, or tool execution).

        Handles three types of streaming content (matching API's agent_service.py):
        - thinking: Model's chain-of-thought reasoning
        - intermediate: Model's response text before tool calls (via accumulated_text)
        - final_answer: Model's final response streaming (via accumulated_text)
        """
        # A new step number means the previous step's partial markdown is final
        # enough to archive; start a fresh buffer for the new step.
        if step.step_number != self._current_step_num:
            if self.current_step_markdown:
                self.completed_steps_markdown.append(self.current_step_markdown)
            self._current_step_num = step.step_number
            self.current_step_markdown = f"#### Step {step.step_number}\n"

        if step.current_tool and step.tool_progress:
            # Tool-execution update: show progress plus any thinking/text so far.
            current, total = step.tool_progress
            current_tool_args = None
            # Find the arguments of the tool currently running (first name match).
            for tc in step.tool_calls:
                if tc.name == step.current_tool:
                    current_tool_args = tc.arguments
                    break
            display_name = get_display_tool_name(step.current_tool, current_tool_args)
            progress_text = f"🔧 Running tool {current}/{total}: **{display_name}**..."

            if step.sub_agent_progress:
                sub_progress = step.sub_agent_progress
                sub_agent_text = self._format_sub_agent_progress(sub_progress)
                progress_text = f"{progress_text}\n\n{sub_agent_text}"

            # The step buffer is rebuilt from scratch on every update.
            parts = [f"#### Step {step.step_number}\n"]
            if step.thinking:
                parts.append(f"🧠 **Thinking:**\n\n{step.thinking}\n")
            if step.accumulated_text:
                parts.append(f"💬 **Response:**\n\n{step.accumulated_text}\n")
            parts.append(f"{progress_text}\n")
            self.current_step_markdown = "\n".join(parts)
        elif step.accumulated_text is not None:
            # Text-streaming update (intermediate or final answer text).
            parts = [f"#### Step {step.step_number}\n"]
            if step.thinking:
                parts.append(f"🧠 **Thinking:**\n\n{step.thinking}\n")
            parts.append(f"💬 **Response:**\n\n{step.accumulated_text}\n")
            self.current_step_markdown = "\n".join(parts)
        elif step.thinking:
            # Thinking-only update.
            self.current_step_markdown = f"#### Step {step.step_number}\n\n🧠 **Thinking:**\n\n{step.thinking}\n"

    def _format_sub_agent_progress(self, progress: SubAgentProgress) -> str:
        """Format sub-agent progress for display."""
        iteration = progress.iteration
        max_iterations = progress.max_iterations
        status = progress.status
        current_tool = progress.current_tool
        tool_calls = progress.tool_calls

        # Iteration counter is shown only when the sub-agent reports a bound.
        if max_iterations > 0:
            lines = [f"> 🤖 **Sub-agent ({progress.tool_name})** - Iteration {iteration}/{max_iterations}"]
        else:
            lines = [f"> 🤖 **Sub-agent ({progress.tool_name})**"]

        # One status line per known state; unknown statuses add nothing extra.
        if status == "thinking":
            lines.append("> ⏳ _Thinking..._")
        elif status == "searching":
            lines.append("> 🔍 _Searching Knowledge Base..._")
        elif status == "analyzing":
            lines.append("> 🧠 _Analyzing results..._")
        elif status == "running_tool" and current_tool:
            lines.append(f"> 🔧 Running: `{current_tool}`")
            if tool_calls:
                lines.append(f"> Tools this iteration: {', '.join(f'`{t}`' for t in tool_calls)}")
        elif status == "completed":
            lines.append("> ✅ _Completed_")

        return "\n".join(lines)

    def _process_completed_step(self, step: AgentStep) -> None:
        """Process a completed step with full content."""
        self._current_step_num = step.step_number
        step_md_parts: list[str] = [f"#### Step {step.step_number}\n"]

        if step.thinking:
            step_md_parts.append(f"🧠 **Thinking:**\n\n{step.thinking}\n")

        if step.accumulated_text:
            step_md_parts.append(f"💬 **Response:**\n\n{step.accumulated_text}\n")

        if step.tool_calls:
            tool_names = [get_display_tool_name(tc.name, tc.arguments) for tc in step.tool_calls]
            step_md_parts.append(f"**Tools:** {', '.join(tool_names)}\n")

        for result in step.tool_results:
            content = result.content
            if result.is_error:
                step_md_parts.append(f"**❌ {result.name} Error:** {content}\n")
            elif len(content) > 200:
                # Long results are collapsed behind a <details> element
                # (rendered with unsafe_allow_html in _render_display).
                step_md_parts.append(
                    f"<details><summary>📋 {result.name} result</summary>\n\n```\n{content}\n```\n</details>\n"
                )
            else:
                step_md_parts.append(f"**Result ({result.name}):** {content}\n")

        if step.error:
            step_md_parts.append(f"**❌ Error:** {step.error}\n")

        # Archive this step's markdown and reset the streaming buffer.
        self.current_step_markdown = "\n".join(step_md_parts)
        self.completed_steps_markdown.append(self.current_step_markdown)
        self.current_step_markdown = ""

        if step.is_final and step.final_answer is not None:
            self.final_answer_text = parse_and_format_final_answer(step.final_answer)

    def _render_display(self, step: AgentStep) -> None:
        """Render the current display state."""
        all_steps = self.completed_steps_markdown.copy()
        if self.current_step_markdown:
            all_steps.append(self.current_step_markdown)

        display_md = "\n\n".join(all_steps)

        # Trailer: exactly one of spinner / final answer / error, chosen in
        # this priority order.
        if step.is_streaming and not step.thinking and step.accumulated_text is None:
            display_md += "\n\n⏳ _Extended thinking in progress..._"
        elif self.final_answer_text is None and not step.is_final:
            display_md += "\n\n⏳ _Processing..._"
        elif self.final_answer_text is not None:
            display_md += f"\n\n---\n\n### ✅ Final Answer\n\n{self.final_answer_text}"
        elif step.error:
            display_md += f"\n\n---\n\n### ❌ Error\n\n{step.error}"

        # unsafe_allow_html is required for the <details> blocks above.
        self.output_placeholder.markdown(display_md, unsafe_allow_html=True)
|
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
"""Tools for the Rossum Agent.
|
|
2
|
+
|
|
3
|
+
This package provides local tools executed directly by the agent (file operations, debugging, skills, etc.).
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import TYPE_CHECKING
|
|
9
|
+
|
|
10
|
+
from rossum_agent.tools.core import (
|
|
11
|
+
SubAgentProgress,
|
|
12
|
+
SubAgentProgressCallback,
|
|
13
|
+
SubAgentText,
|
|
14
|
+
SubAgentTextCallback,
|
|
15
|
+
SubAgentTokenCallback,
|
|
16
|
+
SubAgentTokenUsage,
|
|
17
|
+
get_mcp_connection,
|
|
18
|
+
get_mcp_event_loop,
|
|
19
|
+
get_output_dir,
|
|
20
|
+
get_rossum_credentials,
|
|
21
|
+
report_progress,
|
|
22
|
+
report_text,
|
|
23
|
+
report_token_usage,
|
|
24
|
+
require_rossum_credentials,
|
|
25
|
+
set_mcp_connection,
|
|
26
|
+
set_output_dir,
|
|
27
|
+
set_progress_callback,
|
|
28
|
+
set_rossum_credentials,
|
|
29
|
+
set_text_callback,
|
|
30
|
+
set_token_callback,
|
|
31
|
+
)
|
|
32
|
+
from rossum_agent.tools.deploy import (
|
|
33
|
+
DEPLOY_TOOLS,
|
|
34
|
+
create_workspace,
|
|
35
|
+
deploy_compare_workspaces,
|
|
36
|
+
deploy_copy_org,
|
|
37
|
+
deploy_copy_workspace,
|
|
38
|
+
deploy_diff,
|
|
39
|
+
deploy_pull,
|
|
40
|
+
deploy_push,
|
|
41
|
+
deploy_to_org,
|
|
42
|
+
get_deploy_tool_names,
|
|
43
|
+
get_deploy_tools,
|
|
44
|
+
)
|
|
45
|
+
from rossum_agent.tools.dynamic_tools import (
|
|
46
|
+
DISCOVERY_TOOL_NAME,
|
|
47
|
+
CatalogData,
|
|
48
|
+
DynamicToolsState,
|
|
49
|
+
get_destructive_tools,
|
|
50
|
+
get_dynamic_tools,
|
|
51
|
+
get_load_tool_category_definition,
|
|
52
|
+
get_load_tool_definition,
|
|
53
|
+
get_loaded_categories,
|
|
54
|
+
load_tool,
|
|
55
|
+
load_tool_category,
|
|
56
|
+
preload_categories_for_request,
|
|
57
|
+
reset_dynamic_tools,
|
|
58
|
+
suggest_categories_for_request,
|
|
59
|
+
)
|
|
60
|
+
from rossum_agent.tools.file_tools import write_file
|
|
61
|
+
from rossum_agent.tools.formula import suggest_formula_field
|
|
62
|
+
from rossum_agent.tools.skills import load_skill
|
|
63
|
+
from rossum_agent.tools.spawn_mcp import (
|
|
64
|
+
SpawnedConnection,
|
|
65
|
+
call_on_connection,
|
|
66
|
+
cleanup_all_spawned_connections,
|
|
67
|
+
clear_spawned_connections,
|
|
68
|
+
close_connection,
|
|
69
|
+
spawn_mcp_connection,
|
|
70
|
+
)
|
|
71
|
+
from rossum_agent.tools.subagents import (
|
|
72
|
+
OPUS_MODEL_ID,
|
|
73
|
+
WebSearchError,
|
|
74
|
+
debug_hook,
|
|
75
|
+
evaluate_python_hook,
|
|
76
|
+
patch_schema_with_subagent,
|
|
77
|
+
search_knowledge_base,
|
|
78
|
+
)
|
|
79
|
+
|
|
80
|
+
if TYPE_CHECKING:
|
|
81
|
+
from anthropic._tools import BetaTool # ty: ignore[unresolved-import] - private API
|
|
82
|
+
from anthropic.types import ToolParam
|
|
83
|
+
|
|
84
|
+
# Internal tools exposed to the agent as Anthropic beta tool objects.
# The dynamic-discovery tools (load_tool_category / load_tool) are appended
# separately in get_internal_tools() because they have hand-built definitions.
_BETA_TOOLS: list[BetaTool[..., str]] = [
    write_file,
    search_knowledge_base,
    evaluate_python_hook,
    debug_hook,
    patch_schema_with_subagent,
    suggest_formula_field,
    load_skill,
    spawn_mcp_connection,
    call_on_connection,
    close_connection,
]
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def get_internal_tools() -> list[ToolParam]:
    """Get all internal tools in Anthropic format."""
    tool_params = [tool.to_dict() for tool in _BETA_TOOLS]
    # The dynamic-discovery tools carry hand-built definitions.
    tool_params.append(get_load_tool_category_definition())
    tool_params.append(get_load_tool_definition())
    return tool_params
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def get_internal_tool_names() -> set[str]:
    """Get the names of all internal tools."""
    names = {tool.name for tool in _BETA_TOOLS}
    # Include the dynamic-discovery tools, which are not in _BETA_TOOLS.
    names.update(("load_tool_category", "load_tool"))
    return names
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def execute_internal_tool(name: str, arguments: dict[str, object]) -> str:
    """Execute an internal tool by name.

    Args:
        name: The name of the tool to execute.
        arguments: The arguments to pass to the tool.

    Returns:
        The result of the tool execution as a string.

    Raises:
        ValueError: If the tool name is not recognized.
    """

    def _as_str_list(raw: object) -> list[str]:
        # Dynamic-tool arguments may arrive as a list or a single scalar.
        return [str(entry) for entry in raw] if isinstance(raw, list) else [str(raw)]

    # Dynamic-discovery tools are dispatched explicitly; everything else
    # is looked up in the beta tool registry.
    if name == "load_tool_category":
        return load_tool_category(_as_str_list(arguments.get("categories", [])))

    if name == "load_tool":
        return load_tool(_as_str_list(arguments.get("tool_names", [])))

    for tool in _BETA_TOOLS:
        if tool.name == name:
            result: str = tool(**arguments)
            return result

    raise ValueError(f"Unknown internal tool: {name}")
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def execute_tool(name: str, arguments: dict[str, object], tools: list[BetaTool[..., str]]) -> str:
    """Execute a tool by name from the given tool set.

    Raises:
        ValueError: If no tool in ``tools`` matches ``name``.
    """
    matched = next((tool for tool in tools if tool.name == name), None)
    if matched is None:
        raise ValueError(f"Unknown tool: {name}")
    return matched(**arguments)
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
# Backwards-compatible alias for the beta tool registry.
INTERNAL_TOOLS = _BETA_TOOLS

# Public API of the tools package, re-exporting the submodule symbols above.
__all__ = [
    "DEPLOY_TOOLS",
    "DISCOVERY_TOOL_NAME",
    "INTERNAL_TOOLS",
    "OPUS_MODEL_ID",
    "CatalogData",
    "DynamicToolsState",
    "SpawnedConnection",
    "SubAgentProgress",
    "SubAgentProgressCallback",
    "SubAgentText",
    "SubAgentTextCallback",
    "SubAgentTokenCallback",
    "SubAgentTokenUsage",
    "WebSearchError",
    "call_on_connection",
    "cleanup_all_spawned_connections",
    "clear_spawned_connections",
    "close_connection",
    "create_workspace",
    "debug_hook",
    "deploy_compare_workspaces",
    "deploy_copy_org",
    "deploy_copy_workspace",
    "deploy_diff",
    "deploy_pull",
    "deploy_push",
    "deploy_to_org",
    "evaluate_python_hook",
    "execute_internal_tool",
    "execute_tool",
    "get_deploy_tool_names",
    "get_deploy_tools",
    "get_destructive_tools",
    "get_dynamic_tools",
    "get_internal_tool_names",
    "get_internal_tools",
    "get_load_tool_category_definition",
    "get_load_tool_definition",
    "get_loaded_categories",
    "get_mcp_connection",
    "get_mcp_event_loop",
    "get_output_dir",
    "get_rossum_credentials",
    "load_skill",
    "load_tool",
    "load_tool_category",
    "patch_schema_with_subagent",
    "preload_categories_for_request",
    "report_progress",
    "report_text",
    "report_token_usage",
    "require_rossum_credentials",
    "reset_dynamic_tools",
    "search_knowledge_base",
    "set_mcp_connection",
    "set_output_dir",
    "set_progress_callback",
    "set_rossum_credentials",
    "set_text_callback",
    "set_token_callback",
    "spawn_mcp_connection",
    "suggest_categories_for_request",
    "suggest_formula_field",
    "write_file",
]
|