iridet-bot 0.1.1a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iribot/.env.example +4 -0
- iribot/__init__.py +5 -0
- iribot/__main__.py +7 -0
- iribot/ag_ui_protocol.py +247 -0
- iribot/agent.py +155 -0
- iribot/cli.py +33 -0
- iribot/config.py +45 -0
- iribot/executor.py +73 -0
- iribot/main.py +300 -0
- iribot/models.py +79 -0
- iribot/prompt_generator.py +104 -0
- iribot/session_manager.py +194 -0
- iribot/templates/system_prompt.j2 +185 -0
- iribot/tools/__init__.py +27 -0
- iribot/tools/base.py +80 -0
- iribot/tools/execute_command.py +572 -0
- iribot/tools/list_directory.py +49 -0
- iribot/tools/read_file.py +43 -0
- iribot/tools/write_file.py +49 -0
- iridet_bot-0.1.1a1.dist-info/METADATA +369 -0
- iridet_bot-0.1.1a1.dist-info/RECORD +24 -0
- iridet_bot-0.1.1a1.dist-info/WHEEL +5 -0
- iridet_bot-0.1.1a1.dist-info/entry_points.txt +2 -0
- iridet_bot-0.1.1a1.dist-info/top_level.txt +1 -0
iribot/.env.example
ADDED
iribot/__init__.py
ADDED
iribot/__main__.py
ADDED
iribot/ag_ui_protocol.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
1
|
+
"""AG-UI Protocol implementation for chat messages"""
|
|
2
|
+
from typing import Optional, List, Any, Literal, Union
|
|
3
|
+
from dataclasses import dataclass, asdict, field
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
import json
|
|
6
|
+
import uuid
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class ToolCall:
    """A single function-style tool invocation in the AG-UI protocol."""

    # Identifier assigned by the LLM provider for this call.
    id: str
    # The protocol only supports "function" calls.
    type: Literal["function"] = "function"
    # Callee name plus its (streamed) JSON-encoded arguments string.
    function: dict = field(default_factory=lambda: {"name": "", "arguments": ""})

    def to_dict(self) -> dict:
        """Serialize this call into a plain dict."""
        return asdict(self)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class BinaryContent:
    """Binary attachment (image, file, etc.) carried with a message."""

    type: Literal["binary"] = "binary"
    mimeType: str = "image/jpeg"
    data: Optional[str] = None  # base64-encoded payload
    id: Optional[str] = None
    url: Optional[str] = None
    filename: Optional[str] = None

    def to_dict(self) -> dict:
        """Serialize, dropping every field that is still None."""
        serialized = {}
        for key, value in asdict(self).items():
            if value is not None:
                serialized[key] = value
        return serialized
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
@dataclass
class AGUIMessage:
    """A single chat message in AG-UI protocol form."""

    id: str
    role: str  # "user", "assistant", "system", etc.
    content: Optional[str] = None
    name: Optional[str] = None
    tool_calls: List[ToolCall] = field(default_factory=list)
    tool_results: List[dict] = field(default_factory=list)
    binary_content: List[BinaryContent] = field(default_factory=list)
    metadata: dict = field(default_factory=dict)

    @classmethod
    def from_message(cls, message: dict) -> "AGUIMessage":
        """Build an AG-UI message from a plain chat-message dict."""
        attachments = message.get("binary_content") or []

        # Legacy messages carry base64 images under "images"; convert each one
        # to the binary_content dict shape.
        if not attachments and message.get("images"):
            attachments = []
            for index, payload in enumerate(message.get("images", [])):
                attachments.append(
                    BinaryContent(
                        type="binary",
                        mimeType="image/jpeg",
                        data=payload,
                        id=f"img_{index}",
                    ).to_dict()
                )

        # Promote a top-level timestamp into metadata if it is not there yet.
        meta = message.get("metadata") or {}
        if "timestamp" not in meta and message.get("timestamp"):
            ts = message.get("timestamp")
            if hasattr(ts, "isoformat"):
                meta["timestamp"] = ts.isoformat()
            else:
                meta["timestamp"] = str(ts)

        return cls(
            id=message.get("id", str(uuid.uuid4())),
            role=message.get("role", "user"),
            content=message.get("content"),
            name=message.get("name"),
            tool_calls=message.get("tool_calls") or [],
            tool_results=message.get("tool_results") or [],
            binary_content=attachments,
            metadata=meta,
        )

    def to_dict(self) -> dict:
        """Serialize to an AG-UI protocol dict, omitting empty/falsy fields."""
        payload = {
            "id": self.id,
            "role": self.role,
        }

        if self.content:
            payload["content"] = self.content

        if self.name:
            payload["name"] = self.name

        if self.tool_calls:
            # Entries may be ToolCall instances or already-plain dicts.
            payload["tool_calls"] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in self.tool_calls
            ]

        if self.tool_results:
            payload["tool_results"] = self.tool_results

        if self.binary_content:
            # Same duality: BinaryContent instances or plain dicts.
            payload["binary_content"] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in self.binary_content
            ]

        if self.metadata:
            payload["metadata"] = self.metadata

        return payload

    def to_json(self) -> str:
        """Serialize to a JSON string."""
        return json.dumps(self.to_dict())
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class AGUIEventEncoder:
    """Encodes and decodes AG-UI protocol events."""

    @staticmethod
    def encode_message(message: AGUIMessage) -> str:
        """Serialize an AG-UI message to its JSON representation."""
        return message.to_json()

    @staticmethod
    def decode_message(data: str) -> AGUIMessage:
        """Parse a JSON string back into an AGUIMessage."""
        raw = json.loads(data)
        return AGUIMessage(
            id=raw.get("id", str(uuid.uuid4())),
            role=raw.get("role", "user"),
            content=raw.get("content"),
            name=raw.get("name"),
            tool_calls=raw.get("tool_calls", []),
            tool_results=raw.get("tool_results", []),
            binary_content=raw.get("binary_content", []),
            metadata=raw.get("metadata", {}),
        )

    @staticmethod
    def event_stream(messages: List[dict]) -> List[str]:
        """Serialize each chat message into one JSON event string."""
        return [AGUIMessage.from_message(msg).to_json() for msg in messages]
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
def convert_to_ag_ui(message: dict) -> dict:
    """Convert a single chat-message dict to AG-UI protocol format."""
    return AGUIMessage.from_message(message).to_dict()
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def convert_messages_to_ag_ui(messages: List[dict]) -> List[dict]:
    """Convert every message in *messages* to AG-UI protocol format."""
    converted = []
    for item in messages:
        converted.append(convert_to_ag_ui(item))
    return converted
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
@dataclass
class ToolCallRecordAGUI:
    """AG-UI representation of one executed tool call."""

    tool_call_id: str
    tool_name: str
    arguments: dict
    result: Any
    success: bool
    timestamp: Optional[str] = None  # ISO-8601 string, or None if unknown

    def to_dict(self) -> dict:
        """Serialize all fields to a dict (a None timestamp is kept)."""
        field_names = (
            "tool_call_id",
            "tool_name",
            "arguments",
            "result",
            "success",
            "timestamp",
        )
        return {name: getattr(self, name) for name in field_names}
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
@dataclass
class LLMCallRecordAGUI:
    """AG-UI representation of one LLM request/response round."""

    id: str
    request_messages: List[dict]
    response_content: Optional[str] = None
    tool_calls: List[dict] = field(default_factory=list)
    tool_results: List[ToolCallRecordAGUI] = field(default_factory=list)
    finish_reason: Optional[str] = None
    timestamp: Optional[str] = None

    def to_dict(self) -> dict:
        """Serialize to a plain dict, flattening nested tool-result records."""
        flattened = []
        for entry in self.tool_results:
            # Entries may be ToolCallRecordAGUI instances or plain dicts.
            flattened.append(entry.to_dict() if hasattr(entry, "to_dict") else entry)

        return {
            "id": self.id,
            "request_messages": self.request_messages,
            "response_content": self.response_content,
            "tool_calls": self.tool_calls,
            "tool_results": flattened,
            "finish_reason": self.finish_reason,
            "timestamp": self.timestamp,
        }
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def convert_llm_calls_to_ag_ui(llm_calls: List[dict]) -> List[dict]:
    """Convert raw LLM call records to AG-UI format.

    Args:
        llm_calls: Raw call records as dicts; "timestamp" values may be
            datetime objects or plain strings.

    Returns:
        A list of AG-UI formatted dicts, one per call.
    """

    def _normalize_ts(ts: Any) -> Optional[str]:
        # The timestamp normalization was duplicated for calls and tool
        # results; one helper keeps both paths identical. datetime-like
        # values become ISO strings; falsy values map to None.
        if hasattr(ts, "isoformat"):
            ts = ts.isoformat()
        return str(ts) if ts else None

    result = []
    for call in llm_calls:
        tool_results = [
            ToolCallRecordAGUI(
                tool_call_id=tr.get("tool_call_id", ""),
                tool_name=tr.get("tool_name", ""),
                arguments=tr.get("arguments", {}),
                result=tr.get("result"),
                success=tr.get("success", False),
                timestamp=_normalize_ts(tr.get("timestamp")),
            )
            for tr in call.get("tool_results") or []
        ]

        ag_call = LLMCallRecordAGUI(
            id=call.get("id", ""),
            request_messages=call.get("request_messages", []),
            response_content=call.get("response_content"),
            tool_calls=call.get("tool_calls") or [],
            tool_results=tool_results,
            finish_reason=call.get("finish_reason"),
            timestamp=_normalize_ts(call.get("timestamp")),
        )
        result.append(ag_call.to_dict())
    return result
|
iribot/agent.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
"""Core Agent logic for handling LLM interactions"""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from typing import Optional, List, Dict, Any, Generator
|
|
5
|
+
from openai import OpenAI
|
|
6
|
+
from .config import settings
|
|
7
|
+
from .executor import tool_executor
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class Agent:
    """Agent for handling LLM interactions via the OpenAI chat API."""

    def __init__(self):
        # Only pass base_url when configured, so the SDK default applies
        # otherwise.
        client_params = {"api_key": settings.openai_api_key}
        if settings.openai_base_url:
            client_params["base_url"] = settings.openai_base_url

        self.client = OpenAI(**client_params)
        self.model = settings.openai_model

    def chat_stream(
        self,
        messages: List[Dict[str, Any]],
        system_prompt: str,
        images: Optional[List[str]] = None,
        temperature: float = 0.7,
    ) -> Generator[Dict[str, Any], None, None]:
        """
        Send a conversation to the LLM and stream the response.

        Args:
            messages: Chat history in OpenAI message format.
            system_prompt: System prompt prepended to the conversation.
            images: Optional base64-encoded JPEG images attached to the
                last user message.
            temperature: Sampling temperature (defaults to 0.7, the value
                that was previously hard-coded).

        Yields:
            {"type": "content", "content": str} chunks while streaming, then
            a final {"type": "done", "content", "tool_calls", "finish_reason"}.
        """
        # Build messages with system prompt first.
        formatted_messages = [{"role": "system", "content": system_prompt}]

        # Attach images to the last user message if provided.
        if images and messages and messages[-1]["role"] == "user":
            last_msg = messages[-1].copy()
            # `or ""` also covers an explicit None content value, which
            # .get("content", "") would pass through and produce an invalid
            # {"type": "text", "text": None} part.
            content = [{"type": "text", "text": last_msg.get("content") or ""}]

            for image_base64 in images:
                content.append(
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{image_base64}"
                        },
                    }
                )

            last_msg["content"] = content
            formatted_messages.extend(messages[:-1])
            formatted_messages.append(last_msg)
        else:
            formatted_messages.extend(messages)

        # Advertise every registered tool to the model.
        tools = tool_executor.get_all_tools()

        response = self.client.chat.completions.create(
            model=self.model,
            messages=formatted_messages,
            tools=tools,
            temperature=temperature,
            stream=True,
        )

        content = ""
        # Accumulates streamed tool-call fragments keyed by stream index:
        # {index: {id, type, function: {name, arguments}}}
        tool_calls_data = {}
        finish_reason = None

        for chunk in response:
            delta = chunk.choices[0].delta if chunk.choices else None

            if delta:
                # Plain text tokens are forwarded immediately.
                if delta.content:
                    content += delta.content
                    yield {"type": "content", "content": delta.content}

                # Tool-call fragments are merged by index until complete.
                if delta.tool_calls:
                    for tc in delta.tool_calls:
                        idx = tc.index
                        if idx not in tool_calls_data:
                            tool_calls_data[idx] = {
                                "id": tc.id or "",
                                "type": "function",
                                "function": {"name": "", "arguments": ""},
                            }

                        if tc.id:
                            tool_calls_data[idx]["id"] = tc.id
                        if tc.function:
                            if tc.function.name:
                                tool_calls_data[idx]["function"][
                                    "name"
                                ] = tc.function.name
                            if tc.function.arguments:
                                # Arguments stream as partial JSON text;
                                # concatenate the pieces.
                                tool_calls_data[idx]["function"][
                                    "arguments"
                                ] += tc.function.arguments

            if chunk.choices and chunk.choices[0].finish_reason:
                finish_reason = chunk.choices[0].finish_reason

        # Emit the aggregated result, ordering tool calls by stream index.
        tool_calls = (
            [tool_calls_data[i] for i in sorted(tool_calls_data.keys())]
            if tool_calls_data
            else []
        )

        yield {
            "type": "done",
            "content": content.strip(),
            "tool_calls": tool_calls,
            "finish_reason": finish_reason,
        }

    def process_tool_call(
        self,
        tool_name: str,
        arguments: str,
        context: Optional[Dict[str, Any]] = None,
    ) -> Dict[str, Any]:
        """
        Execute a tool call requested by the LLM.

        Args:
            tool_name: Name of the registered tool to call.
            arguments: JSON string of tool arguments (may be empty).
            context: Optional request context; its "session_id" is injected
                into shell_* tool calls that did not specify one.

        Returns:
            {"success": True, "result": ...} on success, otherwise
            {"success": False, "error": ...}.
        """
        try:
            args = json.loads(arguments) if arguments else {}
            # Shell tools are session-scoped; default to the caller's session.
            if tool_name.startswith("shell_") and context:
                if "session_id" not in args and context.get("session_id"):
                    args["session_id"] = context["session_id"]
            result = tool_executor.execute_tool(tool_name, **args)
            return {"success": True, "result": result}
        except json.JSONDecodeError:
            return {
                "success": False,
                "error": f"Invalid JSON arguments: {arguments}",
            }
        except Exception as e:
            return {"success": False, "error": str(e)}
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
# Global agent instance (module-level singleton shared by the application;
# constructing it creates the OpenAI client at import time).
agent = Agent()
|
iribot/cli.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""Command-line interface for running the IriBot backend."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import argparse
|
|
6
|
+
import uvicorn
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def main() -> None:
    """Parse CLI options and launch the IriBot backend with uvicorn."""
    arg_parser = argparse.ArgumentParser(
        description="Run the IriBot backend server"
    )
    arg_parser.add_argument("--host", default="127.0.0.1", help="Host to bind")
    arg_parser.add_argument("--port", type=int, default=8000, help="Port to bind")
    arg_parser.add_argument(
        "--reload", action="store_true", help="Enable auto-reload"
    )
    arg_parser.add_argument(
        "--log-level",
        default="info",
        choices=["critical", "error", "warning", "info", "debug", "trace"],
        help="Log level",
    )

    options = arg_parser.parse_args()

    # The app is referenced by import string ("module:attr") so that
    # --reload can re-import it on change.
    uvicorn.run(
        "iribot.main:app",
        host=options.host,
        port=options.port,
        reload=options.reload,
        log_level=options.log_level,
    )
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
# Allow direct execution (e.g. `python -m iribot.cli`).
if __name__ == "__main__":
    main()
|
iribot/config.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
"""Configuration for the Agent Application"""
|
|
2
|
+
import sys
|
|
3
|
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
|
4
|
+
from typing import Optional
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
import shutil
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# .env is looked up in the current working directory; the example template
# ships inside the installed package next to this module.
_ENV_PATH = Path.cwd() / ".env"
_ENV_EXAMPLE_PATH = Path(__file__).parent / ".env.example"

# Bootstrap: if no .env exists yet, seed it from the example and exit so the
# user can fill in real credentials before the app starts.
# NOTE: this is an import-time side effect — it runs (and may exit the
# process) on the first `import iribot.config`.
if not _ENV_PATH.exists():
    shutil.copyfile(_ENV_EXAMPLE_PATH, _ENV_PATH)
    print(".env was missing, so .env.example has been copied. Please review and update .env with your own settings.")
    sys.exit(1)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class Settings(BaseSettings):
    """Application settings, loaded from the .env file and environment.

    Fields map to environment variables case-insensitively; unknown
    variables are ignored.
    """

    model_config = SettingsConfigDict(
        env_file=_ENV_PATH,
        case_sensitive=False,
        extra="ignore"
    )

    # OpenAI Configuration
    openai_api_key: str = ""
    openai_model: str = "gpt-4-vision-preview"
    openai_base_url: Optional[str] = None  # custom endpoint; SDK default if unset

    # Application Configuration
    debug: bool = False
    app_title: str = "Agent Application"

    # Shell Configuration
    bash_path: str = "bash"  # Path to bash executable, defaults to "bash" in PATH

    # CORS Configuration
    cors_origins: list = []


# NOTE: a leftover debug statement `print(Settings.model_config)` used to run
# here at import time, dumping the settings configuration to stdout on every
# import of this module; it has been removed.
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# Module-level singleton; importing this module loads and validates settings.
settings = Settings()
|
iribot/executor.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
"""Tool executor and registry"""
|
|
2
|
+
from typing import Any, Dict, List
|
|
3
|
+
from .tools.base import BaseTool, BaseToolGroup, BaseStatus
|
|
4
|
+
from .tools.execute_command import ShellToolGroup, ShellStatus
|
|
5
|
+
from .tools.read_file import ReadFileTool
|
|
6
|
+
from .tools.write_file import WriteFileTool
|
|
7
|
+
from .tools.list_directory import ListDirectoryTool
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ToolExecutor:
    """Registry that owns all tools and dispatches execution requests."""

    def __init__(self):
        self.tools: Dict[str, BaseTool] = {}
        self.statuses: Dict[str, BaseStatus] = {}
        self._register_default_tools()

    def _register_default_tools(self) -> None:
        """Install the built-in file tools plus the shell tool group."""
        for builtin in (ReadFileTool(), WriteFileTool(), ListDirectoryTool()):
            self.register_tool(builtin)

        self.register_tool_group(ShellToolGroup())
        self.register_status(ShellStatus())

    def register_tool(self, tool: BaseTool) -> None:
        """Register a single tool under its name."""
        self.tools[tool.name] = tool

    def register_tool_group(self, group: BaseToolGroup) -> None:
        """Register every tool exposed by a tool group."""
        for member in group.get_tools():
            self.register_tool(member)

    def register_status(self, status: BaseStatus) -> None:
        """Register a status-only entry."""
        self.statuses[status.name] = status

    def execute_tool(self, tool_name: str, **kwargs) -> Dict[str, Any]:
        """Run a registered tool; never raises, always returns a dict."""
        if tool_name not in self.tools:
            return {
                "success": False,
                "error": f"Tool '{tool_name}' not found"
            }

        try:
            return self.tools[tool_name].execute(**kwargs)
        except Exception as e:
            return {
                "success": False,
                "error": str(e)
            }

    def get_all_tools(self) -> List[Dict[str, Any]]:
        """All registered tools, in OpenAI function-calling format."""
        return [entry.to_dict() for entry in self.tools.values()]

    def get_all_tool_statuses(self) -> List[Dict[str, Any]]:
        """Status snapshots from every registered status provider."""
        return [entry.get_status() for entry in self.statuses.values()]
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
# Global tool executor instance (module-level singleton; default tools are
# registered as a side effect of construction).
tool_executor = ToolExecutor()
|