devduck 0.1.1766644714__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of devduck might be problematic.
- devduck/__init__.py +591 -1092
- devduck/_version.py +2 -2
- devduck/install.sh +42 -0
- devduck/test_redduck.py +1 -0
- devduck/tools/__init__.py +4 -44
- devduck/tools/install_tools.py +2 -103
- devduck/tools/mcp_server.py +6 -34
- devduck/tools/tcp.py +7 -6
- devduck/tools/websocket.py +2 -8
- devduck-0.2.0.dist-info/METADATA +143 -0
- devduck-0.2.0.dist-info/RECORD +16 -0
- {devduck-0.1.1766644714.dist-info → devduck-0.2.0.dist-info}/entry_points.txt +0 -1
- devduck-0.2.0.dist-info/licenses/LICENSE +21 -0
- devduck/agentcore_handler.py +0 -76
- devduck/tools/_ambient_input.py +0 -423
- devduck/tools/_tray_app.py +0 -530
- devduck/tools/agentcore_agents.py +0 -197
- devduck/tools/agentcore_config.py +0 -441
- devduck/tools/agentcore_invoke.py +0 -423
- devduck/tools/agentcore_logs.py +0 -320
- devduck/tools/ambient.py +0 -157
- devduck/tools/create_subagent.py +0 -659
- devduck/tools/fetch_github_tool.py +0 -201
- devduck/tools/ipc.py +0 -546
- devduck/tools/scraper.py +0 -935
- devduck/tools/speech_to_speech.py +0 -850
- devduck/tools/state_manager.py +0 -292
- devduck/tools/store_in_kb.py +0 -187
- devduck/tools/system_prompt.py +0 -608
- devduck/tools/tray.py +0 -247
- devduck/tools/use_github.py +0 -438
- devduck-0.1.1766644714.dist-info/METADATA +0 -717
- devduck-0.1.1766644714.dist-info/RECORD +0 -33
- devduck-0.1.1766644714.dist-info/licenses/LICENSE +0 -201
- {devduck-0.1.1766644714.dist-info → devduck-0.2.0.dist-info}/WHEEL +0 -0
- {devduck-0.1.1766644714.dist-info → devduck-0.2.0.dist-info}/top_level.txt +0 -0
devduck/__init__.py
CHANGED
@@ -3,46 +3,31 @@
 🦆 devduck - extreme minimalist self-adapting agent
 one file. self-healing. runtime dependencies. adaptive.
 """
-import os
 import sys
 import subprocess
-import
+import os
 import platform
 import socket
 import logging
 import tempfile
-import time
-import warnings
-import json
 from pathlib import Path
 from datetime import datetime
 from typing import Dict, Any
 from logging.handlers import RotatingFileHandler
-from strands import Agent, tool
-
-# Import system prompt helper for loading prompts from files
-try:
-    from devduck.tools.system_prompt import _get_system_prompt
-except ImportError:
-    # Fallback if tools module not available yet
-    def _get_system_prompt(repository=None, variable_name="SYSTEM_PROMPT"):
-        return os.getenv(variable_name, "")
-

-
-warnings.filterwarnings("ignore", message=".*cache_prompt is deprecated.*")
-
-os.environ["BYPASS_TOOL_CONSENT"] = os.getenv("BYPASS_TOOL_CONSENT", "true")
+os.environ["BYPASS_TOOL_CONSENT"] = "true"
 os.environ["STRANDS_TOOL_CONSOLE_MODE"] = "enabled"
-os.environ["EDITOR_DISABLE_BACKUP"] = "true"

+# 📝 Setup logging system
 LOG_DIR = Path(tempfile.gettempdir()) / "devduck" / "logs"
 LOG_DIR.mkdir(parents=True, exist_ok=True)
-
 LOG_FILE = LOG_DIR / "devduck.log"
+
+# Configure logger
 logger = logging.getLogger("devduck")
 logger.setLevel(logging.DEBUG)

+# File handler with rotation (10MB max, keep 3 backups)
 file_handler = RotatingFileHandler(
     LOG_FILE, maxBytes=10 * 1024 * 1024, backupCount=3, encoding="utf-8"
 )
@@ -52,6 +37,7 @@ file_formatter = logging.Formatter(
 )
 file_handler.setFormatter(file_formatter)

+# Console handler (only warnings and above)
 console_handler = logging.StreamHandler()
 console_handler.setLevel(logging.WARNING)
 console_formatter = logging.Formatter("🦆 %(levelname)s: %(message)s")
@@ -63,13 +49,243 @@ logger.addHandler(console_handler)
 logger.info("DevDuck logging system initialized")


+# 🔧 Self-healing dependency installer
+def ensure_deps():
+    """Install dependencies at runtime if missing"""
+    import importlib.metadata
+
+    deps = [
+        "strands-agents",
+        "strands-agents[ollama]",
+        "strands-agents[openai]",
+        "strands-agents[anthropic]",
+        "strands-agents-tools",
+    ]
+
+    # Check each package individually using importlib.metadata
+    for dep in deps:
+        pkg_name = dep.split("[")[0]  # Get base package name (strip extras)
+        try:
+            # Check if package is installed using metadata (checks PyPI package name)
+            importlib.metadata.version(pkg_name)
+        except importlib.metadata.PackageNotFoundError:
+            print(f"🦆 Installing {dep}...")
+            logger.debug(f"🦆 Installing {dep}...")
+            try:
+                subprocess.check_call(
+                    [sys.executable, "-m", "pip", "install", dep],
+                    stdout=subprocess.DEVNULL,
+                    stderr=subprocess.DEVNULL,
+                )
+            except subprocess.CalledProcessError as e:
+                print(f"🦆 Warning: Failed to install {dep}: {e}")
+                logger.debug(f"🦆 Warning: Failed to install {dep}: {e}")
+
+
+# 🌍 Environment adaptation
+def adapt_to_env():
+    """Self-adapt based on environment"""
+    env_info = {
+        "os": platform.system(),
+        "arch": platform.machine(),
+        "python": sys.version_info,
+        "cwd": str(Path.cwd()),
+        "home": str(Path.home()),
+        "shell": os.environ.get("SHELL", "unknown"),
+        "hostname": socket.gethostname(),
+    }
+
+    # Adaptive configurations - using common models
+    if env_info["os"] == "Darwin":  # macOS
+        ollama_host = "http://localhost:11434"
+        model = "qwen3:1.7b"  # Lightweight for macOS
+    elif env_info["os"] == "Linux":
+        ollama_host = "http://localhost:11434"
+        model = "qwen3:30b"  # More power on Linux
+    else:  # Windows
+        ollama_host = "http://localhost:11434"
+        model = "qwen3:8b"  # Conservative for Windows
+
+    return env_info, ollama_host, model
+
+
+# 🔍 Self-awareness: Read own source code
 def get_own_source_code():
-    """
+    """
+    Read and return the source code of this agent file.
+
+    Returns:
+        str: The complete source code for self-awareness
+    """
     try:
-
-
+        # Read this file (__init__.py)
+        current_file = __file__
+        with open(current_file, "r", encoding="utf-8") as f:
+            init_code = f.read()
+        return f"# devduck/__init__.py\n```python\n{init_code}\n```"
     except Exception as e:
-        return f"Error reading source: {e}"
+        return f"Error reading own source code: {e}"
+
+
+# 🛠️ System prompt tool (with .prompt file persistence)
+def system_prompt_tool(
+    action: str,
+    prompt: str | None = None,
+    context: str | None = None,
+    variable_name: str = "SYSTEM_PROMPT",
+) -> Dict[str, Any]:
+    """
+    Manage the agent's system prompt dynamically with file persistence.
+
+    Args:
+        action: "view", "update", "add_context", or "reset"
+        prompt: New system prompt text (required for "update")
+        context: Additional context to prepend (for "add_context")
+        variable_name: Environment variable name (default: SYSTEM_PROMPT)
+
+    Returns:
+        Dict with status and content
+    """
+    from pathlib import Path
+    import tempfile
+
+    def _get_prompt_file_path() -> Path:
+        """Get the .prompt file path in temp directory."""
+        temp_dir = Path(tempfile.gettempdir()) / ".devduck"
+        temp_dir.mkdir(exist_ok=True, mode=0o700)  # Create with restrictive permissions
+        return temp_dir / ".prompt"
+
+    def _write_prompt_file(prompt_text: str) -> None:
+        """Write prompt to .prompt file in temp directory."""
+        prompt_file = _get_prompt_file_path()
+        try:
+            # Create file with restrictive permissions
+            with open(
+                prompt_file,
+                "w",
+                encoding="utf-8",
+                opener=lambda path, flags: os.open(path, flags, 0o600),
+            ) as f:
+                f.write(prompt_text)
+        except (OSError, PermissionError):
+            try:
+                prompt_file.write_text(prompt_text, encoding="utf-8")
+                prompt_file.chmod(0o600)
+            except (OSError, PermissionError):
+                prompt_file.write_text(prompt_text, encoding="utf-8")
+
+    def _get_system_prompt(var_name: str) -> str:
+        """Get current system prompt from environment variable."""
+        return os.environ.get(var_name, "")
+
+    def _update_system_prompt(new_prompt: str, var_name: str) -> None:
+        """Update system prompt in both environment and .prompt file."""
+        os.environ[var_name] = new_prompt
+        if var_name == "SYSTEM_PROMPT":
+            _write_prompt_file(new_prompt)
+
+    try:
+        if action == "view":
+            current = _get_system_prompt(variable_name)
+            return {
+                "status": "success",
+                "content": [
+                    {"text": f"Current system prompt from {variable_name}:{current}"}
+                ],
+            }
+
+        elif action == "update":
+            if not prompt:
+                return {
+                    "status": "error",
+                    "content": [
+                        {"text": "Error: prompt parameter required for update action"}
+                    ],
+                }
+
+            _update_system_prompt(prompt, variable_name)
+
+            if variable_name == "SYSTEM_PROMPT":
+                message = f"System prompt updated (env: {variable_name}, file: .prompt)"
+            else:
+                message = f"System prompt updated (env: {variable_name})"
+
+            return {"status": "success", "content": [{"text": message}]}
+
+        elif action == "add_context":
+            if not context:
+                return {
+                    "status": "error",
+                    "content": [
+                        {
+                            "text": "Error: context parameter required for add_context action"
+                        }
+                    ],
+                }
+
+            current = _get_system_prompt(variable_name)
+            new_prompt = f"{current} {context}" if current else context
+            _update_system_prompt(new_prompt, variable_name)
+
+            if variable_name == "SYSTEM_PROMPT":
+                message = f"Context added to system prompt (env: {variable_name}, file: .prompt)"
+            else:
+                message = f"Context added to system prompt (env: {variable_name})"
+
+            return {"status": "success", "content": [{"text": message}]}
+
+        elif action == "reset":
+            os.environ.pop(variable_name, None)
+
+            if variable_name == "SYSTEM_PROMPT":
+                prompt_file = _get_prompt_file_path()
+                if prompt_file.exists():
+                    try:
+                        prompt_file.unlink()
+                    except (OSError, PermissionError):
+                        pass
+                message = (
+                    f"System prompt reset (env: {variable_name}, file: .prompt cleared)"
+                )
+            else:
+                message = f"System prompt reset (env: {variable_name})"
+
+            return {"status": "success", "content": [{"text": message}]}
+
+        elif action == "get":
+            # Backward compatibility
+            current = _get_system_prompt(variable_name)
+            return {
+                "status": "success",
+                "content": [{"text": f"System prompt: {current}"}],
+            }
+
+        elif action == "set":
+            # Backward compatibility
+            if prompt is None:
+                return {"status": "error", "content": [{"text": "No prompt provided"}]}
+
+            if context:
+                prompt = f"{context} {prompt}"
+
+            _update_system_prompt(prompt, variable_name)
+            return {
+                "status": "success",
+                "content": [{"text": "System prompt updated successfully"}],
+            }
+
+        else:
+            return {
+                "status": "error",
+                "content": [
+                    {
+                        "text": f"Unknown action '{action}'. Valid: view, update, add_context, reset"
+                    }
+                ],
+            }
+
+    except Exception as e:
+        return {"status": "error", "content": [{"text": f"Error: {str(e)}"}]}


 def view_logs_tool(
@@ -195,183 +411,6 @@ Last Modified: {modified}"""
     return {"status": "error", "content": [{"text": f"Error: {str(e)}"}]}


-def manage_tools_func(
-    action: str,
-    package: str = None,
-    tool_names: str = None,
-    tool_path: str = None,
-) -> Dict[str, Any]:
-    """Manage the agent's tool set at runtime - add, remove, list, reload tools on the fly."""
-    try:
-        if not hasattr(devduck, "agent") or not devduck.agent:
-            return {"status": "error", "content": [{"text": "Agent not initialized"}]}
-
-        registry = devduck.agent.tool_registry
-
-        if action == "list":
-            # List tools from registry
-            tool_list = list(registry.registry.keys())
-            dynamic_tools = list(registry.dynamic_tools.keys())
-
-            text = f"Currently loaded {len(tool_list)} tools:\n"
-            text += "\n".join(f" • {t}" for t in sorted(tool_list))
-            if dynamic_tools:
-                text += f"\n\nDynamic tools ({len(dynamic_tools)}):\n"
-                text += "\n".join(f" • {t}" for t in sorted(dynamic_tools))
-
-            return {"status": "success", "content": [{"text": text}]}
-
-        elif action == "add":
-            if not package and not tool_path:
-                return {
-                    "status": "error",
-                    "content": [
-                        {
-                            "text": "Either 'package' or 'tool_path' required for add action"
-                        }
-                    ],
-                }
-
-            added_tools = []
-
-            # Add from package using process_tools
-            if package:
-                if not tool_names:
-                    return {
-                        "status": "error",
-                        "content": [
-                            {"text": "'tool_names' required when adding from package"}
-                        ],
-                    }
-
-                tools_to_add = [t.strip() for t in tool_names.split(",")]
-
-                # Build tool specs: package.tool_name format
-                tool_specs = [f"{package}.{tool_name}" for tool_name in tools_to_add]
-
-                try:
-                    added_tool_names = registry.process_tools(tool_specs)
-                    added_tools.extend(added_tool_names)
-                    logger.info(f"Added tools from {package}: {added_tool_names}")
-                except Exception as e:
-                    logger.error(f"Failed to add tools from {package}: {e}")
-                    return {
-                        "status": "error",
-                        "content": [{"text": f"Failed to add tools: {str(e)}"}],
-                    }
-
-            # Add from file path using process_tools
-            if tool_path:
-                try:
-                    added_tool_names = registry.process_tools([tool_path])
-                    added_tools.extend(added_tool_names)
-                    logger.info(f"Added tools from file: {added_tool_names}")
-                except Exception as e:
-                    logger.error(f"Failed to add tool from {tool_path}: {e}")
-                    return {
-                        "status": "error",
-                        "content": [{"text": f"Failed to add tool: {str(e)}"}],
-                    }
-
-            if added_tools:
-                return {
-                    "status": "success",
-                    "content": [
-                        {
-                            "text": f"✅ Added {len(added_tools)} tools: {', '.join(added_tools)}\n"
-                            + f"Total tools: {len(registry.registry)}"
-                        }
-                    ],
-                }
-            else:
-                return {"status": "error", "content": [{"text": "No tools were added"}]}
-
-        elif action == "remove":
-            if not tool_names:
-                return {
-                    "status": "error",
-                    "content": [{"text": "'tool_names' required for remove action"}],
-                }
-
-            tools_to_remove = [t.strip() for t in tool_names.split(",")]
-            removed_tools = []
-
-            # Remove from registry
-            for tool_name in tools_to_remove:
-                if tool_name in registry.registry:
-                    del registry.registry[tool_name]
-                    removed_tools.append(tool_name)
-                    logger.info(f"Removed tool: {tool_name}")
-
-                if tool_name in registry.dynamic_tools:
-                    del registry.dynamic_tools[tool_name]
-                    logger.info(f"Removed dynamic tool: {tool_name}")
-
-            if removed_tools:
-                return {
-                    "status": "success",
-                    "content": [
-                        {
-                            "text": f"✅ Removed {len(removed_tools)} tools: {', '.join(removed_tools)}\n"
-                            + f"Total tools: {len(registry.registry)}"
-                        }
-                    ],
-                }
-            else:
-                return {
-                    "status": "success",
-                    "content": [{"text": "No tools were removed (not found)"}],
-                }
-
-        elif action == "reload":
-            if tool_names:
-                # Reload specific tools
-                tools_to_reload = [t.strip() for t in tool_names.split(",")]
-                reloaded_tools = []
-                failed_tools = []
-
-                for tool_name in tools_to_reload:
-                    try:
-                        registry.reload_tool(tool_name)
-                        reloaded_tools.append(tool_name)
-                        logger.info(f"Reloaded tool: {tool_name}")
-                    except Exception as e:
-                        failed_tools.append((tool_name, str(e)))
-                        logger.error(f"Failed to reload {tool_name}: {e}")
-
-                text = ""
-                if reloaded_tools:
-                    text += f"✅ Reloaded {len(reloaded_tools)} tools: {', '.join(reloaded_tools)}\n"
-                if failed_tools:
-                    text += f"❌ Failed to reload {len(failed_tools)} tools:\n"
-                    for tool_name, error in failed_tools:
-                        text += f" • {tool_name}: {error}\n"
-
-                return {"status": "success", "content": [{"text": text}]}
-            else:
-                # Reload all tools - restart agent
-                logger.info("Reloading all tools via restart")
-                devduck.restart()
-                return {
-                    "status": "success",
-                    "content": [{"text": "✅ All tools reloaded - agent restarted"}],
-                }
-
-        else:
-            return {
-                "status": "error",
-                "content": [
-                    {
-                        "text": f"Unknown action: {action}. Valid: list, add, remove, reload"
-                    }
-                ],
-            }
-
-    except Exception as e:
-        logger.error(f"Error in manage_tools: {e}")
-        return {"status": "error", "content": [{"text": f"Error: {str(e)}"}]}
-
-
 def get_shell_history_file():
     """Get the devduck-specific history file path."""
     devduck_history = Path.home() / ".devduck_history"
@@ -544,6 +583,8 @@ def get_last_messages():

 def append_to_shell_history(query, response):
     """Append the interaction to devduck shell history."""
+    import time
+
     try:
         history_file = get_shell_history_file()
         timestamp = str(int(time.time()))
@@ -565,148 +606,54 @@ def append_to_shell_history(query, response):

 # 🦆 The devduck agent
 class DevDuck:
-    def __init__(
-
-        auto_start_servers=True,
-        servers=None,
-        load_mcp_servers=True,
-    ):
-        """Initialize the minimalist adaptive agent
-
-        Args:
-            auto_start_servers: Enable automatic server startup
-            servers: Dict of server configs with optional env var lookups
-                Example: {
-                    "tcp": {"port": 9999},
-                    "ws": {"port": 8080, "LOOKUP_KEY": "SLACK_API_KEY"},
-                    "mcp": {"port": 8000},
-                    "ipc": {"socket_path": "/tmp/devduck.sock"}
-                }
-            load_mcp_servers: Load MCP servers from MCP_SERVERS env var
-        """
+    def __init__(self, auto_start_servers=True):
+        """Initialize the minimalist adaptive agent"""
         logger.info("Initializing DevDuck agent...")
         try:
-
-
-
-
-
-            "home": str(Path.home()),
-            "shell": os.environ.get("SHELL", "unknown"),
-            "hostname": socket.gethostname(),
-            }
+            # Self-heal dependencies
+            ensure_deps()
+
+            # Adapt to environment
+            self.env_info, self.ollama_host, self.model = adapt_to_env()

             # Execution state tracking for hot-reload
             self._agent_executing = False
             self._reload_pending = False

-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                ),
-                "enabled": os.getenv("DEVDUCK_ENABLE_IPC", "false").lower()
-                == "true",
-            },
-            }
-
-            # Show server configuration status
-            enabled_servers = []
-            disabled_servers = []
-            for server_name, config in servers.items():
-                if config.get("enabled", False):
-                    if "port" in config:
-                        enabled_servers.append(
-                            f"{server_name.upper()}:{config['port']}"
-                        )
-                    else:
-                        enabled_servers.append(server_name.upper())
-                else:
-                    disabled_servers.append(server_name.upper())
-
-            logger.debug(
-                f"🦆 Server config: {', '.join(enabled_servers) if enabled_servers else 'none enabled'}"
+            # Import after ensuring deps
+            from strands import Agent, tool
+            from strands.models.ollama import OllamaModel
+            from strands_tools.utils.models.model import create_model
+            from .tools import tcp, websocket, mcp_server, install_tools
+            from strands_fun_tools import (
+                listen,
+                cursor,
+                clipboard,
+                screen_reader,
+                yolo_vision,
+            )
+            from strands_tools import (
+                shell,
+                editor,
+                calculator,
+                python_repl,
+                image_reader,
+                use_agent,
+                load_tool,
+                environment,
+                mcp_client,
             )
-            if disabled_servers:
-                logger.debug(f"🦆 Disabled: {', '.join(disabled_servers)}")
-
-            self.servers = servers
-
-            # Load tools with flexible configuration
-            # Default tool config
-            # Agent can load additional tools on-demand via fetch_github_tool
-
-            # 🔧 Available DevDuck Tools (load on-demand):
-            # - system_prompt: https://github.com/cagataycali/devduck/blob/main/devduck/tools/system_prompt.py
-            # - store_in_kb: https://github.com/cagataycali/devduck/blob/main/devduck/tools/store_in_kb.py
-            # - ipc: https://github.com/cagataycali/devduck/blob/main/devduck/tools/ipc.py
-            # - tcp: https://github.com/cagataycali/devduck/blob/main/devduck/tools/tcp.py
-            # - websocket: https://github.com/cagataycali/devduck/blob/main/devduck/tools/websocket.py
-            # - mcp_server: https://github.com/cagataycali/devduck/blob/main/devduck/tools/mcp_server.py
-            # - scraper: https://github.com/cagataycali/devduck/blob/main/devduck/tools/scraper.py
-            # - tray: https://github.com/cagataycali/devduck/blob/main/devduck/tools/tray.py
-            # - ambient: https://github.com/cagataycali/devduck/blob/main/devduck/tools/ambient.py
-            # - agentcore_config: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_config.py
-            # - agentcore_invoke: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_invoke.py
-            # - agentcore_logs: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_logs.py
-            # - agentcore_agents: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_agents.py
-            # - create_subagent: https://github.com/cagataycali/devduck/blob/main/devduck/tools/create_subagent.py
-            # - use_github: https://github.com/cagataycali/devduck/blob/main/devduck/tools/use_github.py
-            # - speech_to_speech: https://github.com/cagataycali/devduck/blob/main/devduck/tools/speech_to_speech.py
-            # - state_manager: https://github.com/cagataycali/devduck/blob/main/devduck/tools/state_manager.py
-
-            # 📦 Strands Tools
-            # - editor, file_read, file_write, image_reader, load_tool, retrieve
-            # - calculator, use_agent, environment, mcp_client, speak, slack
-
-            # 🎮 Strands Fun Tools
-            # - listen, cursor, clipboard, screen_reader, bluetooth, yolo_vision
-
-            # 🔍 Strands Google
-            # - use_google, google_auth
-
-            # 🔧 Auto-append server tools based on enabled servers
-            server_tools_needed = []
-            if servers.get("tcp", {}).get("enabled", False):
-                server_tools_needed.append("tcp")
-            if servers.get("ws", {}).get("enabled", True):
-                server_tools_needed.append("websocket")
-            if servers.get("mcp", {}).get("enabled", False):
-                server_tools_needed.append("mcp_server")
-            if servers.get("ipc", {}).get("enabled", False):
-                server_tools_needed.append("ipc")
-
-            # Append to default tools if any server tools are needed
-            if server_tools_needed:
-                server_tools_str = ",".join(server_tools_needed)
-                default_tools = f"devduck.tools:system_prompt,fetch_github_tool,{server_tools_str};strands_tools:shell"
-                logger.info(f"Auto-added server tools: {server_tools_str}")
-            else:
-                default_tools = (
-                    "devduck.tools:system_prompt,fetch_github_tool;strands_tools:shell"
-                )

-
-
-
+            # Wrap system_prompt_tool with @tool decorator
+            @tool
+            def system_prompt(
+                action: str,
+                prompt: str = None,
+                context: str = None,
+                variable_name: str = "SYSTEM_PROMPT",
+            ) -> Dict[str, Any]:
+                """Manage agent system prompt dynamically."""
+                return system_prompt_tool(action, prompt, context, variable_name)

             # Wrap view_logs_tool with @tool decorator
             @tool
@@ -718,380 +665,114 @@ class DevDuck:
                 """View and manage DevDuck logs."""
                 return view_logs_tool(action, lines, pattern)

-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            core_tools.extend([view_logs, manage_tools])
-
-            # Assign tools
-            self.tools = core_tools
-
-            # 🔌 Load MCP servers if enabled
-            if load_mcp_servers:
-                mcp_clients = self._load_mcp_servers()
-                if mcp_clients:
-                    self.tools.extend(mcp_clients)
-                    logger.info(f"Loaded {len(mcp_clients)} MCP server(s)")
+            # Minimal but functional toolset including system_prompt and view_logs
+            self.tools = [
+                shell,
+                editor,
+                calculator,
+                python_repl,
+                image_reader,
+                use_agent,
+                load_tool,
+                environment,
+                system_prompt,
+                view_logs,
+                tcp,
+                websocket,
+                mcp_server,
+                install_tools,
+                mcp_client,
+                listen,
+                cursor,
+                clipboard,
+                screen_reader,
+                yolo_vision,
+            ]

             logger.info(f"Initialized {len(self.tools)} tools")

-            #
-
+            # Check if MODEL_PROVIDER env variable is set
+            model_provider = os.getenv("MODEL_PROVIDER")

-
-
-
-
-
+            if model_provider:
+                # Use create_model utility for any provider (bedrock, anthropic, etc.)
+                self.agent_model = create_model(provider=model_provider)
+            else:
+                # Fallback to default Ollama behavior
+                self.agent_model = OllamaModel(
+                    host=self.ollama_host,
+                    model_id=self.model,
+                    temperature=1,
+                    keep_alive="5m",
+                )

+            # Create agent with self-healing
             self.agent = Agent(
                 model=self.agent_model,
                 tools=self.tools,
                 system_prompt=self._build_system_prompt(),
-                load_tools_from_directory=
-                trace_attributes={
-                    "session.id": self.session_id,
-                    "user.id": self.env_info["hostname"],
-                    "tags": ["Strands-Agents", "DevDuck"],
-                },
-            )
-
-            # 🚀 AUTO-START SERVERS
-            if auto_start_servers and "--mcp" not in sys.argv:
-                self._start_servers()
-
-            # Start file watcher for auto hot-reload
-            self._start_file_watcher()
-
-            logger.info(
-                f"DevDuck agent initialized successfully with model {self.model}"
+                load_tools_from_directory=True,
             )

-
-
-
-
-    def _load_tools_from_config(self, config):
-        """
-        Load tools based on DEVDUCK_TOOLS configuration.
-
-        Format: package1:tool1,tool2;package2:tool3,tool4
-        Examples:
-        - strands_tools:shell,editor;strands_action:use_github
-        - strands_action:use_github;strands_tools:shell,use_aws
-
-        Note: Only loads what's specified in config - no automatic additions
-        """
-        tools = []
-
-        # Split by semicolon to get package groups
-        groups = config.split(";")
-
-        for group in groups:
-            group = group.strip()
-            if not group:
-                continue
-
-            # Split by colon to get package:tools
-            parts = group.split(":", 1)
-            if len(parts) != 2:
-                logger.warning(f"Invalid format: {group}")
-                continue
-
-            package = parts[0].strip()
-            tools_str = parts[1].strip()
-
-            # Parse tools (comma-separated)
-            tool_names = [t.strip() for t in tools_str.split(",") if t.strip()]
-
-            for tool_name in tool_names:
-                tool = self._load_single_tool(package, tool_name)
-                if tool:
-                    tools.append(tool)
-
-        logger.info(f"Loaded {len(tools)} tools from configuration")
-        return tools
-
-    def _load_single_tool(self, package, tool_name):
-        """Load a single tool from a package"""
-        try:
-            module = __import__(package, fromlist=[tool_name])
-            tool = getattr(module, tool_name)
-            logger.debug(f"Loaded {tool_name} from {package}")
-            return tool
-        except Exception as e:
-            logger.warning(f"Failed to load {tool_name} from {package}: {e}")
-            return None
-
-    def _load_mcp_servers(self):
-        """
-        Load MCP servers from MCP_SERVERS environment variable using direct loading.
-
-        Uses the experimental managed integration - MCPClient instances are passed
-        directly to Agent constructor without explicit context management.
-
-        Format: JSON with "mcpServers" object
-        Example: MCP_SERVERS='{"mcpServers": {"strands": {"command": "uvx", "args": ["strands-agents-mcp-server"]}}}'
+            # 🚀 AUTO-START SERVERS: TCP (9999), WebSocket (8080), MCP HTTP (8000)
+            if auto_start_servers:
+                logger.info("Auto-starting servers...")
+                print("🦆 Auto-starting servers...")

-        Returns:
-            List of MCPClient instances ready for direct use in Agent
-        """
-        mcp_servers_json = os.getenv("MCP_SERVERS")
-        if not mcp_servers_json:
-            logger.debug("No MCP_SERVERS environment variable found")
-            return []
-
-        try:
-            config = json.loads(mcp_servers_json)
-            mcp_servers_config = config.get("mcpServers", {})
-
-            if not mcp_servers_config:
-                logger.warning("MCP_SERVERS JSON has no 'mcpServers' key")
-                return []
-
-            mcp_clients = []
-
-            from strands.tools.mcp import MCPClient
-            from mcp import stdio_client, StdioServerParameters
-            from mcp.client.streamable_http import streamablehttp_client
-            from mcp.client.sse import sse_client
-
-            for server_name, server_config in mcp_servers_config.items():
                 try:
-
-
-
-
-
-                    command = server_config["command"]
-                    args = server_config.get("args", [])
-                    env = server_config.get("env", None)
-
-                    transport_callable = (
-                        lambda cmd=command, a=args, e=env: stdio_client(
-                            StdioServerParameters(command=cmd, args=a, env=e)
-                        )
-                    )
-
-                elif "url" in server_config:
-                    # Determine if SSE or streamable HTTP based on URL path
-                    url = server_config["url"]
-                    headers = server_config.get("headers", None)
-
-                    if "/sse" in url:
-                        # SSE transport
-                        transport_callable = lambda u=url: sse_client(u)
-                    else:
-                        # Streamable HTTP transport (default for HTTP)
-                        transport_callable = (
-                            lambda u=url, h=headers: streamablehttp_client(
-                                url=u, headers=h
-                            )
-                        )
+                    # Start TCP server on port 9999
+                    tcp_result = self.agent.tool.tcp(action="start_server", port=9999)
+                    if tcp_result.get("status") == "success":
+                        logger.info("✓ TCP server started on port 9999")
+                        print("🦆 ✓ TCP server: localhost:9999")
                     else:
-                        logger.warning(
-
-
-
+                        logger.warning(f"TCP server start issue: {tcp_result}")
+                except Exception as e:
+                    logger.error(f"Failed to start TCP server: {e}")
+                    print(f"🦆 ⚠ TCP server failed: {e}")

-
-                    #
-
-
-                        transport_callable=transport_callable, prefix=prefix
+                try:
+                    # Start WebSocket server on port 8080
+                    ws_result = self.agent.tool.websocket(
+                        action="start_server", port=8080
                     )
+                    if ws_result.get("status") == "success":
+                        logger.info("✓ WebSocket server started on port 8080")
+                        print("🦆 ✓ WebSocket server: localhost:8080")
+                    else:
+                        logger.warning(f"WebSocket server start issue: {ws_result}")
+                except Exception as e:
+                    logger.error(f"Failed to start WebSocket server: {e}")
+                    print(f"🦆 ⚠ WebSocket server failed: {e}")

-
-
-
+                try:
+                    # Start MCP server with HTTP transport on port 8000
+                    mcp_result = self.agent.tool.mcp_server(
+                        action="start",
+                        transport="http",
+                        port=8000,
+                        expose_agent=True,
+                        agent=self.agent,
                     )
-
+                    if mcp_result.get("status") == "success":
+                        logger.info("✓ MCP HTTP server started on port 8000")
+                        print("🦆 ✓ MCP server: http://localhost:8000/mcp")
+                    else:
+                        logger.warning(f"MCP server start issue: {mcp_result}")
                 except Exception as e:
-                    logger.error(f"Failed to
-
+                    logger.error(f"Failed to start MCP server: {e}")
+                    print(f"🦆 ⚠ MCP server failed: {e}")

-
+            # Start file watcher for auto hot-reload
+            self._start_file_watcher()

-
-
-
-        except Exception as e:
-            logger.error(f"Error loading MCP servers: {e}")
-            return []
-
-    def _select_model(self):
-        """
-        Smart model selection with fallback based on available credentials.
-
-        Priority: Bedrock → Anthropic → OpenAI → GitHub → Gemini → Cohere →
-                  Writer → Mistral → LiteLLM → LlamaAPI → SageMaker →
-                  LlamaCpp → MLX → Ollama
-
-        Returns:
-            Tuple of (model_instance, model_name)
-        """
-        provider = os.getenv("MODEL_PROVIDER")
-
-        # Read common model parameters from environment
-        max_tokens = int(os.getenv("STRANDS_MAX_TOKENS", "60000"))
-        temperature = float(os.getenv("STRANDS_TEMPERATURE", "1.0"))
-
-        if not provider:
-            # Auto-detect based on API keys and credentials
-            # 1. Try Bedrock (AWS bearer token or STS credentials)
-            try:
-                # Check for bearer token first
-                if os.getenv("AWS_BEARER_TOKEN_BEDROCK"):
-                    provider = "bedrock"
-                    print("🦆 Using Bedrock (bearer token)")
-                else:
-                    # Try STS credentials
-                    import boto3
-
-                    boto3.client("sts").get_caller_identity()
-                    provider = "bedrock"
-                    print("🦆 Using Bedrock")
-            except:
-                # 2. Try Anthropic
-                if os.getenv("ANTHROPIC_API_KEY"):
-                    provider = "anthropic"
-                    print("🦆 Using Anthropic")
-                # 3. Try OpenAI
-                elif os.getenv("OPENAI_API_KEY"):
-                    provider = "openai"
-                    print("🦆 Using OpenAI")
-                # 4. Try GitHub Models
-                elif os.getenv("GITHUB_TOKEN") or os.getenv("PAT_TOKEN"):
-                    provider = "github"
-                    print("🦆 Using GitHub Models")
-                # 5. Try Gemini
-                elif os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY"):
-                    provider = "gemini"
-                    print("🦆 Using Gemini")
-                # 6. Try Cohere
-                elif os.getenv("COHERE_API_KEY"):
-                    provider = "cohere"
-                    print("🦆 Using Cohere")
-                # 7. Try Writer
-                elif os.getenv("WRITER_API_KEY"):
-                    provider = "writer"
-                    print("🦆 Using Writer")
-                # 8. Try Mistral
-                elif os.getenv("MISTRAL_API_KEY"):
-                    provider = "mistral"
-                    print("🦆 Using Mistral")
-                # 9. Try LiteLLM
-                elif os.getenv("LITELLM_API_KEY"):
-                    provider = "litellm"
-                    print("🦆 Using LiteLLM")
-                # 10. Try LlamaAPI
-                elif os.getenv("LLAMAAPI_API_KEY"):
-                    provider = "llamaapi"
-                    print("🦆 Using LlamaAPI")
-                # 11. Try SageMaker
-                elif os.getenv("SAGEMAKER_ENDPOINT_NAME"):
-                    provider = "sagemaker"
-                    print("🦆 Using SageMaker")
-                # 12. Try LlamaCpp
-                elif os.getenv("LLAMACPP_MODEL_PATH"):
-                    provider = "llamacpp"
-                    print("🦆 Using LlamaCpp")
-                # 13. Try MLX on Apple Silicon
-                elif platform.system() == "Darwin" and platform.machine() in [
-                    "arm64",
-                    "aarch64",
-                ]:
-                    try:
-                        from strands_mlx import MLXModel
-
-                        provider = "mlx"
-                        print("🦆 Using MLX (Apple Silicon)")
-                    except ImportError:
-                        provider = "ollama"
-                        print("🦆 Using Ollama (fallback)")
-                # 14. Fallback to Ollama
-                else:
-                    provider = "ollama"
-                    print("🦆 Using Ollama (fallback)")
-
-        # Create model based on provider
-        if provider == "mlx":
-            from strands_mlx import MLXModel
-
-            model_name = os.getenv("STRANDS_MODEL_ID", "mlx-community/Qwen3-1.7B-4bit")
-            return (
-                MLXModel(
-                    model_id=model_name,
-                    params={"temperature": temperature, "max_tokens": max_tokens},
-                ),
-                model_name,
-            )
-
-        elif provider == "gemini":
-            from strands.models.gemini import GeminiModel
-
-            model_name = os.getenv("STRANDS_MODEL_ID", "gemini-2.5-flash")
-            api_key = os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
-            return (
-                GeminiModel(
-                    client_args={"api_key": api_key},
-                    model_id=model_name,
-                    params={"temperature": temperature, "max_tokens": max_tokens},
-                ),
-                model_name,
-            )
-
-        elif provider == "ollama":
-            from strands.models.ollama import OllamaModel
-
-            # Smart model selection based on OS
-            os_type = platform.system()
-            if os_type == "Darwin":
-                model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:1.7b")
-            elif os_type == "Linux":
-                model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:30b")
-            else:
-                model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:8b")
-
-            return (
-                OllamaModel(
-                    host=os.getenv("OLLAMA_HOST", "http://localhost:11434"),
-                    model_id=model_name,
-                    temperature=temperature,
-                    num_predict=max_tokens,
-                    keep_alive="5m",
-                ),
-                model_name,
-            )
-
-        else:
-            # All other providers via create_model utility
-            # Supports: bedrock, anthropic, openai, github, cohere, writer, mistral, litellm
-            from strands_tools.utils.models.model import create_model
+            logger.info(
+                f"DevDuck agent initialized successfully with model {self.model}"
+            )

-
-
-
+        except Exception as e:
+            logger.error(f"Initialization failed: {e}")
+            self._self_heal(e)

     def _build_system_prompt(self):
         """Build adaptive system prompt based on environment
@@ -1103,20 +784,13 @@ class DevDuck:

         Learning: Always check source code truth over conversation memory!
         """
-        # Current date and time
-        current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-        current_date = datetime.now().strftime("%A, %B %d, %Y")
-        current_time = datetime.now().strftime("%I:%M %p")
-
         session_id = f"devduck-{datetime.now().strftime('%Y-%m-%d')}"
-        self.session_id = session_id
-
-        # Get own file path for self-modification awareness
-        own_file_path = Path(__file__).resolve()

         # Get own source code for self-awareness
         own_code = get_own_source_code()

+        # print(own_code)
+
         # Get recent conversation history context (with error handling)
         try:
             recent_context = get_last_messages()
@@ -1138,8 +812,6 @@ Python: {self.env_info['python']}
 Model: {self.model}
 Hostname: {self.env_info['hostname']}
 Session ID: {session_id}
-Current Time: {current_datetime} ({current_date} at {current_time})
-My Path: {own_file_path}

 You are:
 - Minimalist: Brief, direct responses
@@ -1162,7 +834,6 @@ You have full access to your own source code for self-awareness and self-modific
 - **No Restart Needed** - Tools are auto-loaded and ready to use instantly
 - **Live Development** - Modify existing tools while running and test immediately
 - **Full Python Access** - Create any Python functionality as a tool
-- **Agent Protection** - Hot-reload waits until agent finishes current task

 ## Dynamic Tool Loading:
 - **Install Tools** - Use install_tools() to load tools from any Python package
@@ -1170,48 +841,64 @@ You have full access to your own source code for self-awareness and self-modific
 - Expands capabilities without restart
 - Access to entire Python ecosystem

-##
-Set DEVDUCK_TOOLS for custom tools:
-- Format: package1:tool1,tool2;package2:tool3,tool4
-- Example: strands_tools:shell,editor;strands_fun_tools:clipboard
-- Tools are filtered - only specified tools are loaded
-- Load the speech_to_speech tool when it's needed
-- Offload the tools when you don't need
-
-## MCP Integration:
+## MCP Server:
 - **Expose as MCP Server** - Use mcp_server() to expose devduck via MCP protocol
 - Example: mcp_server(action="start", port=8000)
 - Connect from Claude Desktop, other agents, or custom clients
 - Full bidirectional communication

-
-- Format: JSON with "mcpServers" object
-- Stdio servers: command, args, env keys
-- HTTP servers: url, headers keys
-- Example: MCP_SERVERS='{{"mcpServers": {{"strands": {{"command": "uvx", "args": ["strands-agents-mcp-server"]}}}}}}'
-- Tools from MCP servers automatically available in agent context
+## Tool Creation Patterns:

-
-
-
-
-- Seamless memory across sessions without manual tool calls
+### **1. @tool Decorator:**
+```python
+# ./tools/calculate_tip.py
+from strands import tool

-
-
-
-
-
-
+@tool
+def calculate_tip(amount: float, percentage: float = 15.0) -> str:
+    \"\"\"Calculate tip and total for a bill.
+
+    Args:
+        amount: Bill amount in dollars
+        percentage: Tip percentage (default: 15.0)
+
+    Returns:
+        str: Formatted tip calculation result
+    \"\"\"
+    tip = amount * (percentage / 100)
+    total = amount + tip
+    return f"Tip: {{tip:.2f}}, Total: {{total:.2f}}"
+```
+
+### **2. Action-Based Pattern:**
+```python
+# ./tools/weather.py
+from typing import Dict, Any
+from strands import tool

-
-
-
-
-
-
+@tool
+def weather(action: str, location: str = None) -> Dict[str, Any]:
+    \"\"\"Comprehensive weather information tool.
+
+    Args:
+        action: Action to perform (current, forecast, alerts)
+        location: City name (required)
+
+    Returns:
+        Dict containing status and response content
+    \"\"\"
+    if action == "current":
+        return {{"status": "success", "content": [{{"text": f"Weather for {{location}}"}}]}}
+    elif action == "forecast":
+        return {{"status": "success", "content": [{{"text": f"Forecast for {{location}}"}}]}}
+    else:
+        return {{"status": "error", "content": [{{"text": f"Unknown action: {{action}}"}}]}}
+```

-
+## System Prompt Management:
+- Use system_prompt(action='get') to view current prompt
+- Use system_prompt(action='set', prompt='new text') to update
+- Changes persist in SYSTEM_PROMPT environment variable

 ## Shell Commands:
 - Prefix with ! to execute shell commands directly
@@ -1223,7 +910,7 @@ When you learn something valuable during conversations:
 - Communication: **MINIMAL WORDS**
 - Efficiency: **Speed is paramount**

-{
+{os.getenv('SYSTEM_PROMPT', '')}"""

     def _self_heal(self, error):
         """Attempt self-healing when errors occur"""
@@ -1237,11 +924,62 @@ When you learn something valuable during conversations:
         self._heal_count += 1

         # Limit recursion - if we've tried more than 3 times, give up
-        if self._heal_count >
+        if self._heal_count > 3:
             print(f"🦆 Self-healing failed after {self._heal_count} attempts")
             print("🦆 Please fix the issue manually and restart")
             sys.exit(1)

+        # Common healing strategies
+        if "not found" in str(error).lower() and "model" in str(error).lower():
+            print("🦆 Model not found - trying to pull model...")
+            try:
+                # Try to pull the model
+                result = subprocess.run(
+                    ["ollama", "pull", self.model], capture_output=True, timeout=60
+                )
+                if result.returncode == 0:
+                    print(f"🦆 Successfully pulled {self.model}")
+                else:
+                    print(f"🦆 Failed to pull {self.model}, trying fallback...")
+                    # Fallback to basic models
+                    fallback_models = ["llama3.2:1b", "qwen2.5:0.5b", "gemma2:2b"]
+                    for fallback in fallback_models:
+                        try:
+                            subprocess.run(
+                                ["ollama", "pull", fallback],
+                                capture_output=True,
+                                timeout=30,
+                            )
+                            self.model = fallback
+                            print(f"🦆 Using fallback model: {fallback}")
+                            break
+                        except:
+                            continue
+            except Exception as pull_error:
+                print(f"🦆 Model pull failed: {pull_error}")
+                # Ultra-minimal fallback
+                self.model = "llama3.2:1b"
+
+        elif "ollama" in str(error).lower():
+            print("🦆 Ollama issue - checking service...")
+            try:
+                # Check if ollama is running
+                result = subprocess.run(
+                    ["ollama", "list"], capture_output=True, timeout=5
+                )
+                if result.returncode != 0:
+                    print("🦆 Starting ollama service...")
+                    subprocess.Popen(["ollama", "serve"])
+                    import time
+
+                    time.sleep(3)  # Wait for service to start
+            except Exception as ollama_error:
+                print(f"🦆 Ollama service issue: {ollama_error}")
+
+        elif "import" in str(error).lower():
+            print("🦆 Import issue - reinstalling dependencies...")
+            ensure_deps()
+
         elif "connection" in str(error).lower():
             print("🦆 Connection issue - checking ollama service...")
             try:
@@ -1257,242 +995,26 @@ When you learn something valuable during conversations:
             print("🦆 Running in minimal mode...")
             self.agent = None
 
-    def _is_port_available(self, port):
-        """Check if a port is available"""
-        try:
-            test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-            test_socket.bind(("0.0.0.0", port))
-            test_socket.close()
-            return True
-        except OSError:
-            return False
-
-    def _is_socket_available(self, socket_path):
-        """Check if a Unix socket is available"""
-
-        # If socket file doesn't exist, it's available
-        if not os.path.exists(socket_path):
-            return True
-        # If it exists, try to connect to see if it's in use
-        try:
-            test_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-            test_socket.connect(socket_path)
-            test_socket.close()
-            return False  # Socket is in use
-        except (ConnectionRefusedError, FileNotFoundError):
-            # Socket file exists but not in use - remove stale socket
-            try:
-                os.remove(socket_path)
-                return True
-            except:
-                return False
-        except Exception:
-            return False
-
-    def _find_available_port(self, start_port, max_attempts=10):
-        """Find an available port starting from start_port"""
-        for offset in range(max_attempts):
-            port = start_port + offset
-            if self._is_port_available(port):
-                return port
-        return None
-
-    def _find_available_socket(self, base_socket_path, max_attempts=10):
-        """Find an available socket path"""
-        if self._is_socket_available(base_socket_path):
-            return base_socket_path
-        # Try numbered alternatives
-        for i in range(1, max_attempts):
-            alt_socket = f"{base_socket_path}.{i}"
-            if self._is_socket_available(alt_socket):
-                return alt_socket
-        return None
-
-    def _start_servers(self):
-        """Auto-start configured servers with port conflict handling"""
-        logger.info("Auto-starting servers...")
-        print("🦆 Auto-starting servers...")
-
-        # Start servers in order: IPC, TCP, WS, MCP
-        server_order = ["ipc", "tcp", "ws", "mcp"]
-
-        for server_type in server_order:
-            if server_type not in self.servers:
-                continue
-
-            config = self.servers[server_type]
-
-            # Check if server is enabled
-            if not config.get("enabled", True):
-                continue
-
-            # Check for LOOKUP_KEY (conditional start based on env var)
-            if "LOOKUP_KEY" in config:
-                lookup_key = config["LOOKUP_KEY"]
-                if not os.getenv(lookup_key):
-                    logger.info(f"Skipping {server_type} - {lookup_key} not set")
-                    continue
-
-            # Start the server with port conflict handling
-            try:
-                if server_type == "tcp":
-                    port = config.get("port", 9999)
-
-                    # Check port availability BEFORE attempting to start
-                    if not self._is_port_available(port):
-                        alt_port = self._find_available_port(port + 1)
-                        if alt_port:
-                            logger.info(f"Port {port} in use, using {alt_port}")
-                            print(f"🦆 Port {port} in use, using {alt_port}")
-                            port = alt_port
-                        else:
-                            logger.warning(f"No available ports found for TCP server")
-                            continue
-
-                    result = self.agent.tool.tcp(
-                        action="start_server", port=port, record_direct_tool_call=False
-                    )
-
-                    if result.get("status") == "success":
-                        logger.info(f"✓ TCP server started on port {port}")
-                        print(f"🦆 ✓ TCP server: localhost:{port}")
-
-                elif server_type == "ws":
-                    port = config.get("port", 8080)
-
-                    # Check port availability BEFORE attempting to start
-                    if not self._is_port_available(port):
-                        alt_port = self._find_available_port(port + 1)
-                        if alt_port:
-                            logger.info(f"Port {port} in use, using {alt_port}")
-                            print(f"🦆 Port {port} in use, using {alt_port}")
-                            port = alt_port
-                        else:
-                            logger.warning(
-                                f"No available ports found for WebSocket server"
-                            )
-                            continue
-
-                    result = self.agent.tool.websocket(
-                        action="start_server", port=port, record_direct_tool_call=False
-                    )
-
-                    if result.get("status") == "success":
-                        logger.info(f"✓ WebSocket server started on port {port}")
-                        print(f"🦆 ✓ WebSocket server: localhost:{port}")
-
-                elif server_type == "mcp":
-                    port = config.get("port", 8000)
-
-                    # Check port availability BEFORE attempting to start
-                    if not self._is_port_available(port):
-                        alt_port = self._find_available_port(port + 1)
-                        if alt_port:
-                            logger.info(f"Port {port} in use, using {alt_port}")
-                            print(f"🦆 Port {port} in use, using {alt_port}")
-                            port = alt_port
-                        else:
-                            logger.warning(f"No available ports found for MCP server")
-                            continue
-
-                    result = self.agent.tool.mcp_server(
-                        action="start",
-                        transport="http",
-                        port=port,
-                        expose_agent=True,
-                        agent=self.agent,
-                        record_direct_tool_call=False,
-                    )
-
-                    if result.get("status") == "success":
-                        logger.info(f"✓ MCP HTTP server started on port {port}")
-                        print(f"🦆 ✓ MCP server: http://localhost:{port}/mcp")
-
-                elif server_type == "ipc":
-                    socket_path = config.get("socket_path", "/tmp/devduck_main.sock")
-
-                    # Check socket availability BEFORE attempting to start
-                    available_socket = self._find_available_socket(socket_path)
-                    if not available_socket:
-                        logger.warning(
-                            f"No available socket paths found for IPC server"
-                        )
-                        continue
-
-                    if available_socket != socket_path:
-                        logger.info(
-                            f"Socket {socket_path} in use, using {available_socket}"
-                        )
-                        print(
-                            f"🦆 Socket {socket_path} in use, using {available_socket}"
-                        )
-                        socket_path = available_socket
-
-                    result = self.agent.tool.ipc(
-                        action="start_server",
-                        socket_path=socket_path,
-                        record_direct_tool_call=False,
-                    )
-
-                    if result.get("status") == "success":
-                        logger.info(f"✓ IPC server started on {socket_path}")
-                        print(f"🦆 ✓ IPC server: {socket_path}")
-                        # TODO: support custom file path here so we can trigger foreign python function like another file
-            except Exception as e:
-                logger.error(f"Failed to start {server_type} server: {e}")
-                print(f"🦆 ⚠ {server_type.upper()} server failed: {e}")
-
     def __call__(self, query):
-        """Make the agent callable
+        """Make the agent callable"""
         if not self.agent:
             logger.warning("Agent unavailable - attempted to call with query")
             return "🦆 Agent unavailable - try: devduck.restart()"
 
         try:
             logger.info(f"Agent call started: {query[:100]}...")
-
             # Mark agent as executing to prevent hot-reload interruption
             self._agent_executing = True
 
-            # 📚 Knowledge Base Retrieval (BEFORE agent runs)
-            knowledge_base_id = os.getenv("DEVDUCK_KNOWLEDGE_BASE_ID")
-            if knowledge_base_id and hasattr(self.agent, "tool"):
-                try:
-                    if "retrieve" in self.agent.tool_names:
-                        logger.info(f"Retrieving context from KB: {knowledge_base_id}")
-                        self.agent.tool.retrieve(
-                            text=query, knowledgeBaseId=knowledge_base_id
-                        )
-                except Exception as e:
-                    logger.warning(f"KB retrieval failed: {e}")
-
-            # Run the agent
             result = self.agent(query)
 
-            #
-            if knowledge_base_id and hasattr(self.agent, "tool"):
-                try:
-                    if "store_in_kb" in self.agent.tool_names:
-                        conversation_content = f"Input: {query}, Result: {result!s}"
-                        conversation_title = f"DevDuck: {datetime.now().strftime('%Y-%m-%d')} | {query[:500]}"
-                        self.agent.tool.store_in_kb(
-                            content=conversation_content,
-                            title=conversation_title,
-                            knowledge_base_id=knowledge_base_id,
-                        )
-                        logger.info(f"Stored conversation in KB: {knowledge_base_id}")
-                except Exception as e:
-                    logger.warning(f"KB storage failed: {e}")
-
-            # Clear executing flag
+            # Agent finished - check if reload was pending
             self._agent_executing = False
-
-            # Check for pending hot-reload
+            logger.info("Agent call completed successfully")
             if self._reload_pending:
                 logger.info("Triggering pending hot-reload after agent completion")
-                print("
-                self.
+                print("🦆 Agent finished - triggering pending hot-reload...")
+                self.hot_reload()
 
             return result
         except Exception as e:
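The preflight helpers removed in the hunk above implement a bind-and-fallback pattern: try to bind the requested port, and if that fails scan forward for the next free one. A minimal standalone sketch of the same pattern, with illustrative function names that are not part of the package:

import socket

def port_is_free(port, host="0.0.0.0"):
    # A port counts as free if we can bind a listening socket to it.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((host, port))
            return True
        except OSError:
            return False

def next_free_port(start, attempts=10):
    # Scan a small range and return the first port that binds cleanly, else None.
    for port in range(start, start + attempts):
        if port_is_free(port):
            return port
    return None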
@@ -1506,12 +1028,12 @@ When you learn something valuable during conversations:
 
     def restart(self):
         """Restart the agent"""
-        print("
-        logger.debug("\n🦆 Restarting...")
+        print("🦆 Restarting...")
         self.__init__()
 
     def _start_file_watcher(self):
         """Start background file watcher for auto hot-reload"""
+        import threading
 
         logger.info("Starting file watcher for hot-reload")
         # Get the path to this file
@@ -1520,7 +1042,6 @@ When you learn something valuable during conversations:
             self._watch_file.stat().st_mtime if self._watch_file.exists() else None
         )
         self._watcher_running = True
-        self._is_reloading = False
 
         # Start watcher thread
         self._watcher_thread = threading.Thread(
@@ -1531,13 +1052,15 @@ When you learn something valuable during conversations:
 
     def _file_watcher_thread(self):
         """Background thread that watches for file changes"""
+        import time
+
         last_reload_time = 0
         debounce_seconds = 3  # 3 second debounce
 
        while self._watcher_running:
            try:
-                # Skip if currently reloading
-                if self
+                # Skip if currently reloading to prevent triggering during exec()
+                if getattr(self, "_is_reloading", False):
                    time.sleep(1)
                    continue
 
@@ -1551,36 +1074,34 @@ When you learn something valuable during conversations:
                    and current_mtime > self._last_modified
                    and current_time - last_reload_time > debounce_seconds
                ):
-
+
+                    print(f"🦆 Detected changes in {self._watch_file.name}!")
+                    self._last_modified = current_mtime
                    last_reload_time = current_time
 
                    # Check if agent is currently executing
-                    if self
+                    if getattr(self, "_agent_executing", False):
                        logger.info(
                            "Code change detected but agent is executing - reload pending"
                        )
                        print(
-                            "
+                            "🦆 Agent is currently executing - reload will trigger after completion"
                        )
                        self._reload_pending = True
-                        # Don't update _last_modified yet - keep detecting the change
                    else:
                        # Safe to reload immediately
-                        self._last_modified = current_mtime
                        logger.info(
                            f"Code change detected in {self._watch_file.name} - triggering hot-reload"
                        )
                        time.sleep(
                            0.5
                        )  # Small delay to ensure file write is complete
-                        self.
+                        self.hot_reload()
                else:
-
-                    if not self._reload_pending:
-                        self._last_modified = current_mtime
+                    self._last_modified = current_mtime
 
            except Exception as e:
-
+                print(f"🦆 File watcher error: {e}")
 
            # Check every 1 second
            time.sleep(1)
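The watcher hunk above polls the module's mtime once per second and debounces reloads by a few seconds so a single save does not trigger several restarts. A self-contained sketch of that poll-and-debounce loop, using hypothetical names (watch_file, on_change) rather than the package's internals:

import time
from pathlib import Path

def watch_file(path: Path, on_change, debounce=3.0, interval=1.0):
    # Poll the file's mtime; fire the callback at most once per debounce window.
    last_mtime = path.stat().st_mtime if path.exists() else None
    last_fire = 0.0
    while True:
        time.sleep(interval)
        if not path.exists():
            continue
        mtime = path.stat().st_mtime
        now = time.time()
        if last_mtime is not None and mtime > last_mtime and now - last_fire > debounce:
            last_mtime, last_fire = mtime, now
            on_change(path)
        else:
            last_mtime = mtime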
@@ -1588,45 +1109,41 @@ When you learn something valuable during conversations:
     def _stop_file_watcher(self):
         """Stop the file watcher"""
         self._watcher_running = False
-
+        print("🦆 File watcher stopped")
 
-    def
+    def hot_reload(self):
         """Hot-reload by restarting the entire Python process with fresh code"""
         logger.info("Hot-reload initiated")
-        print("
+        print("🦆 Hot-reloading via process restart...")
 
         try:
             # Set reload flag to prevent recursive reloads during shutdown
-            self
+            if hasattr(self, "_is_reloading") and self._is_reloading:
+                print("🦆 Reload already in progress, skipping")
+                return
 
-
-            if hasattr(self, "_watch_file") and self._watch_file.exists():
-                self._last_modified = self._watch_file.stat().st_mtime
-
-            # Reset pending flag
-            self._reload_pending = False
+            self._is_reloading = True
 
             # Stop the file watcher
             if hasattr(self, "_watcher_running"):
                 self._watcher_running = False
 
-            print("
-            logger.debug("\n🦆 Restarting process with fresh code...")
+            print("🦆 Restarting process with fresh code...")
 
             # Restart the entire Python process
             # This ensures all code is freshly loaded
             os.execv(sys.executable, [sys.executable] + sys.argv)
 
         except Exception as e:
-
-            print(
-            print("\n🦆 Falling back to manual restart")
+            print(f"🦆 Hot-reload failed: {e}")
+            print("🦆 Falling back to manual restart")
             self._is_reloading = False
 
     def status(self):
         """Show current status"""
         return {
             "model": self.model,
+            "host": self.ollama_host,
             "env": self.env_info,
             "agent_ready": self.agent is not None,
             "tools": len(self.tools) if hasattr(self, "tools") else 0,
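hot_reload() above re-executes the current interpreter with os.execv, which is the blunt but reliable way to guarantee every module is re-imported from disk. A minimal sketch of that restart pattern under a module-level guard flag (the flag name here is illustrative, not taken from the package):

import os
import sys

_restarting = False

def restart_process():
    # Replace this process with a fresh interpreter running the same command line.
    # If os.execv succeeds, nothing after it executes in this process.
    global _restarting
    if _restarting:
        return
    _restarting = True
    os.execv(sys.executable, [sys.executable] + sys.argv)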
@@ -1640,15 +1157,7 @@ When you learn something valuable during conversations:
 
 
 # 🦆 Auto-initialize when imported
-
-# Also check if --mcp flag is present to skip auto-starting servers
-_auto_start = os.getenv("DEVDUCK_AUTO_START_SERVERS", "true").lower() == "true"
-
-# Disable auto-start if --mcp flag is present (stdio mode)
-if "--mcp" in sys.argv:
-    _auto_start = False
-
-devduck = DevDuck(auto_start_servers=_auto_start)
+devduck = DevDuck()
 
 
 # 🚀 Convenience functions
@@ -1669,7 +1178,7 @@ def restart():
 
 def hot_reload():
     """Quick hot-reload without restart"""
-    devduck.
+    devduck.hot_reload()
 
 
 def extract_commands_from_history():
@@ -1755,7 +1264,7 @@ def interactive():
     print(f"📝 Logs: {LOG_DIR}")
     print("Type 'exit', 'quit', or 'q' to quit.")
     print("Prefix with ! to run shell commands (e.g., ! ls -la)")
-    print("
+    print("-" * 50)
     logger.info("Interactive mode started")
 
     # Set up prompt_toolkit with history
@@ -1770,10 +1279,6 @@ def interactive():
     all_commands = list(set(base_commands + history_commands))
     completer = WordCompleter(all_commands, ignore_case=True)
 
-    # Track consecutive interrupts for double Ctrl+C to exit
-    interrupt_count = 0
-    last_interrupt = 0
-
     while True:
         try:
             # Use prompt_toolkit for enhanced input with arrow key support
@@ -1783,11 +1288,9 @@ def interactive():
                 auto_suggest=AutoSuggestFromHistory(),
                 completer=completer,
                 complete_while_typing=True,
+                mouse_support=False,  # breaks scrolling when enabled
             )
 
-            # Reset interrupt count on successful prompt
-            interrupt_count = 0
-
             # Check for exit command
             if q.lower() in ["exit", "quit", "q"]:
                 print("\n🦆 Goodbye!")
@@ -1810,10 +1313,6 @@ def interactive():
                    )
                    devduck._agent_executing = False
 
-                    # Reset terminal to fix rendering issues after command output
-                    print("\r", end="", flush=True)
-                    sys.stdout.flush()
-
                    # Append shell command to history
                    append_to_shell_history(q, result["content"][0]["text"])
 
@@ -1822,17 +1321,95 @@ def interactive():
                        print(
                            "🦆 Shell command finished - triggering pending hot-reload..."
                        )
-                        devduck.
+                        devduck.hot_reload()
                else:
                    print("🦆 Agent unavailable")
            except Exception as e:
                devduck._agent_executing = False  # Reset on error
                print(f"🦆 Shell command error: {e}")
-                # Reset terminal on error too
-                print("\r", end="", flush=True)
-                sys.stdout.flush()
                continue
 
+            # Get recent conversation context
+            recent_context = get_last_messages()
+
+            # Get recent logs
+            recent_logs = get_recent_logs()
+
+            # Update system prompt before each call with history context
+            if devduck.agent:
+                # Rebuild system prompt with history
+                own_code = get_own_source_code()
+                session_id = f"devduck-{datetime.now().strftime('%Y-%m-%d')}"
+
+                devduck.agent.system_prompt = f"""🦆 You are DevDuck - an extreme minimalist, self-adapting agent.
+
+Environment: {devduck.env_info['os']} {devduck.env_info['arch']}
+Python: {devduck.env_info['python']}
+Model: {devduck.model}
+Hostname: {devduck.env_info['hostname']}
+Session ID: {session_id}
+
+You are:
+- Minimalist: Brief, direct responses
+- Self-healing: Adapt when things break
+- Efficient: Get things done fast
+- Pragmatic: Use what works
+
+Current working directory: {devduck.env_info['cwd']}
+
+{recent_context}
+{recent_logs}
+
+## Your Own Implementation:
+You have full access to your own source code for self-awareness and self-modification:
+
+{own_code}
+
+## Hot Reload System Active:
+- **Instant Tool Creation** - Save any .py file in `./tools/` and it becomes immediately available
+- **No Restart Needed** - Tools are auto-loaded and ready to use instantly
+- **Live Development** - Modify existing tools while running and test immediately
+- **Full Python Access** - Create any Python functionality as a tool
+
+## Dynamic Tool Loading:
+- **Install Tools** - Use install_tools() to load tools from any Python package
+- Example: install_tools(action="install_and_load", package="strands-fun-tools", module="strands_fun_tools")
+- Expands capabilities without restart
+- Access to entire Python ecosystem
+
+## MCP Server:
+- **Expose as MCP Server** - Use mcp_server() to expose devduck via MCP protocol
+- Example: mcp_server(action="start", port=8000)
+- Connect from Claude Desktop, other agents, or custom clients
+- Full bidirectional communication
+
+## System Prompt Management:
+- Use system_prompt(action='get') to view current prompt
+- Use system_prompt(action='set', prompt='new text') to update
+- Changes persist in SYSTEM_PROMPT environment variable
+
+## Shell Commands:
+- Prefix with ! to execute shell commands directly
+- Example: ! ls -la (lists files)
+- Example: ! pwd (shows current directory)
+
+**Response Format:**
+- Tool calls: **MAXIMUM PARALLELISM - ALWAYS**
+- Communication: **MINIMAL WORDS**
+- Efficiency: **Speed is paramount**
+
+{os.getenv('SYSTEM_PROMPT', '')}"""
+
+                # Update model if MODEL_PROVIDER changed
+                model_provider = os.getenv("MODEL_PROVIDER")
+                if model_provider:
+                    try:
+                        from strands_tools.utils.models.model import create_model
+
+                        devduck.agent.model = create_model(provider=model_provider)
+                    except Exception as e:
+                        print(f"🦆 Model update error: {e}")
+
            # Execute the agent with user input
            result = ask(q)
 
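The block added in the hunk above rebuilds the agent's system prompt before every turn by interpolating live environment info, recent history, and the agent's own source into an f-string. A stripped-down sketch of that per-turn rebuild, with placeholder helper names rather than the package's functions:

import os
from datetime import datetime

def build_system_prompt(env, model, recent_context, own_code):
    # Assemble the prompt from current state so each turn sees fresh context.
    session_id = f"devduck-{datetime.now().strftime('%Y-%m-%d')}"
    return (
        f"You are DevDuck.\n"
        f"Environment: {env.get('os')} {env.get('arch')}\n"
        f"Model: {model}\n"
        f"Session ID: {session_id}\n\n"
        f"{recent_context}\n\n"
        f"## Your Own Implementation:\n{own_code}\n\n"
        f"{os.getenv('SYSTEM_PROMPT', '')}"
    )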
@@ -1840,21 +1417,7 @@ def interactive():
             append_to_shell_history(q, str(result))
 
         except KeyboardInterrupt:
-
-
-            # Check if this is a consecutive interrupt within 2 seconds
-            if current_time - last_interrupt < 2:
-                interrupt_count += 1
-                if interrupt_count >= 2:
-                    print("\n🦆 Exiting...")
-                    break
-                else:
-                    print("\n🦆 Interrupted. Press Ctrl+C again to exit.")
-            else:
-                interrupt_count = 1
-                print("\n🦆 Interrupted. Press Ctrl+C again to exit.")
-
-            last_interrupt = current_time
+            print("\n🦆 Interrupted. Type 'exit' to quit.")
             continue
         except Exception as e:
             print(f"🦆 Error: {e}")
@@ -1863,73 +1426,9 @@ def interactive():
 
 def cli():
     """CLI entry point for pip-installed devduck command"""
-    import argparse
-
-    parser = argparse.ArgumentParser(
-        description="🦆 DevDuck - Extreme minimalist self-adapting agent",
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-        epilog="""
-Examples:
-  devduck                    # Start interactive mode
-  devduck "your query here"  # One-shot query
-  devduck --mcp              # MCP stdio mode (for Claude Desktop)
-
-Tool Configuration:
-  export DEVDUCK_TOOLS="strands_tools:shell,editor:strands_fun_tools:clipboard"
-
-Claude Desktop Config:
-  {
-    "mcpServers": {
-      "devduck": {
-        "command": "uvx",
-        "args": ["devduck", "--mcp"]
-      }
-    }
-  }
-""",
-    )
-
-    # Query argument
-    parser.add_argument("query", nargs="*", help="Query to send to the agent")
-
-    # MCP stdio mode flag
-    parser.add_argument(
-        "--mcp",
-        action="store_true",
-        help="Start MCP server in stdio mode (for Claude Desktop integration)",
-    )
-
-    args = parser.parse_args()
-
     logger.info("CLI mode started")
-
-
-    if args.mcp:
-        logger.info("Starting MCP server in stdio mode (blocking, foreground)")
-        print("🦆 Starting MCP stdio server...", file=sys.stderr)
-
-        # Don't auto-start HTTP/TCP/WS servers for stdio mode
-        if devduck.agent:
-            try:
-                # Start MCP server in stdio mode - this BLOCKS until terminated
-                devduck.agent.tool.mcp_server(
-                    action="start",
-                    transport="stdio",
-                    expose_agent=True,
-                    agent=devduck.agent,
-                    record_direct_tool_call=False,
-                )
-            except Exception as e:
-                logger.error(f"Failed to start MCP stdio server: {e}")
-                print(f"🦆 Error: {e}", file=sys.stderr)
-                sys.exit(1)
-        else:
-            print("🦆 Agent not available", file=sys.stderr)
-            sys.exit(1)
-        return
-
-    if args.query:
-        query = " ".join(args.query)
+    if len(sys.argv) > 1:
+        query = " ".join(sys.argv[1:])
         logger.info(f"CLI query: {query}")
         result = ask(query)
         print(result)
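The 0.2.0 cli() drops argparse and the --mcp branch in favor of plain sys.argv handling: any arguments become a one-shot query, otherwise the interactive loop runs. A minimal sketch of that dispatch, assuming ask() and interactive() as defined in this module:

import sys

def cli():
    # One-shot mode: join all CLI arguments into a single query.
    if len(sys.argv) > 1:
        print(ask(" ".join(sys.argv[1:])))
    else:
        # No arguments: fall back to the interactive REPL.
        interactive()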