devduck 0.1.0-py3-none-any.whl → 0.1.1766644714-py3-none-any.whl

This diff shows the content of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.


This version of devduck might be problematic.

Files changed (37)
  1. devduck/__init__.py +1439 -483
  2. devduck/__main__.py +7 -0
  3. devduck/_version.py +34 -0
  4. devduck/agentcore_handler.py +76 -0
  5. devduck/test_redduck.py +0 -1
  6. devduck/tools/__init__.py +47 -0
  7. devduck/tools/_ambient_input.py +423 -0
  8. devduck/tools/_tray_app.py +530 -0
  9. devduck/tools/agentcore_agents.py +197 -0
  10. devduck/tools/agentcore_config.py +441 -0
  11. devduck/tools/agentcore_invoke.py +423 -0
  12. devduck/tools/agentcore_logs.py +320 -0
  13. devduck/tools/ambient.py +157 -0
  14. devduck/tools/create_subagent.py +659 -0
  15. devduck/tools/fetch_github_tool.py +201 -0
  16. devduck/tools/install_tools.py +409 -0
  17. devduck/tools/ipc.py +546 -0
  18. devduck/tools/mcp_server.py +600 -0
  19. devduck/tools/scraper.py +935 -0
  20. devduck/tools/speech_to_speech.py +850 -0
  21. devduck/tools/state_manager.py +292 -0
  22. devduck/tools/store_in_kb.py +187 -0
  23. devduck/tools/system_prompt.py +608 -0
  24. devduck/tools/tcp.py +263 -94
  25. devduck/tools/tray.py +247 -0
  26. devduck/tools/use_github.py +438 -0
  27. devduck/tools/websocket.py +498 -0
  28. devduck-0.1.1766644714.dist-info/METADATA +717 -0
  29. devduck-0.1.1766644714.dist-info/RECORD +33 -0
  30. {devduck-0.1.0.dist-info → devduck-0.1.1766644714.dist-info}/entry_points.txt +1 -0
  31. devduck-0.1.1766644714.dist-info/licenses/LICENSE +201 -0
  32. devduck/install.sh +0 -42
  33. devduck-0.1.0.dist-info/METADATA +0 -106
  34. devduck-0.1.0.dist-info/RECORD +0 -11
  35. devduck-0.1.0.dist-info/licenses/LICENSE +0 -21
  36. {devduck-0.1.0.dist-info → devduck-0.1.1766644714.dist-info}/WHEEL +0 -0
  37. {devduck-0.1.0.dist-info → devduck-0.1.1766644714.dist-info}/top_level.txt +0 -0
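
Both artifacts are standard wheel (zip) archives, so the file listing above can be reproduced locally with nothing but the standard library. A minimal sketch, assuming both wheels have been downloaded into the working directory (the local filenames are assumptions, not part of this diff):

```python
# Sketch: compare the file listings of the two wheels (wheels are zip archives).
import zipfile

OLD_WHEEL = "devduck-0.1.0-py3-none-any.whl"           # assumed local path
NEW_WHEEL = "devduck-0.1.1766644714-py3-none-any.whl"  # assumed local path

old_files = set(zipfile.ZipFile(OLD_WHEEL).namelist())
new_files = set(zipfile.ZipFile(NEW_WHEEL).namelist())

print("added:  ", sorted(new_files - old_files))
print("removed:", sorted(old_files - new_files))
print("kept:   ", sorted(old_files & new_files))
```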
devduck/__init__.py CHANGED
@@ -3,242 +3,372 @@
3
3
  🦆 devduck - extreme minimalist self-adapting agent
4
4
  one file. self-healing. runtime dependencies. adaptive.
5
5
  """
6
+ import os
6
7
  import sys
7
8
  import subprocess
8
- import os
9
+ import threading
9
10
  import platform
10
11
  import socket
12
+ import logging
13
+ import tempfile
14
+ import time
15
+ import warnings
16
+ import json
11
17
  from pathlib import Path
12
18
  from datetime import datetime
13
19
  from typing import Dict, Any
20
+ from logging.handlers import RotatingFileHandler
21
+ from strands import Agent, tool
14
22
 
15
- os.environ["BYPASS_TOOL_CONSENT"] = "true"
16
- os.environ["STRANDS_TOOL_CONSOLE_MODE"] = "enabled"
23
+ # Import system prompt helper for loading prompts from files
24
+ try:
25
+ from devduck.tools.system_prompt import _get_system_prompt
26
+ except ImportError:
27
+ # Fallback if tools module not available yet
28
+ def _get_system_prompt(repository=None, variable_name="SYSTEM_PROMPT"):
29
+ return os.getenv(variable_name, "")
17
30
 
18
31
 
19
- # 🔧 Self-healing dependency installer
20
- def ensure_deps():
21
- """Install dependencies at runtime if missing"""
22
- deps = ["strands-agents", "strands-agents[ollama]", "strands-agents[openai]", "strands-agents[anthropic]", "strands-agents-tools"]
32
+ warnings.filterwarnings("ignore", message=".*pkg_resources is deprecated.*")
33
+ warnings.filterwarnings("ignore", message=".*cache_prompt is deprecated.*")
23
34
 
24
- for dep in deps:
25
- try:
26
- if "strands" in dep:
27
- import strands
35
+ os.environ["BYPASS_TOOL_CONSENT"] = os.getenv("BYPASS_TOOL_CONSENT", "true")
36
+ os.environ["STRANDS_TOOL_CONSOLE_MODE"] = "enabled"
37
+ os.environ["EDITOR_DISABLE_BACKUP"] = "true"
28
38
 
29
- break
30
- except ImportError:
31
- print(f"🦆 Installing {dep}...")
32
- subprocess.check_call(
33
- [sys.executable, "-m", "pip", "install", dep],
34
- stdout=subprocess.DEVNULL,
35
- stderr=subprocess.DEVNULL,
36
- )
39
+ LOG_DIR = Path(tempfile.gettempdir()) / "devduck" / "logs"
40
+ LOG_DIR.mkdir(parents=True, exist_ok=True)
37
41
 
42
+ LOG_FILE = LOG_DIR / "devduck.log"
43
+ logger = logging.getLogger("devduck")
44
+ logger.setLevel(logging.DEBUG)
38
45
 
39
- # 🌍 Environment adaptation
40
- def adapt_to_env():
41
- """Self-adapt based on environment"""
42
- env_info = {
43
- "os": platform.system(),
44
- "arch": platform.machine(),
45
- "python": sys.version_info,
46
- "cwd": str(Path.cwd()),
47
- "home": str(Path.home()),
48
- "shell": os.environ.get("SHELL", "unknown"),
49
- "hostname": socket.gethostname(),
50
- }
46
+ file_handler = RotatingFileHandler(
47
+ LOG_FILE, maxBytes=10 * 1024 * 1024, backupCount=3, encoding="utf-8"
48
+ )
49
+ file_handler.setLevel(logging.DEBUG)
50
+ file_formatter = logging.Formatter(
51
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
52
+ )
53
+ file_handler.setFormatter(file_formatter)
51
54
 
52
- # Adaptive configurations - using common models
53
- if env_info["os"] == "Darwin": # macOS
54
- ollama_host = "http://localhost:11434"
55
- model = "qwen3:1.7b" # Lightweight for macOS
56
- elif env_info["os"] == "Linux":
57
- ollama_host = "http://localhost:11434"
58
- model = "qwen3:30b" # More power on Linux
59
- else: # Windows
60
- ollama_host = "http://localhost:11434"
61
- model = "qwen3:8b" # Conservative for Windows
55
+ console_handler = logging.StreamHandler()
56
+ console_handler.setLevel(logging.WARNING)
57
+ console_formatter = logging.Formatter("🦆 %(levelname)s: %(message)s")
58
+ console_handler.setFormatter(console_formatter)
62
59
 
63
- return env_info, ollama_host, model
60
+ logger.addHandler(file_handler)
61
+ logger.addHandler(console_handler)
64
62
 
63
+ logger.info("DevDuck logging system initialized")
65
64
 
66
- # 🔍 Self-awareness: Read own source code
67
- def get_own_source_code():
68
- """
69
- Read and return the source code of this agent file.
70
65
 
71
- Returns:
72
- str: The complete source code for self-awareness
73
- """
66
+ def get_own_source_code():
67
+ """Read own source code for self-awareness"""
74
68
  try:
75
- # Read this file (__init__.py)
76
- current_file = __file__
77
- with open(current_file, "r", encoding="utf-8") as f:
78
- init_code = f.read()
79
- return f"# devduck/__init__.py\n```python\n{init_code}\n```"
69
+ with open(__file__, "r", encoding="utf-8") as f:
70
+ return f"# Source path: {__file__}\n\ndevduck/__init__.py\n```python\n{f.read()}\n```"
80
71
  except Exception as e:
81
- return f"Error reading own source code: {e}"
72
+ return f"Error reading source: {e}"
82
73
 
83
74
 
84
- # 🛠️ System prompt tool (with .prompt file persistence)
85
- def system_prompt_tool(
86
- action: str,
87
- prompt: str | None = None,
88
- context: str | None = None,
89
- variable_name: str = "SYSTEM_PROMPT",
75
+ def view_logs_tool(
76
+ action: str = "view",
77
+ lines: int = 100,
78
+ pattern: str = None,
90
79
  ) -> Dict[str, Any]:
91
80
  """
92
- Manage the agent's system prompt dynamically with file persistence.
81
+ View and manage DevDuck logs.
93
82
 
94
83
  Args:
95
- action: "view", "update", "add_context", or "reset"
96
- prompt: New system prompt text (required for "update")
97
- context: Additional context to prepend (for "add_context")
98
- variable_name: Environment variable name (default: SYSTEM_PROMPT)
84
+ action: Action to perform - "view", "tail", "search", "clear", "stats"
85
+ lines: Number of lines to show (for view/tail)
86
+ pattern: Search pattern (for search action)
99
87
 
100
88
  Returns:
101
89
  Dict with status and content
102
90
  """
103
- from pathlib import Path
104
- import tempfile
105
-
106
- def _get_prompt_file_path() -> Path:
107
- """Get the .prompt file path in temp directory."""
108
- temp_dir = Path(tempfile.gettempdir()) / ".devduck"
109
- temp_dir.mkdir(exist_ok=True, mode=0o700) # Create with restrictive permissions
110
- return temp_dir / ".prompt"
111
-
112
- def _write_prompt_file(prompt_text: str) -> None:
113
- """Write prompt to .prompt file in temp directory."""
114
- prompt_file = _get_prompt_file_path()
115
- try:
116
- # Create file with restrictive permissions
117
- with open(
118
- prompt_file,
119
- "w",
120
- encoding="utf-8",
121
- opener=lambda path, flags: os.open(path, flags, 0o600),
122
- ) as f:
123
- f.write(prompt_text)
124
- except (OSError, PermissionError):
125
- try:
126
- prompt_file.write_text(prompt_text, encoding="utf-8")
127
- prompt_file.chmod(0o600)
128
- except (OSError, PermissionError):
129
- prompt_file.write_text(prompt_text, encoding="utf-8")
130
-
131
- def _get_system_prompt(var_name: str) -> str:
132
- """Get current system prompt from environment variable."""
133
- return os.environ.get(var_name, "")
134
-
135
- def _update_system_prompt(new_prompt: str, var_name: str) -> None:
136
- """Update system prompt in both environment and .prompt file."""
137
- os.environ[var_name] = new_prompt
138
- if var_name == "SYSTEM_PROMPT":
139
- _write_prompt_file(new_prompt)
140
-
141
91
  try:
142
92
  if action == "view":
143
- current = _get_system_prompt(variable_name)
93
+ if not LOG_FILE.exists():
94
+ return {"status": "success", "content": [{"text": "No logs yet"}]}
95
+
96
+ with open(LOG_FILE, "r", encoding="utf-8") as f:
97
+ all_lines = f.readlines()
98
+ recent_lines = (
99
+ all_lines[-lines:] if len(all_lines) > lines else all_lines
100
+ )
101
+ content = "".join(recent_lines)
102
+
144
103
  return {
145
104
  "status": "success",
146
105
  "content": [
147
- {"text": f"Current system prompt from {variable_name}:{current}"}
106
+ {"text": f"Last {len(recent_lines)} log lines:\n\n{content}"}
148
107
  ],
149
108
  }
150
109
 
151
- elif action == "update":
152
- if not prompt:
110
+ elif action == "tail":
111
+ if not LOG_FILE.exists():
112
+ return {"status": "success", "content": [{"text": "No logs yet"}]}
113
+
114
+ with open(LOG_FILE, "r", encoding="utf-8") as f:
115
+ all_lines = f.readlines()
116
+ tail_lines = all_lines[-50:] if len(all_lines) > 50 else all_lines
117
+ content = "".join(tail_lines)
118
+
119
+ return {
120
+ "status": "success",
121
+ "content": [{"text": f"Tail (last 50 lines):\n\n{content}"}],
122
+ }
123
+
124
+ elif action == "search":
125
+ if not pattern:
153
126
  return {
154
127
  "status": "error",
155
- "content": [
156
- {"text": "Error: prompt parameter required for update action"}
157
- ],
128
+ "content": [{"text": "pattern parameter required for search"}],
158
129
  }
159
130
 
160
- _update_system_prompt(prompt, variable_name)
131
+ if not LOG_FILE.exists():
132
+ return {"status": "success", "content": [{"text": "No logs yet"}]}
161
133
 
162
- if variable_name == "SYSTEM_PROMPT":
163
- message = f"System prompt updated (env: {variable_name}, file: .prompt)"
164
- else:
165
- message = f"System prompt updated (env: {variable_name})"
134
+ with open(LOG_FILE, "r", encoding="utf-8") as f:
135
+ matching_lines = [line for line in f if pattern.lower() in line.lower()]
136
+
137
+ if not matching_lines:
138
+ return {
139
+ "status": "success",
140
+ "content": [{"text": f"No matches found for pattern: {pattern}"}],
141
+ }
142
+
143
+ content = "".join(matching_lines[-100:]) # Last 100 matches
144
+ return {
145
+ "status": "success",
146
+ "content": [
147
+ {
148
+ "text": f"Found {len(matching_lines)} matches (showing last 100):\n\n{content}"
149
+ }
150
+ ],
151
+ }
152
+
153
+ elif action == "clear":
154
+ if LOG_FILE.exists():
155
+ LOG_FILE.unlink()
156
+ logger.info("Log file cleared by user")
157
+ return {
158
+ "status": "success",
159
+ "content": [{"text": "Logs cleared successfully"}],
160
+ }
161
+
162
+ elif action == "stats":
163
+ if not LOG_FILE.exists():
164
+ return {"status": "success", "content": [{"text": "No logs yet"}]}
165
+
166
+ stat = LOG_FILE.stat()
167
+ size_mb = stat.st_size / (1024 * 1024)
168
+ modified = datetime.fromtimestamp(stat.st_mtime).strftime(
169
+ "%Y-%m-%d %H:%M:%S"
170
+ )
171
+
172
+ with open(LOG_FILE, "r", encoding="utf-8") as f:
173
+ total_lines = sum(1 for _ in f)
174
+
175
+ stats_text = f"""Log File Statistics:
176
+ Path: {LOG_FILE}
177
+ Size: {size_mb:.2f} MB
178
+ Lines: {total_lines}
179
+ Last Modified: {modified}"""
180
+
181
+ return {"status": "success", "content": [{"text": stats_text}]}
182
+
183
+ else:
184
+ return {
185
+ "status": "error",
186
+ "content": [
187
+ {
188
+ "text": f"Unknown action: {action}. Valid: view, tail, search, clear, stats"
189
+ }
190
+ ],
191
+ }
192
+
193
+ except Exception as e:
194
+ logger.error(f"Error in view_logs_tool: {e}")
195
+ return {"status": "error", "content": [{"text": f"Error: {str(e)}"}]}
166
196
 
167
- return {"status": "success", "content": [{"text": message}]}
168
197
 
169
- elif action == "add_context":
170
- if not context:
198
+ def manage_tools_func(
199
+ action: str,
200
+ package: str = None,
201
+ tool_names: str = None,
202
+ tool_path: str = None,
203
+ ) -> Dict[str, Any]:
204
+ """Manage the agent's tool set at runtime - add, remove, list, reload tools on the fly."""
205
+ try:
206
+ if not hasattr(devduck, "agent") or not devduck.agent:
207
+ return {"status": "error", "content": [{"text": "Agent not initialized"}]}
208
+
209
+ registry = devduck.agent.tool_registry
210
+
211
+ if action == "list":
212
+ # List tools from registry
213
+ tool_list = list(registry.registry.keys())
214
+ dynamic_tools = list(registry.dynamic_tools.keys())
215
+
216
+ text = f"Currently loaded {len(tool_list)} tools:\n"
217
+ text += "\n".join(f" • {t}" for t in sorted(tool_list))
218
+ if dynamic_tools:
219
+ text += f"\n\nDynamic tools ({len(dynamic_tools)}):\n"
220
+ text += "\n".join(f" • {t}" for t in sorted(dynamic_tools))
221
+
222
+ return {"status": "success", "content": [{"text": text}]}
223
+
224
+ elif action == "add":
225
+ if not package and not tool_path:
171
226
  return {
172
227
  "status": "error",
173
228
  "content": [
174
229
  {
175
- "text": "Error: context parameter required for add_context action"
230
+ "text": "Either 'package' or 'tool_path' required for add action"
176
231
  }
177
232
  ],
178
233
  }
179
234
 
180
- current = _get_system_prompt(variable_name)
181
- new_prompt = f"{current} {context}" if current else context
182
- _update_system_prompt(new_prompt, variable_name)
235
+ added_tools = []
183
236
 
184
- if variable_name == "SYSTEM_PROMPT":
185
- message = f"Context added to system prompt (env: {variable_name}, file: .prompt)"
186
- else:
187
- message = f"Context added to system prompt (env: {variable_name})"
237
+ # Add from package using process_tools
238
+ if package:
239
+ if not tool_names:
240
+ return {
241
+ "status": "error",
242
+ "content": [
243
+ {"text": "'tool_names' required when adding from package"}
244
+ ],
245
+ }
188
246
 
189
- return {"status": "success", "content": [{"text": message}]}
247
+ tools_to_add = [t.strip() for t in tool_names.split(",")]
190
248
 
191
- elif action == "reset":
192
- os.environ.pop(variable_name, None)
249
+ # Build tool specs: package.tool_name format
250
+ tool_specs = [f"{package}.{tool_name}" for tool_name in tools_to_add]
193
251
 
194
- if variable_name == "SYSTEM_PROMPT":
195
- prompt_file = _get_prompt_file_path()
196
- if prompt_file.exists():
197
- try:
198
- prompt_file.unlink()
199
- except (OSError, PermissionError):
200
- pass
201
- message = (
202
- f"System prompt reset (env: {variable_name}, file: .prompt cleared)"
203
- )
252
+ try:
253
+ added_tool_names = registry.process_tools(tool_specs)
254
+ added_tools.extend(added_tool_names)
255
+ logger.info(f"Added tools from {package}: {added_tool_names}")
256
+ except Exception as e:
257
+ logger.error(f"Failed to add tools from {package}: {e}")
258
+ return {
259
+ "status": "error",
260
+ "content": [{"text": f"Failed to add tools: {str(e)}"}],
261
+ }
262
+
263
+ # Add from file path using process_tools
264
+ if tool_path:
265
+ try:
266
+ added_tool_names = registry.process_tools([tool_path])
267
+ added_tools.extend(added_tool_names)
268
+ logger.info(f"Added tools from file: {added_tool_names}")
269
+ except Exception as e:
270
+ logger.error(f"Failed to add tool from {tool_path}: {e}")
271
+ return {
272
+ "status": "error",
273
+ "content": [{"text": f"Failed to add tool: {str(e)}"}],
274
+ }
275
+
276
+ if added_tools:
277
+ return {
278
+ "status": "success",
279
+ "content": [
280
+ {
281
+ "text": f"✅ Added {len(added_tools)} tools: {', '.join(added_tools)}\n"
282
+ + f"Total tools: {len(registry.registry)}"
283
+ }
284
+ ],
285
+ }
204
286
  else:
205
- message = f"System prompt reset (env: {variable_name})"
287
+ return {"status": "error", "content": [{"text": "No tools were added"}]}
206
288
 
207
- return {"status": "success", "content": [{"text": message}]}
289
+ elif action == "remove":
290
+ if not tool_names:
291
+ return {
292
+ "status": "error",
293
+ "content": [{"text": "'tool_names' required for remove action"}],
294
+ }
208
295
 
209
- elif action == "get":
210
- # Backward compatibility
211
- current = _get_system_prompt(variable_name)
212
- return {
213
- "status": "success",
214
- "content": [{"text": f"System prompt: {current}"}],
215
- }
296
+ tools_to_remove = [t.strip() for t in tool_names.split(",")]
297
+ removed_tools = []
216
298
 
217
- elif action == "set":
218
- # Backward compatibility
219
- if prompt is None:
220
- return {"status": "error", "content": [{"text": "No prompt provided"}]}
299
+ # Remove from registry
300
+ for tool_name in tools_to_remove:
301
+ if tool_name in registry.registry:
302
+ del registry.registry[tool_name]
303
+ removed_tools.append(tool_name)
304
+ logger.info(f"Removed tool: {tool_name}")
221
305
 
222
- if context:
223
- prompt = f"{context} {prompt}"
306
+ if tool_name in registry.dynamic_tools:
307
+ del registry.dynamic_tools[tool_name]
308
+ logger.info(f"Removed dynamic tool: {tool_name}")
224
309
 
225
- _update_system_prompt(prompt, variable_name)
226
- return {
227
- "status": "success",
228
- "content": [{"text": "System prompt updated successfully"}],
229
- }
310
+ if removed_tools:
311
+ return {
312
+ "status": "success",
313
+ "content": [
314
+ {
315
+ "text": f"✅ Removed {len(removed_tools)} tools: {', '.join(removed_tools)}\n"
316
+ + f"Total tools: {len(registry.registry)}"
317
+ }
318
+ ],
319
+ }
320
+ else:
321
+ return {
322
+ "status": "success",
323
+ "content": [{"text": "No tools were removed (not found)"}],
324
+ }
325
+
326
+ elif action == "reload":
327
+ if tool_names:
328
+ # Reload specific tools
329
+ tools_to_reload = [t.strip() for t in tool_names.split(",")]
330
+ reloaded_tools = []
331
+ failed_tools = []
332
+
333
+ for tool_name in tools_to_reload:
334
+ try:
335
+ registry.reload_tool(tool_name)
336
+ reloaded_tools.append(tool_name)
337
+ logger.info(f"Reloaded tool: {tool_name}")
338
+ except Exception as e:
339
+ failed_tools.append((tool_name, str(e)))
340
+ logger.error(f"Failed to reload {tool_name}: {e}")
341
+
342
+ text = ""
343
+ if reloaded_tools:
344
+ text += f"✅ Reloaded {len(reloaded_tools)} tools: {', '.join(reloaded_tools)}\n"
345
+ if failed_tools:
346
+ text += f"❌ Failed to reload {len(failed_tools)} tools:\n"
347
+ for tool_name, error in failed_tools:
348
+ text += f" • {tool_name}: {error}\n"
349
+
350
+ return {"status": "success", "content": [{"text": text}]}
351
+ else:
352
+ # Reload all tools - restart agent
353
+ logger.info("Reloading all tools via restart")
354
+ devduck.restart()
355
+ return {
356
+ "status": "success",
357
+ "content": [{"text": "✅ All tools reloaded - agent restarted"}],
358
+ }
230
359
 
231
360
  else:
232
361
  return {
233
362
  "status": "error",
234
363
  "content": [
235
364
  {
236
- "text": f"Unknown action '{action}'. Valid: view, update, add_context, reset"
365
+ "text": f"Unknown action: {action}. Valid: list, add, remove, reload"
237
366
  }
238
367
  ],
239
368
  }
240
369
 
241
370
  except Exception as e:
371
+ logger.error(f"Error in manage_tools: {e}")
242
372
  return {"status": "error", "content": [{"text": f"Error: {str(e)}"}]}
243
373
 
244
374
 
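
Before the next hunk: the two module-level helpers added above, view_logs_tool and manage_tools_func, follow the same {"status": ..., "content": [{"text": ...}]} return shape used throughout the file. A minimal usage sketch, assuming both stay importable from the package root; manage_tools_func also needs the module-level devduck agent to be initialized, so it is only shown commented out:

```python
# Sketch only -- exercises the log helper introduced in the hunk above.
from devduck import view_logs_tool  # assumes the helper stays module-level

print(view_logs_tool(action="stats"))                    # size, line count, last modified
print(view_logs_tool(action="search", pattern="error"))  # case-insensitive match
print(view_logs_tool(action="tail"))                     # last 50 lines

# manage_tools_func expects the module-level `devduck` singleton to exist:
# from devduck import manage_tools_func
# manage_tools_func(action="list")
# manage_tools_func(action="add", package="strands_tools", tool_names="shell,editor")
```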
@@ -253,22 +383,22 @@ def get_shell_history_file():
253
383
  def get_shell_history_files():
254
384
  """Get available shell history file paths."""
255
385
  history_files = []
256
-
386
+
257
387
  # devduck history (primary)
258
388
  devduck_history = Path(get_shell_history_file())
259
389
  if devduck_history.exists():
260
390
  history_files.append(("devduck", str(devduck_history)))
261
-
391
+
262
392
  # Bash history
263
393
  bash_history = Path.home() / ".bash_history"
264
394
  if bash_history.exists():
265
395
  history_files.append(("bash", str(bash_history)))
266
-
396
+
267
397
  # Zsh history
268
398
  zsh_history = Path.home() / ".zsh_history"
269
399
  if zsh_history.exists():
270
400
  history_files.append(("zsh", str(zsh_history)))
271
-
401
+
272
402
  return history_files
273
403
 
274
404
 
@@ -277,14 +407,16 @@ def parse_history_line(line, history_type):
277
407
  line = line.strip()
278
408
  if not line:
279
409
  return None
280
-
410
+
281
411
  if history_type == "devduck":
282
412
  # devduck format: ": timestamp:0;# devduck: query" or ": timestamp:0;# devduck_result: result"
283
413
  if "# devduck:" in line:
284
414
  try:
285
415
  timestamp_str = line.split(":")[1]
286
416
  timestamp = int(timestamp_str)
287
- readable_time = datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
417
+ readable_time = datetime.fromtimestamp(timestamp).strftime(
418
+ "%Y-%m-%d %H:%M:%S"
419
+ )
288
420
  query = line.split("# devduck:")[-1].strip()
289
421
  return ("you", readable_time, query)
290
422
  except (ValueError, IndexError):
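
For context on the history format parsed above: entries use zsh extended-history syntax with a devduck marker comment. A hedged example of one entry and the tuple parse_history_line returns for it (the timestamp is arbitrary and the rendered time depends on the local timezone):

```python
# Illustrative only -- the line format comes from the comment in the hunk above.
from devduck import parse_history_line  # assumes the helper stays module-level

line = ": 1766644714:0;# devduck: summarize today's logs"
print(parse_history_line(line, "devduck"))
# -> ("you", "<local date time>", "summarize today's logs")
```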
@@ -293,12 +425,14 @@ def parse_history_line(line, history_type):
293
425
  try:
294
426
  timestamp_str = line.split(":")[1]
295
427
  timestamp = int(timestamp_str)
296
- readable_time = datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
428
+ readable_time = datetime.fromtimestamp(timestamp).strftime(
429
+ "%Y-%m-%d %H:%M:%S"
430
+ )
297
431
  result = line.split("# devduck_result:")[-1].strip()
298
432
  return ("me", readable_time, result)
299
433
  except (ValueError, IndexError):
300
434
  return None
301
-
435
+
302
436
  elif history_type == "zsh":
303
437
  if line.startswith(": ") and ":0;" in line:
304
438
  try:
@@ -306,37 +440,65 @@ def parse_history_line(line, history_type):
306
440
  if len(parts) == 2:
307
441
  timestamp_str = parts[0].split(":")[1]
308
442
  timestamp = int(timestamp_str)
309
- readable_time = datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")
443
+ readable_time = datetime.fromtimestamp(timestamp).strftime(
444
+ "%Y-%m-%d %H:%M:%S"
445
+ )
310
446
  command = parts[1].strip()
311
447
  if not command.startswith("devduck "):
312
448
  return ("shell", readable_time, f"$ {command}")
313
449
  except (ValueError, IndexError):
314
450
  return None
315
-
451
+
316
452
  elif history_type == "bash":
317
453
  readable_time = "recent"
318
454
  if not line.startswith("devduck "):
319
455
  return ("shell", readable_time, f"$ {line}")
320
-
456
+
321
457
  return None
322
458
 
323
459
 
460
+ def get_recent_logs():
461
+ """Get the last N lines from the log file for context."""
462
+ try:
463
+ log_line_count = int(os.getenv("DEVDUCK_LOG_LINE_COUNT", "50"))
464
+
465
+ if not LOG_FILE.exists():
466
+ return ""
467
+
468
+ with open(LOG_FILE, "r", encoding="utf-8", errors="ignore") as f:
469
+ all_lines = f.readlines()
470
+
471
+ recent_lines = (
472
+ all_lines[-log_line_count:]
473
+ if len(all_lines) > log_line_count
474
+ else all_lines
475
+ )
476
+
477
+ if not recent_lines:
478
+ return ""
479
+
480
+ log_content = "".join(recent_lines)
481
+ return f"\n\n## Recent Logs (last {len(recent_lines)} lines):\n```\n{log_content}```\n"
482
+ except Exception as e:
483
+ return f"\n\n## Recent Logs: Error reading logs - {e}\n"
484
+
485
+
324
486
  def get_last_messages():
325
487
  """Get the last N messages from multiple shell histories for context."""
326
488
  try:
327
489
  message_count = int(os.getenv("DEVDUCK_LAST_MESSAGE_COUNT", "200"))
328
490
  all_entries = []
329
-
491
+
330
492
  history_files = get_shell_history_files()
331
-
493
+
332
494
  for history_type, history_file in history_files:
333
495
  try:
334
496
  with open(history_file, encoding="utf-8", errors="ignore") as f:
335
497
  lines = f.readlines()
336
-
498
+
337
499
  if history_type == "bash":
338
500
  lines = lines[-message_count:]
339
-
501
+
340
502
  # Join multi-line entries for zsh
341
503
  if history_type == "zsh":
342
504
  joined_lines = []
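
The get_recent_logs() helper added in the hunk above is controlled by a single environment variable; a small sketch of the knob it reads (the value is an example, the default is 50):

```python
# Sketch only -- get_recent_logs() reads DEVDUCK_LOG_LINE_COUNT (default "50").
import os
from devduck import get_recent_logs  # assumes the helper stays module-level

os.environ["DEVDUCK_LOG_LINE_COUNT"] = "20"
print(get_recent_logs())  # "## Recent Logs (last N lines): ..." or "" if no log file exists
```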
@@ -355,22 +517,26 @@ def get_last_messages():
355
517
  if current_line:
356
518
  joined_lines.append(current_line)
357
519
  lines = joined_lines
358
-
520
+
359
521
  for line in lines:
360
522
  parsed = parse_history_line(line, history_type)
361
523
  if parsed:
362
524
  all_entries.append(parsed)
363
525
  except Exception:
364
526
  continue
365
-
366
- recent_entries = all_entries[-message_count:] if len(all_entries) >= message_count else all_entries
367
-
527
+
528
+ recent_entries = (
529
+ all_entries[-message_count:]
530
+ if len(all_entries) >= message_count
531
+ else all_entries
532
+ )
533
+
368
534
  context = ""
369
535
  if recent_entries:
370
536
  context += f"\n\nRecent conversation context (last {len(recent_entries)} messages):\n"
371
537
  for speaker, timestamp, content in recent_entries:
372
538
  context += f"[{timestamp}] {speaker}: {content}\n"
373
-
539
+
374
540
  return context
375
541
  except Exception:
376
542
  return ""
@@ -378,16 +544,20 @@ def get_last_messages():
378
544
 
379
545
  def append_to_shell_history(query, response):
380
546
  """Append the interaction to devduck shell history."""
381
- import time
382
547
  try:
383
548
  history_file = get_shell_history_file()
384
549
  timestamp = str(int(time.time()))
385
-
550
+
386
551
  with open(history_file, "a", encoding="utf-8") as f:
387
552
  f.write(f": {timestamp}:0;# devduck: {query}\n")
388
- response_summary = str(response).replace("\n", " ")[:int(os.getenv("DEVDUCK_RESPONSE_SUMMARY_LENGTH", "10000"))] + "..."
553
+ response_summary = (
554
+ str(response).replace("\n", " ")[
555
+ : int(os.getenv("DEVDUCK_RESPONSE_SUMMARY_LENGTH", "10000"))
556
+ ]
557
+ + "..."
558
+ )
389
559
  f.write(f": {timestamp}:0;# devduck_result: {response_summary}\n")
390
-
560
+
391
561
  os.chmod(history_file, 0o600)
392
562
  except Exception:
393
563
  pass
@@ -395,99 +565,534 @@ def append_to_shell_history(query, response):
395
565
 
396
566
  # 🦆 The devduck agent
397
567
  class DevDuck:
398
- def __init__(self):
399
- """Initialize the minimalist adaptive agent"""
568
+ def __init__(
569
+ self,
570
+ auto_start_servers=True,
571
+ servers=None,
572
+ load_mcp_servers=True,
573
+ ):
574
+ """Initialize the minimalist adaptive agent
575
+
576
+ Args:
577
+ auto_start_servers: Enable automatic server startup
578
+ servers: Dict of server configs with optional env var lookups
579
+ Example: {
580
+ "tcp": {"port": 9999},
581
+ "ws": {"port": 8080, "LOOKUP_KEY": "SLACK_API_KEY"},
582
+ "mcp": {"port": 8000},
583
+ "ipc": {"socket_path": "/tmp/devduck.sock"}
584
+ }
585
+ load_mcp_servers: Load MCP servers from MCP_SERVERS env var
586
+ """
587
+ logger.info("Initializing DevDuck agent...")
400
588
  try:
401
- # Self-heal dependencies
402
- ensure_deps()
589
+ self.env_info = {
590
+ "os": platform.system(),
591
+ "arch": platform.machine(),
592
+ "python": sys.version_info,
593
+ "cwd": str(Path.cwd()),
594
+ "home": str(Path.home()),
595
+ "shell": os.environ.get("SHELL", "unknown"),
596
+ "hostname": socket.gethostname(),
597
+ }
403
598
 
404
- # Adapt to environment
405
- self.env_info, self.ollama_host, self.model = adapt_to_env()
599
+ # Execution state tracking for hot-reload
600
+ self._agent_executing = False
601
+ self._reload_pending = False
602
+
603
+ # Server configuration
604
+ if servers is None:
605
+ # Default server config from env vars
606
+ servers = {
607
+ "tcp": {
608
+ "port": int(os.getenv("DEVDUCK_TCP_PORT", "9999")),
609
+ "enabled": os.getenv("DEVDUCK_ENABLE_TCP", "false").lower()
610
+ == "true",
611
+ },
612
+ "ws": {
613
+ "port": int(os.getenv("DEVDUCK_WS_PORT", "8080")),
614
+ "enabled": os.getenv("DEVDUCK_ENABLE_WS", "true").lower()
615
+ == "true",
616
+ },
617
+ "mcp": {
618
+ "port": int(os.getenv("DEVDUCK_MCP_PORT", "8000")),
619
+ "enabled": os.getenv("DEVDUCK_ENABLE_MCP", "false").lower()
620
+ == "true",
621
+ },
622
+ "ipc": {
623
+ "socket_path": os.getenv(
624
+ "DEVDUCK_IPC_SOCKET", "/tmp/devduck_main.sock"
625
+ ),
626
+ "enabled": os.getenv("DEVDUCK_ENABLE_IPC", "false").lower()
627
+ == "true",
628
+ },
629
+ }
406
630
 
407
- # Import after ensuring deps
408
- from strands import Agent, tool
409
- from strands.models.ollama import OllamaModel
410
- from strands.session.file_session_manager import FileSessionManager
411
- from strands_tools.utils.models.model import create_model
412
- from .tools import tcp
413
- from strands_tools import (
414
- shell,
415
- editor,
416
- file_read,
417
- file_write,
418
- python_repl,
419
- current_time,
420
- calculator,
421
- journal,
422
- image_reader,
423
- use_agent,
424
- load_tool,
425
- environment,
631
+ # Show server configuration status
632
+ enabled_servers = []
633
+ disabled_servers = []
634
+ for server_name, config in servers.items():
635
+ if config.get("enabled", False):
636
+ if "port" in config:
637
+ enabled_servers.append(
638
+ f"{server_name.upper()}:{config['port']}"
639
+ )
640
+ else:
641
+ enabled_servers.append(server_name.upper())
642
+ else:
643
+ disabled_servers.append(server_name.upper())
644
+
645
+ logger.debug(
646
+ f"🦆 Server config: {', '.join(enabled_servers) if enabled_servers else 'none enabled'}"
426
647
  )
648
+ if disabled_servers:
649
+ logger.debug(f"🦆 Disabled: {', '.join(disabled_servers)}")
650
+
651
+ self.servers = servers
652
+
653
+ # Load tools with flexible configuration
654
+ # Default tool config
655
+ # Agent can load additional tools on-demand via fetch_github_tool
656
+
657
+ # 🔧 Available DevDuck Tools (load on-demand):
658
+ # - system_prompt: https://github.com/cagataycali/devduck/blob/main/devduck/tools/system_prompt.py
659
+ # - store_in_kb: https://github.com/cagataycali/devduck/blob/main/devduck/tools/store_in_kb.py
660
+ # - ipc: https://github.com/cagataycali/devduck/blob/main/devduck/tools/ipc.py
661
+ # - tcp: https://github.com/cagataycali/devduck/blob/main/devduck/tools/tcp.py
662
+ # - websocket: https://github.com/cagataycali/devduck/blob/main/devduck/tools/websocket.py
663
+ # - mcp_server: https://github.com/cagataycali/devduck/blob/main/devduck/tools/mcp_server.py
664
+ # - scraper: https://github.com/cagataycali/devduck/blob/main/devduck/tools/scraper.py
665
+ # - tray: https://github.com/cagataycali/devduck/blob/main/devduck/tools/tray.py
666
+ # - ambient: https://github.com/cagataycali/devduck/blob/main/devduck/tools/ambient.py
667
+ # - agentcore_config: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_config.py
668
+ # - agentcore_invoke: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_invoke.py
669
+ # - agentcore_logs: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_logs.py
670
+ # - agentcore_agents: https://github.com/cagataycali/devduck/blob/main/devduck/tools/agentcore_agents.py
671
+ # - create_subagent: https://github.com/cagataycali/devduck/blob/main/devduck/tools/create_subagent.py
672
+ # - use_github: https://github.com/cagataycali/devduck/blob/main/devduck/tools/use_github.py
673
+ # - speech_to_speech: https://github.com/cagataycali/devduck/blob/main/devduck/tools/speech_to_speech.py
674
+ # - state_manager: https://github.com/cagataycali/devduck/blob/main/devduck/tools/state_manager.py
675
+
676
+ # 📦 Strands Tools
677
+ # - editor, file_read, file_write, image_reader, load_tool, retrieve
678
+ # - calculator, use_agent, environment, mcp_client, speak, slack
679
+
680
+ # 🎮 Strands Fun Tools
681
+ # - listen, cursor, clipboard, screen_reader, bluetooth, yolo_vision
682
+
683
+ # 🔍 Strands Google
684
+ # - use_google, google_auth
685
+
686
+ # 🔧 Auto-append server tools based on enabled servers
687
+ server_tools_needed = []
688
+ if servers.get("tcp", {}).get("enabled", False):
689
+ server_tools_needed.append("tcp")
690
+ if servers.get("ws", {}).get("enabled", True):
691
+ server_tools_needed.append("websocket")
692
+ if servers.get("mcp", {}).get("enabled", False):
693
+ server_tools_needed.append("mcp_server")
694
+ if servers.get("ipc", {}).get("enabled", False):
695
+ server_tools_needed.append("ipc")
696
+
697
+ # Append to default tools if any server tools are needed
698
+ if server_tools_needed:
699
+ server_tools_str = ",".join(server_tools_needed)
700
+ default_tools = f"devduck.tools:system_prompt,fetch_github_tool,{server_tools_str};strands_tools:shell"
701
+ logger.info(f"Auto-added server tools: {server_tools_str}")
702
+ else:
703
+ default_tools = (
704
+ "devduck.tools:system_prompt,fetch_github_tool;strands_tools:shell"
705
+ )
706
+
707
+ tools_config = os.getenv("DEVDUCK_TOOLS", default_tools)
708
+ logger.info(f"Loading tools from config: {tools_config}")
709
+ core_tools = self._load_tools_from_config(tools_config)
427
710
 
428
- # Wrap system_prompt_tool with @tool decorator
711
+ # Wrap view_logs_tool with @tool decorator
429
712
  @tool
430
- def system_prompt(
713
+ def view_logs(
714
+ action: str = "view",
715
+ lines: int = 100,
716
+ pattern: str = None,
717
+ ) -> Dict[str, Any]:
718
+ """View and manage DevDuck logs."""
719
+ return view_logs_tool(action, lines, pattern)
720
+
721
+ # Wrap manage_tools_func with @tool decorator
722
+ @tool
723
+ def manage_tools(
431
724
  action: str,
432
- prompt: str = None,
433
- context: str = None,
434
- variable_name: str = "SYSTEM_PROMPT",
725
+ package: str = None,
726
+ tool_names: str = None,
727
+ tool_path: str = None,
435
728
  ) -> Dict[str, Any]:
436
- """Manage agent system prompt dynamically."""
437
- return system_prompt_tool(action, prompt, context, variable_name)
438
-
439
- # Minimal but functional toolset including system_prompt and hello
440
- self.tools = [
441
- shell,
442
- editor,
443
- file_read,
444
- file_write,
445
- python_repl,
446
- current_time,
447
- calculator,
448
- journal,
449
- image_reader,
450
- use_agent,
451
- load_tool,
452
- environment,
453
- system_prompt,
454
- tcp
455
- ]
456
-
457
- # Check if MODEL_PROVIDER env variable is set
458
- model_provider = os.getenv("MODEL_PROVIDER")
459
-
460
- if model_provider:
461
- # Use create_model utility for any provider (bedrock, anthropic, etc.)
462
- self.agent_model = create_model(provider=model_provider)
463
- else:
464
- # Fallback to default Ollama behavior
465
- self.agent_model = OllamaModel(
466
- host=self.ollama_host,
467
- model_id=self.model,
468
- temperature=1,
469
- keep_alive="5m",
470
- )
729
+ """
730
+ Manage the agent's tool set at runtime using ToolRegistry.
471
731
 
472
- session_manager = FileSessionManager(
473
- session_id=f"devduck-{datetime.now().strftime('%Y-%m-%d')}"
474
- )
732
+ Args:
733
+ action: Action to perform - "list", "add", "remove", "reload"
734
+ package: Package name to load tools from (e.g., "strands_tools", "strands_fun_tools") or "devduck.tools:speech_to_speech,system_prompt,..."
735
+ tool_names: Comma-separated tool names (e.g., "shell,editor,calculator")
736
+ tool_path: Path to a .py file to load as a tool
737
+
738
+ Returns:
739
+ Dict with status and content
740
+ """
741
+ return manage_tools_func(action, package, tool_names, tool_path)
742
+
743
+ # Add built-in tools to the toolset
744
+ core_tools.extend([view_logs, manage_tools])
745
+
746
+ # Assign tools
747
+ self.tools = core_tools
748
+
749
+ # 🔌 Load MCP servers if enabled
750
+ if load_mcp_servers:
751
+ mcp_clients = self._load_mcp_servers()
752
+ if mcp_clients:
753
+ self.tools.extend(mcp_clients)
754
+ logger.info(f"Loaded {len(mcp_clients)} MCP server(s)")
755
+
756
+ logger.info(f"Initialized {len(self.tools)} tools")
757
+
758
+ # 🎯 Smart model selection
759
+ self.agent_model, self.model = self._select_model()
475
760
 
476
761
  # Create agent with self-healing
762
+ # load_tools_from_directory controlled by DEVDUCK_LOAD_TOOLS_FROM_DIR (default: true)
763
+ load_from_dir = (
764
+ os.getenv("DEVDUCK_LOAD_TOOLS_FROM_DIR", "true").lower() == "true"
765
+ )
766
+
477
767
  self.agent = Agent(
478
768
  model=self.agent_model,
479
769
  tools=self.tools,
480
770
  system_prompt=self._build_system_prompt(),
481
- load_tools_from_directory=True,
482
- # session_manager=session_manager,
771
+ load_tools_from_directory=load_from_dir,
772
+ trace_attributes={
773
+ "session.id": self.session_id,
774
+ "user.id": self.env_info["hostname"],
775
+ "tags": ["Strands-Agents", "DevDuck"],
776
+ },
483
777
  )
484
778
 
779
+ # 🚀 AUTO-START SERVERS
780
+ if auto_start_servers and "--mcp" not in sys.argv:
781
+ self._start_servers()
782
+
485
783
  # Start file watcher for auto hot-reload
486
784
  self._start_file_watcher()
487
785
 
786
+ logger.info(
787
+ f"DevDuck agent initialized successfully with model {self.model}"
788
+ )
789
+
488
790
  except Exception as e:
791
+ logger.error(f"Initialization failed: {e}")
489
792
  self._self_heal(e)
490
793
 
794
+ def _load_tools_from_config(self, config):
795
+ """
796
+ Load tools based on DEVDUCK_TOOLS configuration.
797
+
798
+ Format: package1:tool1,tool2;package2:tool3,tool4
799
+ Examples:
800
+ - strands_tools:shell,editor;strands_action:use_github
801
+ - strands_action:use_github;strands_tools:shell,use_aws
802
+
803
+ Note: Only loads what's specified in config - no automatic additions
804
+ """
805
+ tools = []
806
+
807
+ # Split by semicolon to get package groups
808
+ groups = config.split(";")
809
+
810
+ for group in groups:
811
+ group = group.strip()
812
+ if not group:
813
+ continue
814
+
815
+ # Split by colon to get package:tools
816
+ parts = group.split(":", 1)
817
+ if len(parts) != 2:
818
+ logger.warning(f"Invalid format: {group}")
819
+ continue
820
+
821
+ package = parts[0].strip()
822
+ tools_str = parts[1].strip()
823
+
824
+ # Parse tools (comma-separated)
825
+ tool_names = [t.strip() for t in tools_str.split(",") if t.strip()]
826
+
827
+ for tool_name in tool_names:
828
+ tool = self._load_single_tool(package, tool_name)
829
+ if tool:
830
+ tools.append(tool)
831
+
832
+ logger.info(f"Loaded {len(tools)} tools from configuration")
833
+ return tools
834
+
835
+ def _load_single_tool(self, package, tool_name):
836
+ """Load a single tool from a package"""
837
+ try:
838
+ module = __import__(package, fromlist=[tool_name])
839
+ tool = getattr(module, tool_name)
840
+ logger.debug(f"Loaded {tool_name} from {package}")
841
+ return tool
842
+ except Exception as e:
843
+ logger.warning(f"Failed to load {tool_name} from {package}: {e}")
844
+ return None
845
+
846
+ def _load_mcp_servers(self):
847
+ """
848
+ Load MCP servers from MCP_SERVERS environment variable using direct loading.
849
+
850
+ Uses the experimental managed integration - MCPClient instances are passed
851
+ directly to Agent constructor without explicit context management.
852
+
853
+ Format: JSON with "mcpServers" object
854
+ Example: MCP_SERVERS='{"mcpServers": {"strands": {"command": "uvx", "args": ["strands-agents-mcp-server"]}}}'
855
+
856
+ Returns:
857
+ List of MCPClient instances ready for direct use in Agent
858
+ """
859
+ mcp_servers_json = os.getenv("MCP_SERVERS")
860
+ if not mcp_servers_json:
861
+ logger.debug("No MCP_SERVERS environment variable found")
862
+ return []
863
+
864
+ try:
865
+ config = json.loads(mcp_servers_json)
866
+ mcp_servers_config = config.get("mcpServers", {})
867
+
868
+ if not mcp_servers_config:
869
+ logger.warning("MCP_SERVERS JSON has no 'mcpServers' key")
870
+ return []
871
+
872
+ mcp_clients = []
873
+
874
+ from strands.tools.mcp import MCPClient
875
+ from mcp import stdio_client, StdioServerParameters
876
+ from mcp.client.streamable_http import streamablehttp_client
877
+ from mcp.client.sse import sse_client
878
+
879
+ for server_name, server_config in mcp_servers_config.items():
880
+ try:
881
+ logger.info(f"Loading MCP server: {server_name}")
882
+
883
+ # Determine transport type and create appropriate callable
884
+ if "command" in server_config:
885
+ # stdio transport
886
+ command = server_config["command"]
887
+ args = server_config.get("args", [])
888
+ env = server_config.get("env", None)
889
+
890
+ transport_callable = (
891
+ lambda cmd=command, a=args, e=env: stdio_client(
892
+ StdioServerParameters(command=cmd, args=a, env=e)
893
+ )
894
+ )
895
+
896
+ elif "url" in server_config:
897
+ # Determine if SSE or streamable HTTP based on URL path
898
+ url = server_config["url"]
899
+ headers = server_config.get("headers", None)
900
+
901
+ if "/sse" in url:
902
+ # SSE transport
903
+ transport_callable = lambda u=url: sse_client(u)
904
+ else:
905
+ # Streamable HTTP transport (default for HTTP)
906
+ transport_callable = (
907
+ lambda u=url, h=headers: streamablehttp_client(
908
+ url=u, headers=h
909
+ )
910
+ )
911
+ else:
912
+ logger.warning(
913
+ f"MCP server {server_name} has no 'command' or 'url' - skipping"
914
+ )
915
+ continue
916
+
917
+ # Create MCPClient with direct loading (experimental managed integration)
918
+ # No need for context managers - Agent handles lifecycle
919
+ prefix = server_config.get("prefix", server_name)
920
+ mcp_client = MCPClient(
921
+ transport_callable=transport_callable, prefix=prefix
922
+ )
923
+
924
+ mcp_clients.append(mcp_client)
925
+ logger.info(
926
+ f"✓ MCP server '{server_name}' loaded (prefix: {prefix})"
927
+ )
928
+
929
+ except Exception as e:
930
+ logger.error(f"Failed to load MCP server '{server_name}': {e}")
931
+ continue
932
+
933
+ return mcp_clients
934
+
935
+ except json.JSONDecodeError as e:
936
+ logger.error(f"Invalid JSON in MCP_SERVERS: {e}")
937
+ return []
938
+ except Exception as e:
939
+ logger.error(f"Error loading MCP servers: {e}")
940
+ return []
941
+
942
+ def _select_model(self):
943
+ """
944
+ Smart model selection with fallback based on available credentials.
945
+
946
+ Priority: Bedrock → Anthropic → OpenAI → GitHub → Gemini → Cohere →
947
+ Writer → Mistral → LiteLLM → LlamaAPI → SageMaker →
948
+ LlamaCpp → MLX → Ollama
949
+
950
+ Returns:
951
+ Tuple of (model_instance, model_name)
952
+ """
953
+ provider = os.getenv("MODEL_PROVIDER")
954
+
955
+ # Read common model parameters from environment
956
+ max_tokens = int(os.getenv("STRANDS_MAX_TOKENS", "60000"))
957
+ temperature = float(os.getenv("STRANDS_TEMPERATURE", "1.0"))
958
+
959
+ if not provider:
960
+ # Auto-detect based on API keys and credentials
961
+ # 1. Try Bedrock (AWS bearer token or STS credentials)
962
+ try:
963
+ # Check for bearer token first
964
+ if os.getenv("AWS_BEARER_TOKEN_BEDROCK"):
965
+ provider = "bedrock"
966
+ print("🦆 Using Bedrock (bearer token)")
967
+ else:
968
+ # Try STS credentials
969
+ import boto3
970
+
971
+ boto3.client("sts").get_caller_identity()
972
+ provider = "bedrock"
973
+ print("🦆 Using Bedrock")
974
+ except:
975
+ # 2. Try Anthropic
976
+ if os.getenv("ANTHROPIC_API_KEY"):
977
+ provider = "anthropic"
978
+ print("🦆 Using Anthropic")
979
+ # 3. Try OpenAI
980
+ elif os.getenv("OPENAI_API_KEY"):
981
+ provider = "openai"
982
+ print("🦆 Using OpenAI")
983
+ # 4. Try GitHub Models
984
+ elif os.getenv("GITHUB_TOKEN") or os.getenv("PAT_TOKEN"):
985
+ provider = "github"
986
+ print("🦆 Using GitHub Models")
987
+ # 5. Try Gemini
988
+ elif os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY"):
989
+ provider = "gemini"
990
+ print("🦆 Using Gemini")
991
+ # 6. Try Cohere
992
+ elif os.getenv("COHERE_API_KEY"):
993
+ provider = "cohere"
994
+ print("🦆 Using Cohere")
995
+ # 7. Try Writer
996
+ elif os.getenv("WRITER_API_KEY"):
997
+ provider = "writer"
998
+ print("🦆 Using Writer")
999
+ # 8. Try Mistral
1000
+ elif os.getenv("MISTRAL_API_KEY"):
1001
+ provider = "mistral"
1002
+ print("🦆 Using Mistral")
1003
+ # 9. Try LiteLLM
1004
+ elif os.getenv("LITELLM_API_KEY"):
1005
+ provider = "litellm"
1006
+ print("🦆 Using LiteLLM")
1007
+ # 10. Try LlamaAPI
1008
+ elif os.getenv("LLAMAAPI_API_KEY"):
1009
+ provider = "llamaapi"
1010
+ print("🦆 Using LlamaAPI")
1011
+ # 11. Try SageMaker
1012
+ elif os.getenv("SAGEMAKER_ENDPOINT_NAME"):
1013
+ provider = "sagemaker"
1014
+ print("🦆 Using SageMaker")
1015
+ # 12. Try LlamaCpp
1016
+ elif os.getenv("LLAMACPP_MODEL_PATH"):
1017
+ provider = "llamacpp"
1018
+ print("🦆 Using LlamaCpp")
1019
+ # 13. Try MLX on Apple Silicon
1020
+ elif platform.system() == "Darwin" and platform.machine() in [
1021
+ "arm64",
1022
+ "aarch64",
1023
+ ]:
1024
+ try:
1025
+ from strands_mlx import MLXModel
1026
+
1027
+ provider = "mlx"
1028
+ print("🦆 Using MLX (Apple Silicon)")
1029
+ except ImportError:
1030
+ provider = "ollama"
1031
+ print("🦆 Using Ollama (fallback)")
1032
+ # 14. Fallback to Ollama
1033
+ else:
1034
+ provider = "ollama"
1035
+ print("🦆 Using Ollama (fallback)")
1036
+
1037
+ # Create model based on provider
1038
+ if provider == "mlx":
1039
+ from strands_mlx import MLXModel
1040
+
1041
+ model_name = os.getenv("STRANDS_MODEL_ID", "mlx-community/Qwen3-1.7B-4bit")
1042
+ return (
1043
+ MLXModel(
1044
+ model_id=model_name,
1045
+ params={"temperature": temperature, "max_tokens": max_tokens},
1046
+ ),
1047
+ model_name,
1048
+ )
1049
+
1050
+ elif provider == "gemini":
1051
+ from strands.models.gemini import GeminiModel
1052
+
1053
+ model_name = os.getenv("STRANDS_MODEL_ID", "gemini-2.5-flash")
1054
+ api_key = os.getenv("GOOGLE_API_KEY") or os.getenv("GEMINI_API_KEY")
1055
+ return (
1056
+ GeminiModel(
1057
+ client_args={"api_key": api_key},
1058
+ model_id=model_name,
1059
+ params={"temperature": temperature, "max_tokens": max_tokens},
1060
+ ),
1061
+ model_name,
1062
+ )
1063
+
1064
+ elif provider == "ollama":
1065
+ from strands.models.ollama import OllamaModel
1066
+
1067
+ # Smart model selection based on OS
1068
+ os_type = platform.system()
1069
+ if os_type == "Darwin":
1070
+ model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:1.7b")
1071
+ elif os_type == "Linux":
1072
+ model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:30b")
1073
+ else:
1074
+ model_name = os.getenv("STRANDS_MODEL_ID", "qwen3:8b")
1075
+
1076
+ return (
1077
+ OllamaModel(
1078
+ host=os.getenv("OLLAMA_HOST", "http://localhost:11434"),
1079
+ model_id=model_name,
1080
+ temperature=temperature,
1081
+ num_predict=max_tokens,
1082
+ keep_alive="5m",
1083
+ ),
1084
+ model_name,
1085
+ )
1086
+
1087
+ else:
1088
+ # All other providers via create_model utility
1089
+ # Supports: bedrock, anthropic, openai, github, cohere, writer, mistral, litellm
1090
+ from strands_tools.utils.models.model import create_model
1091
+
1092
+ model = create_model(provider=provider)
1093
+ model_name = os.getenv("STRANDS_MODEL_ID", provider)
1094
+ return model, model_name
1095
+
491
1096
  def _build_system_prompt(self):
492
1097
  """Build adaptive system prompt based on environment
493
1098
 
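
The constructor and helpers above are driven almost entirely by environment variables; the sketch below pulls together the formats documented in the docstrings (DEVDUCK_TOOLS, MCP_SERVERS, the servers dict, and the model-selection knobs). The values are illustrative examples, not recommended defaults:

```python
# Illustrative configuration sketch -- formats are taken from the docstrings above.
import os
from devduck import DevDuck  # assumes the class stays importable from the package root

# DEVDUCK_TOOLS: package1:tool1,tool2;package2:tool3 -- only the listed tools are loaded
os.environ["DEVDUCK_TOOLS"] = (
    "devduck.tools:system_prompt,fetch_github_tool;strands_tools:shell"
)

# MCP_SERVERS: JSON with an "mcpServers" object (stdio: command/args/env, HTTP: url/headers)
os.environ["MCP_SERVERS"] = (
    '{"mcpServers": {"strands": {"command": "uvx", "args": ["strands-agents-mcp-server"]}}}'
)

# Model selection: name a provider explicitly to skip the credential auto-detection chain
os.environ["MODEL_PROVIDER"] = "ollama"
os.environ["STRANDS_MODEL_ID"] = "qwen3:1.7b"
os.environ["STRANDS_MAX_TOKENS"] = "60000"
os.environ["STRANDS_TEMPERATURE"] = "1.0"

# The servers dict can be passed directly instead of the DEVDUCK_ENABLE_*/DEVDUCK_*_PORT env vars
duck = DevDuck(
    servers={
        "tcp": {"port": 9999, "enabled": False},
        "ws": {"port": 8080, "enabled": True},
        "mcp": {"port": 8000, "enabled": False},
        "ipc": {"socket_path": "/tmp/devduck_main.sock", "enabled": False},
    },
    load_mcp_servers=True,
)
```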
@@ -498,11 +1103,20 @@ class DevDuck:
498
1103
 
499
1104
  Learning: Always check source code truth over conversation memory!
500
1105
  """
1106
+ # Current date and time
1107
+ current_datetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
1108
+ current_date = datetime.now().strftime("%A, %B %d, %Y")
1109
+ current_time = datetime.now().strftime("%I:%M %p")
1110
+
501
1111
  session_id = f"devduck-{datetime.now().strftime('%Y-%m-%d')}"
1112
+ self.session_id = session_id
1113
+
1114
+ # Get own file path for self-modification awareness
1115
+ own_file_path = Path(__file__).resolve()
502
1116
 
503
1117
  # Get own source code for self-awareness
504
1118
  own_code = get_own_source_code()
505
-
1119
+
506
1120
  # Get recent conversation history context (with error handling)
507
1121
  try:
508
1122
  recent_context = get_last_messages()
@@ -510,6 +1124,13 @@ class DevDuck:
510
1124
  print(f"🦆 Warning: Could not load history context: {e}")
511
1125
  recent_context = ""
512
1126
 
1127
+ # Get recent logs for immediate visibility
1128
+ try:
1129
+ recent_logs = get_recent_logs()
1130
+ except Exception as e:
1131
+ print(f"🦆 Warning: Could not load recent logs: {e}")
1132
+ recent_logs = ""
1133
+
513
1134
  return f"""🦆 You are DevDuck - an extreme minimalist, self-adapting agent.
514
1135
 
515
1136
  Environment: {self.env_info['os']} {self.env_info['arch']}
@@ -517,6 +1138,8 @@ Python: {self.env_info['python']}
517
1138
  Model: {self.model}
518
1139
  Hostname: {self.env_info['hostname']}
519
1140
  Session ID: {session_id}
1141
+ Current Time: {current_datetime} ({current_date} at {current_time})
1142
+ My Path: {own_file_path}
520
1143
 
521
1144
  You are:
522
1145
  - Minimalist: Brief, direct responses
@@ -527,6 +1150,7 @@ You are:
527
1150
  Current working directory: {self.env_info['cwd']}
528
1151
 
529
1152
  {recent_context}
1153
+ {recent_logs}
530
1154
 
531
1155
  ## Your Own Implementation:
532
1156
  You have full access to your own source code for self-awareness and self-modification:
@@ -538,59 +1162,56 @@ You have full access to your own source code for self-awareness and self-modific
538
1162
  - **No Restart Needed** - Tools are auto-loaded and ready to use instantly
539
1163
  - **Live Development** - Modify existing tools while running and test immediately
540
1164
  - **Full Python Access** - Create any Python functionality as a tool
1165
+ - **Agent Protection** - Hot-reload waits until agent finishes current task
1166
+
1167
+ ## Dynamic Tool Loading:
1168
+ - **Install Tools** - Use install_tools() to load tools from any Python package
1169
+ - Example: install_tools(action="install_and_load", package="strands-fun-tools", module="strands_fun_tools")
1170
+ - Expands capabilities without restart
1171
+ - Access to entire Python ecosystem
1172
+
1173
+ ## Tool Configuration:
1174
+ Set DEVDUCK_TOOLS for custom tools:
1175
+ - Format: package1:tool1,tool2;package2:tool3,tool4
1176
+ - Example: strands_tools:shell,editor;strands_fun_tools:clipboard
1177
+ - Tools are filtered - only specified tools are loaded
1178
+ - Load the speech_to_speech tool when it's needed
1179
+ - Offload the tools when you don't need
1180
+
1181
+ ## MCP Integration:
1182
+ - **Expose as MCP Server** - Use mcp_server() to expose devduck via MCP protocol
1183
+ - Example: mcp_server(action="start", port=8000)
1184
+ - Connect from Claude Desktop, other agents, or custom clients
1185
+ - Full bidirectional communication
1186
+
1187
+ - **Load MCP Servers** - Set MCP_SERVERS env var to auto-load external MCP servers
1188
+ - Format: JSON with "mcpServers" object
1189
+ - Stdio servers: command, args, env keys
1190
+ - HTTP servers: url, headers keys
1191
+ - Example: MCP_SERVERS='{{"mcpServers": {{"strands": {{"command": "uvx", "args": ["strands-agents-mcp-server"]}}}}}}'
1192
+ - Tools from MCP servers automatically available in agent context
1193
+
1194
+ ## Knowledge Base Integration:
1195
+ - **Automatic RAG** - Set DEVDUCK_KNOWLEDGE_BASE_ID to enable automatic retrieval/storage
1196
+ - Before each query: Retrieves relevant context from knowledge base
1197
+ - After each response: Stores conversation for future reference
1198
+ - Seamless memory across sessions without manual tool calls
541
1199
 
542
- ## Tool Creation Patterns:
543
-
544
- ### **1. @tool Decorator:**
545
- ```python
546
- # ./tools/calculate_tip.py
547
- from strands import tool
548
-
549
- @tool
550
- def calculate_tip(amount: float, percentage: float = 15.0) -> str:
551
- \"\"\"Calculate tip and total for a bill.
552
-
553
- Args:
554
- amount: Bill amount in dollars
555
- percentage: Tip percentage (default: 15.0)
556
-
557
- Returns:
558
- str: Formatted tip calculation result
559
- \"\"\"
560
- tip = amount * (percentage / 100)
561
- total = amount + tip
562
- return f"Tip: {{tip:.2f}}, Total: {{total:.2f}}"
563
- ```
564
-
565
- ### **2. Action-Based Pattern:**
566
- ```python
567
- # ./tools/weather.py
568
- from typing import Dict, Any
569
- from strands import tool
1200
+ ## System Prompt Management:
1201
+ - **View**: system_prompt(action='view') - See current prompt
1202
+ - **Update Local**: system_prompt(action='update', prompt='new text') - Updates env var + .prompt file
1203
+ - **Update GitHub**: system_prompt(action='update', prompt='text', repository='cagataycali/devduck') - Syncs to repo variables
1204
+ - **Variable Name**: system_prompt(action='update', prompt='text', variable_name='CUSTOM_PROMPT') - Use custom var
1205
+ - **Add Context**: system_prompt(action='add_context', context='new learning') - Append without replacing
570
1206
 
571
- @tool
572
- def weather(action: str, location: str = None) -> Dict[str, Any]:
573
- \"\"\"Comprehensive weather information tool.
574
-
575
- Args:
576
- action: Action to perform (current, forecast, alerts)
577
- location: City name (required)
578
-
579
- Returns:
580
- Dict containing status and response content
581
- \"\"\"
582
- if action == "current":
583
- return {{"status": "success", "content": [{{"text": f"Weather for {{location}}"}}]}}
584
- elif action == "forecast":
585
- return {{"status": "success", "content": [{{"text": f"Forecast for {{location}}"}}]}}
586
- else:
587
- return {{"status": "error", "content": [{{"text": f"Unknown action: {{action}}"}}]}}
588
- ```
1207
+ ### 🧠 Self-Improvement Pattern:
1208
+ When you learn something valuable during conversations:
1209
+ 1. Identify the new insight or pattern
1210
+ 2. Use system_prompt(action='add_context', context='...') to append it
1211
+ 3. Sync to GitHub: system_prompt(action='update', prompt=new_full_prompt, repository='owner/repo')
1212
+ 4. New learnings persist across sessions via SYSTEM_PROMPT env var
589
1213
 
590
- ## System Prompt Management:
591
- - Use system_prompt(action='get') to view current prompt
592
- - Use system_prompt(action='set', prompt='new text') to update
593
- - Changes persist in SYSTEM_PROMPT environment variable
1214
+ **Repository Integration**: Set repository='cagataycali/devduck' to sync prompts across deployments
594
1215
 
595
1216
  ## Shell Commands:
596
1217
  - Prefix with ! to execute shell commands directly
@@ -602,96 +1223,25 @@ def weather(action: str, location: str = None) -> Dict[str, Any]:
  - Communication: **MINIMAL WORDS**
  - Efficiency: **Speed is paramount**

- {os.getenv('SYSTEM_PROMPT', '')}"""
+ {_get_system_prompt()}"""

  def _self_heal(self, error):
  """Attempt self-healing when errors occur"""
+ logger.error(f"Self-healing triggered by error: {error}")
  print(f"🦆 Self-healing from: {error}")

  # Prevent infinite recursion by tracking heal attempts
  if not hasattr(self, "_heal_count"):
  self._heal_count = 0
-
+
  self._heal_count += 1
-
+
  # Limit recursion - if we've tried more than 3 times, give up
- if self._heal_count > 3:
+ if self._heal_count > 2:
  print(f"🦆 Self-healing failed after {self._heal_count} attempts")
  print("🦆 Please fix the issue manually and restart")
  sys.exit(1)

- # Handle tool validation errors by resetting session
- if "Expected toolResult blocks" in str(error):
- print("🦆 Tool validation error detected - resetting session...")
- # Add timestamp postfix to create fresh session
- postfix = datetime.now().strftime("%H%M%S")
- new_session_id = f"devduck-{datetime.now().strftime('%Y-%m-%d')}-{postfix}"
- print(f"🦆 New session: {new_session_id}")
-
- # Update session manager with new session
- try:
- from strands.session.file_session_manager import FileSessionManager
-
- self.agent.session_manager = FileSessionManager(
- session_id=new_session_id
- )
- print("🦆 Session reset successful - continuing with fresh history")
- self._heal_count = 0 # Reset counter on success
- return # Early return - no need for full restart
- except Exception as session_error:
- print(f"🦆 Session reset failed: {session_error}")
-
- # Common healing strategies
- if "not found" in str(error).lower() and "model" in str(error).lower():
- print("🦆 Model not found - trying to pull model...")
- try:
- # Try to pull the model
- result = subprocess.run(
- ["ollama", "pull", self.model], capture_output=True, timeout=60
- )
- if result.returncode == 0:
- print(f"🦆 Successfully pulled {self.model}")
- else:
- print(f"🦆 Failed to pull {self.model}, trying fallback...")
- # Fallback to basic models
- fallback_models = ["llama3.2:1b", "qwen2.5:0.5b", "gemma2:2b"]
- for fallback in fallback_models:
- try:
- subprocess.run(
- ["ollama", "pull", fallback],
- capture_output=True,
- timeout=30,
- )
- self.model = fallback
- print(f"🦆 Using fallback model: {fallback}")
- break
- except:
- continue
- except Exception as pull_error:
- print(f"🦆 Model pull failed: {pull_error}")
- # Ultra-minimal fallback
- self.model = "llama3.2:1b"
-
- elif "ollama" in str(error).lower():
- print("🦆 Ollama issue - checking service...")
- try:
- # Check if ollama is running
- result = subprocess.run(
- ["ollama", "list"], capture_output=True, timeout=5
- )
- if result.returncode != 0:
- print("🦆 Starting ollama service...")
- subprocess.Popen(["ollama", "serve"])
- import time
-
- time.sleep(3) # Wait for service to start
- except Exception as ollama_error:
- print(f"🦆 Ollama service issue: {ollama_error}")
-
- elif "import" in str(error).lower():
- print("🦆 Import issue - reinstalling dependencies...")
- ensure_deps()
-
  elif "connection" in str(error).lower():
  print("🦆 Connection issue - checking ollama service...")
  try:
@@ -707,14 +1257,247 @@ def weather(action: str, location: str = None) -> Dict[str, Any]:
  print("🦆 Running in minimal mode...")
  self.agent = None

+ def _is_port_available(self, port):
+ """Check if a port is available"""
+ try:
+ test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ test_socket.bind(("0.0.0.0", port))
+ test_socket.close()
+ return True
+ except OSError:
+ return False
+
+ def _is_socket_available(self, socket_path):
+ """Check if a Unix socket is available"""
+
+ # If socket file doesn't exist, it's available
+ if not os.path.exists(socket_path):
+ return True
+ # If it exists, try to connect to see if it's in use
+ try:
+ test_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ test_socket.connect(socket_path)
+ test_socket.close()
+ return False # Socket is in use
+ except (ConnectionRefusedError, FileNotFoundError):
+ # Socket file exists but not in use - remove stale socket
+ try:
+ os.remove(socket_path)
+ return True
+ except:
+ return False
+ except Exception:
+ return False
+
+ def _find_available_port(self, start_port, max_attempts=10):
+ """Find an available port starting from start_port"""
+ for offset in range(max_attempts):
+ port = start_port + offset
+ if self._is_port_available(port):
+ return port
+ return None
+
+ def _find_available_socket(self, base_socket_path, max_attempts=10):
+ """Find an available socket path"""
+ if self._is_socket_available(base_socket_path):
+ return base_socket_path
+ # Try numbered alternatives
+ for i in range(1, max_attempts):
+ alt_socket = f"{base_socket_path}.{i}"
+ if self._is_socket_available(alt_socket):
+ return alt_socket
+ return None
+
+ def _start_servers(self):
+ """Auto-start configured servers with port conflict handling"""
+ logger.info("Auto-starting servers...")
+ print("🦆 Auto-starting servers...")
+
+ # Start servers in order: IPC, TCP, WS, MCP
+ server_order = ["ipc", "tcp", "ws", "mcp"]
+
+ for server_type in server_order:
+ if server_type not in self.servers:
+ continue
+
+ config = self.servers[server_type]
+
+ # Check if server is enabled
+ if not config.get("enabled", True):
+ continue
+
+ # Check for LOOKUP_KEY (conditional start based on env var)
+ if "LOOKUP_KEY" in config:
+ lookup_key = config["LOOKUP_KEY"]
+ if not os.getenv(lookup_key):
+ logger.info(f"Skipping {server_type} - {lookup_key} not set")
+ continue
+
+ # Start the server with port conflict handling
+ try:
+ if server_type == "tcp":
+ port = config.get("port", 9999)
+
+ # Check port availability BEFORE attempting to start
+ if not self._is_port_available(port):
+ alt_port = self._find_available_port(port + 1)
+ if alt_port:
+ logger.info(f"Port {port} in use, using {alt_port}")
+ print(f"🦆 Port {port} in use, using {alt_port}")
+ port = alt_port
+ else:
+ logger.warning(f"No available ports found for TCP server")
+ continue
+
+ result = self.agent.tool.tcp(
+ action="start_server", port=port, record_direct_tool_call=False
+ )
+
+ if result.get("status") == "success":
+ logger.info(f"✓ TCP server started on port {port}")
+ print(f"🦆 ✓ TCP server: localhost:{port}")
+
+ elif server_type == "ws":
+ port = config.get("port", 8080)
+
+ # Check port availability BEFORE attempting to start
+ if not self._is_port_available(port):
+ alt_port = self._find_available_port(port + 1)
+ if alt_port:
+ logger.info(f"Port {port} in use, using {alt_port}")
+ print(f"🦆 Port {port} in use, using {alt_port}")
+ port = alt_port
+ else:
+ logger.warning(
+ f"No available ports found for WebSocket server"
+ )
+ continue
+
+ result = self.agent.tool.websocket(
+ action="start_server", port=port, record_direct_tool_call=False
+ )
+
+ if result.get("status") == "success":
+ logger.info(f"✓ WebSocket server started on port {port}")
+ print(f"🦆 ✓ WebSocket server: localhost:{port}")
+
+ elif server_type == "mcp":
+ port = config.get("port", 8000)
+
+ # Check port availability BEFORE attempting to start
+ if not self._is_port_available(port):
+ alt_port = self._find_available_port(port + 1)
+ if alt_port:
+ logger.info(f"Port {port} in use, using {alt_port}")
+ print(f"🦆 Port {port} in use, using {alt_port}")
+ port = alt_port
+ else:
+ logger.warning(f"No available ports found for MCP server")
+ continue
+
+ result = self.agent.tool.mcp_server(
+ action="start",
+ transport="http",
+ port=port,
+ expose_agent=True,
+ agent=self.agent,
+ record_direct_tool_call=False,
+ )
+
+ if result.get("status") == "success":
+ logger.info(f"✓ MCP HTTP server started on port {port}")
+ print(f"🦆 ✓ MCP server: http://localhost:{port}/mcp")
+
+ elif server_type == "ipc":
+ socket_path = config.get("socket_path", "/tmp/devduck_main.sock")
+
+ # Check socket availability BEFORE attempting to start
+ available_socket = self._find_available_socket(socket_path)
+ if not available_socket:
+ logger.warning(
+ f"No available socket paths found for IPC server"
+ )
+ continue
+
+ if available_socket != socket_path:
+ logger.info(
+ f"Socket {socket_path} in use, using {available_socket}"
+ )
+ print(
+ f"🦆 Socket {socket_path} in use, using {available_socket}"
+ )
+ socket_path = available_socket
+
+ result = self.agent.tool.ipc(
+ action="start_server",
+ socket_path=socket_path,
+ record_direct_tool_call=False,
+ )
+
+ if result.get("status") == "success":
+ logger.info(f"✓ IPC server started on {socket_path}")
+ print(f"🦆 ✓ IPC server: {socket_path}")
+ # TODO: support custom file path here so we can trigger foreign python function like another file
+ except Exception as e:
+ logger.error(f"Failed to start {server_type} server: {e}")
+ print(f"🦆 ⚠ {server_type.upper()} server failed: {e}")
+
  def __call__(self, query):
- """Make the agent callable"""
+ """Make the agent callable with automatic knowledge base integration"""
  if not self.agent:
+ logger.warning("Agent unavailable - attempted to call with query")
  return "🦆 Agent unavailable - try: devduck.restart()"

  try:
- return self.agent(query)
+ logger.info(f"Agent call started: {query[:100]}...")
+
+ # Mark agent as executing to prevent hot-reload interruption
+ self._agent_executing = True
+
+ # 📚 Knowledge Base Retrieval (BEFORE agent runs)
+ knowledge_base_id = os.getenv("DEVDUCK_KNOWLEDGE_BASE_ID")
+ if knowledge_base_id and hasattr(self.agent, "tool"):
+ try:
+ if "retrieve" in self.agent.tool_names:
+ logger.info(f"Retrieving context from KB: {knowledge_base_id}")
+ self.agent.tool.retrieve(
+ text=query, knowledgeBaseId=knowledge_base_id
+ )
+ except Exception as e:
+ logger.warning(f"KB retrieval failed: {e}")
+
+ # Run the agent
+ result = self.agent(query)
+
+ # 💾 Knowledge Base Storage (AFTER agent runs)
+ if knowledge_base_id and hasattr(self.agent, "tool"):
+ try:
+ if "store_in_kb" in self.agent.tool_names:
+ conversation_content = f"Input: {query}, Result: {result!s}"
+ conversation_title = f"DevDuck: {datetime.now().strftime('%Y-%m-%d')} | {query[:500]}"
+ self.agent.tool.store_in_kb(
+ content=conversation_content,
+ title=conversation_title,
+ knowledge_base_id=knowledge_base_id,
+ )
+ logger.info(f"Stored conversation in KB: {knowledge_base_id}")
+ except Exception as e:
+ logger.warning(f"KB storage failed: {e}")
+
+ # Clear executing flag
+ self._agent_executing = False
+
+ # Check for pending hot-reload
+ if self._reload_pending:
+ logger.info("Triggering pending hot-reload after agent completion")
+ print("\n🦆 Agent finished - triggering pending hot-reload...")
+ self._hot_reload()
+
+ return result
  except Exception as e:
+ self._agent_executing = False # Reset flag on error
+ logger.error(f"Agent call failed with error: {e}")
  self._self_heal(e)
  if self.agent:
  return self.agent(query)
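The `_is_port_available` / `_find_available_port` helpers added in this hunk implement a simple probe-and-walk strategy: try to bind the preferred port, and scan upward if it is taken. A standalone sketch of the same pattern (function names here are illustrative, not the class methods themselves):

```python
# Probe-and-walk port selection, mirroring the pattern _start_servers relies on.
import socket
from typing import Optional

def port_is_free(port: int, host: str = "0.0.0.0") -> bool:
    # A successful bind means nothing else is listening on this port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((host, port))
            return True
        except OSError:
            return False

def pick_port(preferred: int, max_attempts: int = 10) -> Optional[int]:
    # Walk forward from the preferred port until a free one is found.
    for offset in range(max_attempts):
        if port_is_free(preferred + offset):
            return preferred + offset
    return None

# pick_port(9999) -> 9999 if free, otherwise the next free port up to 10008.
```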
@@ -723,37 +1506,38 @@ def weather(action: str, location: str = None) -> Dict[str, Any]:

  def restart(self):
  """Restart the agent"""
- print("🦆 Restarting...")
+ print("\n🦆 Restarting...")
+ logger.debug("\n🦆 Restarting...")
  self.__init__()

  def _start_file_watcher(self):
  """Start background file watcher for auto hot-reload"""
- import threading

+ logger.info("Starting file watcher for hot-reload")
  # Get the path to this file
  self._watch_file = Path(__file__).resolve()
  self._last_modified = (
  self._watch_file.stat().st_mtime if self._watch_file.exists() else None
  )
  self._watcher_running = True
+ self._is_reloading = False

  # Start watcher thread
  self._watcher_thread = threading.Thread(
  target=self._file_watcher_thread, daemon=True
  )
  self._watcher_thread.start()
+ logger.info(f"File watcher started, monitoring {self._watch_file}")

  def _file_watcher_thread(self):
  """Background thread that watches for file changes"""
- import time
-
  last_reload_time = 0
  debounce_seconds = 3 # 3 second debounce

  while self._watcher_running:
  try:
- # Skip if currently reloading to prevent triggering during exec()
- if getattr(self, "_is_reloading", False):
+ # Skip if currently reloading
+ if self._is_reloading:
  time.sleep(1)
  continue

@@ -767,19 +1551,36 @@ def weather(action: str, location: str = None) -> Dict[str, Any]:
  and current_mtime > self._last_modified
  and current_time - last_reload_time > debounce_seconds
  ):
-
- print(f"🦆 Detected changes in {self._watch_file.name}!")
- self._last_modified = current_mtime
+ print(f"\n🦆 Detected changes in {self._watch_file.name}!")
  last_reload_time = current_time

- # Trigger hot-reload
- time.sleep(0.5) # Small delay to ensure file write is complete
- self.hot_reload()
+ # Check if agent is currently executing
+ if self._agent_executing:
+ logger.info(
+ "Code change detected but agent is executing - reload pending"
+ )
+ print(
+ "\n🦆 Agent is currently executing - reload will trigger after completion"
+ )
+ self._reload_pending = True
+ # Don't update _last_modified yet - keep detecting the change
+ else:
+ # Safe to reload immediately
+ self._last_modified = current_mtime
+ logger.info(
+ f"Code change detected in {self._watch_file.name} - triggering hot-reload"
+ )
+ time.sleep(
+ 0.5
+ ) # Small delay to ensure file write is complete
+ self._hot_reload()
  else:
- self._last_modified = current_mtime
+ # Update timestamp if no change or still in debounce
+ if not self._reload_pending:
+ self._last_modified = current_mtime

  except Exception as e:
- print(f"🦆 File watcher error: {e}")
+ logger.error(f"File watcher error: {e}")

  # Check every 1 second
  time.sleep(1)
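The watcher loop above polls the file's mtime once per second, debounces rapid saves, and defers the reload while the agent is mid-call. A compact sketch of that logic, with a stand-in state object rather than the real instance attributes:

```python
# Illustrative watcher loop: poll mtime, debounce, and defer reloads while busy.
import time
from pathlib import Path

class WatchState:
    running = True          # loop control
    agent_executing = False # set while the agent handles a query
    reload_pending = False  # set when a change arrives mid-execution

def watch(path: Path, state: WatchState, hot_reload, debounce_seconds: float = 3.0):
    last_reload = 0.0
    last_mtime = path.stat().st_mtime
    while state.running:
        mtime = path.stat().st_mtime
        now = time.time()
        if mtime > last_mtime and now - last_reload > debounce_seconds:
            if state.agent_executing:
                state.reload_pending = True   # reload once the agent finishes
            else:
                last_mtime, last_reload = mtime, now
                hot_reload()
        elif not state.reload_pending:
            last_mtime = mtime
        time.sleep(1)
```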
@@ -787,40 +1588,45 @@ def weather(action: str, location: str = None) -> Dict[str, Any]:
  def _stop_file_watcher(self):
  """Stop the file watcher"""
  self._watcher_running = False
- print("🦆 File watcher stopped")
+ logger.info("File watcher stopped")

- def hot_reload(self):
+ def _hot_reload(self):
  """Hot-reload by restarting the entire Python process with fresh code"""
- print("🦆 Hot-reloading via process restart...")
+ logger.info("Hot-reload initiated")
+ print("\n🦆 Hot-reloading via process restart...")

  try:
  # Set reload flag to prevent recursive reloads during shutdown
- if hasattr(self, "_is_reloading") and self._is_reloading:
- print("🦆 Reload already in progress, skipping")
- return
-
  self._is_reloading = True

+ # Update last_modified before reload to acknowledge the change
+ if hasattr(self, "_watch_file") and self._watch_file.exists():
+ self._last_modified = self._watch_file.stat().st_mtime
+
+ # Reset pending flag
+ self._reload_pending = False
+
  # Stop the file watcher
  if hasattr(self, "_watcher_running"):
  self._watcher_running = False

- print("🦆 Restarting process with fresh code...")
+ print("\n🦆 Restarting process with fresh code...")
+ logger.debug("\n🦆 Restarting process with fresh code...")

  # Restart the entire Python process
  # This ensures all code is freshly loaded
  os.execv(sys.executable, [sys.executable] + sys.argv)

  except Exception as e:
- print(f"🦆 Hot-reload failed: {e}")
- print("🦆 Falling back to manual restart")
+ logger.error(f"Hot-reload failed: {e}")
+ print(f"\n🦆 Hot-reload failed: {e}")
+ print("\n🦆 Falling back to manual restart")
  self._is_reloading = False

  def status(self):
  """Show current status"""
  return {
  "model": self.model,
- "host": self.ollama_host,
  "env": self.env_info,
  "agent_ready": self.agent is not None,
  "tools": len(self.tools) if hasattr(self, "tools") else 0,
@@ -834,7 +1640,15 @@ def weather(action: str, location: str = None) -> Dict[str, Any]:


  # 🦆 Auto-initialize when imported
- devduck = DevDuck()
+ # Check environment variables to control server configuration
+ # Also check if --mcp flag is present to skip auto-starting servers
+ _auto_start = os.getenv("DEVDUCK_AUTO_START_SERVERS", "true").lower() == "true"
+
+ # Disable auto-start if --mcp flag is present (stdio mode)
+ if "--mcp" in sys.argv:
+ _auto_start = False
+
+ devduck = DevDuck(auto_start_servers=_auto_start)


  # 🚀 Convenience functions
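The import-time gate above decides whether servers auto-start: they default to on, can be disabled with `DEVDUCK_AUTO_START_SERVERS`, and are always skipped in `--mcp` stdio mode. The same check, written as a small standalone helper for illustration:

```python
# Sketch of the auto-start gate, assuming the same env var and CLI flag.
import os
import sys

def should_auto_start_servers() -> bool:
    # stdio (--mcp) mode never auto-starts the HTTP/TCP/WS servers.
    if "--mcp" in sys.argv:
        return False
    # Anything other than "true" (case-insensitive) disables auto-start.
    return os.getenv("DEVDUCK_AUTO_START_SERVERS", "true").lower() == "true"
```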
@@ -855,20 +1669,124 @@ def restart():

  def hot_reload():
  """Quick hot-reload without restart"""
- devduck.hot_reload()
+ devduck._hot_reload()
+
+
+ def extract_commands_from_history():
+ """Extract commonly used commands from shell history for auto-completion."""
+ commands = set()
+ history_files = get_shell_history_files()
+
+ # Limit the number of recent commands to process for performance
+ max_recent_commands = 100
+
+ for history_type, history_file in history_files:
+ try:
+ with open(history_file, encoding="utf-8", errors="ignore") as f:
+ lines = f.readlines()
+
+ # Take recent commands for better relevance
+ recent_lines = (
+ lines[-max_recent_commands:]
+ if len(lines) > max_recent_commands
+ else lines
+ )
+
+ for line in recent_lines:
+ line = line.strip()
+ if not line:
+ continue
+
+ if history_type == "devduck":
+ # Extract devduck commands
+ if "# devduck:" in line:
+ try:
+ query = line.split("# devduck:")[-1].strip()
+ # Extract first word as command
+ first_word = query.split()[0] if query.split() else None
+ if (
+ first_word and len(first_word) > 2
+ ): # Only meaningful commands
+ commands.add(first_word.lower())
+ except (ValueError, IndexError):
+ continue
+
+ elif history_type == "zsh":
+ # Zsh format: ": timestamp:0;command"
+ if line.startswith(": ") and ":0;" in line:
+ try:
+ parts = line.split(":0;", 1)
+ if len(parts) == 2:
+ full_command = parts[1].strip()
+ # Extract first word as command
+ first_word = (
+ full_command.split()[0]
+ if full_command.split()
+ else None
+ )
+ if (
+ first_word and len(first_word) > 1
+ ): # Only meaningful commands
+ commands.add(first_word.lower())
+ except (ValueError, IndexError):
+ continue
+
+ elif history_type == "bash":
+ # Bash format: simple command per line
+ first_word = line.split()[0] if line.split() else None
+ if first_word and len(first_word) > 1: # Only meaningful commands
+ commands.add(first_word.lower())
+
+ except Exception:
+ # Skip files that can't be read
+ continue
+
+ return list(commands)


  def interactive():
  """Interactive REPL mode for devduck"""
+ from prompt_toolkit import prompt
+ from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
+ from prompt_toolkit.completion import WordCompleter
+ from prompt_toolkit.history import FileHistory
+
  print("🦆 DevDuck")
+ print(f"📝 Logs: {LOG_DIR}")
  print("Type 'exit', 'quit', or 'q' to quit.")
  print("Prefix with ! to run shell commands (e.g., ! ls -la)")
- print("-" * 50)
+ print("\n\n")
+ logger.info("Interactive mode started")
+
+ # Set up prompt_toolkit with history
+ history_file = get_shell_history_file()
+ history = FileHistory(history_file)
+
+ # Create completions from common commands and shell history
+ base_commands = ["exit", "quit", "q", "help", "clear", "status", "reload"]
+ history_commands = extract_commands_from_history()
+
+ # Combine base commands with commands from history
+ all_commands = list(set(base_commands + history_commands))
+ completer = WordCompleter(all_commands, ignore_case=True)
+
+ # Track consecutive interrupts for double Ctrl+C to exit
+ interrupt_count = 0
+ last_interrupt = 0

  while True:
  try:
- # Get user input
- q = input("\n🦆 ")
+ # Use prompt_toolkit for enhanced input with arrow key support
+ q = prompt(
+ "\n🦆 ",
+ history=history,
+ auto_suggest=AutoSuggestFromHistory(),
+ completer=completer,
+ complete_while_typing=True,
+ )
+
+ # Reset interrupt count on successful prompt
+ interrupt_count = 0

  # Check for exit command
  if q.lower() in ["exit", "quit", "q"]:
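The rewritten `interactive()` swaps bare `input()` for prompt_toolkit: file-backed history, history-driven auto-suggestions, and word completion. A minimal sketch of that setup outside devduck (the history path and command list here are illustrative, not the ones devduck uses):

```python
# Minimal prompt_toolkit REPL input with history, suggestions, and completion.
import os
from prompt_toolkit import prompt
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import FileHistory

history = FileHistory(os.path.expanduser("~/.devduck_demo_history"))  # assumed path
completer = WordCompleter(["exit", "quit", "q", "help", "status"], ignore_case=True)

line = prompt(
    "🦆 ",
    history=history,                       # persists across sessions
    auto_suggest=AutoSuggestFromHistory(), # grey inline suggestions
    completer=completer,
    complete_while_typing=True,
)
```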
@@ -884,87 +1802,59 @@ def interactive():
  shell_command = q[1:].strip()
  try:
  if devduck.agent:
- result = devduck.agent.tool.shell(command=shell_command, timeout=900)
+ devduck._agent_executing = (
+ True # Prevent hot-reload during shell execution
+ )
+ result = devduck.agent.tool.shell(
+ command=shell_command, timeout=9000
+ )
+ devduck._agent_executing = False
+
+ # Reset terminal to fix rendering issues after command output
+ print("\r", end="", flush=True)
+ sys.stdout.flush()
+
  # Append shell command to history
  append_to_shell_history(q, result["content"][0]["text"])
+
+ # Check if reload was pending
+ if devduck._reload_pending:
+ print(
+ "🦆 Shell command finished - triggering pending hot-reload..."
+ )
+ devduck._hot_reload()
  else:
  print("🦆 Agent unavailable")
  except Exception as e:
+ devduck._agent_executing = False # Reset on error
  print(f"🦆 Shell command error: {e}")
+ # Reset terminal on error too
+ print("\r", end="", flush=True)
+ sys.stdout.flush()
  continue

- # Get recent conversation context
- recent_context = get_last_messages()
-
- # Update system prompt before each call with history context
- if devduck.agent:
- # Rebuild system prompt with history
- own_code = get_own_source_code()
- session_id = f"devduck-{datetime.now().strftime('%Y-%m-%d')}"
-
- devduck.agent.system_prompt = f"""🦆 You are DevDuck - an extreme minimalist, self-adapting agent.
-
- Environment: {devduck.env_info['os']} {devduck.env_info['arch']}
- Python: {devduck.env_info['python']}
- Model: {devduck.model}
- Hostname: {devduck.env_info['hostname']}
- Session ID: {session_id}
-
- You are:
- - Minimalist: Brief, direct responses
- - Self-healing: Adapt when things break
- - Efficient: Get things done fast
- - Pragmatic: Use what works
-
- Current working directory: {devduck.env_info['cwd']}
-
- {recent_context}
-
- ## Your Own Implementation:
- You have full access to your own source code for self-awareness and self-modification:
-
- {own_code}
-
- ## Hot Reload System Active:
- - **Instant Tool Creation** - Save any .py file in `./tools/` and it becomes immediately available
- - **No Restart Needed** - Tools are auto-loaded and ready to use instantly
- - **Live Development** - Modify existing tools while running and test immediately
- - **Full Python Access** - Create any Python functionality as a tool
-
- ## System Prompt Management:
- - Use system_prompt(action='get') to view current prompt
- - Use system_prompt(action='set', prompt='new text') to update
- - Changes persist in SYSTEM_PROMPT environment variable
-
- ## Shell Commands:
- - Prefix with ! to execute shell commands directly
- - Example: ! ls -la (lists files)
- - Example: ! pwd (shows current directory)
-
- **Response Format:**
- - Tool calls: **MAXIMUM PARALLELISM - ALWAYS**
- - Communication: **MINIMAL WORDS**
- - Efficiency: **Speed is paramount**
-
- {os.getenv('SYSTEM_PROMPT', '')}"""
-
- # Update model if MODEL_PROVIDER changed
- model_provider = os.getenv("MODEL_PROVIDER")
- if model_provider:
- try:
- from strands_tools.utils.models.model import create_model
- devduck.agent.model = create_model(provider=model_provider)
- except Exception as e:
- print(f"🦆 Model update error: {e}")
-
  # Execute the agent with user input
  result = ask(q)
-
+
  # Append to shell history
  append_to_shell_history(q, str(result))

  except KeyboardInterrupt:
- print("\n🦆 Interrupted. Type 'exit' to quit.")
+ current_time = time.time()
+
+ # Check if this is a consecutive interrupt within 2 seconds
+ if current_time - last_interrupt < 2:
+ interrupt_count += 1
+ if interrupt_count >= 2:
+ print("\n🦆 Exiting...")
+ break
+ else:
+ print("\n🦆 Interrupted. Press Ctrl+C again to exit.")
+ else:
+ interrupt_count = 1
+ print("\n🦆 Interrupted. Press Ctrl+C again to exit.")
+
+ last_interrupt = current_time
  continue
  except Exception as e:
  print(f"🦆 Error: {e}")
@@ -973,8 +1863,74 @@ You have full access to your own source code for self-awareness and self-modific

  def cli():
  """CLI entry point for pip-installed devduck command"""
- if len(sys.argv) > 1:
- query = " ".join(sys.argv[1:])
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ description="🦆 DevDuck - Extreme minimalist self-adapting agent",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ epilog="""
+ Examples:
+ devduck # Start interactive mode
+ devduck "your query here" # One-shot query
+ devduck --mcp # MCP stdio mode (for Claude Desktop)
+
+ Tool Configuration:
+ export DEVDUCK_TOOLS="strands_tools:shell,editor:strands_fun_tools:clipboard"
+
+ Claude Desktop Config:
+ {
+ "mcpServers": {
+ "devduck": {
+ "command": "uvx",
+ "args": ["devduck", "--mcp"]
+ }
+ }
+ }
+ """,
+ )
+
+ # Query argument
+ parser.add_argument("query", nargs="*", help="Query to send to the agent")
+
+ # MCP stdio mode flag
+ parser.add_argument(
+ "--mcp",
+ action="store_true",
+ help="Start MCP server in stdio mode (for Claude Desktop integration)",
+ )
+
+ args = parser.parse_args()
+
+ logger.info("CLI mode started")
+
+ # Handle --mcp flag for stdio mode
+ if args.mcp:
+ logger.info("Starting MCP server in stdio mode (blocking, foreground)")
+ print("🦆 Starting MCP stdio server...", file=sys.stderr)
+
+ # Don't auto-start HTTP/TCP/WS servers for stdio mode
+ if devduck.agent:
+ try:
+ # Start MCP server in stdio mode - this BLOCKS until terminated
+ devduck.agent.tool.mcp_server(
+ action="start",
+ transport="stdio",
+ expose_agent=True,
+ agent=devduck.agent,
+ record_direct_tool_call=False,
+ )
+ except Exception as e:
+ logger.error(f"Failed to start MCP stdio server: {e}")
+ print(f"🦆 Error: {e}", file=sys.stderr)
+ sys.exit(1)
+ else:
+ print("🦆 Agent not available", file=sys.stderr)
+ sys.exit(1)
+ return
+
+ if args.query:
+ query = " ".join(args.query)
+ logger.info(f"CLI query: {query}")
  result = ask(query)
  print(result)
  else: