devduck 0.1.1766644714__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of devduck might be problematic.

@@ -1,320 +0,0 @@
- """AgentCore Logs Tool - View CloudWatch logs from deployed DevDuck instances."""
-
- from datetime import datetime, timedelta
- from typing import Any, Dict, Optional
- from strands import tool
-
-
- @tool
- def agentcore_logs(
-     action: str = "recent",
-     agent_name: str = "devduck",
-     limit: int = 50,
-     start_time: Optional[str] = None,
-     end_time: Optional[str] = None,
-     filter_pattern: Optional[str] = None,
-     log_stream_name: Optional[str] = None,
-     endpoint: str = "DEFAULT",
-     region: str = "us-west-2",
- ) -> Dict[str, Any]:
-     """View CloudWatch logs from deployed DevDuck instances on AgentCore.
-
-     Args:
-         action: Log operation (recent, streams, search, tail)
-         agent_name: Name of deployed agent (default: devduck)
-         limit: Max events/streams to return (default: 50)
-         start_time: Start time in ISO format (default: last hour)
-         end_time: End time in ISO format (default: now)
-         filter_pattern: CloudWatch filter pattern for search
-         log_stream_name: Specific stream to tail
-         endpoint: Endpoint qualifier (default: DEFAULT)
-         region: AWS region (default: us-west-2)
-
-     Returns:
-         Dict with status and log content
-
-     Examples:
-         # Recent logs
-         agentcore_logs(agent_name="devduck")
-
-         # Search for errors
-         agentcore_logs(
-             action="search",
-             filter_pattern="ERROR"
-         )
-
-         # List streams
-         agentcore_logs(action="streams")
-
-         # Tail specific stream
-         agentcore_logs(
-             action="tail",
-             log_stream_name="2025/11/16/[runtime-logs]session-abc"
-         )
-     """
-     try:
-         import boto3
-         import yaml
-         from pathlib import Path
-         from botocore.exceptions import ClientError
-
-         # Load config to get agent ID
-         devduck_dir = Path(__file__).parent.parent
-         config_path = devduck_dir / ".bedrock_agentcore.yaml"
-
-         if not config_path.exists():
-             return {
-                 "status": "error",
-                 "content": [
-                     {"text": "Agent not configured. Run agentcore_launch() first."}
-                 ],
-             }
-
-         with open(config_path) as f:
-             config = yaml.safe_load(f)
-
-         # Get agent ID
-         if "agents" not in config or agent_name not in config["agents"]:
-             return {
-                 "status": "error",
-                 "content": [{"text": f"Agent '{agent_name}' not found in config"}],
-             }
-
-         agent_id = (
-             config["agents"][agent_name].get("bedrock_agentcore", {}).get("agent_id")
-         )
-
-         if not agent_id:
-             return {
-                 "status": "error",
-                 "content": [
-                     {
-                         "text": f"Agent '{agent_name}' not deployed. Run agentcore_launch()."
-                     }
-                 ],
-             }
-
-         # CloudWatch client
-         logs_client = boto3.client("logs", region_name=region)
-
-         # Build log group name
-         log_group_name = f"/aws/bedrock-agentcore/runtimes/{agent_id}-{endpoint}"
-
-         # Route to appropriate handler
-         if action == "recent":
-             return _get_recent_logs(
-                 logs_client, log_group_name, limit, start_time, end_time, filter_pattern
-             )
-         elif action == "streams":
-             return _list_log_streams(logs_client, log_group_name, limit)
-         elif action == "search":
-             if not filter_pattern:
-                 return {
-                     "status": "error",
-                     "content": [{"text": "filter_pattern required for search"}],
-                 }
-             return _search_logs(
-                 logs_client, log_group_name, filter_pattern, limit, start_time, end_time
-             )
-         elif action == "tail":
-             if not log_stream_name:
-                 log_stream_name = _get_latest_stream(logs_client, log_group_name)
-             if not log_stream_name:
-                 return {
-                     "status": "error",
-                     "content": [{"text": "No log streams found"}],
-                 }
-             return _tail_logs(logs_client, log_group_name, log_stream_name, limit)
-         else:
-             return {
-                 "status": "error",
-                 "content": [{"text": f"Unknown action: {action}"}],
-             }
-
-     except ClientError as e:
-         error_code = e.response["Error"]["Code"]
-         if error_code == "ResourceNotFoundException":
-             return {
-                 "status": "error",
-                 "content": [
-                     {"text": f"Log group not found: {log_group_name}"},
-                     {"text": "Agent may not be deployed or hasn't logged yet"},
-                 ],
-             }
-         return {"status": "error", "content": [{"text": f"AWS Error: {str(e)}"}]}
-     except Exception as e:
-         return {"status": "error", "content": [{"text": f"Error: {str(e)}"}]}
-
-
- def _get_recent_logs(
-     client,
-     log_group_name: str,
-     limit: int,
-     start_time: Optional[str],
-     end_time: Optional[str],
-     filter_pattern: Optional[str],
- ) -> Dict[str, Any]:
-     """Get recent log events."""
-     params = {
-         "logGroupName": log_group_name,
-         "limit": limit,
-         "interleaved": True,
-     }
-
-     if start_time:
-         start_dt = datetime.fromisoformat(start_time.replace("Z", "+00:00"))
-         params["startTime"] = int(start_dt.timestamp() * 1000)
-     else:
-         # Default: last 5 minutes for truly recent logs
-         params["startTime"] = int(
-             (datetime.now() - timedelta(minutes=5)).timestamp() * 1000
-         )
-
-     if end_time:
-         end_dt = datetime.fromisoformat(end_time.replace("Z", "+00:00"))
-         params["endTime"] = int(end_dt.timestamp() * 1000)
-
-     if filter_pattern:
-         params["filterPattern"] = filter_pattern
-
-     response = client.filter_log_events(**params)
-     events = response.get("events", [])
-
-     if not events:
-         return {
-             "status": "success",
-             "content": [{"text": "No log events found in last 5 minutes"}],
-         }
-
-     # Sort by timestamp descending (most recent first)
-     events.sort(key=lambda e: e["timestamp"], reverse=True)
-
-     # Format logs
-     log_lines = []
-     for event in events:
-         timestamp = datetime.fromtimestamp(event["timestamp"] / 1000).strftime(
-             "%Y-%m-%d %H:%M:%S"
-         )
-         message = event["message"].rstrip()
-         log_lines.append(f"[{timestamp}] {message}")
-
-     return {
-         "status": "success",
-         "content": [
-             {"text": f"Found {len(events)} log events:\n"},
-             {"text": "\n".join(log_lines)},
-         ],
-     }
-
-
- def _list_log_streams(client, log_group_name: str, limit: int) -> Dict[str, Any]:
-     """List log streams."""
-     response = client.describe_log_streams(
-         logGroupName=log_group_name,
-         orderBy="LastEventTime",
-         descending=True,
-         limit=limit,
-     )
-
-     streams = response.get("logStreams", [])
-
-     if not streams:
-         return {"status": "success", "content": [{"text": "No log streams found"}]}
-
-     stream_lines = [f"Found {len(streams)} log streams:\n"]
-     for stream in streams:
-         stream_name = stream["logStreamName"]
-         last_event = datetime.fromtimestamp(
-             stream.get("lastEventTimestamp", 0) / 1000
-         ).strftime("%Y-%m-%d %H:%M:%S")
-         stream_lines.append(f"• {stream_name} (last: {last_event})")
-
-     return {"status": "success", "content": [{"text": "\n".join(stream_lines)}]}
-
-
- def _search_logs(
-     client,
-     log_group_name: str,
-     filter_pattern: str,
-     limit: int,
-     start_time: Optional[str],
-     end_time: Optional[str],
- ) -> Dict[str, Any]:
-     """Search logs with pattern."""
-     params = {
-         "logGroupName": log_group_name,
-         "filterPattern": filter_pattern,
-         "limit": limit,
-         "interleaved": True,
-     }
-
-     if start_time:
-         start_dt = datetime.fromisoformat(start_time.replace("Z", "+00:00"))
-         params["startTime"] = int(start_dt.timestamp() * 1000)
-     else:
-         params["startTime"] = int(
-             (datetime.now() - timedelta(hours=24)).timestamp() * 1000
-         )
-
-     if end_time:
-         end_dt = datetime.fromisoformat(end_time.replace("Z", "+00:00"))
-         params["endTime"] = int(end_dt.timestamp() * 1000)
-
-     response = client.filter_log_events(**params)
-     events = response.get("events", [])
-
-     if not events:
-         return {
-             "status": "success",
-             "content": [{"text": f"No matches for pattern: {filter_pattern}"}],
-         }
-
-     log_lines = [f"Found {len(events)} matches:\n"]
-     for event in events:
-         timestamp = datetime.fromtimestamp(event["timestamp"] / 1000).strftime(
-             "%Y-%m-%d %H:%M:%S"
-         )
-         message = event["message"].rstrip()
-         log_lines.append(f"[{timestamp}] {message}")
-
-     return {"status": "success", "content": [{"text": "\n".join(log_lines)}]}
-
-
- def _tail_logs(
-     client, log_group_name: str, log_stream_name: str, limit: int
- ) -> Dict[str, Any]:
-     """Tail specific log stream."""
-     response = client.get_log_events(
-         logGroupName=log_group_name,
-         logStreamName=log_stream_name,
-         limit=limit,
-         startFromHead=False,
-     )
-
-     events = response.get("events", [])
-
-     if not events:
-         return {
-             "status": "success",
-             "content": [{"text": f"No events in stream: {log_stream_name}"}],
-         }
-
-     log_lines = [f"Latest {len(events)} events:\n"]
-     for event in events:
-         timestamp = datetime.fromtimestamp(event["timestamp"] / 1000).strftime(
-             "%Y-%m-%d %H:%M:%S"
-         )
-         message = event["message"].rstrip()
-         log_lines.append(f"[{timestamp}] {message}")
-
-     return {"status": "success", "content": [{"text": "\n".join(log_lines)}]}
-
-
- def _get_latest_stream(client, log_group_name: str) -> Optional[str]:
-     """Get latest log stream."""
-     response = client.describe_log_streams(
-         logGroupName=log_group_name, orderBy="LastEventTime", descending=True, limit=1
-     )
-
-     streams = response.get("logStreams", [])
-     return streams[0]["logStreamName"] if streams else None
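
The removed tool resolves the deployed agent ID from .bedrock_agentcore.yaml and then reads the CloudWatch log group /aws/bedrock-agentcore/runtimes/{agent_id}-{endpoint} via boto3. For reference, here is a minimal sketch of the same "recent" query issued directly against CloudWatch Logs, outside the tool; the agent ID in the log group name is a hypothetical placeholder, and the call assumes standard AWS credentials are available.

# Minimal sketch (not part of the package): query the same log group directly with boto3.
from datetime import datetime, timedelta

import boto3

logs = boto3.client("logs", region_name="us-west-2")
# Hypothetical agent ID; the tool above derives the real one from .bedrock_agentcore.yaml.
log_group = "/aws/bedrock-agentcore/runtimes/EXAMPLE_AGENT_ID-DEFAULT"

resp = logs.filter_log_events(
    logGroupName=log_group,
    startTime=int((datetime.now() - timedelta(minutes=5)).timestamp() * 1000),
    limit=50,
    interleaved=True,
)
# Print most recent events first, matching the tool's "recent" formatting.
for event in sorted(resp.get("events", []), key=lambda e: e["timestamp"], reverse=True):
    ts = datetime.fromtimestamp(event["timestamp"] / 1000).strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{ts}] {event['message'].rstrip()}")
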
devduck/tools/ambient.py DELETED
@@ -1,157 +0,0 @@
- """
- Ambient input overlay control tool - integrated with devduck
- """
-
- from strands import tool
- from typing import Dict, Any
- import subprocess
- import socket
- import json
- import tempfile
- import os
- import time
- import signal
- import sys
- from pathlib import Path
-
- # Global state
- _ambient_process = None
-
-
- def _send_command(command: Dict) -> Dict:
-     """Send command to ambient input overlay"""
-     socket_path = os.path.join(tempfile.gettempdir(), "devduck_ambient.sock")
-
-     try:
-         client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-         client.settimeout(2.0)
-         client.connect(socket_path)
-         client.sendall(json.dumps(command).encode("utf-8"))
-
-         response_data = client.recv(4096)
-         client.close()
-
-         if not response_data:
-             return {"status": "error", "message": "Empty response"}
-
-         return json.loads(response_data.decode("utf-8"))
-     except socket.timeout:
-         return {"status": "error", "message": "Timeout"}
-     except Exception as e:
-         return {"status": "error", "message": str(e)}
-
-
- @tool
- def ambient(
-     action: str,
-     text: str = None,
- ) -> Dict[str, Any]:
-     """Control ambient AI input overlay.
-
-     Args:
-         action: Action to perform
-             - "start": Start ambient overlay
-             - "stop": Stop overlay
-             - "show": Show overlay
-             - "hide": Hide overlay
-             - "status": Check if running
-             - "set_text": Pre-fill text
-         text: Text to pre-fill (for set_text action)
-
-     Returns:
-         Dict with status and content
-
-     Features:
-         🎨 Modern glassmorphism UI
-         ⚡ Blinking cursor with auto-focus
-         🌊 Real-time IPC streaming from devduck
-         📦 Structured message handling
-         ⌨️ ESC to hide, Enter to send
-     """
-     global _ambient_process
-
-     if action == "start":
-         if _ambient_process and _ambient_process.poll() is None:
-             return {"status": "success", "content": [{"text": "✓ Already running"}]}
-
-         # Get ambient script path (in same directory as this file)
-         tools_dir = Path(__file__).parent
-         ambient_script = tools_dir / "_ambient_input.py"
-
-         if not ambient_script.exists():
-             return {
-                 "status": "error",
-                 "content": [{"text": f"❌ Ambient script not found: {ambient_script}"}],
-             }
-
-         _ambient_process = subprocess.Popen(
-             [sys.executable, str(ambient_script)],
-             stdout=subprocess.DEVNULL,
-             stderr=subprocess.DEVNULL,
-         )
-
-         time.sleep(1.5)
-
-         return {
-             "status": "success",
-             "content": [
-                 {"text": f"✓ Ambient overlay started (PID: {_ambient_process.pid})"}
-             ],
-         }
-
-     elif action == "stop":
-         if _ambient_process:
-             try:
-                 os.kill(_ambient_process.pid, signal.SIGTERM)
-                 _ambient_process.wait(timeout=3)
-             except:
-                 pass
-             _ambient_process = None
-
-         return {"status": "success", "content": [{"text": "✓ Stopped"}]}
-
-     elif action == "status":
-         is_running = _ambient_process and _ambient_process.poll() is None
-         return {"status": "success", "content": [{"text": f"Running: {is_running}"}]}
-
-     elif action == "show":
-         result = _send_command({"action": "show"})
-         if result.get("status") == "success":
-             return {"status": "success", "content": [{"text": "✓ Overlay shown"}]}
-         else:
-             return {
-                 "status": "error",
-                 "content": [
-                     {"text": f"Failed: {result.get('message', 'Unknown error')}"}
-                 ],
-             }
-
-     elif action == "hide":
-         result = _send_command({"action": "hide"})
-         if result.get("status") == "success":
-             return {"status": "success", "content": [{"text": "✓ Overlay hidden"}]}
-         else:
-             return {
-                 "status": "error",
-                 "content": [
-                     {"text": f"Failed: {result.get('message', 'Unknown error')}"}
-                 ],
-             }
-
-     elif action == "set_text":
-         if not text:
-             return {"status": "error", "content": [{"text": "text parameter required"}]}
-
-         result = _send_command({"action": "set_text", "text": text})
-         if result.get("status") == "success":
-             return {"status": "success", "content": [{"text": f"✓ Text set: {text}"}]}
-         else:
-             return {
-                 "status": "error",
-                 "content": [
-                     {"text": f"Failed: {result.get('message', 'Unknown error')}"}
-                 ],
-             }
-
-     else:
-         return {"status": "error", "content": [{"text": f"Unknown action: {action}"}]}
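
The ambient() tool above works in two ways: it starts or stops the _ambient_input.py subprocess, and it sends show/hide/set_text commands over a Unix domain socket at <tempdir>/devduck_ambient.sock, one JSON object per connection with a JSON reply. The _ambient_input.py overlay itself is not part of this diff; the following is a minimal, illustrative sketch of a server loop that would satisfy the same protocol (it is not the packaged implementation, and it requires AF_UNIX support, i.e. a Unix-like platform).

# Minimal sketch (not the packaged _ambient_input.py): accept the JSON commands
# that ambient() sends, e.g. {"action": "show"}, and reply with a JSON status.
import json
import os
import socket
import tempfile

socket_path = os.path.join(tempfile.gettempdir(), "devduck_ambient.sock")
if os.path.exists(socket_path):
    os.remove(socket_path)

server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(socket_path)
server.listen(1)

while True:
    conn, _ = server.accept()
    with conn:
        command = json.loads(conn.recv(4096).decode("utf-8"))
        # Illustrative handler: acknowledge known actions, reject the rest.
        # A real overlay would also update its UI in response to each command.
        if command.get("action") in {"show", "hide", "set_text"}:
            reply = {"status": "success"}
        else:
            reply = {"status": "error", "message": f"Unknown action: {command.get('action')}"}
        conn.sendall(json.dumps(reply).encode("utf-8"))
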
@@ -1,201 +0,0 @@
- """GitHub Tool Fetcher for Strands Agent.
-
- This tool fetches Python tool files from GitHub repositories and loads them
- as available tools in the current Strands agent. It combines HTTP fetching
- with the load_tool functionality to enable dynamic tool loading from remote
- GitHub repositories.
-
- Usage with Strands Agents:
- ```python
- from strands import Agent
- from tools.fetch_github_tool import fetch_github_tool
-
- agent = Agent(tools=[fetch_github_tool])
-
- # Fetch and load a tool from GitHub
- agent.tool.fetch_github_tool(
-     github_url="https://github.com/owner/repo/blob/main/tools/my_tool.py",
-     tool_name="my_tool"
- )
-
- # Now you can use the fetched tool
- agent.tool.my_tool(param1="value")
- ```
-
- Supported GitHub URL formats:
- - https://github.com/owner/repo/blob/branch/path/to/file.py
- - https://github.com/owner/repo/tree/branch/path/to/file.py
- - https://raw.githubusercontent.com/owner/repo/branch/path/to/file.py
- """
-
- import os
- import re
- from pathlib import Path
- from typing import Any
-
- import requests
- from strands import tool
-
-
- def parse_github_url(github_url: str) -> dict[str, str]:
-     """Parse GitHub URL to extract repository information.
-
-     Args:
-         github_url: GitHub URL to the file
-
-     Returns:
-         Dictionary with owner, repo, branch, and file_path
-
-     Raises:
-         ValueError: If URL format is not supported
-     """
-     # Handle raw.githubusercontent.com URLs
-     raw_pattern = r"https://raw\.githubusercontent\.com/([^/]+)/([^/]+)/([^/]+)/(.+)"
-     raw_match = re.match(raw_pattern, github_url)
-
-     if raw_match:
-         owner, repo, branch, file_path = raw_match.groups()
-         return {"owner": owner, "repo": repo, "branch": branch, "file_path": file_path}
-
-     # Handle github.com/owner/repo/blob/branch/path URLs
-     blob_pattern = r"https://github\.com/([^/]+)/([^/]+)/(?:blob|tree)/([^/]+)/(.+)"
-     blob_match = re.match(blob_pattern, github_url)
-
-     if blob_match:
-         owner, repo, branch, file_path = blob_match.groups()
-         return {"owner": owner, "repo": repo, "branch": branch, "file_path": file_path}
-
-     raise ValueError(f"Unsupported GitHub URL format: {github_url}")
-
-
- def build_raw_url(owner: str, repo: str, branch: str, file_path: str) -> str:
-     """Build GitHub raw content URL.
-
-     Args:
-         owner: Repository owner
-         repo: Repository name
-         branch: Branch name
-         file_path: Path to file in repository
-
-     Returns:
-         Raw GitHub URL for the file
-     """
-     return f"https://raw.githubusercontent.com/{owner}/{repo}/{branch}/{file_path}"
-
-
- @tool
- def fetch_github_tool(
-     github_url: str,
-     tool_name: str | None = None,
-     local_dir: str = "./github_tools",
-     agent: Any = None,
- ) -> dict[str, Any]:
-     """Fetch a Python tool file from GitHub and load it as a Strands tool.
-
-     This tool downloads Python files from GitHub repositories, saves them locally,
-     and registers them as available tools in the current Strands agent. It supports
-     various GitHub URL formats and automatically handles the conversion to raw content URLs.
-
-     Args:
-         github_url: GitHub URL to the Python tool file. Supports formats like:
-             - https://github.com/owner/repo/blob/main/tools/my_tool.py
-             - https://github.com/owner/repo/tree/main/tools/my_tool.py
-             - https://raw.githubusercontent.com/owner/repo/main/tools/my_tool.py
-         tool_name: Name to register the tool under. If not provided, will extract
-             from the filename (e.g., "my_tool.py" becomes "my_tool")
-         local_dir: Local directory to save the fetched tool file. Defaults to "./github_tools"
-         agent: Agent instance (automatically provided by Strands)
-
-     Returns:
-         Dict containing status and response content:
-         {
-             "status": "success|error",
-             "content": [{"text": "Response message"}]
-         }
-
-     Examples:
-         # Fetch a tool from GitHub and load it
-         agent.tool.fetch_github_tool(
-             github_url="https://github.com/cagataycali/my-tools/blob/main/weather_tool.py",
-             tool_name="weather"
-         )
-
-         # Tool name can be auto-detected from filename
-         agent.tool.fetch_github_tool(
-             github_url="https://github.com/cagataycali/my-tools/blob/main/calculator.py"
-         )
-     """
-     try:
-         # Parse the GitHub URL
-         try:
-             url_info = parse_github_url(github_url)
-         except ValueError as e:
-             return {"status": "error", "content": [{"text": f"❌ {e!s}"}]}
-
-         # Extract tool name from filename if not provided
-         if not tool_name:
-             filename = os.path.basename(url_info["file_path"])
-             tool_name = os.path.splitext(filename)[0]
-
-         # Check if it's a Python file first (before making HTTP request)
-         if not url_info["file_path"].endswith(".py"):
-             return {
-                 "status": "error",
-                 "content": [
-                     {
-                         "text": f"❌ File must be a Python file (.py), got: {url_info['file_path']}"
-                     }
-                 ],
-             }
-
-         # Build raw GitHub URL
-         raw_url = build_raw_url(
-             url_info["owner"],
-             url_info["repo"],
-             url_info["branch"],
-             url_info["file_path"],
-         )
-
-         # Create local directory if it doesn't exist
-         local_path = Path(local_dir)
-         local_path.mkdir(parents=True, exist_ok=True)
-
-         # Download the file
-         response = requests.get(raw_url, timeout=30)
-         response.raise_for_status()
-         # Save the file locally
-         local_file_path = local_path / f"{tool_name}.py"
-         with open(local_file_path, "w", encoding="utf-8") as f:
-             f.write(response.text)
-
-         # Load the tool using load_tool functionality
-         if agent and hasattr(agent, "tool_registry"):
-             agent.tool_registry.load_tool_from_filepath(
-                 tool_name=tool_name, tool_path=str(local_file_path)
-             )
-
-             success_message = f"""✅ Successfully fetched and loaded GitHub tool!
-
- 📂 **Source:** {github_url}
- 🏷️ **Tool Name:** {tool_name}
- 💾 **Local Path:** {local_file_path}
- 🔧 **Status:** Ready to use
-
- You can now use the tool with: agent.tool.{tool_name}(...)"""
-
-             return {"status": "success", "content": [{"text": success_message}]}
-         else:
-             return {
-                 "status": "error",
-                 "content": [
-                     {"text": "❌ Agent instance not available for tool registration"}
-                 ],
-             }
-
-     except requests.RequestException as e:
-         return {
-             "status": "error",
-             "content": [{"text": f"❌ Failed to download file from GitHub: {e!s}"}],
-         }
-     except Exception as e:
-         return {"status": "error", "content": [{"text": f"❌ Unexpected error: {e!s}"}]}
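
The URL handling in the file above reduces to two pure helpers, parse_github_url and build_raw_url. A quick sketch of how a blob URL is normalized to its raw-content equivalent, assuming those two helpers from the file above are in scope; the owner/repo values are placeholders.

# Minimal sketch: normalize a GitHub blob URL to its raw-content equivalent using
# parse_github_url and build_raw_url from the deleted module above (placeholder repo).
info = parse_github_url("https://github.com/owner/repo/blob/main/tools/my_tool.py")
# info == {"owner": "owner", "repo": "repo", "branch": "main", "file_path": "tools/my_tool.py"}

raw_url = build_raw_url(info["owner"], info["repo"], info["branch"], info["file_path"])
# raw_url == "https://raw.githubusercontent.com/owner/repo/main/tools/my_tool.py"
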