devduck 0.1.0__py3-none-any.whl → 0.1.1766644714__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of devduck has been flagged as potentially problematic; consult the registry's advisory page for details.
- devduck/__init__.py +1439 -483
- devduck/__main__.py +7 -0
- devduck/_version.py +34 -0
- devduck/agentcore_handler.py +76 -0
- devduck/test_redduck.py +0 -1
- devduck/tools/__init__.py +47 -0
- devduck/tools/_ambient_input.py +423 -0
- devduck/tools/_tray_app.py +530 -0
- devduck/tools/agentcore_agents.py +197 -0
- devduck/tools/agentcore_config.py +441 -0
- devduck/tools/agentcore_invoke.py +423 -0
- devduck/tools/agentcore_logs.py +320 -0
- devduck/tools/ambient.py +157 -0
- devduck/tools/create_subagent.py +659 -0
- devduck/tools/fetch_github_tool.py +201 -0
- devduck/tools/install_tools.py +409 -0
- devduck/tools/ipc.py +546 -0
- devduck/tools/mcp_server.py +600 -0
- devduck/tools/scraper.py +935 -0
- devduck/tools/speech_to_speech.py +850 -0
- devduck/tools/state_manager.py +292 -0
- devduck/tools/store_in_kb.py +187 -0
- devduck/tools/system_prompt.py +608 -0
- devduck/tools/tcp.py +263 -94
- devduck/tools/tray.py +247 -0
- devduck/tools/use_github.py +438 -0
- devduck/tools/websocket.py +498 -0
- devduck-0.1.1766644714.dist-info/METADATA +717 -0
- devduck-0.1.1766644714.dist-info/RECORD +33 -0
- {devduck-0.1.0.dist-info → devduck-0.1.1766644714.dist-info}/entry_points.txt +1 -0
- devduck-0.1.1766644714.dist-info/licenses/LICENSE +201 -0
- devduck/install.sh +0 -42
- devduck-0.1.0.dist-info/METADATA +0 -106
- devduck-0.1.0.dist-info/RECORD +0 -11
- devduck-0.1.0.dist-info/licenses/LICENSE +0 -21
- {devduck-0.1.0.dist-info → devduck-0.1.1766644714.dist-info}/WHEEL +0 -0
- {devduck-0.1.0.dist-info → devduck-0.1.1766644714.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,292 @@
|
|
|
1
|
+
"""DevDuck State Manager - Time-travel for agent conversations"""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import tempfile
|
|
5
|
+
import dill
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from typing import Dict, Any
|
|
9
|
+
from strands import tool
|
|
10
|
+
|
|
11
|
+
# Where saved agent states live: $DEVDUCK_HOME (or the system temp dir when
# unset) under .devduck/states.  Created eagerly at import time so every
# action in state_manager below can assume the directory exists.
base_dir = Path(os.getenv("DEVDUCK_HOME", tempfile.gettempdir()))
states_dir = base_dir / ".devduck" / "states"
states_dir.mkdir(parents=True, exist_ok=True)
|
|
16
|
+
def _error_result(message: str) -> Dict[str, Any]:
    """Build the standard error response dict used by every action."""
    return {"status": "error", "content": [{"text": message}]}


def _resolve_state_path(state_file: str):
    """Expand ``~`` in a user-supplied pkl path.

    Returns (path, None) when the file exists, (None, error_dict) otherwise.
    """
    state_path = Path(state_file).expanduser()
    if not state_path.exists():
        return None, _error_result(f"State file not found: {state_path}")
    return state_path, None


def _load_pkl(state_path: Path) -> Dict[str, Any]:
    """Deserialize a saved state.

    NOTE(security): dill.load can execute arbitrary code; only load pkl
    files produced by this tool's own export action.
    """
    with open(state_path, "rb") as f:
        return dill.load(f)


def _export_state(agent, metadata: dict) -> Dict[str, Any]:
    """Snapshot the agent's prompt, tools, model config and history to a pkl."""
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    state_file = states_dir / f"devduck_{timestamp}.pkl"

    # Safe state extraction (avoid pickling complex live objects)
    state_data = {
        "version": "1.0",
        "timestamp": datetime.now().isoformat(),
        "system_prompt": agent.system_prompt,
        "tools": list(agent.tool_names),
        "model": {
            "model_id": getattr(agent.model, "model_id", "unknown"),
            "temperature": getattr(agent.model, "temperature", None),
            "provider": getattr(agent.model, "provider", "unknown"),
        },
        "metadata": metadata or {},
        "environment": {
            "cwd": str(Path.cwd()),
            "devduck_version": "0.6.0",
        },
    }

    # The history attribute name differs across agent versions.
    if hasattr(agent, "conversation_history"):
        state_data["conversation_history"] = agent.conversation_history
    elif hasattr(agent, "messages"):
        state_data["conversation_history"] = agent.messages

    # Save with dill (handles closures/objects plain pickle cannot)
    with open(state_file, "wb") as f:
        dill.dump(state_data, f)

    size = state_file.stat().st_size
    return {
        "status": "success",
        "content": [
            {
                "text": f"✅ State exported: {state_file}\n"
                f"📦 Size: {size} bytes\n"
                f"🔧 Tools: {len(state_data['tools'])}\n"
                f"📝 Metadata: {metadata or 'none'}"
            }
        ],
    }


def _list_states() -> Dict[str, Any]:
    """List saved states, newest first, showing at most the last 10."""
    states = sorted(
        states_dir.glob("devduck_*.pkl"),
        key=lambda p: p.stat().st_mtime,
        reverse=True,
    )

    if not states:
        return {
            "status": "success",
            "content": [{"text": "No saved states found"}],
        }

    output = f"📚 Found {len(states)} saved states:\n\n"
    for i, state_path in enumerate(states[:10], 1):  # Show last 10
        try:
            state_data = _load_pkl(state_path)

            timestamp = state_data.get("timestamp", "unknown")
            tools_count = len(state_data.get("tools", []))
            meta = state_data.get("metadata", {})

            output += f"{i}. {state_path.name}\n"
            output += f" 📅 {timestamp}\n"
            output += f" 🔧 {tools_count} tools\n"
            if meta:
                output += f" 📝 {meta}\n"
            output += "\n"
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            output += f"{i}. {state_path.name} (corrupted)\n\n"

    return {"status": "success", "content": [{"text": output}]}


def _load_state(state_file: str) -> Dict[str, Any]:
    """Load a saved state and render a human-readable summary of it."""
    state_path, err = _resolve_state_path(state_file)
    if err:
        return err

    state_data = _load_pkl(state_path)

    # Pretty format
    output = f"📦 State: {state_path.name}\n\n"
    output += f"📅 Timestamp: {state_data.get('timestamp')}\n"
    output += f"🤖 Model: {state_data.get('model', {}).get('model_id')}\n"
    output += f"🔧 Tools ({len(state_data.get('tools', []))}): {', '.join(state_data.get('tools', []))}\n"
    output += f"📝 Metadata: {state_data.get('metadata', {})}\n"

    if "conversation_history" in state_data:
        history = state_data["conversation_history"]
        output += f"\n💬 Conversation: {len(history)} messages\n"

    return {"status": "success", "content": [{"text": output}]}


def _resume_state(state_file: str, query: str) -> Dict[str, Any]:
    """Time-travel: replay a saved state into a fresh ephemeral agent.

    The parent agent is never mutated; the saved state is loaded into a
    throwaway DevDuck instance which then handles the new query.
    """
    state_path, err = _resolve_state_path(state_file)
    if err:
        return err

    state_data = _load_pkl(state_path)

    # ✅ Create ephemeral DevDuck instance (no mutation!)
    try:
        from devduck import DevDuck

        ephemeral_duck = DevDuck(auto_start_servers=False)
        ephemeral_agent = ephemeral_duck.agent
    except Exception as e:
        return _error_result(f"Failed to create ephemeral DevDuck: {e}")

    # Load saved state into ephemeral agent
    ephemeral_agent.system_prompt = state_data["system_prompt"]

    # Restore conversation history (attribute name varies by agent version)
    if "conversation_history" in state_data:
        saved_history = state_data["conversation_history"]

        if hasattr(ephemeral_agent, "conversation_history"):
            ephemeral_agent.conversation_history = saved_history
        elif hasattr(ephemeral_agent, "messages"):
            ephemeral_agent.messages = saved_history

    # Build continuation prompt with provenance context
    continuation_context = f"""
[Resumed from state: {state_path.name}]
[Original timestamp: {state_data.get('timestamp')}]

{query}
"""
    # Run ephemeral agent (parent agent unchanged!)
    result = ephemeral_agent(continuation_context)

    return {
        "status": "success",
        "content": [{"text": f"🔄 Resumed from {state_path.name}\n\n{result}"}],
    }


def _modify_state(state_file: str, metadata: dict) -> Dict[str, Any]:
    """Merge new metadata into a saved state and write it back in place."""
    state_path, err = _resolve_state_path(state_file)
    if err:
        return err

    state_data = _load_pkl(state_path)

    # setdefault guards against pkl files written without a metadata key,
    # which previously raised KeyError here.
    state_data.setdefault("metadata", {})
    if metadata:
        state_data["metadata"].update(metadata)

    # Save back
    with open(state_path, "wb") as f:
        dill.dump(state_data, f)

    return {
        "status": "success",
        "content": [
            {
                "text": f"✅ Modified {state_path.name}\n📝 New metadata: {state_data['metadata']}"
            }
        ],
    }


def _delete_state(state_file: str) -> Dict[str, Any]:
    """Permanently remove a saved state file."""
    state_path, err = _resolve_state_path(state_file)
    if err:
        return err

    state_path.unlink()
    return {
        "status": "success",
        "content": [{"text": f"🗑️ Deleted {state_path.name}"}],
    }


@tool
def state_manager(
    action: str,
    state_file: str | None = None,
    query: str | None = None,
    metadata: dict | None = None,
    agent=None,  # Parent agent injection
) -> Dict[str, Any]:
    """Agent state management with time-travel capabilities.

    Inspired by cagataycali/research-agent state export pattern.

    Actions:
    - export: Save current agent state to pkl
    - load: Load and display state from pkl
    - list: List available saved states
    - resume: Load state and continue with new query (ephemeral)
    - modify: Update pkl file metadata
    - delete: Remove saved state

    Args:
        action: Operation to perform
        state_file: Path to pkl file (auto-generated for export)
        query: New query for resume action
        metadata: Additional metadata for export/modify
        agent: Parent agent (auto-injected by Strands)

    Returns:
        Dict with status and content

    Examples:
        # Save current state
        state_manager(action="export", metadata={"note": "before refactor"})

        # List saved states
        state_manager(action="list")

        # Resume from previous state (ephemeral, no mutation)
        state_manager(action="resume", state_file="~/.devduck/states/devduck_20250116_032000.pkl", query="continue analysis")

        # Modify state metadata
        state_manager(action="modify", state_file="path/to/state.pkl", metadata={"tags": ["important", "refactor"]})
    """
    try:
        if action == "export":
            # Explicit check: without it a missing agent surfaced as an
            # opaque AttributeError from the generic handler below.
            if agent is None:
                return _error_result("agent required for export")
            return _export_state(agent, metadata)

        elif action == "list":
            return _list_states()

        elif action == "load":
            if not state_file:
                return _error_result("state_file required for load")
            return _load_state(state_file)

        elif action == "resume":
            if not state_file or not query:
                return _error_result("state_file and query required for resume")
            return _resume_state(state_file, query)

        elif action == "modify":
            if not state_file:
                return _error_result("state_file required for modify")
            return _modify_state(state_file, metadata)

        elif action == "delete":
            if not state_file:
                return _error_result("state_file required for delete")
            return _delete_state(state_file)

        else:
            return _error_result(f"Unknown action: {action}")

    except Exception as e:
        return {"status": "error", "content": [{"text": f"Error: {e}"}]}
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
"""Tool for storing data in Bedrock Knowledge Base asynchronously."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
import os
|
|
6
|
+
import threading
|
|
7
|
+
import time
|
|
8
|
+
import uuid
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
import boto3
|
|
12
|
+
from strands import tool
|
|
13
|
+
|
|
14
|
+
# Module-level logger; the background ingestion worker reports failures
# here because its caller has already returned by the time it runs.
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _store_in_kb_background(
    content: str, title: str, kb_id: str, region_name: str
) -> None:
    """Worker that performs the actual Bedrock KB ingestion.

    Runs on a daemon thread spawned by ``store_in_kb``; all argument
    validation already happened on the calling thread.  Failures are
    logged rather than raised, since the caller has already returned.

    Args:
        content: The text content to store
        title: The title for the content
        kb_id: The knowledge base ID
        region_name: The AWS region to use
    """
    try:
        # Timestamped document id keeps each ingestion traceable.
        doc_id = "memory_{}_{}".format(
            time.strftime("%Y%m%d_%H%M%S"), str(uuid.uuid4())[:8]
        )

        # Wrap the raw text with its title so retrieval keeps context.
        payload = {
            "title": title,
            "action": "store",
            "content": content,
        }

        client = boto3.client("bedrock-agent", region_name=region_name)

        # Find the data sources attached to this knowledge base.
        listing = client.list_data_sources(knowledgeBaseId=kb_id)
        summaries = listing.get("dataSourceSummaries")
        if not summaries:
            logger.error(
                f"No data sources found for knowledge base {kb_id}, region {region_name}."
            )
            return

        # Inline-content ingestion requires a CUSTOM data source, so scan
        # for one first (one get_data_source call per candidate).
        data_source_id = None
        source_type = None
        for summary in summaries:
            detail = client.get_data_source(
                knowledgeBaseId=kb_id, dataSourceId=summary["dataSourceId"]
            )
            if detail["dataSource"]["dataSourceConfiguration"]["type"] == "CUSTOM":
                data_source_id = summary["dataSourceId"]
                source_type = "CUSTOM"
                logger.debug(f"Found CUSTOM data source: {data_source_id}")
                break

        # No CUSTOM source: fall back to the first one, recording its type.
        if not data_source_id and summaries:
            data_source_id = summaries[0]["dataSourceId"]
            detail = client.get_data_source(
                knowledgeBaseId=kb_id, dataSourceId=data_source_id
            )
            source_type = detail["dataSource"]["dataSourceConfiguration"]["type"]
            logger.debug(
                f"No CUSTOM data source found. Using {source_type} data source: {data_source_id}"
            )

        if not data_source_id:
            logger.error(f"No suitable data source found for knowledge base {kb_id}.")
            return

        # Only CUSTOM sources accept inline documents; everything else bails.
        if source_type == "S3":
            logger.error(
                "S3 data source type is not supported for direct ingestion with this tool."
            )
            return
        if source_type != "CUSTOM":
            logger.error(f"Unsupported data source type: {source_type}")
            return

        request = {
            "knowledgeBaseId": kb_id,
            "dataSourceId": data_source_id,
            "documents": [
                {
                    "content": {
                        "dataSourceType": "CUSTOM",
                        "custom": {
                            "customDocumentIdentifier": {"id": doc_id},
                            "inlineContent": {
                                "textContent": {
                                    "data": json.dumps(payload)
                                },
                                "type": "TEXT",
                            },
                            "sourceType": "IN_LINE",
                        },
                    }
                }
            ],
        }

        # Ingest the document into the knowledge base.
        client.ingest_knowledge_base_documents(**request)

        logger.info(
            f"Successfully ingested document into knowledge base {kb_id}: {doc_id}"
        )

    except Exception as e:
        logger.error(f"Error ingesting into knowledge base: {e!s}")
|
|
132
|
+
|
|
133
|
+
@tool
def store_in_kb(
    content: str, title: str | None = None, knowledge_base_id: str | None = None
) -> dict[str, Any]:
    """Store content in a Bedrock Knowledge Base using real-time ingestion.

    Fire-and-forget: all validation happens here on the calling thread,
    then the actual ingestion is handed off to a daemon thread and this
    function returns immediately.

    Args:
        content: The text content to store in the knowledge base.
        title: Optional title for the content. If not provided, a timestamp will be used.
        knowledge_base_id: Optional knowledge base ID. If not provided, will use the STRANDS_KNOWLEDGE_BASE_ID env.

    Returns:
        A dictionary containing the result of the operation.
    """
    # Reject empty or whitespace-only content before spawning anything.
    if not (content and content.strip()):
        return {"status": "error", "content": [{"text": "❌ Content cannot be empty"}]}

    # Resolve the KB id now so a missing env var fails fast on this thread,
    # not later inside the background worker.
    kb_id = knowledge_base_id if knowledge_base_id else os.getenv(
        "STRANDS_KNOWLEDGE_BASE_ID"
    )
    if not kb_id:
        missing_kb_msg = (
            "❌ No knowledge base ID provided or found in environment variables STRANDS_KNOWLEDGE_BASE_ID"
        )
        return {"status": "error", "content": [{"text": missing_kb_msg}]}

    region_name = os.getenv("AWS_REGION", "us-west-2")

    doc_title = title if title else f"Strands Memory {time.strftime('%Y%m%d_%H%M%S')}"

    # Daemon thread: ingestion never blocks interpreter shutdown.
    worker = threading.Thread(
        target=_store_in_kb_background,
        args=(content, doc_title, kb_id, region_name),
        daemon=True,
    )
    worker.start()

    # Report the hand-off immediately; the worker logs its own outcome.
    status_lines = [
        "✅ Started background task to store content in knowledge base:",
        f"📝 Title: {doc_title}",
        f"🗄️ Knowledge Base ID: {kb_id}",
        "⏱️ Processing in background...",
    ]
    return {
        "status": "success",
        "content": [{"text": line} for line in status_lines],
    }