remdb 0.3.180__py3-none-any.whl → 0.3.230__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rem/agentic/README.md +36 -2
- rem/agentic/context.py +173 -0
- rem/agentic/context_builder.py +12 -2
- rem/agentic/mcp/tool_wrapper.py +2 -2
- rem/agentic/providers/pydantic_ai.py +1 -1
- rem/agentic/schema.py +2 -2
- rem/api/main.py +1 -1
- rem/api/mcp_router/server.py +4 -0
- rem/api/mcp_router/tools.py +542 -166
- rem/api/routers/admin.py +30 -4
- rem/api/routers/auth.py +106 -10
- rem/api/routers/chat/child_streaming.py +379 -0
- rem/api/routers/chat/completions.py +74 -37
- rem/api/routers/chat/sse_events.py +7 -3
- rem/api/routers/chat/streaming.py +352 -257
- rem/api/routers/chat/streaming_utils.py +327 -0
- rem/api/routers/common.py +18 -0
- rem/api/routers/dev.py +7 -1
- rem/api/routers/feedback.py +9 -1
- rem/api/routers/messages.py +176 -38
- rem/api/routers/models.py +9 -1
- rem/api/routers/query.py +12 -1
- rem/api/routers/shared_sessions.py +16 -0
- rem/auth/jwt.py +19 -4
- rem/auth/middleware.py +42 -28
- rem/cli/README.md +62 -0
- rem/cli/commands/ask.py +61 -81
- rem/cli/commands/db.py +55 -31
- rem/cli/commands/process.py +171 -43
- rem/models/entities/ontology.py +18 -20
- rem/schemas/agents/rem.yaml +1 -1
- rem/services/content/service.py +18 -5
- rem/services/embeddings/worker.py +26 -12
- rem/services/postgres/__init__.py +28 -3
- rem/services/postgres/diff_service.py +57 -5
- rem/services/postgres/programmable_diff_service.py +635 -0
- rem/services/postgres/pydantic_to_sqlalchemy.py +2 -2
- rem/services/postgres/register_type.py +11 -10
- rem/services/postgres/repository.py +39 -29
- rem/services/postgres/schema_generator.py +5 -5
- rem/services/postgres/sql_builder.py +6 -5
- rem/services/session/__init__.py +8 -1
- rem/services/session/compression.py +40 -2
- rem/services/session/pydantic_messages.py +292 -0
- rem/settings.py +28 -0
- rem/sql/migrations/001_install.sql +125 -7
- rem/sql/migrations/002_install_models.sql +159 -149
- rem/sql/migrations/004_cache_system.sql +7 -275
- rem/sql/migrations/migrate_session_id_to_uuid.sql +45 -0
- rem/utils/schema_loader.py +79 -51
- {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/METADATA +2 -2
- {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/RECORD +54 -48
- {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/WHEEL +0 -0
- {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/entry_points.txt +0 -0
rem/api/mcp_router/tools.py
CHANGED
@@ -20,6 +20,7 @@ Available Tools:
 - get_schema: Get detailed schema for a table (columns, types, indexes)
 """
 
+import json
 from functools import wraps
 from typing import Any, Callable, Literal, cast
 
@@ -128,201 +129,228 @@ def mcp_tool_error_handler(func: Callable) -> Callable:
 
 @mcp_tool_error_handler
 async def search_rem(
-
-    # LOOKUP parameters
-    entity_key: str | None = None,
-    # FUZZY parameters
-    query_text: str | None = None,
-    threshold: float = 0.7,
-    # SEARCH parameters
-    table: str | None = None,
+    query: str,
     limit: int = 20,
-    # SQL parameters
-    sql_query: str | None = None,
-    # TRAVERSE parameters
-    initial_query: str | None = None,
-    edge_types: list[str] | None = None,
-    depth: int = 1,
-    # Optional context override (defaults to authenticated user)
-    user_id: str | None = None,
 ) -> dict[str, Any]:
     """
-    Execute REM
-
-    REM supports multiple query types for different retrieval patterns:
+    Execute a REM query using the REM query dialect.
 
-    **
-    - Fast exact match across all tables
-    - Uses indexed label_vector for instant retrieval
-    - Example: LOOKUP "Sarah Chen" returns all entities named "Sarah Chen"
+    **REM Query Syntax:**
 
-
-
-
+    LOOKUP <entity_key>
+        Find entity by exact name/key. Searches across all tables.
+        Example: LOOKUP phq-9-procedure
+        Example: LOOKUP sertraline
 
-
-
-
+    SEARCH <text> IN <table>
+        Semantic vector search within a specific table.
+        Tables: 'ontologies' (clinical knowledge, procedures, drugs, DSM criteria)
+                'resources' (documents, files, user content)
+        Example: SEARCH depression IN ontologies
+        Example: SEARCH Module F IN ontologies
 
-
-
-
+    FUZZY <text>
+        Fuzzy text matching for partial matches and typos.
+        Example: FUZZY setraline
 
-
-
-
-    - Example: TRAVERSE "Sarah Chen" edge_types=["manages", "reports_to"] depth=2
+    TRAVERSE <start_entity>
+        Graph traversal from a starting entity.
+        Example: TRAVERSE sarah-chen
 
     Args:
-
-
-        query_text: Search text for FUZZY or SEARCH
-        threshold: Similarity threshold for FUZZY (0.0-1.0)
-        table: Target table for SEARCH (resources, moments, users, etc.)
-        limit: Max results for SEARCH
-        sql_query: SQL WHERE clause for SQL type (e.g. "id = '123'")
-        initial_query: Starting entity for TRAVERSE
-        edge_types: Edge types to follow for TRAVERSE (e.g., ["manages", "reports_to"])
-        depth: Traversal depth for TRAVERSE (0=plan only, 1-5=actual traversal)
-        user_id: Optional user identifier (defaults to authenticated user or "default")
+        query: REM query string (e.g., "LOOKUP phq-9-procedure", "SEARCH depression IN ontologies")
+        limit: Maximum results to return (default: 20)
 
     Returns:
-        Dict with query results
+        Dict with query results and metadata. If no results found, includes
+        'suggestions' with alternative search strategies.
 
     Examples:
-
-        search_rem(
-
-
-        )
-
-        # Semantic search
-        search_rem(
-            query_type="search",
-            query_text="database migration",
-            table="resources",
-            limit=10
-        )
-
-        # SQL query (WHERE clause only)
-        search_rem(
-            query_type="sql",
-            table="resources",
-            sql_query="category = 'document'"
-        )
-
-        # Graph traversal
-        search_rem(
-            query_type="traverse",
-            initial_query="Sarah Chen",
-            edge_types=["manages", "reports_to"],
-            depth=2
-        )
+        search_rem("LOOKUP phq-9-procedure")
+        search_rem("SEARCH depression IN ontologies")
+        search_rem("SEARCH anxiety treatment IN ontologies", limit=10)
+        search_rem("FUZZY setraline")
     """
     # Get RemService instance (lazy initialization)
     rem_service = await get_rem_service()
 
-    # Get user_id from context
-
-    user_id = AgentContext.get_user_id_or_default(user_id, source="search_rem")
+    # Get user_id from context
+    user_id = AgentContext.get_user_id_or_default(None, source="search_rem")
 
-    #
-
+    # Parse the REM query string
+    if not query or not query.strip():
+        return {
+            "status": "error",
+            "error": "Empty query. Use REM syntax: LOOKUP <key>, SEARCH <text> IN <table>, FUZZY <text>, or TRAVERSE <entity>",
+        }
+
+    query = query.strip()
+    parts = query.split(None, 1)  # Split on first whitespace
+
+    if len(parts) < 2:
+        return {
+            "status": "error",
+            "error": f"Invalid query format: '{query}'. Expected: LOOKUP <key>, SEARCH <text> IN <table>, FUZZY <text>, or TRAVERSE <entity>",
+        }
+
+    query_type = parts[0].upper()
+    remainder = parts[1].strip()
 
     # Build RemQuery based on query_type
-    if query_type == "
-        if not
-            return {
+    if query_type == "LOOKUP":
+        if not remainder:
+            return {
+                "status": "error",
+                "error": "LOOKUP requires an entity key. Example: LOOKUP phq-9-procedure",
+            }
 
-
+        rem_query = RemQuery(
             query_type=QueryType.LOOKUP,
             parameters=LookupParameters(
-                key=
+                key=remainder,
                 user_id=user_id,
             ),
             user_id=user_id,
         )
+        table = None  # LOOKUP searches all tables
+
+    elif query_type == "SEARCH":
+        # Parse "text IN table" format
+        if " IN " in remainder.upper():
+            # Find the last " IN " to handle cases like "SEARCH pain IN back IN ontologies"
+            in_pos = remainder.upper().rfind(" IN ")
+            search_text = remainder[:in_pos].strip()
+            table = remainder[in_pos + 4:].strip().lower()
+        else:
+            return {
+                "status": "error",
+                "error": f"SEARCH requires table: SEARCH <text> IN <table>. "
+                "Use 'ontologies' for clinical knowledge or 'resources' for documents. "
+                f"Example: SEARCH {remainder} IN ontologies",
+            }
 
-
-
-
-
-
-            query_type=QueryType.FUZZY,
-            parameters=FuzzyParameters(
-                query_text=query_text,
-                threshold=threshold,
-                limit=limit,  # Limit was missing in original logic but likely intended
-            ),
-            user_id=user_id,
-        )
-
-    elif query_type == "search":
-        if not query_text:
-            return {"status": "error", "error": "query_text required for SEARCH"}
-        if not table:
-            return {"status": "error", "error": "table required for SEARCH"}
+        if not search_text:
+            return {
+                "status": "error",
+                "error": "SEARCH requires search text. Example: SEARCH depression IN ontologies",
+            }
 
-
+        rem_query = RemQuery(
             query_type=QueryType.SEARCH,
             parameters=SearchParameters(
-                query_text=
+                query_text=search_text,
                 table_name=table,
                 limit=limit,
             ),
             user_id=user_id,
         )
 
-    elif query_type == "
-        if not
-            return {
-
-
-
-
-
-
-
-
-
-            parameters=SQLParameters(
-                table_name=table,
-                where_clause=sql_query,
+    elif query_type == "FUZZY":
+        if not remainder:
+            return {
+                "status": "error",
+                "error": "FUZZY requires search text. Example: FUZZY setraline",
+            }
+
+        rem_query = RemQuery(
+            query_type=QueryType.FUZZY,
+            parameters=FuzzyParameters(
+                query_text=remainder,
+                threshold=0.3,  # pg_trgm similarity - 0.3 is reasonable for typo correction
                 limit=limit,
             ),
             user_id=user_id,
         )
+        table = None
 
-    elif query_type == "
-        if not
+    elif query_type == "TRAVERSE":
+        if not remainder:
             return {
                 "status": "error",
-                "error": "
+                "error": "TRAVERSE requires a starting entity. Example: TRAVERSE sarah-chen",
             }
 
-
+        rem_query = RemQuery(
             query_type=QueryType.TRAVERSE,
             parameters=TraverseParameters(
-                initial_query=
-                edge_types=
-                max_depth=
+                initial_query=remainder,
+                edge_types=[],
+                max_depth=1,
             ),
             user_id=user_id,
         )
+        table = None
 
     else:
-        return {
+        return {
+            "status": "error",
+            "error": f"Unknown query type: '{query_type}'. Valid types: LOOKUP, SEARCH, FUZZY, TRAVERSE. "
+            "Examples: LOOKUP phq-9-procedure, SEARCH depression IN ontologies",
+        }
 
     # Execute query (errors handled by decorator)
     logger.info(f"Executing REM query: {query_type} for user {user_id}")
-    result = await rem_service.execute_query(
+    result = await rem_service.execute_query(rem_query)
 
     logger.info(f"Query completed successfully: {query_type}")
-
+
+    # Provide helpful guidance when no results found
+    response: dict[str, Any] = {
         "query_type": query_type,
         "results": result,
     }
 
+    # Check if results are empty - handle both list and dict result formats
+    is_empty = False
+    if not result:
+        is_empty = True
+    elif isinstance(result, list) and len(result) == 0:
+        is_empty = True
+    elif isinstance(result, dict):
+        # RemService returns dict with 'results' key containing actual matches
+        inner_results = result.get("results", [])
+        count = result.get("count", len(inner_results) if isinstance(inner_results, list) else 0)
+        is_empty = count == 0 or (isinstance(inner_results, list) and len(inner_results) == 0)
+
+    if is_empty:
+        # Build helpful suggestions based on query type
+        suggestions = []
+
+        if query_type in ("LOOKUP", "FUZZY"):
+            suggestions.append(
+                "LOOKUP/FUZZY searches across ALL tables. If you expected results, "
+                "verify the entity name is spelled correctly."
+            )
+
+        if query_type == "SEARCH":
+            if table == "resources":
+                suggestions.append(
+                    "No results in 'resources' table. Try: SEARCH <text> IN ontologies - "
+                    "clinical procedures, drug info, and diagnostic criteria are stored there."
+                )
+            elif table == "ontologies":
+                suggestions.append(
+                    "No results in 'ontologies' table. Try: SEARCH <text> IN resources - "
+                    "for user-uploaded documents and general content."
+                )
+            else:
+                suggestions.append(
+                    "Try: SEARCH <text> IN ontologies (clinical knowledge, procedures, drugs) "
+                    "or SEARCH <text> IN resources (documents, files)."
+                )
+
+        # Always suggest both tables if no specific table guidance given
+        if not suggestions:
+            suggestions.append(
+                "No results found. Try: SEARCH <text> IN ontologies (clinical procedures, drugs) "
+                "or SEARCH <text> IN resources (documents, files)."
+            )
+
+        response["suggestions"] = suggestions
+        response["hint"] = "0 results returned. See 'suggestions' for alternative search strategies."
+
+    return response
+
 
 @mcp_tool_error_handler
 async def ask_rem_agent(
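The net effect of this hunk: `search_rem` drops the old multi-parameter signature (`query_type`, `entity_key`, `query_text`, `table`, `sql_query`, ...) in favor of a single REM query string plus `limit`, and parses that string itself. A minimal, self-contained sketch of the parsing step, assuming only what the hunk shows (the `parse_rem_query` helper name is hypothetical; in the package the logic is inline in `search_rem`):

```python
# Hypothetical standalone version of the query-string parsing added in this hunk.
def parse_rem_query(query: str) -> dict:
    """Split a REM query string into query_type, text, and optional table."""
    parts = query.strip().split(None, 1)  # split on first whitespace
    if len(parts) < 2:
        raise ValueError(f"Invalid query format: '{query}'")

    query_type, remainder = parts[0].upper(), parts[1].strip()
    table = None
    if query_type == "SEARCH":
        # The LAST " IN " wins, so search text may itself contain the word IN.
        in_pos = remainder.upper().rfind(" IN ")
        if in_pos == -1:
            raise ValueError("SEARCH requires a table: SEARCH <text> IN <table>")
        remainder, table = remainder[:in_pos].strip(), remainder[in_pos + 4:].strip().lower()
    return {"query_type": query_type, "text": remainder, "table": table}


assert parse_rem_query("SEARCH pain IN back IN ontologies") == {
    "query_type": "SEARCH", "text": "pain IN back", "table": "ontologies"
}
assert parse_rem_query("LOOKUP phq-9-procedure")["query_type"] == "LOOKUP"
```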
@@ -373,20 +401,45 @@ async def ask_rem_agent(
             query="Show me Sarah's reporting chain and their recent projects"
         )
     """
-    # Get user_id from context if not provided
-    # TODO: Extract from authenticated session context when auth is enabled
-    user_id = AgentContext.get_user_id_or_default(user_id, source="ask_rem_agent")
-
     from ...agentic import create_agent
+    from ...agentic.context import get_current_context
     from ...utils.schema_loader import load_agent_schema
 
-    #
-    #
-
-
-
-
+    # Get parent context for multi-agent support
+    # This enables context propagation from parent agent to child agent
+    parent_context = get_current_context()
+
+    # Build child context: inherit from parent if available, otherwise use defaults
+    if parent_context is not None:
+        # Inherit user_id, tenant_id, session_id, is_eval from parent
+        # Allow explicit user_id override if provided
+        effective_user_id = user_id or parent_context.user_id
+        context = parent_context.child_context(agent_schema_uri=agent_schema)
+        if user_id is not None:
+            # Override user_id if explicitly provided
+            context = AgentContext(
+                user_id=user_id,
+                tenant_id=parent_context.tenant_id,
+                session_id=parent_context.session_id,
+                default_model=parent_context.default_model,
+                agent_schema_uri=agent_schema,
+                is_eval=parent_context.is_eval,
+            )
+        logger.debug(
+            f"ask_rem_agent inheriting context from parent: "
+            f"user_id={context.user_id}, session_id={context.session_id}"
+        )
+    else:
+        # No parent context - create fresh context (backwards compatible)
+        effective_user_id = AgentContext.get_user_id_or_default(
+            user_id, source="ask_rem_agent"
+        )
+        context = AgentContext(
+            user_id=effective_user_id,
+            tenant_id=effective_user_id or "default",
+            default_model=settings.llm.default_model,
+            agent_schema_uri=agent_schema,
+        )
 
     # Load agent schema
     try:
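This hunk replaces the flat `get_user_id_or_default` call with parent-aware context construction: if a parent `AgentContext` exists, the child inherits `user_id`, `tenant_id`, `session_id`, and `is_eval`; otherwise a fresh default-scoped context is built. A compact sketch of that decision rule, using a stand-in dataclass rather than rem's real `AgentContext` (the `resolve_child_context` helper is illustrative only):

```python
# Illustrative sketch of the inheritance rule; field names follow the diff,
# the helper and the Ctx stand-in class do not exist in the package.
from dataclasses import dataclass, replace


@dataclass
class Ctx:  # stand-in for rem's AgentContext
    user_id: str | None
    tenant_id: str
    session_id: str | None = None
    agent_schema_uri: str | None = None


def resolve_child_context(parent: Ctx | None, agent_schema: str, user_id: str | None) -> Ctx:
    if parent is not None:
        # Inherit everything from the parent; only the schema (and optionally user_id) change.
        return replace(parent, agent_schema_uri=agent_schema, user_id=user_id or parent.user_id)
    # No parent context: fall back to a fresh, default-scoped context.
    uid = user_id or "default"
    return Ctx(user_id=uid, tenant_id=uid, agent_schema_uri=agent_schema)


parent = Ctx(user_id="u-1", tenant_id="t-1", session_id="s-42")
child = resolve_child_context(parent, agent_schema="rem", user_id=None)
assert (child.user_id, child.session_id) == ("u-1", "s-42")
```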
@@ -426,15 +479,18 @@ async def ingest_into_rem(
     category: str | None = None,
     tags: list[str] | None = None,
     is_local_server: bool = False,
-    user_id: str | None = None,
     resource_type: str | None = None,
 ) -> dict[str, Any]:
     """
-    Ingest file into REM, creating searchable resources and embeddings.
+    Ingest file into REM, creating searchable PUBLIC resources and embeddings.
+
+    **IMPORTANT: All ingested data is PUBLIC by default.** This is correct for
+    shared knowledge bases (ontologies, procedures, reference data). Private
+    user-scoped data requires different handling via the CLI with --make-private.
 
     This tool provides the complete file ingestion pipeline:
     1. **Read**: File from local/S3/HTTP
-    2. **Store**: To
+    2. **Store**: To internal storage (public namespace)
     3. **Parse**: Extract content, metadata, tables, images
     4. **Chunk**: Semantic chunking for embeddings
     5. **Embed**: Create Resource chunks with vector embeddings
@@ -453,7 +509,6 @@ async def ingest_into_rem(
         category: Optional category (document, code, audio, etc.)
         tags: Optional tags for file
         is_local_server: True if running as local/stdio MCP server
-        user_id: Optional user identifier (defaults to authenticated user or "default")
         resource_type: Optional resource type for storing chunks (case-insensitive).
             Supports flexible naming:
             - "resource", "resources", "Resource" → Resource (default)
@@ -472,10 +527,10 @@ async def ingest_into_rem(
         - message: Human-readable status message
 
     Examples:
-        # Ingest local file (local server only
+        # Ingest local file (local server only)
         ingest_into_rem(
-            file_uri="/Users/me/
-            category="
+            file_uri="/Users/me/procedure.pdf",
+            category="medical",
             is_local_server=True
         )
 
@@ -499,15 +554,14 @@ async def ingest_into_rem(
     """
    from ...services.content import ContentService
 
-    #
-    #
-    user_id = AgentContext.get_user_id_or_default(user_id, source="ingest_into_rem")
+    # Data is PUBLIC by default (user_id=None)
+    # Private user-scoped data requires CLI with --make-private flag
 
     # Delegate to ContentService for centralized ingestion (errors handled by decorator)
     content_service = ContentService()
     result = await content_service.ingest_file(
         file_uri=file_uri,
-        user_id=
+        user_id=None,  # PUBLIC - all ingested data is shared/public
         category=category,
         tags=tags,
         is_local_server=is_local_server,
@@ -540,15 +594,18 @@ async def read_resource(uri: str) -> dict[str, Any]:
     **Available Resources:**
 
     Agent Schemas:
-    • rem://
-    • rem://
-
+    • rem://agents - List all available agent schemas
+    • rem://agents/{agent_name} - Get specific agent schema
+
+    Documentation:
+    • rem://schema/entities - Entity schemas (Resource, Message, User, File, Moment)
+    • rem://schema/query-types - REM query type documentation
 
     System Status:
     • rem://status - System health and statistics
 
     Args:
-        uri: Resource URI (e.g., "rem://
+        uri: Resource URI (e.g., "rem://agents", "rem://agents/ask_rem")
 
     Returns:
         Dict with:
@@ -557,14 +614,11 @@ async def read_resource(uri: str) -> dict[str, Any]:
         - data: Resource data (format depends on resource type)
 
     Examples:
-        # List all
-        read_resource(uri="rem://
-
-        # Get specific schema
-        read_resource(uri="rem://schema/ask_rem")
+        # List all agents
+        read_resource(uri="rem://agents")
 
-        # Get
-        read_resource(uri="rem://
+        # Get specific agent
+        read_resource(uri="rem://agents/ask_rem")
 
         # Check system status
         read_resource(uri="rem://status")
@@ -617,6 +671,8 @@ async def register_metadata(
     recommended_action: str | None = None,
     # Generic extension - any additional key-value pairs
     extra: dict[str, Any] | None = None,
+    # Agent schema (auto-populated from context if not provided)
+    agent_schema: str | None = None,
 ) -> dict[str, Any]:
     """
     Register response metadata to be emitted as an SSE MetadataEvent.
@@ -657,6 +713,8 @@ async def register_metadata(
         extra: Dict of arbitrary additional metadata. Use this for any
             domain-specific fields not covered by the standard parameters.
             Example: {"topics_detected": ["anxiety", "sleep"], "session_count": 5}
+        agent_schema: Optional agent schema name. If not provided, automatically
+            populated from the current agent context (for multi-agent tracing).
 
     Returns:
         Dict with:
@@ -700,10 +758,17 @@ async def register_metadata(
         }
     )
     """
+    # Auto-populate agent_schema from context if not provided
+    if agent_schema is None:
+        from ...agentic.context import get_current_context
+        current_context = get_current_context()
+        if current_context and current_context.agent_schema_uri:
+            agent_schema = current_context.agent_schema_uri
+
     logger.debug(
         f"Registering metadata: confidence={confidence}, "
         f"risk_level={risk_level}, refs={len(references or [])}, "
-        f"sources={len(sources or [])}"
+        f"sources={len(sources or [])}, agent_schema={agent_schema}"
     )
 
     result = {
@@ -713,6 +778,7 @@ async def register_metadata(
         "references": references,
         "sources": sources,
         "flags": flags,
+        "agent_schema": agent_schema,  # Include agent schema for tracing
     }
 
     # Add session name if provided
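The register_metadata hunks add an `agent_schema` parameter that, when omitted, is filled from the ambient agent context and then carried through into the emitted metadata dict. The fallback order, sketched as a standalone function (a hypothetical helper; the package does this inline with `get_current_context()`):

```python
# Sketch of the fallback: an explicit agent_schema argument wins, otherwise the
# value comes from the current agent context, otherwise it stays None.
def resolve_agent_schema(explicit: str | None, current_context) -> str | None:
    if explicit is not None:
        return explicit
    if current_context is not None and getattr(current_context, "agent_schema_uri", None):
        return current_context.agent_schema_uri
    return None
```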
@@ -1134,6 +1200,316 @@ async def save_agent(
     return result
 
 
+# =============================================================================
+# Multi-Agent Tools
+# =============================================================================
+
+
+@mcp_tool_error_handler
+async def ask_agent(
+    agent_name: str,
+    input_text: str,
+    input_data: dict[str, Any] | None = None,
+    user_id: str | None = None,
+    timeout_seconds: int = 300,
+) -> dict[str, Any]:
+    """
+    Invoke another agent by name and return its response.
+
+    This tool enables multi-agent orchestration by allowing one agent to call
+    another. The child agent inherits the parent's context (user_id, session_id,
+    tenant_id, is_eval) for proper scoping and continuity.
+
+    Use Cases:
+    - Orchestrator agents that delegate to specialized sub-agents
+    - Workflow agents that chain multiple processing steps
+    - Ensemble agents that aggregate responses from multiple specialists
+
+    Args:
+        agent_name: Name of the agent to invoke. Can be:
+            - A user-created agent (saved via save_agent)
+            - A system agent (e.g., "ask_rem", "knowledge-query")
+        input_text: The user message/query to send to the agent
+        input_data: Optional structured input data for the agent
+        user_id: Optional user override (defaults to parent's user_id)
+        timeout_seconds: Maximum execution time (default: 300s)
+
+    Returns:
+        Dict with:
+        - status: "success" or "error"
+        - output: Agent's structured output (if using output schema)
+        - text_response: Agent's text response
+        - agent_schema: Name of the invoked agent
+        - metadata: Any metadata registered by the agent (confidence, etc.)
+
+    Examples:
+        # Simple delegation
+        ask_agent(
+            agent_name="sentiment-analyzer",
+            input_text="I love this product! Best purchase ever."
+        )
+        # Returns: {"status": "success", "output": {"sentiment": "positive"}, ...}
+
+        # Orchestrator pattern
+        ask_agent(
+            agent_name="knowledge-query",
+            input_text="What are the latest Q3 results?"
+        )
+
+        # Chain with structured input
+        ask_agent(
+            agent_name="summarizer",
+            input_text="Summarize this document",
+            input_data={"document_id": "doc-123", "max_length": 500}
+        )
+    """
+    import asyncio
+    from ...agentic import create_agent
+    from ...agentic.context import get_current_context, agent_context_scope, get_event_sink, push_event
+    from ...agentic.agents.agent_manager import get_agent
+    from ...utils.schema_loader import load_agent_schema
+
+    # Get parent context for inheritance
+    parent_context = get_current_context()
+
+    # Determine effective user_id
+    if parent_context is not None:
+        effective_user_id = user_id or parent_context.user_id
+    else:
+        effective_user_id = AgentContext.get_user_id_or_default(
+            user_id, source="ask_agent"
+        )
+
+    # Build child context
+    if parent_context is not None:
+        child_context = parent_context.child_context(agent_schema_uri=agent_name)
+        if user_id is not None:
+            # Explicit user_id override
+            child_context = AgentContext(
+                user_id=user_id,
+                tenant_id=parent_context.tenant_id,
+                session_id=parent_context.session_id,
+                default_model=parent_context.default_model,
+                agent_schema_uri=agent_name,
+                is_eval=parent_context.is_eval,
+            )
+        logger.debug(
+            f"ask_agent '{agent_name}' inheriting context: "
+            f"user_id={child_context.user_id}, session_id={child_context.session_id}"
+        )
+    else:
+        child_context = AgentContext(
+            user_id=effective_user_id,
+            tenant_id=effective_user_id or "default",
+            default_model=settings.llm.default_model,
+            agent_schema_uri=agent_name,
+        )
+
+    # Try to load agent schema from:
+    # 1. Database (user-created or system agents)
+    # 2. File system (packaged agents)
+    schema = None
+
+    # Try database first
+    if effective_user_id:
+        schema = await get_agent(agent_name, user_id=effective_user_id)
+        if schema:
+            logger.debug(f"Loaded agent '{agent_name}' from database")
+
+    # Fall back to file system
+    if schema is None:
+        try:
+            schema = load_agent_schema(agent_name)
+            logger.debug(f"Loaded agent '{agent_name}' from file system")
+        except FileNotFoundError:
+            pass
+
+    if schema is None:
+        return {
+            "status": "error",
+            "error": f"Agent not found: {agent_name}",
+            "hint": "Use list_agents to see available agents, or save_agent to create one",
+        }
+
+    # Create agent runtime
+    agent_runtime = await create_agent(
+        context=child_context,
+        agent_schema_override=schema,
+    )
+
+    # Build prompt with optional input_data
+    prompt = input_text
+    if input_data:
+        prompt = f"{input_text}\n\nInput data: {json.dumps(input_data)}"
+
+    # Load session history for the sub-agent (CRITICAL for multi-turn conversations)
+    # Sub-agents need to see the full conversation context, not just the summary
+    pydantic_message_history = None
+    if child_context.session_id and settings.postgres.enabled:
+        try:
+            from ...services.session import SessionMessageStore, session_to_pydantic_messages
+            from ...agentic.schema import get_system_prompt
+
+            store = SessionMessageStore(user_id=child_context.user_id or "default")
+            raw_session_history = await store.load_session_messages(
+                session_id=child_context.session_id,
+                user_id=child_context.user_id,
+                compress_on_load=False,  # Need full data for reconstruction
+            )
+            if raw_session_history:
+                # Extract agent's system prompt from schema
+                agent_system_prompt = get_system_prompt(schema) if schema else None
+                pydantic_message_history = session_to_pydantic_messages(
+                    raw_session_history,
+                    system_prompt=agent_system_prompt,
+                )
+                logger.debug(
+                    f"ask_agent '{agent_name}': loaded {len(raw_session_history)} session messages "
+                    f"-> {len(pydantic_message_history)} pydantic-ai messages"
+                )
+
+                # Audit session history if enabled
+                from ...services.session import audit_session_history
+                audit_session_history(
+                    session_id=child_context.session_id,
+                    agent_name=agent_name,
+                    prompt=prompt,
+                    raw_session_history=raw_session_history,
+                    pydantic_messages_count=len(pydantic_message_history),
+                )
+        except Exception as e:
+            logger.warning(f"ask_agent '{agent_name}': failed to load session history: {e}")
+            # Fall back to running without history
+
+    # Run agent with timeout and context propagation
+    logger.info(f"Invoking agent '{agent_name}' with prompt: {prompt[:100]}...")
+
+    # Check if we have an event sink for streaming
+    push_event = get_event_sink()
+    use_streaming = push_event is not None
+
+    streamed_content = ""  # Track if content was streamed
+
+    try:
+        # Set child context for nested tool calls
+        with agent_context_scope(child_context):
+            if use_streaming:
+                # STREAMING MODE: Use iter() and proxy events to parent
+                logger.debug(f"ask_agent '{agent_name}': using streaming mode with event proxying")
+
+                async def run_with_streaming():
+                    from pydantic_ai.messages import (
+                        PartStartEvent, PartDeltaEvent, PartEndEvent,
+                        FunctionToolResultEvent, FunctionToolCallEvent,
+                    )
+                    from pydantic_ai.agent import Agent
+
+                    accumulated_content = []
+                    child_tool_calls = []
+
+                    # iter() returns an async context manager, not an awaitable
+                    iter_kwargs = {"message_history": pydantic_message_history} if pydantic_message_history else {}
+                    async with agent_runtime.iter(prompt, **iter_kwargs) as agent_run:
+                        async for node in agent_run:
+                            if Agent.is_model_request_node(node):
+                                async with node.stream(agent_run.ctx) as request_stream:
+                                    async for event in request_stream:
+                                        # Proxy part starts
+                                        if isinstance(event, PartStartEvent):
+                                            from pydantic_ai.messages import ToolCallPart, TextPart
+                                            if isinstance(event.part, ToolCallPart):
+                                                # Push tool start event to parent
+                                                await push_event.put({
+                                                    "type": "child_tool_start",
+                                                    "agent_name": agent_name,
+                                                    "tool_name": event.part.tool_name,
+                                                    "arguments": event.part.args if hasattr(event.part, 'args') else None,
+                                                })
+                                                child_tool_calls.append({
+                                                    "tool_name": event.part.tool_name,
+                                                    "index": event.index,
+                                                })
+                                            elif isinstance(event.part, TextPart):
+                                                # TextPart may have initial content
+                                                if event.part.content:
+                                                    accumulated_content.append(event.part.content)
+                                                    await push_event.put({
+                                                        "type": "child_content",
+                                                        "agent_name": agent_name,
+                                                        "content": event.part.content,
+                                                    })
+                                        # Proxy text content deltas to parent for real-time streaming
+                                        elif isinstance(event, PartDeltaEvent):
+                                            if hasattr(event, 'delta') and hasattr(event.delta, 'content_delta'):
+                                                content = event.delta.content_delta
+                                                if content:
+                                                    accumulated_content.append(content)
+                                                    # Push content chunk to parent for streaming
+                                                    await push_event.put({
+                                                        "type": "child_content",
+                                                        "agent_name": agent_name,
+                                                        "content": content,
+                                                    })
+
+                            elif Agent.is_call_tools_node(node):
+                                async with node.stream(agent_run.ctx) as tools_stream:
+                                    async for tool_event in tools_stream:
+                                        if isinstance(tool_event, FunctionToolResultEvent):
+                                            result_content = tool_event.result.content if hasattr(tool_event.result, 'content') else tool_event.result
+                                            # Push tool result to parent
+                                            await push_event.put({
+                                                "type": "child_tool_result",
+                                                "agent_name": agent_name,
+                                                "result": result_content,
+                                            })
+
+                        # Get final result (inside context manager)
+                        return agent_run.result, "".join(accumulated_content), child_tool_calls
+
+                result, streamed_content, tool_calls = await asyncio.wait_for(
+                    run_with_streaming(),
+                    timeout=timeout_seconds
+                )
+            else:
+                # NON-STREAMING MODE: Use run() for backwards compatibility
+                if pydantic_message_history:
+                    result = await asyncio.wait_for(
+                        agent_runtime.run(prompt, message_history=pydantic_message_history),
+                        timeout=timeout_seconds
+                    )
+                else:
+                    result = await asyncio.wait_for(
+                        agent_runtime.run(prompt),
+                        timeout=timeout_seconds
+                    )
+    except asyncio.TimeoutError:
+        return {
+            "status": "error",
+            "error": f"Agent '{agent_name}' timed out after {timeout_seconds}s",
+            "agent_schema": agent_name,
+        }
+
+    # Serialize output
+    from rem.agentic.serialization import serialize_agent_result
+    output = serialize_agent_result(result.output)
+
+    logger.info(f"Agent '{agent_name}' completed successfully")
+
+    response = {
+        "status": "success",
+        "output": output,
+        "agent_schema": agent_name,
+        "input_text": input_text,
+    }
+
+    # Only include text_response if content was NOT streamed
+    # When streaming, child_content events already delivered the content
+    if not use_streaming or not streamed_content:
+        response["text_response"] = str(result.output)
+
+    return response
+
+
 # =============================================================================
 # Test/Debug Tools (for development only)
 # =============================================================================
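The new `ask_agent` tool proxies child-agent activity to the parent through an event sink with queue-like `put()` semantics, emitting `child_tool_start`, `child_content`, and `child_tool_result` events tagged with `agent_name`. A sketch of a parent-side consumer draining such a queue (the consumer loop and the `None` sentinel are illustrative, not part of the package):

```python
# Illustrative consumer for the proxied child events shown in the hunk above.
# Assumes the event sink is an asyncio.Queue; the sentinel comes from the caller.
import asyncio
from typing import Callable


async def drain_child_events(queue: asyncio.Queue, render: Callable[[str], None]) -> None:
    while True:
        event = await queue.get()
        if event is None:  # caller-side sentinel to stop draining
            break
        if event["type"] == "child_content":
            render(event["content"])  # stream child text as it arrives
        elif event["type"] == "child_tool_start":
            render(f"[{event['agent_name']}] -> {event['tool_name']}\n")
        elif event["type"] == "child_tool_result":
            render(f"[{event['agent_name']}] tool result received\n")
```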