remdb 0.3.230__py3-none-any.whl → 0.3.258__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. rem/agentic/__init__.py +10 -1
  2. rem/agentic/context.py +13 -2
  3. rem/agentic/context_builder.py +45 -34
  4. rem/agentic/providers/pydantic_ai.py +302 -110
  5. rem/api/mcp_router/resources.py +223 -0
  6. rem/api/mcp_router/tools.py +76 -10
  7. rem/api/routers/auth.py +113 -10
  8. rem/api/routers/chat/child_streaming.py +22 -8
  9. rem/api/routers/chat/completions.py +3 -3
  10. rem/api/routers/chat/sse_events.py +3 -3
  11. rem/api/routers/chat/streaming.py +40 -45
  12. rem/api/routers/chat/streaming_utils.py +5 -7
  13. rem/api/routers/feedback.py +2 -2
  14. rem/api/routers/query.py +5 -14
  15. rem/cli/commands/ask.py +144 -33
  16. rem/cli/commands/experiments.py +1 -1
  17. rem/cli/commands/process.py +9 -1
  18. rem/cli/commands/query.py +109 -0
  19. rem/cli/commands/session.py +117 -0
  20. rem/cli/main.py +2 -0
  21. rem/models/core/experiment.py +1 -1
  22. rem/models/entities/session.py +1 -0
  23. rem/schemas/agents/core/agent-builder.yaml +1 -1
  24. rem/schemas/agents/test_orchestrator.yaml +42 -0
  25. rem/schemas/agents/test_structured_output.yaml +52 -0
  26. rem/services/content/providers.py +151 -49
  27. rem/services/postgres/repository.py +1 -0
  28. rem/services/rem/README.md +4 -3
  29. rem/services/rem/parser.py +7 -10
  30. rem/services/rem/service.py +47 -0
  31. rem/services/session/compression.py +7 -3
  32. rem/services/session/pydantic_messages.py +25 -7
  33. rem/services/session/reload.py +2 -1
  34. rem/settings.py +64 -7
  35. rem/sql/migrations/004_cache_system.sql +3 -1
  36. rem/utils/schema_loader.py +135 -103
  37. {remdb-0.3.230.dist-info → remdb-0.3.258.dist-info}/METADATA +6 -5
  38. {remdb-0.3.230.dist-info → remdb-0.3.258.dist-info}/RECORD +40 -37
  39. {remdb-0.3.230.dist-info → remdb-0.3.258.dist-info}/WHEEL +0 -0
  40. {remdb-0.3.230.dist-info → remdb-0.3.258.dist-info}/entry_points.txt +0 -0
@@ -404,52 +404,47 @@ async def stream_openai_response(
404
404
  tool_calls_out.append(tool_data)
405
405
  del state.pending_tool_data[tool_id]
406
406
 
407
- if not is_metadata_event:
408
- # NOTE: text_response fallback is DISABLED
409
- # Child agents now stream content via child_content events (above)
410
- # which provides real-time streaming. The text_response in tool
411
- # result would duplicate that content, so we skip it entirely.
412
-
413
- # Normal tool completion - emit ToolCallEvent
414
- # For finalize_intake, send full result dict for frontend
415
- if tool_name == "finalize_intake" and isinstance(result_content, dict):
416
- result_for_sse = result_content
407
+ # Always emit ToolCallEvent completion for frontend tracking
408
+ # Send full result for dict/list types, stringify others
409
+ if isinstance(result_content, (dict, list)):
410
+ result_for_sse = result_content
411
+ else:
412
+ result_for_sse = str(result_content)
413
+
414
+ # Log result count for search_rem
415
+ if tool_name == "search_rem" and isinstance(result_content, dict):
416
+ results = result_content.get("results", {})
417
+ # Handle nested result structure: results may be a dict with 'results' list and 'count'
418
+ if isinstance(results, dict):
419
+ count = results.get("count", len(results.get("results", [])))
420
+ query_type = results.get("query_type", "?")
421
+ query_text = results.get("query_text", results.get("key", ""))
422
+ table = results.get("table_name", "")
423
+ elif isinstance(results, list):
424
+ count = len(results)
425
+ query_type = "?"
426
+ query_text = ""
427
+ table = ""
417
428
  else:
418
- result_str = str(result_content)
419
- result_for_sse = result_str[:200] + "..." if len(result_str) > 200 else result_str
420
-
421
- # Log result count for search_rem
422
- if tool_name == "search_rem" and isinstance(result_content, dict):
423
- results = result_content.get("results", {})
424
- # Handle nested result structure: results may be a dict with 'results' list and 'count'
425
- if isinstance(results, dict):
426
- count = results.get("count", len(results.get("results", [])))
427
- query_type = results.get("query_type", "?")
428
- query_text = results.get("query_text", results.get("key", ""))
429
- table = results.get("table_name", "")
430
- elif isinstance(results, list):
431
- count = len(results)
432
- query_type = "?"
433
- query_text = ""
434
- table = ""
435
- else:
436
- count = "?"
437
- query_type = "?"
438
- query_text = ""
439
- table = ""
440
- status = result_content.get("status", "unknown")
441
- # Truncate query text for logging
442
- if query_text and len(str(query_text)) > 40:
443
- query_text = str(query_text)[:40] + "..."
444
- logger.info(f" ↳ {tool_name} {query_type} '{query_text}' table={table} → {count} results")
445
-
446
- yield format_sse_event(ToolCallEvent(
447
- tool_name=tool_name,
448
- tool_id=tool_id,
449
- status="completed",
450
- arguments=completed_args,
451
- result=result_for_sse
452
- ))
429
+ count = "?"
430
+ query_type = "?"
431
+ query_text = ""
432
+ table = ""
433
+ status = result_content.get("status", "unknown")
434
+ # Truncate query text for logging
435
+ if query_text and len(str(query_text)) > 40:
436
+ query_text = str(query_text)[:40] + "..."
437
+ logger.info(f" ↳ {tool_name} {query_type} '{query_text}' table={table} → {count} results")
438
+
439
+ # Always emit ToolCallEvent completion for frontend tracking
440
+ # This includes register_metadata calls so they turn green in the UI
441
+ yield format_sse_event(ToolCallEvent(
442
+ tool_name=tool_name,
443
+ tool_id=tool_id,
444
+ status="completed",
445
+ arguments=completed_args,
446
+ result=result_for_sse
447
+ ))
453
448
 
454
449
  # Update progress after tool completion
455
450
  state.current_step = 3
@@ -177,19 +177,17 @@ def build_tool_complete_event(
177
177
  arguments: dict | None = None,
178
178
  result: Any = None,
179
179
  ) -> str:
180
- """Build a tool call completed SSE event."""
181
- result_str = None
182
- if result is not None:
183
- result_str = str(result)
184
- if len(result_str) > 200:
185
- result_str = result_str[:200] + "..."
180
+ """Build a tool call completed SSE event.
186
181
 
182
+ Note: Full result is sent in SSE events for UI display.
183
+ Truncation only happens in log_tool_result() for log readability.
184
+ """
187
185
  return format_sse_event(ToolCallEvent(
188
186
  tool_name=tool_name,
189
187
  tool_id=tool_id,
190
188
  status="completed",
191
189
  arguments=arguments,
192
- result=result_str,
190
+ result=result,
193
191
  ))
194
192
 
195
193
 
@@ -21,11 +21,11 @@ IMPORTANT - Testing Requirements:
21
21
  ║ 2. Session IDs MUST be UUIDs - use python3 -c "import uuid; print(uuid.uuid4())" ║
22
22
  ║ 3. Port-forward OTEL collector: kubectl port-forward -n observability ║
23
23
  ║ svc/otel-collector-collector 4318:4318 ║
24
- ║ 4. Port-forward Phoenix: kubectl port-forward -n siggy svc/phoenix 6006:6006
24
+ ║ 4. Port-forward Phoenix: kubectl port-forward -n rem svc/phoenix 6006:6006
25
25
  ║ 5. Set environment variables when starting the API: ║
26
26
  ║ OTEL__ENABLED=true PHOENIX__ENABLED=true PHOENIX_API_KEY=<jwt> uvicorn ... ║
27
27
  ║ 6. Get PHOENIX_API_KEY: ║
28
- ║ kubectl get secret -n siggy rem-phoenix-api-key -o jsonpath='{.data.PHOENIX_API_KEY}'
28
+ ║ kubectl get secret -n rem rem-phoenix-api-key -o jsonpath='{.data.PHOENIX_API_KEY}'
29
29
  ║ | base64 -d ║
30
30
  ╚════════════════════════════════════════════════════════════════════════════════════════════════════╝
31
31
 
rem/api/routers/query.py CHANGED
@@ -90,8 +90,6 @@ from .common import ErrorResponse
90
90
 
91
91
  from ...services.postgres import get_postgres_service
92
92
  from ...services.rem.service import RemService
93
- from ...services.rem.parser import RemQueryParser
94
- from ...models.core import RemQuery
95
93
  from ...settings import settings
96
94
 
97
95
  router = APIRouter(prefix="/api/v1", tags=["query"])
@@ -331,7 +329,7 @@ async def execute_query(
331
329
  return response
332
330
 
333
331
  else:
334
- # REM dialect mode - parse and execute directly
332
+ # REM dialect mode - use unified execute_query_string
335
333
  if not request.query:
336
334
  raise HTTPException(
337
335
  status_code=400,
@@ -340,17 +338,10 @@ async def execute_query(
340
338
 
341
339
  logger.info(f"REM dialect query: {request.query[:100]}...")
342
340
 
343
- parser = RemQueryParser()
344
- query_type, parameters = parser.parse(request.query)
345
-
346
- # Create and execute RemQuery
347
- rem_query = RemQuery.model_validate({
348
- "query_type": query_type,
349
- "parameters": parameters,
350
- "user_id": effective_user_id,
351
- })
352
-
353
- result = await rem_service.execute_query(rem_query)
341
+ # Use the unified execute_query_string method
342
+ result = await rem_service.execute_query_string(
343
+ request.query, user_id=effective_user_id
344
+ )
354
345
 
355
346
  return QueryResponse(
356
347
  query_type=result["query_type"],
rem/cli/commands/ask.py CHANGED
@@ -164,9 +164,13 @@ async def run_agent_non_streaming(
164
164
  context: AgentContext | None = None,
165
165
  plan: bool = False,
166
166
  max_iterations: int | None = None,
167
+ user_message: str | None = None,
167
168
  ) -> dict[str, Any] | None:
168
169
  """
169
- Run agent in non-streaming mode using agent.run() with usage limits.
170
+ Run agent in non-streaming mode using agent.iter() to capture tool calls.
171
+
172
+ This mirrors the streaming code path to ensure tool messages are properly
173
+ persisted to the database for state tracking across turns.
170
174
 
171
175
  Args:
172
176
  agent: Pydantic AI agent
@@ -176,77 +180,183 @@ async def run_agent_non_streaming(
176
180
  context: Optional AgentContext for session persistence
177
181
  plan: If True, output only the generated query (for query-agent)
178
182
  max_iterations: Maximum iterations/requests (from agent schema or settings)
183
+ user_message: The user's original message (for database storage)
179
184
 
180
185
  Returns:
181
186
  Output data if successful, None otherwise
182
187
  """
183
188
  from pydantic_ai import UsageLimits
189
+ from pydantic_ai.agent import Agent
190
+ from pydantic_ai.messages import (
191
+ FunctionToolResultEvent,
192
+ PartStartEvent,
193
+ PartEndEvent,
194
+ TextPart,
195
+ ToolCallPart,
196
+ )
184
197
  from rem.utils.date_utils import to_iso_with_z, utc_now
185
198
 
186
199
  logger.info("Running agent in non-streaming mode...")
187
200
 
188
201
  try:
189
- # Run agent and get complete result with usage limits
190
- usage_limits = UsageLimits(request_limit=max_iterations) if max_iterations else None
191
- result = await agent.run(prompt, usage_limits=usage_limits)
202
+ # Track tool calls for persistence (same as streaming code path)
203
+ tool_calls: list = []
204
+ pending_tool_data: dict = {}
205
+ pending_tool_completions: list = []
206
+ accumulated_content: list = []
207
+
208
+ # Get the underlying pydantic-ai agent
209
+ pydantic_agent = agent.agent if hasattr(agent, 'agent') else agent
210
+
211
+ # Use agent.iter() to capture tool calls (same as streaming)
212
+ async with pydantic_agent.iter(prompt) as agent_run:
213
+ async for node in agent_run:
214
+ # Handle model request nodes (text + tool call starts)
215
+ if Agent.is_model_request_node(node):
216
+ async with node.stream(agent_run.ctx) as request_stream:
217
+ async for event in request_stream:
218
+ # Capture text content
219
+ if isinstance(event, PartStartEvent) and isinstance(event.part, TextPart):
220
+ if event.part.content:
221
+ accumulated_content.append(event.part.content)
222
+
223
+ # Capture tool call starts
224
+ elif isinstance(event, PartStartEvent) and isinstance(event.part, ToolCallPart):
225
+ tool_name = event.part.tool_name
226
+ if tool_name == "final_result":
227
+ continue
228
+
229
+ import uuid
230
+ tool_id = f"call_{uuid.uuid4().hex[:8]}"
231
+ pending_tool_completions.append((tool_name, tool_id))
232
+
233
+ # Extract arguments
234
+ args_dict = {}
235
+ if hasattr(event.part, 'args'):
236
+ args = event.part.args
237
+ if isinstance(args, str):
238
+ try:
239
+ args_dict = json.loads(args)
240
+ except json.JSONDecodeError:
241
+ args_dict = {"raw": args}
242
+ elif isinstance(args, dict):
243
+ args_dict = args
244
+
245
+ pending_tool_data[tool_id] = {
246
+ "tool_name": tool_name,
247
+ "tool_id": tool_id,
248
+ "arguments": args_dict,
249
+ }
250
+
251
+ # Print tool call for CLI visibility
252
+ print(f"\n[Calling: {tool_name}]", flush=True)
253
+
254
+ # Capture tool call end (update arguments if changed)
255
+ elif isinstance(event, PartEndEvent) and isinstance(event.part, ToolCallPart):
256
+ pass # Arguments already captured at start
257
+
258
+ # Handle tool execution nodes (results)
259
+ elif Agent.is_call_tools_node(node):
260
+ async with node.stream(agent_run.ctx) as tools_stream:
261
+ async for event in tools_stream:
262
+ if isinstance(event, FunctionToolResultEvent):
263
+ # Get tool info from pending queue
264
+ if pending_tool_completions:
265
+ tool_name, tool_id = pending_tool_completions.pop(0)
266
+ else:
267
+ import uuid
268
+ tool_name = "tool"
269
+ tool_id = f"call_{uuid.uuid4().hex[:8]}"
270
+
271
+ result_content = event.result.content if hasattr(event.result, 'content') else event.result
272
+
273
+ # Capture tool call for persistence
274
+ if tool_id in pending_tool_data:
275
+ tool_data = pending_tool_data[tool_id]
276
+ tool_data["result"] = result_content
277
+ tool_calls.append(tool_data)
278
+ del pending_tool_data[tool_id]
279
+
280
+ # Get final result
281
+ result = agent_run.result
192
282
 
193
283
  # Extract output data
194
284
  output_data = None
195
285
  assistant_content = None
196
- if hasattr(result, "output"):
286
+ if result is not None and hasattr(result, "output"):
197
287
  output = result.output
198
288
  from rem.agentic.serialization import serialize_agent_result
199
289
  output_data = serialize_agent_result(output)
200
290
 
201
291
  if plan and isinstance(output_data, dict) and "query" in output_data:
202
- # Plan mode: Output only the query
203
- # Use sql formatting if possible or just raw string
204
292
  assistant_content = output_data["query"]
205
293
  print(assistant_content)
206
294
  else:
207
- # Normal mode
208
- assistant_content = json.dumps(output_data, indent=2)
295
+ # For string output, use it directly
296
+ if isinstance(output_data, str):
297
+ assistant_content = output_data
298
+ else:
299
+ assistant_content = json.dumps(output_data, indent=2)
209
300
  print(assistant_content)
210
301
  else:
211
- # Fallback for text-only results
212
- assistant_content = str(result)
213
- print(assistant_content)
302
+ assistant_content = str(result) if result else ""
303
+ if assistant_content:
304
+ print(assistant_content)
214
305
 
215
306
  # Save to file if requested
216
307
  if output_file and output_data:
217
308
  await _save_output_file(output_file, output_data)
218
309
 
219
- # Save session messages (if session_id provided and postgres enabled)
310
+ # Save session messages including tool calls (same as streaming code path)
220
311
  if context and context.session_id and settings.postgres.enabled:
221
312
  from ...services.session.compression import SessionMessageStore
222
313
 
223
- # Extract just the user query from prompt
224
- # Prompt format from ContextBuilder: system + history + user message
225
- # We need to extract the last user message
226
- user_message_content = prompt.split("\n\n")[-1] if "\n\n" in prompt else prompt
314
+ timestamp = to_iso_with_z(utc_now())
315
+ messages_to_store = []
227
316
 
228
- user_message = {
317
+ # Save user message first
318
+ user_message_content = user_message or (prompt.split("\n\n")[-1] if "\n\n" in prompt else prompt)
319
+ messages_to_store.append({
229
320
  "role": "user",
230
321
  "content": user_message_content,
231
- "timestamp": to_iso_with_z(utc_now()),
232
- }
233
-
234
- assistant_message = {
235
- "role": "assistant",
236
- "content": assistant_content,
237
- "timestamp": to_iso_with_z(utc_now()),
238
- }
239
-
240
- # Store messages with compression
322
+ "timestamp": timestamp,
323
+ })
324
+
325
+ # Save tool call messages (message_type: "tool") - CRITICAL for state tracking
326
+ for tool_call in tool_calls:
327
+ if not tool_call:
328
+ continue
329
+ tool_message = {
330
+ "role": "tool",
331
+ "content": json.dumps(tool_call.get("result", {}), default=str),
332
+ "timestamp": timestamp,
333
+ "tool_call_id": tool_call.get("tool_id"),
334
+ "tool_name": tool_call.get("tool_name"),
335
+ "tool_arguments": tool_call.get("arguments"),
336
+ }
337
+ messages_to_store.append(tool_message)
338
+
339
+ # Save assistant message
340
+ if assistant_content:
341
+ messages_to_store.append({
342
+ "role": "assistant",
343
+ "content": assistant_content,
344
+ "timestamp": timestamp,
345
+ })
346
+
347
+ # Store all messages
241
348
  store = SessionMessageStore(user_id=context.user_id or settings.test.effective_user_id)
242
349
  await store.store_session_messages(
243
350
  session_id=context.session_id,
244
- messages=[user_message, assistant_message],
351
+ messages=messages_to_store,
245
352
  user_id=context.user_id,
246
- compress=True,
353
+ compress=False, # Store uncompressed; compression happens on reload
247
354
  )
248
355
 
249
- logger.debug(f"Saved conversation to session {context.session_id}")
356
+ logger.debug(
357
+ f"Saved {len(tool_calls)} tool calls + user/assistant messages "
358
+ f"to session {context.session_id}"
359
+ )
250
360
 
251
361
  return output_data
252
362
 
@@ -332,8 +442,8 @@ async def _save_output_file(file_path: Path, data: dict[str, Any]) -> None:
332
442
  )
333
443
  @click.option(
334
444
  "--stream/--no-stream",
335
- default=False,
336
- help="Enable streaming mode (default: disabled)",
445
+ default=True,
446
+ help="Enable streaming mode (default: enabled)",
337
447
  )
338
448
  @click.option(
339
449
  "--user-id",
@@ -538,6 +648,7 @@ async def _ask_async(
538
648
  output_file=output_file,
539
649
  context=context,
540
650
  plan=plan,
651
+ user_message=query,
541
652
  )
542
653
 
543
654
  # Log session ID for reuse
@@ -1568,7 +1568,7 @@ def export(
1568
1568
  rem experiments export my-experiment
1569
1569
 
1570
1570
  # Export to specific bucket
1571
- rem experiments export my-experiment --bucket siggy-data
1571
+ rem experiments export my-experiment --bucket my-data-lake
1572
1572
 
1573
1573
  # Include results in export
1574
1574
  rem experiments export my-experiment --include-results
@@ -193,7 +193,15 @@ def process_ingest(
193
193
  try:
194
194
  # Read file content
195
195
  content = file_path.read_text(encoding="utf-8")
196
- entity_key = file_path.stem # filename without extension
196
+
197
+ # Generate entity key from filename
198
+ # Special case: README files use parent directory as section name
199
+ if file_path.stem.lower() == "readme":
200
+ # Use parent directory name, e.g., "drugs" for drugs/README.md
201
+ # For nested paths like disorders/anxiety/README.md -> "anxiety"
202
+ entity_key = file_path.parent.name
203
+ else:
204
+ entity_key = file_path.stem # filename without extension
197
205
 
198
206
  # Build entity based on table
199
207
  entity_data = {
@@ -0,0 +1,109 @@
1
+ """
2
+ REM query command.
3
+
4
+ Usage:
5
+ rem query --sql 'LOOKUP "Sarah Chen"'
6
+ rem query --sql 'SEARCH resources "API design" LIMIT 10'
7
+ rem query --sql "SELECT * FROM resources LIMIT 5"
8
+ rem query --file queries/my_query.sql
9
+
10
+ This tool connects to the configured PostgreSQL instance and executes the
11
+ provided REM dialect query, printing results as JSON (default) or plain dicts.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import asyncio
17
+ import json
18
+ from pathlib import Path
19
+ from typing import List
20
+
21
+ import click
22
+ from loguru import logger
23
+
24
+ from ...services.rem import QueryExecutionError
25
+ from ...services.rem.service import RemService
26
+
27
+
28
+ @click.command("query")
29
+ @click.option("--sql", "-s", default=None, help="REM query string (LOOKUP, SEARCH, FUZZY, TRAVERSE, or SQL)")
30
+ @click.option(
31
+ "--file",
32
+ "-f",
33
+ "sql_file",
34
+ type=click.Path(exists=True, path_type=Path),
35
+ default=None,
36
+ help="Path to file containing REM query",
37
+ )
38
+ @click.option("--no-json", is_flag=True, default=False, help="Print rows as Python dicts instead of JSON")
39
+ @click.option("--user-id", "-u", default=None, help="Scope query to a specific user")
40
+ def query_command(sql: str | None, sql_file: Path | None, no_json: bool, user_id: str | None):
41
+ """
42
+ Execute a REM query against the database.
43
+
44
+ Supports REM dialect queries (LOOKUP, SEARCH, FUZZY, TRAVERSE) and raw SQL.
45
+ Either --sql or --file must be provided.
46
+ """
47
+ if not sql and not sql_file:
48
+ click.secho("Error: either --sql or --file is required", fg="red")
49
+ raise click.Abort()
50
+
51
+ # Read query from file if provided
52
+ if sql_file:
53
+ query_text = sql_file.read_text(encoding="utf-8")
54
+ else:
55
+ query_text = sql # type: ignore[assignment]
56
+
57
+ try:
58
+ asyncio.run(_run_query_async(query_text, not no_json, user_id))
59
+ except Exception as exc: # pragma: no cover - CLI error path
60
+ logger.exception("Query failed")
61
+ click.secho(f"✗ Query failed: {exc}", fg="red")
62
+ raise click.Abort()
63
+
64
+
65
+ async def _run_query_async(query_text: str, as_json: bool, user_id: str | None) -> None:
66
+ """
67
+ Execute the query using RemService.execute_query_string().
68
+ """
69
+ from ...services.postgres import get_postgres_service
70
+
71
+ db = get_postgres_service()
72
+ if not db:
73
+ click.secho("✗ PostgreSQL is disabled in settings. Enable with POSTGRES__ENABLED=true", fg="red")
74
+ raise click.Abort()
75
+
76
+ if db.pool is None:
77
+ await db.connect()
78
+
79
+ rem_service = RemService(db)
80
+
81
+ try:
82
+ # Use the unified execute_query_string method
83
+ result = await rem_service.execute_query_string(query_text, user_id=user_id)
84
+ output_rows = result.get("results", [])
85
+ except QueryExecutionError as qe:
86
+ logger.exception("Query execution failed")
87
+ click.secho(f"✗ Query execution failed: {qe}. Please check the query you provided and try again.", fg="red")
88
+ raise click.Abort()
89
+ except ValueError as ve:
90
+ # Parse errors from the query parser
91
+ click.secho(f"✗ Invalid query: {ve}", fg="red")
92
+ raise click.Abort()
93
+ except Exception as exc: # pragma: no cover - CLI error path
94
+ logger.exception("Unexpected error during query execution")
95
+ click.secho("✗ An unexpected error occurred while executing the query. Please check the query you provided and try again.", fg="red")
96
+ raise click.Abort()
97
+
98
+ if as_json:
99
+ click.echo(json.dumps(output_rows, default=str, indent=2))
100
+ else:
101
+ for r in output_rows:
102
+ click.echo(str(r))
103
+
104
+
105
+ def register_command(cli_group):
106
+ """Register the query command on the given CLI group (top-level)."""
107
+ cli_group.add_command(query_command)
108
+
109
+