remdb-0.3.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of remdb has been flagged as possibly problematic.
- rem/__init__.py +2 -0
- rem/agentic/README.md +650 -0
- rem/agentic/__init__.py +39 -0
- rem/agentic/agents/README.md +155 -0
- rem/agentic/agents/__init__.py +8 -0
- rem/agentic/context.py +148 -0
- rem/agentic/context_builder.py +329 -0
- rem/agentic/mcp/__init__.py +0 -0
- rem/agentic/mcp/tool_wrapper.py +107 -0
- rem/agentic/otel/__init__.py +5 -0
- rem/agentic/otel/setup.py +151 -0
- rem/agentic/providers/phoenix.py +674 -0
- rem/agentic/providers/pydantic_ai.py +572 -0
- rem/agentic/query.py +117 -0
- rem/agentic/query_helper.py +89 -0
- rem/agentic/schema.py +396 -0
- rem/agentic/serialization.py +245 -0
- rem/agentic/tools/__init__.py +5 -0
- rem/agentic/tools/rem_tools.py +231 -0
- rem/api/README.md +420 -0
- rem/api/main.py +324 -0
- rem/api/mcp_router/prompts.py +182 -0
- rem/api/mcp_router/resources.py +536 -0
- rem/api/mcp_router/server.py +213 -0
- rem/api/mcp_router/tools.py +584 -0
- rem/api/routers/auth.py +229 -0
- rem/api/routers/chat/__init__.py +5 -0
- rem/api/routers/chat/completions.py +281 -0
- rem/api/routers/chat/json_utils.py +76 -0
- rem/api/routers/chat/models.py +124 -0
- rem/api/routers/chat/streaming.py +185 -0
- rem/auth/README.md +258 -0
- rem/auth/__init__.py +26 -0
- rem/auth/middleware.py +100 -0
- rem/auth/providers/__init__.py +13 -0
- rem/auth/providers/base.py +376 -0
- rem/auth/providers/google.py +163 -0
- rem/auth/providers/microsoft.py +237 -0
- rem/cli/README.md +455 -0
- rem/cli/__init__.py +8 -0
- rem/cli/commands/README.md +126 -0
- rem/cli/commands/__init__.py +3 -0
- rem/cli/commands/ask.py +566 -0
- rem/cli/commands/configure.py +497 -0
- rem/cli/commands/db.py +493 -0
- rem/cli/commands/dreaming.py +324 -0
- rem/cli/commands/experiments.py +1302 -0
- rem/cli/commands/mcp.py +66 -0
- rem/cli/commands/process.py +245 -0
- rem/cli/commands/schema.py +183 -0
- rem/cli/commands/serve.py +106 -0
- rem/cli/dreaming.py +363 -0
- rem/cli/main.py +96 -0
- rem/config.py +237 -0
- rem/mcp_server.py +41 -0
- rem/models/core/__init__.py +49 -0
- rem/models/core/core_model.py +64 -0
- rem/models/core/engram.py +333 -0
- rem/models/core/experiment.py +628 -0
- rem/models/core/inline_edge.py +132 -0
- rem/models/core/rem_query.py +243 -0
- rem/models/entities/__init__.py +43 -0
- rem/models/entities/file.py +57 -0
- rem/models/entities/image_resource.py +88 -0
- rem/models/entities/message.py +35 -0
- rem/models/entities/moment.py +123 -0
- rem/models/entities/ontology.py +191 -0
- rem/models/entities/ontology_config.py +131 -0
- rem/models/entities/resource.py +95 -0
- rem/models/entities/schema.py +87 -0
- rem/models/entities/user.py +85 -0
- rem/py.typed +0 -0
- rem/schemas/README.md +507 -0
- rem/schemas/__init__.py +6 -0
- rem/schemas/agents/README.md +92 -0
- rem/schemas/agents/core/moment-builder.yaml +178 -0
- rem/schemas/agents/core/rem-query-agent.yaml +226 -0
- rem/schemas/agents/core/resource-affinity-assessor.yaml +99 -0
- rem/schemas/agents/core/simple-assistant.yaml +19 -0
- rem/schemas/agents/core/user-profile-builder.yaml +163 -0
- rem/schemas/agents/examples/contract-analyzer.yaml +317 -0
- rem/schemas/agents/examples/contract-extractor.yaml +134 -0
- rem/schemas/agents/examples/cv-parser.yaml +263 -0
- rem/schemas/agents/examples/hello-world.yaml +37 -0
- rem/schemas/agents/examples/query.yaml +54 -0
- rem/schemas/agents/examples/simple.yaml +21 -0
- rem/schemas/agents/examples/test.yaml +29 -0
- rem/schemas/agents/rem.yaml +128 -0
- rem/schemas/evaluators/hello-world/default.yaml +77 -0
- rem/schemas/evaluators/rem/faithfulness.yaml +219 -0
- rem/schemas/evaluators/rem/lookup-correctness.yaml +182 -0
- rem/schemas/evaluators/rem/retrieval-precision.yaml +199 -0
- rem/schemas/evaluators/rem/retrieval-recall.yaml +211 -0
- rem/schemas/evaluators/rem/search-correctness.yaml +192 -0
- rem/services/__init__.py +16 -0
- rem/services/audio/INTEGRATION.md +308 -0
- rem/services/audio/README.md +376 -0
- rem/services/audio/__init__.py +15 -0
- rem/services/audio/chunker.py +354 -0
- rem/services/audio/transcriber.py +259 -0
- rem/services/content/README.md +1269 -0
- rem/services/content/__init__.py +5 -0
- rem/services/content/providers.py +806 -0
- rem/services/content/service.py +676 -0
- rem/services/dreaming/README.md +230 -0
- rem/services/dreaming/__init__.py +53 -0
- rem/services/dreaming/affinity_service.py +336 -0
- rem/services/dreaming/moment_service.py +264 -0
- rem/services/dreaming/ontology_service.py +54 -0
- rem/services/dreaming/user_model_service.py +297 -0
- rem/services/dreaming/utils.py +39 -0
- rem/services/embeddings/__init__.py +11 -0
- rem/services/embeddings/api.py +120 -0
- rem/services/embeddings/worker.py +421 -0
- rem/services/fs/README.md +662 -0
- rem/services/fs/__init__.py +62 -0
- rem/services/fs/examples.py +206 -0
- rem/services/fs/examples_paths.py +204 -0
- rem/services/fs/git_provider.py +935 -0
- rem/services/fs/local_provider.py +760 -0
- rem/services/fs/parsing-hooks-examples.md +172 -0
- rem/services/fs/paths.py +276 -0
- rem/services/fs/provider.py +460 -0
- rem/services/fs/s3_provider.py +1042 -0
- rem/services/fs/service.py +186 -0
- rem/services/git/README.md +1075 -0
- rem/services/git/__init__.py +17 -0
- rem/services/git/service.py +469 -0
- rem/services/phoenix/EXPERIMENT_DESIGN.md +1146 -0
- rem/services/phoenix/README.md +453 -0
- rem/services/phoenix/__init__.py +46 -0
- rem/services/phoenix/client.py +686 -0
- rem/services/phoenix/config.py +88 -0
- rem/services/phoenix/prompt_labels.py +477 -0
- rem/services/postgres/README.md +575 -0
- rem/services/postgres/__init__.py +23 -0
- rem/services/postgres/migration_service.py +427 -0
- rem/services/postgres/pydantic_to_sqlalchemy.py +232 -0
- rem/services/postgres/register_type.py +352 -0
- rem/services/postgres/repository.py +337 -0
- rem/services/postgres/schema_generator.py +379 -0
- rem/services/postgres/service.py +802 -0
- rem/services/postgres/sql_builder.py +354 -0
- rem/services/rem/README.md +304 -0
- rem/services/rem/__init__.py +23 -0
- rem/services/rem/exceptions.py +71 -0
- rem/services/rem/executor.py +293 -0
- rem/services/rem/parser.py +145 -0
- rem/services/rem/queries.py +196 -0
- rem/services/rem/query.py +371 -0
- rem/services/rem/service.py +527 -0
- rem/services/session/README.md +374 -0
- rem/services/session/__init__.py +6 -0
- rem/services/session/compression.py +360 -0
- rem/services/session/reload.py +77 -0
- rem/settings.py +1235 -0
- rem/sql/002_install_models.sql +1068 -0
- rem/sql/background_indexes.sql +42 -0
- rem/sql/install_models.sql +1038 -0
- rem/sql/migrations/001_install.sql +503 -0
- rem/sql/migrations/002_install_models.sql +1202 -0
- rem/utils/AGENTIC_CHUNKING.md +597 -0
- rem/utils/README.md +583 -0
- rem/utils/__init__.py +43 -0
- rem/utils/agentic_chunking.py +622 -0
- rem/utils/batch_ops.py +343 -0
- rem/utils/chunking.py +108 -0
- rem/utils/clip_embeddings.py +276 -0
- rem/utils/dict_utils.py +98 -0
- rem/utils/embeddings.py +423 -0
- rem/utils/examples/embeddings_example.py +305 -0
- rem/utils/examples/sql_types_example.py +202 -0
- rem/utils/markdown.py +16 -0
- rem/utils/model_helpers.py +236 -0
- rem/utils/schema_loader.py +336 -0
- rem/utils/sql_types.py +348 -0
- rem/utils/user_id.py +81 -0
- rem/utils/vision.py +330 -0
- rem/workers/README.md +506 -0
- rem/workers/__init__.py +5 -0
- rem/workers/dreaming.py +502 -0
- rem/workers/engram_processor.py +312 -0
- rem/workers/sqs_file_processor.py +193 -0
- remdb-0.3.0.dist-info/METADATA +1455 -0
- remdb-0.3.0.dist-info/RECORD +187 -0
- remdb-0.3.0.dist-info/WHEEL +4 -0
- remdb-0.3.0.dist-info/entry_points.txt +2 -0
rem/cli/commands/README.md
ADDED
@@ -0,0 +1,126 @@
# REM CLI Commands

## Configuration (`rem configure`)

Interactive configuration wizard for REM setup.

### Quick Start

```bash
# Basic configuration (creates ~/.rem/config.yaml)
rem configure

# Configure + install database tables
rem configure --install

# Configure + install + register with Claude Desktop
rem configure --install --claude-desktop
```

### Managing Configuration

```bash
# View current configuration
rem configure --show

# Edit configuration file
rem configure --edit  # Opens in $EDITOR (defaults to vim)

# Or edit manually
vim ~/.rem/config.yaml
```

### Configuration File Structure

`~/.rem/config.yaml`:

```yaml
postgres:
  connection_string: postgresql://user:pass@localhost:5432/rem
  pool_min_size: 5
  pool_max_size: 20

llm:
  default_model: anthropic:claude-sonnet-4-5-20250929
  default_temperature: 0.5
  openai_api_key: sk-...
  anthropic_api_key: sk-ant-...

s3:
  bucket_name: rem-storage
  region: us-east-1
  # Optional: for MinIO/LocalStack
  endpoint_url: http://localhost:9000
  access_key_id: minioadmin
  secret_access_key: minioadmin
```

### Environment Variables

All configuration can be overridden via environment variables using a double-underscore delimiter:

```bash
# Postgres
export POSTGRES__CONNECTION_STRING=postgresql://user:pass@host:5432/db
export POSTGRES__POOL_MIN_SIZE=5
export POSTGRES__POOL_MAX_SIZE=20

# LLM
export LLM__DEFAULT_MODEL=anthropic:claude-sonnet-4-5-20250929
export LLM__OPENAI_API_KEY=sk-...
export LLM__ANTHROPIC_API_KEY=sk-ant-...

# S3
export S3__BUCKET_NAME=rem-storage
export S3__REGION=us-east-1
```

### Configuration Precedence

1. **Environment variables** (highest priority)
2. **Configuration file** (`~/.rem/config.yaml`)
3. **Default values** (from `rem/settings.py`)
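This precedence follows the standard pydantic-settings nested-delimiter pattern. A minimal sketch of how the `__` delimiter maps environment variables onto nested settings (illustrative only; the actual models, defaults, and config-file loading live in `rem/settings.py` and may differ):

```python
# Illustrative sketch of the double-underscore override pattern (pydantic-settings).
# Assumption: rem/settings.py defines nested settings models along these lines.
from pydantic import BaseModel
from pydantic_settings import BaseSettings, SettingsConfigDict


class PostgresSettings(BaseModel):
    connection_string: str = "postgresql://localhost:5432/rem"  # default (lowest priority)
    pool_min_size: int = 5
    pool_max_size: int = 20


class Settings(BaseSettings):
    # "__" splits env var names into nested fields:
    # POSTGRES__POOL_MAX_SIZE=50  ->  settings.postgres.pool_max_size == 50
    model_config = SettingsConfigDict(env_nested_delimiter="__")

    postgres: PostgresSettings = PostgresSettings()


settings = Settings()  # environment variables win over the defaults declared above
```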
### Docker/Kubernetes

In containerized environments, use environment variables exclusively:

```yaml
# docker-compose.yml
services:
  rem-api:
    image: rem:latest
    environment:
      POSTGRES__CONNECTION_STRING: postgresql://rem:rem@postgres:5432/rem
      LLM__OPENAI_API_KEY: ${OPENAI_API_KEY}
```

```yaml
# Kubernetes ConfigMap/Secret
apiVersion: v1
kind: Secret
metadata:
  name: rem-secrets
stringData:
  POSTGRES__CONNECTION_STRING: postgresql://rem:rem@postgres:5432/rem
  LLM__OPENAI_API_KEY: sk-...
```

## Other Commands

- **`rem ask`** - Interactive chat with REM memory
- **`rem serve`** - Start FastAPI server
- **`rem db`** - Database management (migrate, seed, etc.)
- **`rem schema`** - Schema generation and validation
- **`rem mcp`** - MCP server commands
- **`rem dreaming`** - Background knowledge processing
- **`rem process`** - File processing utilities
- **`rem experiments`** - Experiment management (datasets, prompts, traces, runs)

Run `rem COMMAND --help` for detailed usage of each command.

## See Also

- [README.md](../../../../../README.md) - Main documentation
- [CLAUDE.md](../../../../../CLAUDE.md) - Architecture overview
- [settings.py](../../settings.py) - All available settings
rem/cli/commands/ask.py
ADDED
@@ -0,0 +1,566 @@
"""
CLI command for testing Pydantic AI agents.

Usage:
    rem ask query-agent "Find all documents by Sarah" --model anthropic:claude-sonnet-4-5-20250929
    rem ask schemas/query-agent.yaml "What is the weather?" --temperature 0.7 --max-turns 5
    rem ask my-agent "Hello" --stream --version 1.2.0
"""

import asyncio
import json
import sys
from pathlib import Path
from typing import Any

import click
from loguru import logger

from ...agentic.context import AgentContext
from ...agentic.providers.pydantic_ai import create_agent
from ...agentic.query import AgentQuery
from ...settings import settings
from ...utils.schema_loader import load_agent_schema


async def load_schema_from_registry(
    name: str, version: str | None = None
) -> dict[str, Any]:
    """
    Load agent schema from registry (database or cache).

    TODO: Implement schema registry with:
    - Database table: agent_schemas (name, version, schema_json, created_at)
    - Cache layer: Redis/in-memory for fast lookups
    - Versioning: semantic versioning with latest fallback

    Args:
        name: Schema name (e.g., "query-agent", "rem-agents-query-agent")
        version: Optional version (e.g., "1.2.0", defaults to latest)

    Returns:
        Agent schema as dictionary

    Example:
        schema = await load_schema_from_registry("query-agent", version="1.0.0")
    """
    # TODO: Implement database/cache lookup
    # from ...db import get_db_pool
    # async with get_db_pool() as pool:
    #     if version:
    #         query = "SELECT schema_json FROM agent_schemas WHERE name = $1 AND version = $2"
    #         row = await pool.fetchrow(query, name, version)
    #     else:
    #         query = "SELECT schema_json FROM agent_schemas WHERE name = $1 ORDER BY created_at DESC LIMIT 1"
    #         row = await pool.fetchrow(query, name)
    #
    #     if not row:
    #         raise ValueError(f"Schema not found: {name} (version: {version or 'latest'})")
    #
    #     return json.loads(row["schema_json"])

    raise NotImplementedError(
        f"Schema registry not implemented yet. Please use a file path instead.\n"
        f"Attempted to load: {name} (version: {version or 'latest'})"
    )


async def run_agent_streaming(
    agent,
    prompt: str,
    max_turns: int = 10,
    context: AgentContext | None = None,
    max_iterations: int | None = None,
) -> None:
    """
    Run agent in streaming mode using agent.iter() with usage limits.

    Design Pattern (from carrier):
    - Use agent.iter() for complete execution with tool call visibility
    - run_stream() stops after first output, missing tool calls
    - Stream tool call markers: [Calling: tool_name]
    - Stream text content deltas as they arrive
    - Show final structured result

    Args:
        agent: Pydantic AI agent
        prompt: Complete prompt (includes system context + history + query)
        max_turns: Maximum turns for agent execution (not used in current API)
        context: Optional AgentContext for session persistence
        max_iterations: Maximum iterations/requests (from agent schema or settings)
    """
    from datetime import datetime, timezone
    from pydantic_ai import UsageLimits

    logger.info("Running agent in streaming mode...")

    try:
        # Import event types for streaming
        from pydantic_ai import Agent as PydanticAgent
        from pydantic_ai.messages import PartStartEvent, PartDeltaEvent, TextPartDelta, ToolCallPart

        # Accumulate assistant response for session persistence
        assistant_response_parts = []

        # Use agent.iter() to get complete execution with tool calls
        usage_limits = UsageLimits(request_limit=max_iterations) if max_iterations else None
        async with agent.iter(prompt, usage_limits=usage_limits) as agent_run:
            async for node in agent_run:
                # Check if this is a model request node (includes tool calls and text)
                if PydanticAgent.is_model_request_node(node):
                    # Stream events from model request
                    request_stream: Any
                    async with node.stream(agent_run.ctx) as request_stream:
                        async for event in request_stream:
                            # Tool call start event
                            if isinstance(event, PartStartEvent) and isinstance(
                                event.part, ToolCallPart
                            ):
                                tool_marker = f"\n[Calling: {event.part.tool_name}]"
                                print(tool_marker, flush=True)
                                assistant_response_parts.append(tool_marker)

                            # Text content delta
                            elif isinstance(event, PartDeltaEvent) and isinstance(
                                event.delta, TextPartDelta
                            ):
                                print(event.delta.content_delta, end="", flush=True)
                                assistant_response_parts.append(event.delta.content_delta)

        print("\n")  # Final newline after streaming

        # Get final result from agent_run
        result = agent_run.result
        if hasattr(result, "output"):
            logger.info("Final structured result:")
            output = result.output
            from rem.agentic.serialization import serialize_agent_result
            output_json = json.dumps(serialize_agent_result(output), indent=2)
            print(output_json)
            assistant_response_parts.append(f"\n{output_json}")

        # Save session messages (if session_id provided and postgres enabled)
        if context and context.session_id and settings.postgres.enabled:
            from ...services.session.compression import SessionMessageStore

            # Extract just the user query from prompt
            # Prompt format from ContextBuilder: system + history + user message
            # We need to extract the last user message
            user_message_content = prompt.split("\n\n")[-1] if "\n\n" in prompt else prompt

            user_message = {
                "role": "user",
                "content": user_message_content,
                "timestamp": datetime.now(timezone.utc).isoformat(),
            }

            assistant_message = {
                "role": "assistant",
                "content": "".join(assistant_response_parts),
                "timestamp": datetime.now(timezone.utc).isoformat(),
            }

            # Store messages with compression
            store = SessionMessageStore(user_id=context.user_id or settings.test.effective_user_id)
            await store.store_session_messages(
                session_id=context.session_id,
                messages=[user_message, assistant_message],
                user_id=context.user_id,
                compress=True,
            )

            logger.debug(f"Saved conversation to session {context.session_id}")

    except Exception as e:
        logger.error(f"Agent execution failed: {e}")
        raise
async def run_agent_non_streaming(
    agent,
    prompt: str,
    max_turns: int = 10,
    output_file: Path | None = None,
    context: AgentContext | None = None,
    plan: bool = False,
    max_iterations: int | None = None,
) -> dict[str, Any] | None:
    """
    Run agent in non-streaming mode using agent.run() with usage limits.

    Args:
        agent: Pydantic AI agent
        prompt: Complete prompt (includes system context + history + query)
        max_turns: Maximum turns for agent execution (not used in current API)
        output_file: Optional path to save output
        context: Optional AgentContext for session persistence
        plan: If True, output only the generated query (for query-agent)
        max_iterations: Maximum iterations/requests (from agent schema or settings)

    Returns:
        Output data if successful, None otherwise
    """
    from datetime import datetime, timezone
    from pydantic_ai import UsageLimits

    logger.info("Running agent in non-streaming mode...")

    try:
        # Run agent and get complete result with usage limits
        usage_limits = UsageLimits(request_limit=max_iterations) if max_iterations else None
        result = await agent.run(prompt, usage_limits=usage_limits)

        # Extract output data
        output_data = None
        assistant_content = None
        if hasattr(result, "output"):
            output = result.output
            from rem.agentic.serialization import serialize_agent_result
            output_data = serialize_agent_result(output)

            if plan and isinstance(output_data, dict) and "query" in output_data:
                # Plan mode: Output only the query
                # Use sql formatting if possible or just raw string
                assistant_content = output_data["query"]
                print(assistant_content)
            else:
                # Normal mode
                assistant_content = json.dumps(output_data, indent=2)
                print(assistant_content)
        else:
            # Fallback for text-only results
            assistant_content = str(result)
            print(assistant_content)

        # Save to file if requested
        if output_file and output_data:
            await _save_output_file(output_file, output_data)

        # Save session messages (if session_id provided and postgres enabled)
        if context and context.session_id and settings.postgres.enabled:
            from ...services.session.compression import SessionMessageStore

            # Extract just the user query from prompt
            # Prompt format from ContextBuilder: system + history + user message
            # We need to extract the last user message
            user_message_content = prompt.split("\n\n")[-1] if "\n\n" in prompt else prompt

            user_message = {
                "role": "user",
                "content": user_message_content,
                "timestamp": datetime.now(timezone.utc).isoformat(),
            }

            assistant_message = {
                "role": "assistant",
                "content": assistant_content,
                "timestamp": datetime.now(timezone.utc).isoformat(),
            }

            # Store messages with compression
            store = SessionMessageStore(user_id=context.user_id or settings.test.effective_user_id)
            await store.store_session_messages(
                session_id=context.session_id,
                messages=[user_message, assistant_message],
                user_id=context.user_id,
                compress=True,
            )

            logger.debug(f"Saved conversation to session {context.session_id}")

        return output_data

    except Exception as e:
        logger.error(f"Agent execution failed: {e}")
        raise


async def _load_input_file(
    file_path: Path, user_id: str | None = None
) -> str:
    """
    Load content from input file using ContentService.

    Simple parse operation - just extracts content without creating Resources.

    Args:
        file_path: Path to input file
        user_id: Optional user ID (not used for simple parse)

    Returns:
        Parsed file content as string (markdown format)
    """
    from ...services.content import ContentService

    # Create ContentService instance
    content_service = ContentService()

    # Parse file (read-only, no database writes)
    logger.info(f"Parsing file: {file_path}")
    result = content_service.process_uri(str(file_path))
    content = result["content"]

    logger.info(
        f"Loaded {len(content)} characters from {file_path.suffix} file using {result['provider']}"
    )
    return content


async def _save_output_file(file_path: Path, data: dict[str, Any]) -> None:
    """
    Save output data to file in YAML format.

    Args:
        file_path: Path to output file
        data: Data to save
    """
    import yaml

    with open(file_path, "w", encoding="utf-8") as f:
        yaml.safe_dump(data, f, default_flow_style=False, allow_unicode=True, sort_keys=False)

    logger.success(f"Output saved to: {file_path}")
@click.command()
@click.argument("name_or_query")
@click.argument("query", required=False)
@click.option(
    "--model",
    "-m",
    default=None,
    help=f"LLM model (default: {settings.llm.default_model})",
)
@click.option(
    "--temperature",
    "-t",
    type=float,
    default=None,
    help=f"Temperature for generation (default: {settings.llm.default_temperature})",
)
@click.option(
    "--max-turns",
    type=int,
    default=10,
    help="Maximum turns for agent execution (default: 10)",
)
@click.option(
    "--version",
    "-v",
    default=None,
    help="Schema version (for registry lookup, defaults to latest)",
)
@click.option(
    "--stream/--no-stream",
    default=False,
    help="Enable streaming mode (default: disabled)",
)
@click.option(
    "--user-id",
    default="test-user",
    help="User ID for context (default: test-user)",
)
@click.option(
    "--session-id",
    default=None,
    help="Session ID for context (default: auto-generated)",
)
@click.option(
    "--input-file",
    "-i",
    type=click.Path(exists=True, path_type=Path),
    default=None,
    help="Read input from file instead of QUERY argument (supports PDF, TXT, Markdown)",
)
@click.option(
    "--output-file",
    "-o",
    type=click.Path(path_type=Path),
    default=None,
    help="Write output to file (YAML format)",
)
@click.option(
    "--plan",
    is_flag=True,
    default=False,
    help="Output only the generated plan/query (useful for query-agent)",
)
def ask(
    name_or_query: str,
    query: str | None,
    model: str | None,
    temperature: float | None,
    max_turns: int,
    version: str | None,
    stream: bool,
    user_id: str,
    session_id: str | None,
    input_file: Path | None,
    output_file: Path | None,
    plan: bool,
):
    """
    Run an agent with a query or file input.

    Arguments:
        NAME_OR_QUERY: Agent schema name OR query string.
        QUERY: Query string (if first arg is agent name).

    Examples:
        # Simple query (uses default 'rem' agent)
        rem ask "What documents did I upload?"

        # Explicit agent
        rem ask contract-analyzer "Analyze this contract"

        # Process file
        rem ask contract-analyzer -i contract.pdf -o output.yaml
    """
    # Smart argument handling
    name = "rem"  # Default agent

    if query is None and not input_file:
        # Single argument provided
        # Heuristic: If it looks like a schema file or known agent, treat as name
        # Otherwise treat as query
        if name_or_query.endswith((".yaml", ".yml", ".json")) or name_or_query in ["rem", "query-agent", "rem-query-agent"]:
            # It's an agent name, query is missing (unless input_file)
            name = name_or_query
            # Query remains None, _ask_async will check input_file
        else:
            # It's a query, use default agent
            query = name_or_query
    elif query is not None:
        # Two arguments provided
        name = name_or_query

    asyncio.run(
        _ask_async(
            name=name,
            query=query,
            model=model,
            temperature=temperature,
            max_turns=max_turns,
            version=version,
            stream=stream,
            user_id=user_id,
            session_id=session_id,
            input_file=input_file,
            output_file=output_file,
            plan=plan,
        )
    )


async def _ask_async(
    name: str,
    query: str | None,
    model: str | None,
    temperature: float | None,
    max_turns: int,
    version: str | None,
    stream: bool,
    user_id: str,
    session_id: str | None,
    input_file: Path | None,
    output_file: Path | None,
    plan: bool,
):
    """Async implementation of ask command."""
    import uuid
    from ...agentic.context_builder import ContextBuilder

    # Validate input arguments
    if not query and not input_file:
        logger.error("Either QUERY argument or --input-file must be provided")
        sys.exit(1)

    if query and input_file:
        logger.error("Cannot use both QUERY argument and --input-file")
        sys.exit(1)

    # Load input from file if specified
    if input_file:
        logger.info(f"Loading input from file: {input_file}")
        query = await _load_input_file(input_file, user_id=user_id)

    # Load schema using centralized utility
    # Handles both file paths and schema names automatically
    # Falls back to database lookup if not found in filesystem
    logger.info(f"Loading schema: {name} (version: {version or 'latest'})")
    try:
        schema = load_agent_schema(name, user_id=user_id)
    except FileNotFoundError as e:
        logger.error(str(e))
        sys.exit(1)

    # Generate session ID if not provided
    if not session_id:
        session_id = str(uuid.uuid4())
        logger.info(f"Generated session ID: {session_id}")

    # Build context with session history using ContextBuilder
    # This provides:
    # - System context message with date and user profile hints
    # - Compressed session history (if session exists)
    # - Proper message structure for agent
    logger.info(f"Building context for user {user_id}, session {session_id}")

    # Prepare new message for ContextBuilder
    new_messages = [{"role": "user", "content": query}]

    # Build context with session history
    context, messages = await ContextBuilder.build_from_headers(
        headers={
            "X-User-Id": user_id,
            "X-Session-Id": session_id,
        },
        new_messages=new_messages,
    )

    # Override model if specified via CLI flag
    if model:
        context.default_model = model

    logger.info(
        f"Creating agent: model={context.default_model}, stream={stream}, max_turns={max_turns}, messages={len(messages)}"
    )

    # Create agent
    agent = await create_agent(
        context=context,
        agent_schema_override=schema,
        model_override=model,
    )

    # Temperature is now handled in agent factory (schema override or settings default)
    if temperature is not None:
        logger.warning(
            f"CLI temperature override ({temperature}) not yet supported. "
            "Use agent schema 'override_temperature' field or LLM__DEFAULT_TEMPERATURE setting."
        )

    # Combine messages into single prompt
    # ContextBuilder already assembled: system context + history + new message
    prompt = "\n\n".join(msg.content for msg in messages)

    # Run agent with session persistence
    if stream:
        await run_agent_streaming(agent, prompt, max_turns=max_turns, context=context)
    else:
        await run_agent_non_streaming(
            agent,
            prompt,
            max_turns=max_turns,
            output_file=output_file,
            context=context,
            plan=plan,
        )

    # Log session ID for reuse
    logger.success(f"Session ID: {session_id} (use --session-id to continue this conversation)")


def register_command(parent_group):
    """Register ask command with parent CLI group."""
    parent_group.add_command(ask)