remdb 0.3.103__py3-none-any.whl → 0.3.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of remdb might be problematic.
- rem/agentic/context.py +28 -24
- rem/agentic/mcp/tool_wrapper.py +29 -3
- rem/agentic/otel/setup.py +92 -4
- rem/agentic/providers/pydantic_ai.py +88 -18
- rem/agentic/schema.py +358 -21
- rem/agentic/tools/rem_tools.py +3 -3
- rem/api/main.py +85 -16
- rem/api/mcp_router/resources.py +1 -1
- rem/api/mcp_router/server.py +18 -4
- rem/api/mcp_router/tools.py +383 -16
- rem/api/routers/admin.py +218 -1
- rem/api/routers/chat/completions.py +30 -3
- rem/api/routers/chat/streaming.py +143 -3
- rem/api/routers/feedback.py +12 -319
- rem/api/routers/query.py +360 -0
- rem/api/routers/shared_sessions.py +13 -13
- rem/cli/commands/README.md +237 -64
- rem/cli/commands/cluster.py +1300 -0
- rem/cli/commands/configure.py +1 -3
- rem/cli/commands/db.py +354 -143
- rem/cli/commands/process.py +14 -8
- rem/cli/commands/schema.py +92 -45
- rem/cli/main.py +27 -6
- rem/models/core/rem_query.py +5 -2
- rem/models/entities/shared_session.py +2 -28
- rem/registry.py +10 -4
- rem/services/content/service.py +30 -8
- rem/services/embeddings/api.py +4 -4
- rem/services/embeddings/worker.py +16 -16
- rem/services/postgres/README.md +151 -26
- rem/services/postgres/__init__.py +2 -1
- rem/services/postgres/diff_service.py +531 -0
- rem/services/postgres/pydantic_to_sqlalchemy.py +427 -129
- rem/services/postgres/schema_generator.py +205 -4
- rem/services/postgres/service.py +6 -6
- rem/services/rem/parser.py +44 -9
- rem/services/rem/service.py +36 -2
- rem/services/session/reload.py +1 -1
- rem/settings.py +56 -7
- rem/sql/background_indexes.sql +19 -24
- rem/sql/migrations/001_install.sql +252 -69
- rem/sql/migrations/002_install_models.sql +2171 -593
- rem/sql/migrations/003_optional_extensions.sql +326 -0
- rem/sql/migrations/004_cache_system.sql +548 -0
- rem/utils/__init__.py +18 -0
- rem/utils/date_utils.py +2 -2
- rem/utils/schema_loader.py +17 -13
- rem/utils/sql_paths.py +146 -0
- rem/workers/__init__.py +2 -1
- rem/workers/unlogged_maintainer.py +463 -0
- {remdb-0.3.103.dist-info → remdb-0.3.118.dist-info}/METADATA +149 -76
- {remdb-0.3.103.dist-info → remdb-0.3.118.dist-info}/RECORD +54 -48
- rem/sql/migrations/003_seed_default_user.sql +0 -48
- {remdb-0.3.103.dist-info → remdb-0.3.118.dist-info}/WHEEL +0 -0
- {remdb-0.3.103.dist-info → remdb-0.3.118.dist-info}/entry_points.txt +0 -0
rem/agentic/context.py
CHANGED
@@ -73,43 +73,47 @@ class AgentContext(BaseModel):
         user_id: str | None,
         source: str = "context",
         default: str | None = None,
-    ) -> str:
+    ) -> str | None:
         """
-        Get user_id or
+        Get user_id or return None for anonymous access.

-
-
+        User ID convention:
+        - user_id is a deterministic UUID5 hash of the user's email address
+        - Use rem.utils.user_id.email_to_user_id(email) to generate
+        - The JWT's `sub` claim is NOT directly used as user_id
+        - Authentication middleware extracts email from JWT and hashes it
+
+        When user_id is None, queries return data with user_id IS NULL
+        (shared/public data). This is intentional - no fake user IDs.

         Args:
-            user_id: User identifier (may be None)
+            user_id: User identifier (UUID5 hash of email, may be None for anonymous)
             source: Source of the call (for logging clarity)
-            default:
+            default: Explicit default (only for testing, not auto-generated)

         Returns:
-            user_id if provided,
+            user_id if provided, explicit default if provided, otherwise None

         Example:
-            #
-            user_id
-
-
+            # Generate user_id from email (done by auth middleware)
+            from rem.utils.user_id import email_to_user_id
+            user_id = email_to_user_id("alice@example.com")
+            # -> "2c5ea4c0-4067-5fef-942d-0a20124e06d8"

-            # In
+            # In MCP tool - anonymous user sees shared data
             user_id = AgentContext.get_user_id_or_default(
-
-            )
-
-            # In CLI command
-            user_id = AgentContext.get_user_id_or_default(
-                args.user_id, source="rem ask"
+                user_id, source="ask_rem_agent"
             )
+            # Returns None if not authenticated -> queries WHERE user_id IS NULL
         """
-        if user_id is None:
-
-
-            logger.debug(f"
-            return
-            return
+        if user_id is not None:
+            return user_id
+        if default is not None:
+            logger.debug(f"Using explicit default user_id '{default}' from {source}")
+            return default
+        # No fake user IDs - return None for anonymous/unauthenticated
+        logger.debug(f"No user_id from {source}, using None (anonymous/shared data)")
+        return None

     @classmethod
     def from_headers(cls, headers: dict[str, str]) -> "AgentContext":
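A minimal, stdlib-only sketch of the user_id convention described in the new docstring. The real helper lives in rem.utils.user_id and its UUID namespace is not shown in this diff, so NAMESPACE_DNS below is an assumption; the fall-through mirrors the new get_user_id_or_default logic.

import uuid

# Assumption: the diff does not show which namespace rem.utils.user_id uses;
# NAMESPACE_DNS is only a stand-in to illustrate the deterministic UUID5 idea.
def email_to_user_id(email: str) -> str:
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, email))

def get_user_id_or_default(user_id: str | None, source: str = "context",
                           default: str | None = None) -> str | None:
    # Mirrors the new fall-through: explicit user_id, then explicit default, then None
    if user_id is not None:
        return user_id
    if default is not None:
        return default
    return None  # anonymous -> downstream queries filter WHERE user_id IS NULL

print(email_to_user_id("alice@example.com"))                  # same value on every run
print(get_user_id_or_default(None, source="ask_rem_agent"))   # None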
rem/agentic/mcp/tool_wrapper.py
CHANGED
@@ -28,7 +28,12 @@ def create_pydantic_tool(func: Callable[..., Any]) -> Tool:
     return Tool(func)


-def create_mcp_tool_wrapper(
+def create_mcp_tool_wrapper(
+    tool_name: str,
+    mcp_tool: Any,
+    user_id: str | None = None,
+    description_suffix: str | None = None,
+) -> Tool:
     """
     Create a Pydantic AI Tool from a FastMCP FunctionTool.

@@ -40,6 +45,8 @@ def create_mcp_tool_wrapper(tool_name: str, mcp_tool: Any, user_id: str | None =
         tool_name: Name of the MCP tool
         mcp_tool: The FastMCP FunctionTool object
         user_id: Optional user_id to inject into tool calls
+        description_suffix: Optional text to append to the tool's docstring.
+            Used to add schema-specific context (e.g., default table for search_rem).

     Returns:
         A Pydantic AI Tool instance
@@ -52,7 +59,11 @@ def create_mcp_tool_wrapper(tool_name: str, mcp_tool: Any, user_id: str | None =
     sig = inspect.signature(tool_func)
     has_user_id = "user_id" in sig.parameters

-    #
+    # Build the docstring with optional suffix
+    base_doc = tool_func.__doc__ or ""
+    final_doc = base_doc + description_suffix if description_suffix else base_doc
+
+    # If we need to inject user_id or modify docstring, create a wrapper
     # Otherwise, use the function directly for better signature preservation
     if user_id and has_user_id:
         async def wrapped_tool(**kwargs) -> Any:
@@ -69,12 +80,27 @@ def create_mcp_tool_wrapper(tool_name: str, mcp_tool: Any, user_id: str | None =

         # Copy signature from original function for Pydantic AI inspection
         wrapped_tool.__name__ = tool_name
-        wrapped_tool.__doc__ =
+        wrapped_tool.__doc__ = final_doc
         wrapped_tool.__annotations__ = tool_func.__annotations__
         wrapped_tool.__signature__ = sig  # Important: preserve full signature

         logger.debug(f"Creating MCP tool wrapper with user_id injection: {tool_name}")
         return Tool(wrapped_tool)
+    elif description_suffix:
+        # Need to wrap just for docstring modification
+        async def wrapped_tool(**kwargs) -> Any:
+            """Wrapper for docstring modification."""
+            valid_params = set(sig.parameters.keys())
+            filtered_kwargs = {k: v for k, v in kwargs.items() if k in valid_params}
+            return await tool_func(**filtered_kwargs)
+
+        wrapped_tool.__name__ = tool_name
+        wrapped_tool.__doc__ = final_doc
+        wrapped_tool.__annotations__ = tool_func.__annotations__
+        wrapped_tool.__signature__ = sig
+
+        logger.debug(f"Creating MCP tool wrapper with description suffix: {tool_name}")
+        return Tool(wrapped_tool)
     else:
         # No injection needed - use original function directly
         logger.debug(f"Creating MCP tool wrapper (no injection): {tool_name}")
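To show what the new description_suffix path does, here is a self-contained sketch of the same wrapping trick: append the suffix to the docstring, preserve the signature for inspection, and drop unknown kwargs. The search_rem stand-in, its table default, and the suffix text are invented for illustration; only inspect and asyncio are used, not FastMCP or Pydantic AI.

import asyncio
import inspect

async def search_rem(query: str, table: str = "resources", user_id: str | None = None) -> dict:
    """Run a REM query."""  # stand-in body; the real tool comes from the MCP server
    return {"query": query, "table": table, "user_id": user_id}

def wrap_with_suffix(tool_func, tool_name: str, description_suffix: str | None = None):
    # Same idea as create_mcp_tool_wrapper's elif branch
    sig = inspect.signature(tool_func)
    base_doc = tool_func.__doc__ or ""
    final_doc = base_doc + description_suffix if description_suffix else base_doc

    async def wrapped_tool(**kwargs):
        valid_params = set(sig.parameters.keys())
        return await tool_func(**{k: v for k, v in kwargs.items() if k in valid_params})

    wrapped_tool.__name__ = tool_name
    wrapped_tool.__doc__ = final_doc
    wrapped_tool.__annotations__ = tool_func.__annotations__
    wrapped_tool.__signature__ = sig  # what Pydantic AI inspects to build the tool schema
    return wrapped_tool

wrapped = wrap_with_suffix(
    search_rem, "search_rem",
    description_suffix="\n\nFor this schema, use `search_rem` to query `orders`.",
)
print(wrapped.__doc__)
print(asyncio.run(wrapped(query="late shipments", unknown_arg="ignored")))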
rem/agentic/otel/setup.py
CHANGED
@@ -14,6 +14,7 @@ from loguru import logger

 from ...settings import settings

+
 # Global flag to track if instrumentation is initialized
 _instrumentation_initialized = False

@@ -52,12 +53,94 @@ def setup_instrumentation() -> None:

     try:
         from opentelemetry import trace
-        from opentelemetry.sdk.trace import TracerProvider
-        from opentelemetry.sdk.trace.export import BatchSpanProcessor
+        from opentelemetry.sdk.trace import TracerProvider, ReadableSpan
+        from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter, SpanExportResult
         from opentelemetry.sdk.resources import Resource, SERVICE_NAME, DEPLOYMENT_ENVIRONMENT
         from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as HTTPExporter
         from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter as GRPCExporter

+        class SanitizingSpanExporter(SpanExporter):
+            """
+            Wrapper exporter that sanitizes span attributes before export.
+
+            Removes None values that cause OTLP encoding failures like:
+            - llm.input_messages.3.message.content: None
+            """
+
+            def __init__(self, wrapped_exporter: SpanExporter):
+                self._wrapped = wrapped_exporter
+
+            def _sanitize_value(self, value):
+                """Recursively sanitize a value, replacing None with empty string."""
+                if value is None:
+                    return ""  # Replace None with empty string
+                if isinstance(value, dict):
+                    return {k: self._sanitize_value(v) for k, v in value.items()}
+                if isinstance(value, (list, tuple)):
+                    return [self._sanitize_value(v) for v in value]
+                return value
+
+            def export(self, spans: tuple[ReadableSpan, ...]) -> SpanExportResult:
+                # Create sanitized copies of spans
+                sanitized_spans = []
+                for span in spans:
+                    if span.attributes:
+                        # Sanitize all attribute values - replace None with empty string
+                        sanitized_attrs = {}
+                        for k, v in span.attributes.items():
+                            sanitized_attrs[k] = self._sanitize_value(v)
+                        sanitized_spans.append(_SanitizedSpan(span, sanitized_attrs))
+                    else:
+                        sanitized_spans.append(span)
+
+                return self._wrapped.export(tuple(sanitized_spans))
+
+            def shutdown(self) -> None:
+                self._wrapped.shutdown()
+
+            def force_flush(self, timeout_millis: int = 30000) -> bool:
+                return self._wrapped.force_flush(timeout_millis)
+
+        class _SanitizedSpan(ReadableSpan):
+            """ReadableSpan wrapper with sanitized attributes."""
+
+            def __init__(self, original: ReadableSpan, sanitized_attributes: dict):
+                self._original = original
+                self._sanitized_attributes = sanitized_attributes
+
+            @property
+            def name(self): return self._original.name
+            @property
+            def context(self): return self._original.context
+            @property
+            def parent(self): return self._original.parent
+            @property
+            def resource(self): return self._original.resource
+            @property
+            def instrumentation_scope(self): return self._original.instrumentation_scope
+            @property
+            def status(self): return self._original.status
+            @property
+            def start_time(self): return self._original.start_time
+            @property
+            def end_time(self): return self._original.end_time
+            @property
+            def links(self): return self._original.links
+            @property
+            def events(self): return self._original.events
+            @property
+            def kind(self): return self._original.kind
+            @property
+            def attributes(self): return self._sanitized_attributes
+            @property
+            def dropped_attributes(self): return self._original.dropped_attributes
+            @property
+            def dropped_events(self): return self._original.dropped_events
+            @property
+            def dropped_links(self): return self._original.dropped_links
+
+            def get_span_context(self): return self._original.get_span_context()
+
         # Create resource with service metadata
         resource = Resource(
             attributes={
@@ -72,16 +155,19 @@ def setup_instrumentation() -> None:

         # Configure OTLP exporter based on protocol
         if settings.otel.protocol == "grpc":
-
+            base_exporter = GRPCExporter(
                 endpoint=settings.otel.collector_endpoint,
                 timeout=settings.otel.export_timeout,
             )
         else:  # http
-
+            base_exporter = HTTPExporter(
                 endpoint=f"{settings.otel.collector_endpoint}/v1/traces",
                 timeout=settings.otel.export_timeout,
             )

+        # Wrap with sanitizing exporter to handle None values
+        exporter = SanitizingSpanExporter(base_exporter)
+
         # Add span processor
         tracer_provider.add_span_processor(BatchSpanProcessor(exporter))

@@ -95,6 +181,8 @@ def setup_instrumentation() -> None:
         # Add OpenInference span processor for Pydantic AI
         # This adds rich attributes (openinference.span.kind, input/output, etc.) to ALL traces
         # Phoenix receives these traces via the OTLP collector - no separate "Phoenix integration" needed
+        # Note: The OTEL exporter may log warnings about None values in tool call messages,
+        # but this is a known limitation in openinference-instrumentation-pydantic-ai
         try:
             from openinference.instrumentation.pydantic_ai import OpenInferenceSpanProcessor as PydanticAISpanProcessor

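The new SanitizingSpanExporter is a delegating SpanExporter. The sketch below wires the same wrapper pattern around a ConsoleSpanExporter so it can be tried locally with only opentelemetry-sdk installed; it merely reports attributes that would need sanitizing rather than rebuilding spans, since ReadableSpan attributes are read-only.

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
    ConsoleSpanExporter,
    SimpleSpanProcessor,
    SpanExporter,
    SpanExportResult,
)

class ReportNoneAttributesExporter(SpanExporter):
    """Delegating exporter: flag None attribute values, then hand spans off unchanged."""

    def __init__(self, wrapped: SpanExporter):
        self._wrapped = wrapped

    def export(self, spans) -> SpanExportResult:
        for span in spans:
            none_keys = [k for k, v in (span.attributes or {}).items() if v is None]
            if none_keys:
                print(f"span {span.name!r} has None attributes: {none_keys}")
        return self._wrapped.export(spans)

    def shutdown(self) -> None:
        self._wrapped.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        return self._wrapped.force_flush(timeout_millis)

provider = TracerProvider()
provider.add_span_processor(
    SimpleSpanProcessor(ReportNoneAttributesExporter(ConsoleSpanExporter()))
)
trace.set_tracer_provider(provider)

with trace.get_tracer("demo").start_as_current_span("sanitizer-demo"):
    pass  # span is exported to the console through the wrapper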
rem/agentic/providers/pydantic_ai.py
CHANGED
@@ -175,6 +175,23 @@ class AgentRuntime:
         return self.agent.iter(*args, **kwargs)


+def _get_builtin_tools() -> list:
+    """
+    Get built-in tools that are always available to agents.
+
+    Currently returns empty list - all tools come from MCP servers.
+    The register_metadata tool is available via the REM MCP server and
+    agents can opt-in by configuring mcp_servers in their schema.
+
+    Returns:
+        List of Pydantic AI tool functions (currently empty)
+    """
+    # NOTE: register_metadata is now an MCP tool, not a built-in.
+    # Agents that want it should configure mcp_servers to load from rem.mcp_server.
+    # This allows agents to choose which tools they need.
+    return []
+
+
 def _create_model_from_schema(agent_schema: dict[str, Any]) -> type[BaseModel]:
     """
     Create Pydantic model dynamically from JSON Schema.
@@ -530,18 +547,42 @@ async def create_agent(
     default_model = context.default_model if context else settings.llm.default_model
     model = get_valid_model_or_default(model_override, default_model)

-    # Extract schema fields
-
-
-
-
+    # Extract schema fields using typed helpers
+    from ..schema import get_system_prompt, get_metadata
+
+    if agent_schema:
+        system_prompt = get_system_prompt(agent_schema)
+        metadata = get_metadata(agent_schema)
+        mcp_server_configs = [s.model_dump() for s in metadata.mcp_servers] if hasattr(metadata, 'mcp_servers') else []
+        resource_configs = metadata.resources if hasattr(metadata, 'resources') else []
+
+        if metadata.system_prompt:
+            logger.debug("Using custom system_prompt from json_schema_extra")
+    else:
+        system_prompt = ""
+        metadata = None
+        mcp_server_configs = []
+        resource_configs = []

     # Extract temperature and max_iterations from schema metadata (with fallback to settings defaults)
-
-
+    if metadata:
+        temperature = metadata.override_temperature if metadata.override_temperature is not None else settings.llm.default_temperature
+        max_iterations = metadata.override_max_iterations if metadata.override_max_iterations is not None else settings.llm.default_max_iterations
+        use_structured_output = metadata.structured_output
+    else:
+        temperature = settings.llm.default_temperature
+        max_iterations = settings.llm.default_max_iterations
+        use_structured_output = True
+
+    # Build list of tools - start with built-in tools
+    tools = _get_builtin_tools()
+
+    # Get agent name from metadata for logging
+    agent_name = metadata.name if metadata and hasattr(metadata, 'name') else "unknown"

     logger.info(
-        f"Creating agent: model={model}, mcp_servers={len(mcp_server_configs)},
+        f"Creating agent '{agent_name}': model={model}, mcp_servers={len(mcp_server_configs)}, "
+        f"resources={len(resource_configs)}, builtin_tools={len(tools)}"
     )

     # Set agent resource attributes for OTEL (before creating agent)
@@ -550,8 +591,23 @@ async def create_agent(

     set_agent_resource_attributes(agent_schema=agent_schema)

-    #
-
+    # Extract schema metadata for search_rem tool description suffix
+    # This allows entity schemas to add context-specific notes to the search_rem tool
+    search_rem_suffix = None
+    if metadata:
+        # Check for default_search_table in metadata (set by entity schemas)
+        extra = agent_schema.get("json_schema_extra", {}) if agent_schema else {}
+        default_table = extra.get("default_search_table")
+        has_embeddings = extra.get("has_embeddings", False)
+
+        if default_table:
+            # Build description suffix for search_rem
+            search_rem_suffix = f"\n\nFor this schema, use `search_rem` to query `{default_table}`. "
+            if has_embeddings:
+                search_rem_suffix += f"SEARCH works well on {default_table} (has embeddings). "
+            search_rem_suffix += f"Example: `SEARCH \"your query\" FROM {default_table} LIMIT 10`"
+
+    # Add tools from MCP server (in-process, no subprocess)
     if mcp_server_configs:
         for server_config in mcp_server_configs:
             server_type = server_config.get("type")
@@ -574,9 +630,17 @@ async def create_agent(
             mcp_tools_dict = await mcp_server.get_tools()

             for tool_name, tool_func in mcp_tools_dict.items():
-
+                # Add description suffix to search_rem tool if schema specifies a default table
+                tool_suffix = search_rem_suffix if tool_name == "search_rem" else None
+
+                wrapped_tool = create_mcp_tool_wrapper(
+                    tool_name,
+                    tool_func,
+                    user_id=context.user_id if context else None,
+                    description_suffix=tool_suffix,
+                )
                 tools.append(wrapped_tool)
-                logger.debug(f"Loaded MCP tool: {tool_name}")
+                logger.debug(f"Loaded MCP tool: {tool_name}" + (" (with schema suffix)" if tool_suffix else ""))

             logger.info(f"Loaded {len(mcp_tools_dict)} tools from MCP server: {server_id} (in-process)")

@@ -589,11 +653,8 @@ async def create_agent(
             # TODO: Convert resources to tools (MCP convenience syntax)
             pass

-    # Check if structured output is disabled for this schema
-    # When structured_output: false, properties become part of prompt instead of output_type
-    use_structured_output = metadata.get("structured_output", True)
-
     # Create dynamic result_type from schema if not provided
+    # Note: use_structured_output is set earlier from metadata.structured_output
     if result_type is None and agent_schema and "properties" in agent_schema:
         if use_structured_output:
             # Pre-process schema for Qwen compatibility (strips min/max, sets additionalProperties=False)
@@ -615,21 +676,30 @@ async def create_agent(
             wrapped_result_type = _create_schema_wrapper(
                 result_type, strip_description=strip_model_description
             )
+            # Use InstrumentationSettings with version=3 to include agent name in span names
+            from pydantic_ai.models.instrumented import InstrumentationSettings
+            instrumentation = InstrumentationSettings(version=3) if settings.otel.enabled else False
+
             agent = Agent(
                 model=model,
+                name=agent_name,  # Used for OTEL span names (version 3: "invoke_agent {name}")
                 system_prompt=system_prompt,
                 output_type=wrapped_result_type,
                 tools=tools,
-                instrument=
+                instrument=instrumentation,
                 model_settings={"temperature": temperature},
                 retries=settings.llm.max_retries,
             )
         else:
+            from pydantic_ai.models.instrumented import InstrumentationSettings
+            instrumentation = InstrumentationSettings(version=3) if settings.otel.enabled else False
+
             agent = Agent(
                 model=model,
+                name=agent_name,  # Used for OTEL span names (version 3: "invoke_agent {name}")
                 system_prompt=system_prompt,
                 tools=tools,
-                instrument=
+                instrument=instrumentation,
                 model_settings={"temperature": temperature},
                 retries=settings.llm.max_retries,
             )