remdb-0.3.242-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of remdb might be problematic.

Files changed (235)
  1. rem/__init__.py +129 -0
  2. rem/agentic/README.md +760 -0
  3. rem/agentic/__init__.py +54 -0
  4. rem/agentic/agents/README.md +155 -0
  5. rem/agentic/agents/__init__.py +38 -0
  6. rem/agentic/agents/agent_manager.py +311 -0
  7. rem/agentic/agents/sse_simulator.py +502 -0
  8. rem/agentic/context.py +425 -0
  9. rem/agentic/context_builder.py +360 -0
  10. rem/agentic/llm_provider_models.py +301 -0
  11. rem/agentic/mcp/__init__.py +0 -0
  12. rem/agentic/mcp/tool_wrapper.py +273 -0
  13. rem/agentic/otel/__init__.py +5 -0
  14. rem/agentic/otel/setup.py +240 -0
  15. rem/agentic/providers/phoenix.py +926 -0
  16. rem/agentic/providers/pydantic_ai.py +854 -0
  17. rem/agentic/query.py +117 -0
  18. rem/agentic/query_helper.py +89 -0
  19. rem/agentic/schema.py +737 -0
  20. rem/agentic/serialization.py +245 -0
  21. rem/agentic/tools/__init__.py +5 -0
  22. rem/agentic/tools/rem_tools.py +242 -0
  23. rem/api/README.md +657 -0
  24. rem/api/deps.py +253 -0
  25. rem/api/main.py +460 -0
  26. rem/api/mcp_router/prompts.py +182 -0
  27. rem/api/mcp_router/resources.py +820 -0
  28. rem/api/mcp_router/server.py +243 -0
  29. rem/api/mcp_router/tools.py +1605 -0
  30. rem/api/middleware/tracking.py +172 -0
  31. rem/api/routers/admin.py +520 -0
  32. rem/api/routers/auth.py +898 -0
  33. rem/api/routers/chat/__init__.py +5 -0
  34. rem/api/routers/chat/child_streaming.py +394 -0
  35. rem/api/routers/chat/completions.py +702 -0
  36. rem/api/routers/chat/json_utils.py +76 -0
  37. rem/api/routers/chat/models.py +202 -0
  38. rem/api/routers/chat/otel_utils.py +33 -0
  39. rem/api/routers/chat/sse_events.py +546 -0
  40. rem/api/routers/chat/streaming.py +950 -0
  41. rem/api/routers/chat/streaming_utils.py +327 -0
  42. rem/api/routers/common.py +18 -0
  43. rem/api/routers/dev.py +87 -0
  44. rem/api/routers/feedback.py +276 -0
  45. rem/api/routers/messages.py +620 -0
  46. rem/api/routers/models.py +86 -0
  47. rem/api/routers/query.py +362 -0
  48. rem/api/routers/shared_sessions.py +422 -0
  49. rem/auth/README.md +258 -0
  50. rem/auth/__init__.py +36 -0
  51. rem/auth/jwt.py +367 -0
  52. rem/auth/middleware.py +318 -0
  53. rem/auth/providers/__init__.py +16 -0
  54. rem/auth/providers/base.py +376 -0
  55. rem/auth/providers/email.py +215 -0
  56. rem/auth/providers/google.py +163 -0
  57. rem/auth/providers/microsoft.py +237 -0
  58. rem/cli/README.md +517 -0
  59. rem/cli/__init__.py +8 -0
  60. rem/cli/commands/README.md +299 -0
  61. rem/cli/commands/__init__.py +3 -0
  62. rem/cli/commands/ask.py +549 -0
  63. rem/cli/commands/cluster.py +1808 -0
  64. rem/cli/commands/configure.py +495 -0
  65. rem/cli/commands/db.py +828 -0
  66. rem/cli/commands/dreaming.py +324 -0
  67. rem/cli/commands/experiments.py +1698 -0
  68. rem/cli/commands/mcp.py +66 -0
  69. rem/cli/commands/process.py +388 -0
  70. rem/cli/commands/query.py +109 -0
  71. rem/cli/commands/scaffold.py +47 -0
  72. rem/cli/commands/schema.py +230 -0
  73. rem/cli/commands/serve.py +106 -0
  74. rem/cli/commands/session.py +453 -0
  75. rem/cli/dreaming.py +363 -0
  76. rem/cli/main.py +123 -0
  77. rem/config.py +244 -0
  78. rem/mcp_server.py +41 -0
  79. rem/models/core/__init__.py +49 -0
  80. rem/models/core/core_model.py +70 -0
  81. rem/models/core/engram.py +333 -0
  82. rem/models/core/experiment.py +672 -0
  83. rem/models/core/inline_edge.py +132 -0
  84. rem/models/core/rem_query.py +246 -0
  85. rem/models/entities/__init__.py +68 -0
  86. rem/models/entities/domain_resource.py +38 -0
  87. rem/models/entities/feedback.py +123 -0
  88. rem/models/entities/file.py +57 -0
  89. rem/models/entities/image_resource.py +88 -0
  90. rem/models/entities/message.py +64 -0
  91. rem/models/entities/moment.py +123 -0
  92. rem/models/entities/ontology.py +181 -0
  93. rem/models/entities/ontology_config.py +131 -0
  94. rem/models/entities/resource.py +95 -0
  95. rem/models/entities/schema.py +87 -0
  96. rem/models/entities/session.py +84 -0
  97. rem/models/entities/shared_session.py +180 -0
  98. rem/models/entities/subscriber.py +175 -0
  99. rem/models/entities/user.py +93 -0
  100. rem/py.typed +0 -0
  101. rem/registry.py +373 -0
  102. rem/schemas/README.md +507 -0
  103. rem/schemas/__init__.py +6 -0
  104. rem/schemas/agents/README.md +92 -0
  105. rem/schemas/agents/core/agent-builder.yaml +235 -0
  106. rem/schemas/agents/core/moment-builder.yaml +178 -0
  107. rem/schemas/agents/core/rem-query-agent.yaml +226 -0
  108. rem/schemas/agents/core/resource-affinity-assessor.yaml +99 -0
  109. rem/schemas/agents/core/simple-assistant.yaml +19 -0
  110. rem/schemas/agents/core/user-profile-builder.yaml +163 -0
  111. rem/schemas/agents/examples/contract-analyzer.yaml +317 -0
  112. rem/schemas/agents/examples/contract-extractor.yaml +134 -0
  113. rem/schemas/agents/examples/cv-parser.yaml +263 -0
  114. rem/schemas/agents/examples/hello-world.yaml +37 -0
  115. rem/schemas/agents/examples/query.yaml +54 -0
  116. rem/schemas/agents/examples/simple.yaml +21 -0
  117. rem/schemas/agents/examples/test.yaml +29 -0
  118. rem/schemas/agents/rem.yaml +132 -0
  119. rem/schemas/evaluators/hello-world/default.yaml +77 -0
  120. rem/schemas/evaluators/rem/faithfulness.yaml +219 -0
  121. rem/schemas/evaluators/rem/lookup-correctness.yaml +182 -0
  122. rem/schemas/evaluators/rem/retrieval-precision.yaml +199 -0
  123. rem/schemas/evaluators/rem/retrieval-recall.yaml +211 -0
  124. rem/schemas/evaluators/rem/search-correctness.yaml +192 -0
  125. rem/services/__init__.py +18 -0
  126. rem/services/audio/INTEGRATION.md +308 -0
  127. rem/services/audio/README.md +376 -0
  128. rem/services/audio/__init__.py +15 -0
  129. rem/services/audio/chunker.py +354 -0
  130. rem/services/audio/transcriber.py +259 -0
  131. rem/services/content/README.md +1269 -0
  132. rem/services/content/__init__.py +5 -0
  133. rem/services/content/providers.py +760 -0
  134. rem/services/content/service.py +762 -0
  135. rem/services/dreaming/README.md +230 -0
  136. rem/services/dreaming/__init__.py +53 -0
  137. rem/services/dreaming/affinity_service.py +322 -0
  138. rem/services/dreaming/moment_service.py +251 -0
  139. rem/services/dreaming/ontology_service.py +54 -0
  140. rem/services/dreaming/user_model_service.py +297 -0
  141. rem/services/dreaming/utils.py +39 -0
  142. rem/services/email/__init__.py +10 -0
  143. rem/services/email/service.py +522 -0
  144. rem/services/email/templates.py +360 -0
  145. rem/services/embeddings/__init__.py +11 -0
  146. rem/services/embeddings/api.py +127 -0
  147. rem/services/embeddings/worker.py +435 -0
  148. rem/services/fs/README.md +662 -0
  149. rem/services/fs/__init__.py +62 -0
  150. rem/services/fs/examples.py +206 -0
  151. rem/services/fs/examples_paths.py +204 -0
  152. rem/services/fs/git_provider.py +935 -0
  153. rem/services/fs/local_provider.py +760 -0
  154. rem/services/fs/parsing-hooks-examples.md +172 -0
  155. rem/services/fs/paths.py +276 -0
  156. rem/services/fs/provider.py +460 -0
  157. rem/services/fs/s3_provider.py +1042 -0
  158. rem/services/fs/service.py +186 -0
  159. rem/services/git/README.md +1075 -0
  160. rem/services/git/__init__.py +17 -0
  161. rem/services/git/service.py +469 -0
  162. rem/services/phoenix/EXPERIMENT_DESIGN.md +1146 -0
  163. rem/services/phoenix/README.md +453 -0
  164. rem/services/phoenix/__init__.py +46 -0
  165. rem/services/phoenix/client.py +960 -0
  166. rem/services/phoenix/config.py +88 -0
  167. rem/services/phoenix/prompt_labels.py +477 -0
  168. rem/services/postgres/README.md +757 -0
  169. rem/services/postgres/__init__.py +49 -0
  170. rem/services/postgres/diff_service.py +599 -0
  171. rem/services/postgres/migration_service.py +427 -0
  172. rem/services/postgres/programmable_diff_service.py +635 -0
  173. rem/services/postgres/pydantic_to_sqlalchemy.py +562 -0
  174. rem/services/postgres/register_type.py +353 -0
  175. rem/services/postgres/repository.py +481 -0
  176. rem/services/postgres/schema_generator.py +661 -0
  177. rem/services/postgres/service.py +802 -0
  178. rem/services/postgres/sql_builder.py +355 -0
  179. rem/services/rate_limit.py +113 -0
  180. rem/services/rem/README.md +318 -0
  181. rem/services/rem/__init__.py +23 -0
  182. rem/services/rem/exceptions.py +71 -0
  183. rem/services/rem/executor.py +293 -0
  184. rem/services/rem/parser.py +180 -0
  185. rem/services/rem/queries.py +196 -0
  186. rem/services/rem/query.py +371 -0
  187. rem/services/rem/service.py +608 -0
  188. rem/services/session/README.md +374 -0
  189. rem/services/session/__init__.py +13 -0
  190. rem/services/session/compression.py +488 -0
  191. rem/services/session/pydantic_messages.py +310 -0
  192. rem/services/session/reload.py +85 -0
  193. rem/services/user_service.py +130 -0
  194. rem/settings.py +1877 -0
  195. rem/sql/background_indexes.sql +52 -0
  196. rem/sql/migrations/001_install.sql +983 -0
  197. rem/sql/migrations/002_install_models.sql +3157 -0
  198. rem/sql/migrations/003_optional_extensions.sql +326 -0
  199. rem/sql/migrations/004_cache_system.sql +282 -0
  200. rem/sql/migrations/005_schema_update.sql +145 -0
  201. rem/sql/migrations/migrate_session_id_to_uuid.sql +45 -0
  202. rem/utils/AGENTIC_CHUNKING.md +597 -0
  203. rem/utils/README.md +628 -0
  204. rem/utils/__init__.py +61 -0
  205. rem/utils/agentic_chunking.py +622 -0
  206. rem/utils/batch_ops.py +343 -0
  207. rem/utils/chunking.py +108 -0
  208. rem/utils/clip_embeddings.py +276 -0
  209. rem/utils/constants.py +97 -0
  210. rem/utils/date_utils.py +228 -0
  211. rem/utils/dict_utils.py +98 -0
  212. rem/utils/embeddings.py +436 -0
  213. rem/utils/examples/embeddings_example.py +305 -0
  214. rem/utils/examples/sql_types_example.py +202 -0
  215. rem/utils/files.py +323 -0
  216. rem/utils/markdown.py +16 -0
  217. rem/utils/mime_types.py +158 -0
  218. rem/utils/model_helpers.py +492 -0
  219. rem/utils/schema_loader.py +649 -0
  220. rem/utils/sql_paths.py +146 -0
  221. rem/utils/sql_types.py +350 -0
  222. rem/utils/user_id.py +81 -0
  223. rem/utils/vision.py +325 -0
  224. rem/workers/README.md +506 -0
  225. rem/workers/__init__.py +7 -0
  226. rem/workers/db_listener.py +579 -0
  227. rem/workers/db_maintainer.py +74 -0
  228. rem/workers/dreaming.py +502 -0
  229. rem/workers/engram_processor.py +312 -0
  230. rem/workers/sqs_file_processor.py +193 -0
  231. rem/workers/unlogged_maintainer.py +463 -0
  232. remdb-0.3.242.dist-info/METADATA +1632 -0
  233. remdb-0.3.242.dist-info/RECORD +235 -0
  234. remdb-0.3.242.dist-info/WHEEL +4 -0
  235. remdb-0.3.242.dist-info/entry_points.txt +2 -0
rem/api/routers/chat/json_utils.py
@@ -0,0 +1,76 @@
+ """
+ JSON extraction utilities for response_format='json_object' mode.
+
+ Design Pattern:
+ - Best-effort JSON extraction from agent output
+ - Handles fenced code blocks (```json ... ```)
+ - Handles raw JSON objects
+ - Graceful fallback to string if extraction fails
+ """
+
+ import json
+ import re
+
+
+ def extract_json_resilient(output: str | dict | list) -> str:
+     """
+     Extract JSON from agent output with multiple fallback strategies.
+
+     Strategies (in order):
+     1. If already dict/list, serialize directly
+     2. Extract from fenced JSON code blocks (```json ... ```)
+     3. Find JSON object/array in text ({...} or [...])
+     4. Return as-is if all strategies fail
+
+     Args:
+         output: Agent output (str, dict, or list)
+
+     Returns:
+         JSON string (best-effort)
+
+     Examples:
+         >>> extract_json_resilient({"answer": "test"})
+         '{"answer": "test"}'
+
+         >>> extract_json_resilient('Here is the result:\n```json\n{"answer": "test"}\n```')
+         '{"answer": "test"}'
+
+         >>> extract_json_resilient('The answer is {"answer": "test"} as shown above.')
+         '{"answer": "test"}'
+     """
+     # Strategy 1: Already structured
+     if isinstance(output, (dict, list)):
+         return json.dumps(output)
+
+     text = str(output)
+
+     # Strategy 2: Extract from fenced code blocks
+     fenced_match = re.search(r"```json\s*\n(.*?)\n```", text, re.DOTALL)
+     if fenced_match:
+         try:
+             json_str = fenced_match.group(1).strip()
+             # Validate it's valid JSON
+             json.loads(json_str)
+             return json_str
+         except json.JSONDecodeError:
+             pass
+
+     # Strategy 3: Find JSON object or array
+     # Look for {...} or [...]
+     for pattern in [
+         r"\{[^{}]*\}",  # Simple object
+         r"\{.*\}",  # Nested object
+         r"\[.*\]",  # Array
+     ]:
+         match = re.search(pattern, text, re.DOTALL)
+         if match:
+             try:
+                 json_str = match.group(0)
+                 # Validate it's valid JSON
+                 json.loads(json_str)
+                 return json_str
+             except json.JSONDecodeError:
+                 continue
+
+     # Strategy 4: Fallback to string
+     return text
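To make the fallback order above concrete, here is a minimal usage sketch. The import path is inferred from the files-changed list and is not confirmed by this diff; the expected outputs in the comments follow the strategies documented in the function.

```python
# Hypothetical usage sketch for extract_json_resilient (import path inferred
# from the files-changed list above).
from rem.api.routers.chat.json_utils import extract_json_resilient

# Strategy 1: dict/list input is serialized directly.
assert extract_json_resilient({"answer": "test"}) == '{"answer": "test"}'

# Strategy 2: JSON inside a fenced ```json block is extracted and validated.
fenced = 'Here is the result:\n```json\n{"answer": "test"}\n```'
assert extract_json_resilient(fenced) == '{"answer": "test"}'

# Strategy 3: a bare JSON object embedded in prose is located by regex.
assert extract_json_resilient('The answer is {"answer": "test"}.') == '{"answer": "test"}'

# Strategy 4: anything that yields no valid JSON falls back to the raw string.
assert extract_json_resilient("no json here") == "no json here"
```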
rem/api/routers/chat/models.py
@@ -0,0 +1,202 @@
+ """
+ OpenAI-compatible API models for chat completions.
+
+ Design Pattern:
+ - Full OpenAI compatibility for drop-in replacement
+ - Support for streaming (SSE) and non-streaming modes
+ - Response format control (text vs json_object)
+ - Headers map to AgentContext for session/context control
+ - Body fields for OpenAI-compatible parameters + metadata
+
+ Headers (context control):
+     X-User-Id → context.user_id (user identifier)
+     X-Tenant-Id → context.tenant_id (multi-tenancy, default: "default")
+     X-Session-Id → context.session_id (conversation continuity)
+     X-Agent-Schema → context.agent_schema_uri (which agent to use, default: "rem")
+     X-Model-Name → context.default_model (model override)
+     X-Chat-Is-Audio → triggers audio transcription ("true"/"false")
+     X-Is-Eval → context.is_eval (marks session as evaluation, sets mode=EVALUATION)
+
+ Body Fields (OpenAI-compatible + extensions):
+     model → LLM model (e.g., "openai:gpt-4.1", "anthropic:claude-sonnet-4-5-20250929")
+     messages → Chat conversation history
+     temperature → Sampling temperature (0-2)
+     max_tokens → Max tokens (deprecated, use max_completion_tokens)
+     max_completion_tokens → Max tokens to generate
+     stream → Enable SSE streaming
+     metadata → Key-value pairs merged with session metadata (for evals/experiments)
+     store → Whether to store for distillation/evaluation
+     seed → Deterministic sampling seed
+     top_p → Nucleus sampling probability
+     reasoning_effort → low/medium/high for o-series models
+     service_tier → auto/flex/priority/default
+ """
+
+ from typing import Any, Literal
+
+ from pydantic import BaseModel, Field
+
+ from rem.settings import settings
+
+
+ # Request models
+ class ChatMessage(BaseModel):
+     """OpenAI chat message format."""
+
+     role: Literal["system", "user", "assistant", "tool"]
+     content: str | None = None
+     name: str | None = None
+     tool_call_id: str | None = None
+
+
+ class ResponseFormat(BaseModel):
+     """
+     Response format specification (OpenAI-compatible).
+
+     - text: Plain text response
+     - json_object: Best-effort JSON extraction from agent output
+     """
+
+     type: Literal["text", "json_object"] = Field(
+         default="text",
+         description="Response format type. Use 'json_object' to enable JSON mode.",
+     )
+
+
+ class ChatCompletionRequest(BaseModel):
+     """
+     OpenAI chat completion request format.
+
+     Compatible with OpenAI's /v1/chat/completions endpoint.
+
+     Headers Map to AgentContext:
+         X-User-Id → context.user_id
+         X-Tenant-Id → context.tenant_id (default: "default")
+         X-Session-Id → context.session_id
+         X-Agent-Schema → context.agent_schema_uri (default: "rem")
+         X-Model-Name → context.default_model
+         X-Chat-Is-Audio → triggers audio transcription
+         X-Is-Eval → context.is_eval (sets session mode=EVALUATION)
+
+     Body Fields for Metadata/Evals:
+         metadata → Key-value pairs merged with session metadata
+         store → Whether to store for distillation/evaluation
+
+     Note: Model is specified in body.model (standard OpenAI field), not headers.
+     """
+
+     # TODO: default should come from settings.llm.default_model at request time
+     # Using None and resolving in endpoint to avoid import-time settings evaluation
+     model: str | None = Field(
+         default=None,
+         description="Model to use. Defaults to LLM__DEFAULT_MODEL from settings.",
+     )
+     messages: list[ChatMessage] = Field(description="Chat conversation history")
+     temperature: float | None = Field(default=None, ge=0, le=2)
+     max_tokens: int | None = Field(default=None, ge=1)
+     stream: bool = Field(default=False, description="Enable SSE streaming")
+     n: int | None = Field(default=1, ge=1, le=1, description="Number of completions (must be 1)")
+     stop: str | list[str] | None = None
+     presence_penalty: float | None = Field(default=None, ge=-2, le=2)
+     frequency_penalty: float | None = Field(default=None, ge=-2, le=2)
+     user: str | None = Field(default=None, description="Unique user identifier")
+     response_format: ResponseFormat | None = Field(
+         default=None,
+         description="Response format. Set type='json_object' to enable JSON mode.",
+     )
+     # Additional OpenAI-compatible fields
+     metadata: dict[str, str] | None = Field(
+         default=None,
+         description="Key-value pairs attached to the request (max 16 keys, 64/512 char limits). "
+         "Merged with session metadata for persistence.",
+     )
+     store: bool | None = Field(
+         default=None,
+         description="Whether to store for distillation/evaluation purposes.",
+     )
+     max_completion_tokens: int | None = Field(
+         default=None,
+         ge=1,
+         description="Max tokens to generate (replaces deprecated max_tokens).",
+     )
+     seed: int | None = Field(
+         default=None,
+         description="Seed for deterministic sampling (best effort).",
+     )
+     top_p: float | None = Field(
+         default=None,
+         ge=0,
+         le=1,
+         description="Nucleus sampling probability. Use temperature OR top_p, not both.",
+     )
+     logprobs: bool | None = Field(
+         default=None,
+         description="Whether to return log probabilities for output tokens.",
+     )
+     top_logprobs: int | None = Field(
+         default=None,
+         ge=0,
+         le=20,
+         description="Number of most likely tokens to return at each position (requires logprobs=true).",
+     )
+     reasoning_effort: Literal["low", "medium", "high"] | None = Field(
+         default=None,
+         description="Reasoning effort for o-series models (low/medium/high).",
+     )
+     service_tier: Literal["auto", "flex", "priority", "default"] | None = Field(
+         default=None,
+         description="Service tier for processing (flex is 50% cheaper but slower).",
+     )
+
+
+ # Response models
+ class ChatCompletionUsage(BaseModel):
+     """Token usage statistics."""
+
+     prompt_tokens: int
+     completion_tokens: int
+     total_tokens: int
+
+
+ class ChatCompletionMessageDelta(BaseModel):
+     """Streaming delta for chat completion."""
+
+     role: Literal["system", "user", "assistant"] | None = None
+     content: str | None = None
+
+
+ class ChatCompletionChoice(BaseModel):
+     """Chat completion choice (non-streaming)."""
+
+     index: int
+     message: ChatMessage
+     finish_reason: Literal["stop", "length", "content_filter", "tool_calls"] | None
+
+
+ class ChatCompletionStreamChoice(BaseModel):
+     """Chat completion choice (streaming)."""
+
+     index: int
+     delta: ChatCompletionMessageDelta
+     finish_reason: Literal["stop", "length", "content_filter"] | None = None
+
+
+ class ChatCompletionResponse(BaseModel):
+     """OpenAI chat completion response (non-streaming)."""
+
+     id: str
+     object: Literal["chat.completion"] = "chat.completion"
+     created: int
+     model: str
+     choices: list[ChatCompletionChoice]
+     usage: ChatCompletionUsage
+
+
+ class ChatCompletionStreamResponse(BaseModel):
+     """OpenAI chat completion chunk (streaming)."""
+
+     id: str
+     object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
+     created: int
+     model: str
+     choices: list[ChatCompletionStreamChoice]
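As a quick illustration of how the body fields and the X-* context headers documented above combine in a single request, here is a hedged sketch using httpx. The base URL, header values, and model string are assumptions for illustration; the /v1/chat/completions path follows the OpenAI-compatibility note in the docstring.

```python
# Hedged sketch only: shows one way a client might combine body.model, messages,
# response_format, metadata, and the X-* context headers described above.
import httpx

payload = {
    "model": "openai:gpt-4.1",                   # body.model (standard OpenAI field)
    "messages": [{"role": "user", "content": "What did we discuss yesterday?"}],
    "stream": False,                             # set True for SSE streaming chunks
    "response_format": {"type": "json_object"},  # enables best-effort JSON extraction
    "metadata": {"experiment": "demo"},          # merged with session metadata
}

headers = {
    "X-User-Id": "user-123",        # -> context.user_id
    "X-Tenant-Id": "default",       # -> context.tenant_id
    "X-Session-Id": "session-abc",  # -> context.session_id (conversation continuity)
    "X-Agent-Schema": "rem",        # -> context.agent_schema_uri
}

# Base URL is illustrative; point it at wherever the API is served.
resp = httpx.post("http://localhost:8000/v1/chat/completions", json=payload, headers=headers)
resp.raise_for_status()

# Non-streaming responses follow ChatCompletionResponse above.
print(resp.json()["choices"][0]["message"]["content"])
```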
rem/api/routers/chat/otel_utils.py
@@ -0,0 +1,33 @@
+ """OTEL utilities for chat routers."""
+
+ from loguru import logger
+
+
+ def get_tracer():
+     """Get the OpenTelemetry tracer for chat completions."""
+     try:
+         from opentelemetry import trace
+         return trace.get_tracer("rem.chat.completions")
+     except Exception:
+         return None
+
+
+ def get_current_trace_context() -> tuple[str | None, str | None]:
+     """Get trace_id and span_id from current OTEL context.
+
+     Returns:
+         Tuple of (trace_id, span_id) as hex strings, or (None, None) if not available.
+     """
+     try:
+         from opentelemetry import trace
+
+         span = trace.get_current_span()
+         ctx = span.get_span_context()
+         if ctx.is_valid:
+             trace_id = format(ctx.trace_id, '032x')
+             span_id = format(ctx.span_id, '016x')
+             return trace_id, span_id
+     except Exception as e:
+         logger.debug(f"Could not get trace context: {e}")
+
+     return None, None
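A short usage sketch for these helpers follows. The span name, log wording, and import path are illustrative assumptions; the helpers themselves already degrade to None / (None, None) when OpenTelemetry is not installed.

```python
# Illustrative only: correlate a chat-completion span with log output.
from loguru import logger

# Import path inferred from the files-changed list above.
from rem.api.routers.chat.otel_utils import get_current_trace_context, get_tracer

tracer = get_tracer()
if tracer is not None:
    with tracer.start_as_current_span("chat.completion"):
        trace_id, span_id = get_current_trace_context()
        logger.info(f"handling completion trace_id={trace_id} span_id={span_id}")
else:
    # No OTEL SDK available: helpers return (None, None) instead of raising.
    trace_id, span_id = get_current_trace_context()
```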