pixie_qa-0.1.0-py3-none-any.whl

This diff shows the contents of a publicly available package version as released to a supported registry, and is provided for informational purposes only.
@@ -0,0 +1,368 @@
+ """LLMSpanProcessor — converts OpenInference span attributes to LLMSpan."""
+
+ from __future__ import annotations
+
+ import json
+ from datetime import datetime, timezone
+ from typing import Any
+
+ from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor
+ from opentelemetry.trace import StatusCode
+
+ from .queue import _DeliveryQueue
+ from .spans import (
+     AssistantMessage,
+     ImageContent,
+     LLMSpan,
+     Message,
+     SystemMessage,
+     TextContent,
+     ToolCall,
+     ToolDefinition,
+     ToolResultMessage,
+     UserMessage,
+ )
+
+
+ class LLMSpanProcessor(SpanProcessor):
+     """OTel SpanProcessor that converts OpenInference LLM spans to typed LLMSpan objects."""
+
+     def __init__(self, delivery_queue: _DeliveryQueue) -> None:
+         self._delivery_queue = delivery_queue
+
+     def on_start(self, span: Any, parent_context: Any = None) -> None:
+         """No-op — we only process completed spans."""
+
+     def on_end(self, span: ReadableSpan) -> None:
+         """Convert completed OpenInference LLM spans to LLMSpan and submit."""
+         try:
+             attrs = dict(span.attributes) if span.attributes else {}
+
+             # Only process LLM and embedding spans
+             span_kind = attrs.get("openinference.span.kind")
+             if span_kind not in ("LLM", "EMBEDDING"):
+                 return
+
+             llm_span = self._build_llm_span(span, attrs, str(span_kind))
+             self._delivery_queue.submit(llm_span)
+         except Exception:
+             pass  # Never raise from on_end
+
+     def on_shutdown(self) -> None:
+         """No-op."""
+
+     def force_flush(self, timeout_millis: int = 30000) -> bool:
+         """Flush the delivery queue."""
+         return self._delivery_queue.flush(timeout_seconds=timeout_millis / 1000)
+
+     def _build_llm_span(
+         self,
+         span: ReadableSpan,
+         attrs: dict[str, Any],
+         span_kind: str,
+     ) -> LLMSpan:
+         """Build a typed LLMSpan from raw OTel span and attributes."""
+         # ── Identity / timing
+         ctx = span.context
+         if ctx is None:
+             raise ValueError("No span context")
+         span_id = format(ctx.span_id, "016x")
+         trace_id = format(ctx.trace_id, "032x")
+         parent_span_id = format(span.parent.span_id, "016x") if span.parent else None
+
+         start_ns = span.start_time or 0
+         end_ns = span.end_time or 0
+         started_at = datetime.fromtimestamp(start_ns / 1e9, tz=timezone.utc)
+         ended_at = datetime.fromtimestamp(end_ns / 1e9, tz=timezone.utc)
+         duration_ms = (end_ns - start_ns) / 1e6
+
+         # ── Provider / model
+         request_model = str(attrs.get("llm.model_name") or attrs.get("gen_ai.request.model", ""))
+         response_model_raw = attrs.get("gen_ai.response.model")
+         response_model = str(response_model_raw) if response_model_raw is not None else None
+         provider = str(attrs.get("gen_ai.system", "")) or _infer_provider(request_model)
+         operation = "embedding" if span_kind == "EMBEDDING" else "chat"
+
+         # ── Token usage
+         input_tokens = int(attrs.get("llm.token_count.prompt", 0))
+         output_tokens = int(attrs.get("llm.token_count.completion", 0))
+         cache_read_tokens = int(attrs.get("llm.token_count.cache_read", 0))
+         cache_creation_tokens = int(attrs.get("llm.token_count.cache_creation", 0))
+
+         # ── Request parameters
+         params = _parse_json(str(attrs.get("llm.invocation_parameters", "{}")))
+         request_temperature = _to_float_or_none(params.get("temperature"))
+         request_max_tokens = _to_int_or_none(
+             params.get("max_tokens") or params.get("max_completion_tokens")
+         )
+         request_top_p = _to_float_or_none(params.get("top_p"))
+
+         # ── Response / error
+         response_id_raw = attrs.get("llm.response_id") or attrs.get("gen_ai.response.id")
+         response_id = str(response_id_raw) if response_id_raw is not None else None
+         output_type_raw = attrs.get("gen_ai.output.type")
+         output_type = str(output_type_raw) if output_type_raw is not None else None
+         error_type_raw = attrs.get("error.type")
+         if error_type_raw is not None:
+             error_type: str | None = str(error_type_raw)
+         elif span.status and span.status.status_code == StatusCode.ERROR:
+             error_type = "error"
+         else:
+             error_type = None
+
+         # ── Messages
+         input_messages = _parse_input_messages(attrs)
+         output_messages = _parse_output_messages(attrs)
+         finish_reasons = tuple(msg.finish_reason for msg in output_messages if msg.finish_reason)
+
+         # ── Tool definitions
+         tool_definitions = _parse_tool_definitions(attrs)
+
+         return LLMSpan(
+             span_id=span_id,
+             trace_id=trace_id,
+             parent_span_id=parent_span_id,
+             started_at=started_at,
+             ended_at=ended_at,
+             duration_ms=duration_ms,
+             operation=operation,
+             provider=provider,
+             request_model=request_model,
+             response_model=response_model,
+             input_tokens=input_tokens,
+             output_tokens=output_tokens,
+             cache_read_tokens=cache_read_tokens,
+             cache_creation_tokens=cache_creation_tokens,
+             request_temperature=request_temperature,
+             request_max_tokens=request_max_tokens,
+             request_top_p=request_top_p,
+             finish_reasons=finish_reasons,
+             response_id=response_id,
+             output_type=output_type,
+             error_type=error_type,
+             input_messages=tuple(input_messages),
+             output_messages=tuple(output_messages),
+             tool_definitions=tuple(tool_definitions),
+         )
+
+
+ # ── Helper functions ──────────────────────────────────────────────────────────
+
+
+ def _infer_provider(model_name: str) -> str:
+     """Infer the LLM provider from the model name."""
+     lower = model_name.lower()
+     if "gpt" in lower or "o1" in lower or "o3" in lower:
+         return "openai"
+     if "claude" in lower:
+         return "anthropic"
+     if "gemini" in lower:
+         return "google"
+     if "command" in lower or "coral" in lower:
+         return "cohere"
+     if "mixtral" in lower or "mistral" in lower:
+         return "mistral"
+     if "llama" in lower:
+         return "meta"
+     return "unknown"
+
+
+ def _parse_json(raw: str) -> dict[str, Any]:
+     """Parse JSON safely, returning empty dict on failure."""
+     try:
+         result = json.loads(raw)
+         if isinstance(result, dict):
+             return result
+         return {}
+     except (json.JSONDecodeError, TypeError, ValueError):
+         return {}
+
+
+ def _to_float_or_none(value: Any) -> float | None:
+     """Convert value to float or return None."""
+     if value is None:
+         return None
+     try:
+         return float(value)
+     except (TypeError, ValueError):
+         return None
+
+
+ def _to_int_or_none(value: Any) -> int | None:
+     """Convert value to int or return None."""
+     if value is None:
+         return None
+     try:
+         return int(value)
+     except (TypeError, ValueError):
+         return None
+
+
+ def _parse_content_parts(
+     attrs: dict[str, Any], prefix: str
+ ) -> tuple[TextContent | ImageContent, ...]:
+     """Parse multimodal content parts from OpenInference indexed attributes.
+
+     Falls back to plain .content string as a single TextContent.
+     """
+     parts: list[TextContent | ImageContent] = []
+     j = 0
+     while True:
+         type_key = f"{prefix}.contents.{j}.message_content.type"
+         content_type = attrs.get(type_key)
+         if content_type is None:
+             break
+
+         if content_type == "text":
+             text_key = f"{prefix}.contents.{j}.message_content.text"
+             text = str(attrs.get(text_key, ""))
+             parts.append(TextContent(text=text))
+         elif content_type == "image":
+             url_key = f"{prefix}.contents.{j}.message_content.image.url.url"
+             detail_key = f"{prefix}.contents.{j}.message_content.image.url.detail"
+             url = str(attrs.get(url_key, ""))
+             detail_raw = attrs.get(detail_key)
+             detail = str(detail_raw) if detail_raw is not None else None
+             parts.append(ImageContent(url=url, detail=detail))
+
+         j += 1
+
+     if not parts:
+         # Fall back to plain .content string
+         content_key = f"{prefix}.content"
+         content_raw = attrs.get(content_key)
+         if content_raw is not None:
+             parts.append(TextContent(text=str(content_raw)))
+
+     return tuple(parts)
+
+
+ def _parse_tool_calls(attrs: dict[str, Any], prefix: str) -> tuple[ToolCall, ...]:
+     """Parse tool calls from OpenInference indexed attributes."""
+     tool_calls: list[ToolCall] = []
+     j = 0
+     while True:
+         name_key = f"{prefix}.tool_calls.{j}.tool_call.function.name"
+         name = attrs.get(name_key)
+         if name is None:
+             break
+
+         args_key = f"{prefix}.tool_calls.{j}.tool_call.function.arguments"
+         args_raw = attrs.get(args_key)
+         if isinstance(args_raw, str):
+             try:
+                 arguments = json.loads(args_raw)
+             except json.JSONDecodeError:
+                 arguments = {"_raw": args_raw}
+         elif isinstance(args_raw, dict):
+             arguments = args_raw
+         else:
+             arguments = {}
+
+         id_key = f"{prefix}.tool_calls.{j}.tool_call.id"
+         call_id_raw = attrs.get(id_key)
+         call_id = str(call_id_raw) if call_id_raw is not None else None
+
+         tool_calls.append(ToolCall(name=str(name), arguments=arguments, id=call_id))
+         j += 1
+
+     return tuple(tool_calls)
+
+
+ def _parse_input_messages(attrs: dict[str, Any]) -> list[Message]:
+     """Parse input messages from OpenInference indexed span attributes."""
+     messages: list[Message] = []
+     i = 0
+     while True:
+         prefix = f"llm.input_messages.{i}.message"
+         role_key = f"{prefix}.role"
+         role = attrs.get(role_key)
+         if role is None:
+             break
+
+         role = str(role).lower()
+
+         if role == "system":
+             content_key = f"{prefix}.content"
+             content = str(attrs.get(content_key, ""))
+             messages.append(SystemMessage(content=content))
+         elif role == "user":
+             parts = _parse_content_parts(attrs, prefix)
+             messages.append(UserMessage(content=parts))
+         elif role == "assistant":
+             parts = _parse_content_parts(attrs, prefix)
+             tool_calls = _parse_tool_calls(attrs, prefix)
+             messages.append(AssistantMessage(content=parts, tool_calls=tool_calls))
+         elif role == "tool":
+             content_key = f"{prefix}.content"
+             content = str(attrs.get(content_key, ""))
+             tool_call_id_raw = attrs.get(f"{prefix}.tool_call_id")
+             tool_call_id = str(tool_call_id_raw) if tool_call_id_raw is not None else None
+             tool_name_raw = attrs.get(f"{prefix}.name")
+             tool_name = str(tool_name_raw) if tool_name_raw is not None else None
+             messages.append(
+                 ToolResultMessage(content=content, tool_call_id=tool_call_id, tool_name=tool_name)
+             )
+
+         i += 1
+
+     return messages
+
+
+ def _parse_output_messages(attrs: dict[str, Any]) -> list[AssistantMessage]:
+     """Parse output messages from OpenInference indexed span attributes."""
+     messages: list[AssistantMessage] = []
+     i = 0
+     while True:
+         prefix = f"llm.output_messages.{i}.message"
+         role_key = f"{prefix}.role"
+         role = attrs.get(role_key)
+         if role is None:
+             break
+
+         parts = _parse_content_parts(attrs, prefix)
+         tool_calls = _parse_tool_calls(attrs, prefix)
+
+         # finish_reason is per-message in OpenInference
+         finish_reason_raw = attrs.get(f"{prefix}.finish_reason")
+         finish_reason = str(finish_reason_raw) if finish_reason_raw is not None else None
+
+         messages.append(
+             AssistantMessage(
+                 content=parts,
+                 tool_calls=tool_calls,
+                 finish_reason=finish_reason,
+             )
+         )
+         i += 1
+
+     return messages
+
+
+ def _parse_tool_definitions(attrs: dict[str, Any]) -> list[ToolDefinition]:
+     """Parse tool definitions from OpenInference indexed span attributes."""
+     tools: list[ToolDefinition] = []
+     i = 0
+     while True:
+         name_key = f"llm.tools.{i}.tool.name"
+         name = attrs.get(name_key)
+         if name is None:
+             break
+
+         desc_raw = attrs.get(f"llm.tools.{i}.tool.description")
+         description = str(desc_raw) if desc_raw is not None else None
+
+         schema_raw = attrs.get(f"llm.tools.{i}.tool.json_schema")
+         if isinstance(schema_raw, str):
+             parameters = _parse_json(schema_raw) or None
+         elif isinstance(schema_raw, dict):
+             parameters = schema_raw
+         else:
+             parameters = None
+
+         tools.append(
+             ToolDefinition(name=str(name), description=description, parameters=parameters)
+         )
+         i += 1
+
+     return tools
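
The parsers above walk OpenInference's flattened, index-addressed attribute scheme rather than nested JSON. A minimal sketch of what that flattening looks like for a single chat turn — the keys are the ones the code above reads; the values are invented for illustration:

```python
# Illustrative input only: keys match those consumed by _parse_input_messages
# and _parse_content_parts above; the model name and texts are made up.
attrs = {
    "openinference.span.kind": "LLM",
    "llm.model_name": "gpt-4o",
    "llm.input_messages.0.message.role": "system",
    "llm.input_messages.0.message.content": "You are a helpful assistant.",
    "llm.input_messages.1.message.role": "user",
    "llm.input_messages.1.message.contents.0.message_content.type": "text",
    "llm.input_messages.1.message.contents.0.message_content.text": "Hi!",
}

messages = _parse_input_messages(attrs)
# -> [SystemMessage(content='You are a helpful assistant.'),
#     UserMessage(content=(TextContent(text='Hi!'),))]
```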
@@ -0,0 +1,95 @@
+ """_DeliveryQueue — background worker thread for delivering spans to handler."""
+
+ from __future__ import annotations
+
+ import asyncio
+ import queue
+ import threading
+ import time
+ from concurrent.futures import Future
+
+ from .handler import InstrumentationHandler
+ from .spans import LLMSpan, ObserveSpan
+
+
+ class _DeliveryQueue:
+     """Single queue for both LLMSpan and ObserveSpan.
+
+     A dedicated asyncio event loop runs on a background daemon thread. The
+     queue-worker thread picks up each span and schedules an async dispatch
+     coroutine on that loop (fire and forget from the worker's perspective).
+     ``queue.task_done()`` is called via a ``Future`` done-callback once the
+     coroutine finishes, so ``flush()`` correctly waits for all in-flight
+     async processing to complete.
+     """
+
+     def __init__(self, handler: InstrumentationHandler, maxsize: int = 1000) -> None:
+         self._handler = handler
+         self._queue: queue.Queue[LLMSpan | ObserveSpan] = queue.Queue(maxsize=maxsize)
+         self._dropped_count = 0
+
+         # Dedicated event loop running on its own daemon thread.
+         self._loop = asyncio.new_event_loop()
+         self._loop_thread = threading.Thread(
+             target=self._loop.run_forever,
+             daemon=True,
+             name="pixie-asyncio-loop",
+         )
+         self._loop_thread.start()
+
+         # Queue-consumer thread: picks items and schedules async tasks.
+         self._thread = threading.Thread(
+             target=self._worker, daemon=True, name="pixie-delivery-worker"
+         )
+         self._thread.start()
+
+     def submit(self, item: LLMSpan | ObserveSpan) -> None:
+         """Submit a span for delivery. Drops silently on a full queue."""
+         try:
+             self._queue.put_nowait(item)
+         except queue.Full:
+             self._dropped_count += 1
+
+     def flush(self, timeout_seconds: float = 5.0) -> bool:
+         """Block until all queued items and their async handlers are done,
+         or until ``timeout_seconds`` elapses (returns False on timeout)."""
+         # Queue.join() has no timeout, so wait on the queue's task counter
+         # directly (all_tasks_done / unfinished_tasks are CPython internals).
+         deadline = time.monotonic() + timeout_seconds
+         with self._queue.all_tasks_done:
+             while self._queue.unfinished_tasks:
+                 remaining = deadline - time.monotonic()
+                 if remaining <= 0:
+                     return False
+                 self._queue.all_tasks_done.wait(remaining)
+         return True
+
+     def _worker(self) -> None:
+         """Queue-consumer: fire-and-forget async dispatch for each span."""
+         while True:
+             item = self._queue.get()
+             try:
+                 future: Future[None] = asyncio.run_coroutine_threadsafe(
+                     self._dispatch(item), self._loop
+                 )
+                 # task_done() is deferred until the coroutine finishes so
+                 # that flush() / queue.join() waits for async handlers too.
+                 future.add_done_callback(lambda _f: self._queue.task_done())
+             except Exception:
+                 # Scheduling failed — mark done immediately to avoid deadlock.
+                 self._queue.task_done()
+
+     async def _dispatch(self, item: LLMSpan | ObserveSpan) -> None:
+         """Async dispatch: route span to the appropriate handler method."""
+         try:
+             if isinstance(item, LLMSpan):
+                 await self._handler.on_llm(item)
+             elif isinstance(item, ObserveSpan):
+                 await self._handler.on_observe(item)
+         except Exception:
+             pass  # Handler exceptions are silently swallowed
+
+     @property
+     def dropped_count(self) -> int:
+         """Number of spans dropped due to full queue."""
+         return self._dropped_count
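
Taken together, the processor and the queue slot into a standard OTel pipeline. A hedged wiring sketch — the real `InstrumentationHandler` lives in pixie's handler module, which this diff does not show, so the `PrintHandler` stub below only assumes the two awaitable methods that `_dispatch` calls:

```python
# Hypothetical wiring — PrintHandler is a stand-in, not pixie's real handler.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider


class PrintHandler:
    async def on_llm(self, span) -> None:  # awaited by _DeliveryQueue._dispatch
        print("LLM call:", span.request_model, f"{span.duration_ms:.1f} ms")

    async def on_observe(self, span) -> None:  # awaited by _dispatch
        print("observe block:", span.name)


delivery_queue = _DeliveryQueue(handler=PrintHandler())  # duck-typed stand-in
provider = TracerProvider()
provider.add_span_processor(LLMSpanProcessor(delivery_queue))
trace.set_tracer_provider(provider)
```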
@@ -0,0 +1,165 @@
+ """Data model types for pixie instrumentation spans."""
+
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from datetime import datetime
+ from typing import Any, Literal  # noqa: UP035
+
+ # ── Message content types ─────────────────────────────────────────────────────
+
+
+ @dataclass(frozen=True)
+ class TextContent:
+     """Plain text content part."""
+
+     text: str
+     type: Literal["text"] = "text"
+
+
+ @dataclass(frozen=True)
+ class ImageContent:
+     """Image content part (URL or data URI)."""
+
+     url: str  # https:// or data: URI
+     detail: str | None = None  # "low" | "high" | "auto" | None
+     type: Literal["image"] = "image"
+
+
+ MessageContent = TextContent | ImageContent
+
+
+ # ── Tool types ────────────────────────────────────────────────────────────────
+
+
+ @dataclass(frozen=True)
+ class ToolCall:
+     """Tool invocation requested by the model."""
+
+     name: str
+     arguments: dict[str, Any]  # always deserialized, never a raw JSON string
+     id: str | None = None
+
+
+ @dataclass(frozen=True)
+ class ToolDefinition:
+     """Tool made available to the model in the request."""
+
+     name: str
+     description: str | None = None
+     parameters: dict[str, Any] | None = None  # JSON Schema object
+
+
+ # ── Message types ─────────────────────────────────────────────────────────────
+
+
+ @dataclass(frozen=True)
+ class SystemMessage:
+     """System prompt message."""
+
+     content: str
+     role: Literal["system"] = "system"
+
+
+ @dataclass(frozen=True)
+ class UserMessage:
+     """User message with multimodal content parts."""
+
+     content: tuple[MessageContent, ...]
+     role: Literal["user"] = "user"
+
+     @classmethod
+     def from_text(cls, text: str) -> UserMessage:
+         """Create a UserMessage with a single TextContent part."""
+         return cls(content=(TextContent(text=text),))
+
+
+ @dataclass(frozen=True)
+ class AssistantMessage:
+     """Assistant response message with optional tool calls."""
+
+     content: tuple[MessageContent, ...]
+     tool_calls: tuple[ToolCall, ...]
+     finish_reason: str | None = None
+     role: Literal["assistant"] = "assistant"
+
+
+ @dataclass(frozen=True)
+ class ToolResultMessage:
+     """Tool execution result message."""
+
+     content: str
+     tool_call_id: str | None = None
+     tool_name: str | None = None
+     role: Literal["tool"] = "tool"
+
+
+ Message = SystemMessage | UserMessage | AssistantMessage | ToolResultMessage
+
+
+ # ── Span types ────────────────────────────────────────────────────────────────
+
+
+ @dataclass(frozen=True)
+ class LLMSpan:
+     """One LLM provider call, produced by LLMSpanProcessor from OpenInference attrs."""
+
+     # ── Identity
+     span_id: str  # hex, 16 chars
+     trace_id: str  # hex, 32 chars
+     parent_span_id: str | None  # links to ObserveSpan.span_id when nested
+
+     # ── Timing
+     started_at: datetime
+     ended_at: datetime
+     duration_ms: float
+
+     # ── Provider / model
+     operation: str  # "chat" | "embedding"
+     provider: str  # "openai" | "anthropic" | "google" | ...
+     request_model: str
+     response_model: str | None
+
+     # ── Token usage
+     input_tokens: int  # default 0
+     output_tokens: int  # default 0
+     cache_read_tokens: int  # default 0
+     cache_creation_tokens: int  # default 0
+
+     # ── Request parameters
+     request_temperature: float | None
+     request_max_tokens: int | None
+     request_top_p: float | None
+
+     # ── Response metadata
+     finish_reasons: tuple[str, ...]  # default ()
+     response_id: str | None
+     output_type: str | None  # "json" | "text" | None
+     error_type: str | None
+
+     # ── Content (populated when capture_content=True)
+     input_messages: tuple[Message, ...]  # default ()
+     output_messages: tuple[AssistantMessage, ...]  # default ()
+     tool_definitions: tuple[ToolDefinition, ...]  # always populated when available
+
+
+ @dataclass(frozen=True)
+ class ObserveSpan:
+     """A user-defined instrumented block, produced when a log() block exits."""
+
+     # ── Identity
+     span_id: str  # hex, 16 chars
+     trace_id: str  # hex, 32 chars
+     parent_span_id: str | None
+
+     # ── Timing
+     started_at: datetime
+     ended_at: datetime
+     duration_ms: float
+
+     # ── User-defined fields
+     name: str | None  # optional label for the block
+     input: Any  # value passed to log(input=...)
+     output: Any  # value set via span.set_output(...)
+     metadata: dict[str, Any]  # accumulated via span.set_metadata(k, v)
+     error: str | None  # exception type if block raised, else None
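
Because every type here is a frozen dataclass, building fixtures for tests is direct. A short illustration (the tool name, arguments, and IDs are invented; the types and fields come from the module above):

```python
# Invented values for illustration; types are the dataclasses defined above.
user = UserMessage.from_text("What's the weather in Paris?")

assistant = AssistantMessage(
    content=(),  # no text part when the model goes straight to a tool call
    tool_calls=(
        ToolCall(name="get_weather", arguments={"city": "Paris"}, id="call_1"),
    ),
    finish_reason="tool_calls",
)

tool_result = ToolResultMessage(
    content='{"temp_c": 18}', tool_call_id="call_1", tool_name="get_weather"
)
```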
@@ -0,0 +1,27 @@
+ """Observation storage module for persisting and querying LLM application traces.
+
+ Provides:
+     - ``Evaluable`` Pydantic BaseModel for uniform evaluator access
+     - ``UNSET`` sentinel for distinguishing unset from ``None``
+     - ``ObservationNode`` tree wrapper with traversal and LLM-friendly serialization
+     - ``ObservationStore`` for persistence and query via Piccolo ORM / SQLite
+ """
+
+ from __future__ import annotations
+
+ from pixie.storage.evaluable import (
+     UNSET,
+     Evaluable,
+     as_evaluable,
+ )
+ from pixie.storage.store import ObservationStore
+ from pixie.storage.tree import ObservationNode, build_tree
+
+ __all__ = [
+     "Evaluable",
+     "ObservationNode",
+     "ObservationStore",
+     "UNSET",
+     "as_evaluable",
+     "build_tree",
+ ]
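
The docstring's `UNSET` sentinel addresses a common pitfall: a field holding `None` is different from a field that was never set. A generic sketch of the pattern — this is not pixie's actual implementation, which lives in `pixie.storage.evaluable` and is not part of this diff:

```python
# Generic sentinel pattern — illustrative only, not pixie's actual code.
from typing import Any


class _Unset:
    def __repr__(self) -> str:
        return "UNSET"


UNSET: Any = _Unset()


def describe(output: Any = UNSET) -> str:
    # Identity check distinguishes "never provided" from an explicit None.
    if output is UNSET:
        return "output was never recorded"
    if output is None:
        return "output was explicitly None"
    return f"output = {output!r}"
```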