splunk-otel-util-genai 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- opentelemetry/util/genai/__init__.py +17 -0
- opentelemetry/util/genai/_fsspec_upload/__init__.py +39 -0
- opentelemetry/util/genai/_fsspec_upload/fsspec_hook.py +184 -0
- opentelemetry/util/genai/attributes.py +60 -0
- opentelemetry/util/genai/callbacks.py +24 -0
- opentelemetry/util/genai/config.py +184 -0
- opentelemetry/util/genai/debug.py +183 -0
- opentelemetry/util/genai/emitters/__init__.py +25 -0
- opentelemetry/util/genai/emitters/composite.py +186 -0
- opentelemetry/util/genai/emitters/configuration.py +324 -0
- opentelemetry/util/genai/emitters/content_events.py +153 -0
- opentelemetry/util/genai/emitters/evaluation.py +519 -0
- opentelemetry/util/genai/emitters/metrics.py +308 -0
- opentelemetry/util/genai/emitters/span.py +774 -0
- opentelemetry/util/genai/emitters/spec.py +48 -0
- opentelemetry/util/genai/emitters/utils.py +961 -0
- opentelemetry/util/genai/environment_variables.py +200 -0
- opentelemetry/util/genai/handler.py +1002 -0
- opentelemetry/util/genai/instruments.py +44 -0
- opentelemetry/util/genai/interfaces.py +58 -0
- opentelemetry/util/genai/plugins.py +114 -0
- opentelemetry/util/genai/span_context.py +80 -0
- opentelemetry/util/genai/types.py +440 -0
- opentelemetry/util/genai/upload_hook.py +119 -0
- opentelemetry/util/genai/utils.py +182 -0
- opentelemetry/util/genai/version.py +15 -0
- splunk_otel_util_genai-0.1.3.dist-info/METADATA +70 -0
- splunk_otel_util_genai-0.1.3.dist-info/RECORD +31 -0
- splunk_otel_util_genai-0.1.3.dist-info/WHEEL +4 -0
- splunk_otel_util_genai-0.1.3.dist-info/entry_points.txt +5 -0
- splunk_otel_util_genai-0.1.3.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,774 @@
|
|
|
1
|
+
# Span emitter (moved from generators/span_emitter.py)
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import json # noqa: F401 (kept for backward compatibility if external code relies on this module re-exporting json)
|
|
5
|
+
from dataclasses import asdict # noqa: F401
|
|
6
|
+
from typing import Any, Optional
|
|
7
|
+
|
|
8
|
+
from opentelemetry import trace
|
|
9
|
+
from opentelemetry.semconv._incubating.attributes import (
|
|
10
|
+
gen_ai_attributes as GenAI,
|
|
11
|
+
)
|
|
12
|
+
from opentelemetry.semconv.attributes import (
|
|
13
|
+
error_attributes as ErrorAttributes,
|
|
14
|
+
)
|
|
15
|
+
from opentelemetry.trace import Span, SpanKind, Tracer
|
|
16
|
+
from opentelemetry.trace.status import Status, StatusCode
|
|
17
|
+
|
|
18
|
+
from ..attributes import (
|
|
19
|
+
GEN_AI_AGENT_ID,
|
|
20
|
+
GEN_AI_AGENT_NAME,
|
|
21
|
+
GEN_AI_AGENT_TOOLS,
|
|
22
|
+
GEN_AI_AGENT_TYPE,
|
|
23
|
+
GEN_AI_EMBEDDINGS_DIMENSION_COUNT,
|
|
24
|
+
GEN_AI_EMBEDDINGS_INPUT_TEXTS,
|
|
25
|
+
GEN_AI_INPUT_MESSAGES,
|
|
26
|
+
GEN_AI_OUTPUT_MESSAGES,
|
|
27
|
+
GEN_AI_PROVIDER_NAME,
|
|
28
|
+
GEN_AI_REQUEST_ENCODING_FORMATS,
|
|
29
|
+
GEN_AI_STEP_ASSIGNED_AGENT,
|
|
30
|
+
GEN_AI_STEP_NAME,
|
|
31
|
+
GEN_AI_STEP_OBJECTIVE,
|
|
32
|
+
GEN_AI_STEP_SOURCE,
|
|
33
|
+
GEN_AI_STEP_STATUS,
|
|
34
|
+
GEN_AI_STEP_TYPE,
|
|
35
|
+
GEN_AI_WORKFLOW_DESCRIPTION,
|
|
36
|
+
GEN_AI_WORKFLOW_NAME,
|
|
37
|
+
GEN_AI_WORKFLOW_TYPE,
|
|
38
|
+
SERVER_ADDRESS,
|
|
39
|
+
SERVER_PORT,
|
|
40
|
+
)
|
|
41
|
+
from ..interfaces import EmitterMeta
|
|
42
|
+
from ..span_context import extract_span_context, store_span_context
|
|
43
|
+
from ..types import (
|
|
44
|
+
AgentCreation,
|
|
45
|
+
AgentInvocation,
|
|
46
|
+
ContentCapturingMode,
|
|
47
|
+
EmbeddingInvocation,
|
|
48
|
+
Error,
|
|
49
|
+
LLMInvocation,
|
|
50
|
+
Step,
|
|
51
|
+
ToolCall,
|
|
52
|
+
Workflow,
|
|
53
|
+
)
|
|
54
|
+
from ..types import (
|
|
55
|
+
GenAI as GenAIType,
|
|
56
|
+
)
|
|
57
|
+
from .utils import (
|
|
58
|
+
_apply_function_definitions,
|
|
59
|
+
_apply_llm_finish_semconv,
|
|
60
|
+
_extract_system_instructions,
|
|
61
|
+
_serialize_messages,
|
|
62
|
+
filter_semconv_gen_ai_attributes,
|
|
63
|
+
)
|
|
64
|
+
|
|
65
|
+
# Non-semconv supplemental attribute keys that are still allowed to be copied
# verbatim onto spans (see _apply_start_attrs / _apply_finish_attrs).
_SPAN_ALLOWED_SUPPLEMENTAL_KEYS: tuple[str, ...] = (
    "gen_ai.framework",
    "gen_ai.request.id",
)
# Supplemental keys that must never be mirrored onto spans.
_SPAN_BLOCKED_SUPPLEMENTAL_KEYS: set[str] = {"request_top_p", "ls_temperature"}
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _sanitize_span_attribute_value(value: Any) -> Optional[Any]:
|
|
73
|
+
"""Cast arbitrary invocation attribute values to OTEL-compatible types."""
|
|
74
|
+
|
|
75
|
+
if value is None:
|
|
76
|
+
return None
|
|
77
|
+
if isinstance(value, bool):
|
|
78
|
+
return value
|
|
79
|
+
if isinstance(value, (str, int, float)):
|
|
80
|
+
return value
|
|
81
|
+
if isinstance(value, (list, tuple)):
|
|
82
|
+
sanitized_items: list[Any] = []
|
|
83
|
+
for item in value:
|
|
84
|
+
sanitized = _sanitize_span_attribute_value(item)
|
|
85
|
+
if sanitized is None:
|
|
86
|
+
continue
|
|
87
|
+
if isinstance(sanitized, list):
|
|
88
|
+
sanitized_items.append(str(sanitized))
|
|
89
|
+
else:
|
|
90
|
+
sanitized_items.append(sanitized)
|
|
91
|
+
return sanitized_items
|
|
92
|
+
if isinstance(value, dict):
|
|
93
|
+
try:
|
|
94
|
+
return json.dumps(value, default=str)
|
|
95
|
+
except Exception: # pragma: no cover - defensive
|
|
96
|
+
return str(value)
|
|
97
|
+
return str(value)
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _apply_gen_ai_semconv_attributes(
    span: Span,
    attributes: Optional[dict[str, Any]],
) -> None:
    """Write every usable key/value pair from *attributes* onto *span*.

    Values are passed through :func:`_sanitize_span_attribute_value`;
    unrepresentable values are skipped and set_attribute failures are
    swallowed so telemetry never breaks the instrumented application.
    """
    if not attributes:
        return
    for attr_name, raw in attributes.items():
        cleaned = _sanitize_span_attribute_value(raw)
        if cleaned is None:
            continue
        try:
            span.set_attribute(attr_name, cleaned)
        except Exception:  # pragma: no cover - defensive
            pass
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def _apply_sampled_for_evaluation(
|
|
117
|
+
span: Span,
|
|
118
|
+
is_sampled: bool,
|
|
119
|
+
) -> None:
|
|
120
|
+
span.set_attribute("gen_ai.evaluation.sampled", is_sampled)
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class SpanEmitter(EmitterMeta):
    """Emitter that materialises GenAI telemetry as spans.

    Migrated from generators/span_emitter.py. Metrics and content events
    are produced by sibling emitters combined through CompositeEmitter;
    this class only opens, decorates, and closes spans.
    """

    role = "span"
    name = "semconv_span"

    def __init__(
        self, tracer: Optional[Tracer] = None, capture_content: bool = False
    ):
        # Fall back to a module-scoped tracer when none is supplied.
        self._tracer: Tracer = tracer or trace.get_tracer(__name__)
        self._capture_content = capture_content
        self._content_mode = ContentCapturingMode.NO_CONTENT

    def set_capture_content(
        self, value: bool
    ):  # pragma: no cover - trivial mutator
        self._capture_content = value

    def set_content_mode(
        self, mode: ContentCapturingMode
    ) -> None:  # pragma: no cover - trivial mutator
        self._content_mode = mode

    def handles(self, obj: object) -> bool:
        # This emitter accepts every invocation type it is offered.
        return True
|
|
152
|
+
|
|
153
|
+
# ---- helpers ---------------------------------------------------------
|
|
154
|
+
def _apply_start_attrs(self, invocation: GenAIType):
    """Apply start-time span attributes for *invocation*.

    Writes the type-specific semconv attributes, a filtered subset of the
    free-form supplemental ``attributes`` mapping, the provider name, and
    (for LLM invocations) the framework and function definitions.
    """
    span = getattr(invocation, "span", None)
    if span is None:
        return
    semconv_attrs = dict(invocation.semantic_convention_attributes())
    if isinstance(invocation, ToolCall):
        # Fall back to the literal operation name if the enum member is
        # missing in older semconv packages.
        enum_val = getattr(
            GenAI.GenAiOperationNameValues, "EXECUTE_TOOL", None
        )
        semconv_attrs[GenAI.GEN_AI_OPERATION_NAME] = (
            enum_val.value if enum_val else "execute_tool"
        )
        semconv_attrs[GenAI.GEN_AI_REQUEST_MODEL] = invocation.name
    elif isinstance(invocation, EmbeddingInvocation):
        semconv_attrs.setdefault(
            GenAI.GEN_AI_REQUEST_MODEL, invocation.request_model
        )
    elif isinstance(invocation, LLMInvocation):
        semconv_attrs.setdefault(
            GenAI.GEN_AI_REQUEST_MODEL, invocation.request_model
        )
    _apply_gen_ai_semconv_attributes(span, semconv_attrs)
    supplemental = getattr(invocation, "attributes", None)
    if supplemental:
        # Subset of supplemental keys that qualify as semconv attributes.
        semconv_subset = filter_semconv_gen_ai_attributes(
            supplemental, extras=_SPAN_ALLOWED_SUPPLEMENTAL_KEYS
        )
        if semconv_subset:
            _apply_gen_ai_semconv_attributes(span, semconv_subset)
        for key, value in supplemental.items():
            if key in (semconv_subset or {}):
                continue  # already written above
            if key in _SPAN_BLOCKED_SUPPLEMENTAL_KEYS:
                continue
            # Only custom_* keys and explicitly allowed extras pass through.
            if (
                not key.startswith("custom_")
                and key not in _SPAN_ALLOWED_SUPPLEMENTAL_KEYS
            ):
                continue
            # Never overwrite an attribute already present on the span.
            # NOTE(review): this reads span.attributes on a live span — the
            # SDK ReadableSpan exposes it, the API Span does not; confirm.
            if key in span.attributes:  # type: ignore[attr-defined]
                continue
            sanitized = _sanitize_span_attribute_value(value)
            if sanitized is None:
                continue
            try:
                span.set_attribute(key, sanitized)
            except Exception:  # pragma: no cover - defensive
                pass
    provider = getattr(invocation, "provider", None)
    if provider:
        span.set_attribute(GEN_AI_PROVIDER_NAME, provider)
    # framework (named field)
    if isinstance(invocation, LLMInvocation) and invocation.framework:
        span.set_attribute("gen_ai.framework", invocation.framework)
    # function definitions (semantic conv derived from structured list)
    if isinstance(invocation, LLMInvocation):
        _apply_function_definitions(span, invocation.request_functions)
    # Agent context (already covered by semconv metadata on base fields)
|
|
213
|
+
def _apply_finish_attrs(
    self, invocation: LLMInvocation | EmbeddingInvocation
):
    """Write finish-time content capture and semconv attributes to the span."""
    span = getattr(invocation, "span", None)
    if span is None:
        return
    is_llm = isinstance(invocation, LLMInvocation)

    # Input content: system instructions split out from chat messages.
    if self._capture_content and is_llm and invocation.input_messages:
        instructions = _extract_system_instructions(
            invocation.input_messages
        )
        if instructions is not None:
            span.set_attribute(
                GenAI.GEN_AI_SYSTEM_INSTRUCTIONS, instructions
            )
        rendered_inputs = _serialize_messages(
            invocation.input_messages, exclude_system=True
        )
        if rendered_inputs is not None:
            span.set_attribute(GEN_AI_INPUT_MESSAGES, rendered_inputs)

    # Finish-time semconv attributes (response metadata, token usage, ...).
    if is_llm:
        _apply_llm_finish_semconv(span, invocation)
        _apply_gen_ai_semconv_attributes(
            span, invocation.semantic_convention_attributes()
        )
        extras = filter_semconv_gen_ai_attributes(
            getattr(invocation, "attributes", None),
            extras=_SPAN_ALLOWED_SUPPLEMENTAL_KEYS,
        )
        if extras:
            _apply_gen_ai_semconv_attributes(span, extras)

    # Output content.
    if self._capture_content and is_llm and invocation.output_messages:
        rendered_outputs = _serialize_messages(invocation.output_messages)
        if rendered_outputs is not None:
            span.set_attribute(GEN_AI_OUTPUT_MESSAGES, rendered_outputs)
|
|
264
|
+
|
|
265
|
+
def _attach_span(
    self,
    invocation: GenAIType,
    span: Span,
    context_manager: Any,
) -> None:
    """Wire *span* and its context manager onto *invocation*.

    The raw context manager is kept so on_end/on_error can __exit__ it,
    and the span context is persisted for later re-parenting.
    """
    invocation.span = span  # type: ignore[assignment]
    invocation.context_token = context_manager  # type: ignore[assignment]
    store_span_context(invocation, extract_span_context(span))
|
|
274
|
+
|
|
275
|
+
# ---- lifecycle -------------------------------------------------------
|
|
276
|
+
def on_start(
    self, invocation: LLMInvocation | EmbeddingInvocation
) -> None:  # type: ignore[override]
    """Open a span for *invocation*, dispatching on its concrete type.

    Agentic types (Workflow / Agent / Step) and embeddings have dedicated
    starters; ToolCall and LLM-style invocations share the generic
    client-span creation below.
    """
    if isinstance(invocation, Workflow):
        self._start_workflow(invocation)
        return
    if isinstance(invocation, (AgentCreation, AgentInvocation)):
        self._start_agent(invocation)
        return
    if isinstance(invocation, Step):
        self._start_step(invocation)
        return

    if isinstance(invocation, ToolCall):
        span_name = f"tool {invocation.name}"
    elif isinstance(invocation, EmbeddingInvocation):
        self._start_embedding(invocation)
        return
    else:
        # LLM-style invocation: "<operation> <model>"; operation defaults
        # to "chat" when the invocation carries no explicit field.
        operation = getattr(invocation, "operation", "chat")
        span_name = f"{operation} {invocation.request_model}"

    parent_span = getattr(invocation, "parent_span", None)
    parent_ctx = (
        None
        if parent_span is None
        else trace.set_span_in_context(parent_span)
    )
    cm = self._tracer.start_as_current_span(
        span_name,
        kind=SpanKind.CLIENT,
        end_on_exit=False,
        context=parent_ctx,
    )
    span = cm.__enter__()
    self._attach_span(invocation, span, cm)
    self._apply_start_attrs(invocation)
|
|
326
|
+
|
|
327
|
+
def on_end(self, invocation: LLMInvocation | EmbeddingInvocation) -> None:
    """Finalize and end the span for *invocation*.

    Bug fix: the evaluation-sampling attribute is now only written when a
    span actually exists. Previously ``invocation.span`` was passed to
    ``_apply_sampled_for_evaluation`` unguarded, so a missing span raised
    AttributeError here even though every finish path below tolerates
    ``span is None``.
    """
    span = getattr(invocation, "span", None)
    if span is not None:
        _apply_sampled_for_evaluation(
            span, invocation.sample_for_evaluation
        )
    if isinstance(invocation, Workflow):
        self._finish_workflow(invocation)
    elif isinstance(invocation, (AgentCreation, AgentInvocation)):
        self._finish_agent(invocation)
    elif isinstance(invocation, Step):
        self._finish_step(invocation)
    elif isinstance(invocation, EmbeddingInvocation):
        self._finish_embedding(invocation)
    else:
        if span is None:
            return
        self._apply_finish_attrs(invocation)
        token = getattr(invocation, "context_token", None)
        if token is not None and hasattr(token, "__exit__"):
            try:  # pragma: no cover
                token.__exit__(None, None, None)  # type: ignore[misc]
            except Exception:  # pragma: no cover
                pass
        span.end()
|
|
351
|
+
|
|
352
|
+
def on_error(
    self, error: Error, invocation: LLMInvocation | EmbeddingInvocation
) -> None:  # type: ignore[override]
    """Record *error* on the invocation's span and close it."""
    if isinstance(invocation, Workflow):
        self._error_workflow(error, invocation)
        return
    if isinstance(invocation, (AgentCreation, AgentInvocation)):
        self._error_agent(error, invocation)
        return
    if isinstance(invocation, Step):
        self._error_step(error, invocation)
        return
    if isinstance(invocation, EmbeddingInvocation):
        self._error_embedding(error, invocation)
        return

    span = getattr(invocation, "span", None)
    if span is None:
        return
    span.set_status(Status(StatusCode.ERROR, error.message))
    if span.is_recording():
        span.set_attribute(
            ErrorAttributes.ERROR_TYPE, error.type.__qualname__
        )
    self._apply_finish_attrs(invocation)
    token = getattr(invocation, "context_token", None)
    if token is not None and hasattr(token, "__exit__"):
        try:  # pragma: no cover
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:  # pragma: no cover
            pass
    span.end()
|
|
380
|
+
|
|
381
|
+
# ---- Workflow lifecycle ----------------------------------------------
|
|
382
|
+
def _start_workflow(self, workflow: Workflow) -> None:
    """Open and decorate the span backing a workflow invocation."""
    parent_span = getattr(workflow, "parent_span", None)
    parent_ctx = (
        None
        if parent_span is None
        else trace.set_span_in_context(parent_span)
    )
    cm = self._tracer.start_as_current_span(
        f"gen_ai.workflow {workflow.name}",
        kind=SpanKind.CLIENT,
        end_on_exit=False,
        context=parent_ctx,
    )
    span = cm.__enter__()
    self._attach_span(workflow, span, cm)

    # TODO: Align to enum when semconvs is updated.
    span.set_attribute(GenAI.GEN_AI_OPERATION_NAME, "invoke_workflow")
    span.set_attribute(GEN_AI_WORKFLOW_NAME, workflow.name)
    if workflow.workflow_type:
        span.set_attribute(GEN_AI_WORKFLOW_TYPE, workflow.workflow_type)
    if workflow.description:
        span.set_attribute(
            GEN_AI_WORKFLOW_DESCRIPTION, workflow.description
        )
    if workflow.framework:
        span.set_attribute("gen_ai.framework", workflow.framework)
    if workflow.initial_input and self._capture_content:
        # Wrap the raw input in a single user-message structure.
        message = {
            "role": "user",
            "parts": [{"type": "text", "content": workflow.initial_input}],
        }
        span.set_attribute(
            "gen_ai.input.messages", json.dumps([message])
        )
    _apply_gen_ai_semconv_attributes(
        span, workflow.semantic_convention_attributes()
    )
|
|
426
|
+
|
|
427
|
+
def _finish_workflow(self, workflow: Workflow) -> None:
    """Flush final workflow attributes, exit its context, end its span."""
    span = workflow.span
    if span is None:
        return
    if workflow.final_output and self._capture_content:
        # Final output rendered as a single assistant message.
        message = {
            "role": "assistant",
            "parts": [{"type": "text", "content": workflow.final_output}],
            "finish_reason": "stop",
        }
        span.set_attribute(
            "gen_ai.output.messages", json.dumps([message])
        )
    _apply_gen_ai_semconv_attributes(
        span, workflow.semantic_convention_attributes()
    )
    token = workflow.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()
|
|
454
|
+
|
|
455
|
+
def _error_workflow(self, error: Error, workflow: Workflow) -> None:
    """Mark the workflow span as failed and end it."""
    span = workflow.span
    if span is None:
        return
    span.set_status(Status(StatusCode.ERROR, error.message))
    if span.is_recording():
        span.set_attribute(
            ErrorAttributes.ERROR_TYPE, error.type.__qualname__
        )
    _apply_gen_ai_semconv_attributes(
        span, workflow.semantic_convention_attributes()
    )
    token = workflow.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()
|
|
475
|
+
|
|
476
|
+
# ---- Agent lifecycle -------------------------------------------------
|
|
477
|
+
def _start_agent(self, agent: AgentCreation | AgentInvocation) -> None:
    """Open and decorate the span for an agent create/invoke operation."""
    # Span name follows the semconv "<operation> <agent name>" pattern.
    verb = (
        "create_agent"
        if agent.operation == "create_agent"
        else "invoke_agent"
    )
    parent_span = getattr(agent, "parent_span", None)
    parent_ctx = (
        None
        if parent_span is None
        else trace.set_span_in_context(parent_span)
    )
    cm = self._tracer.start_as_current_span(
        f"{verb} {agent.name}",
        kind=SpanKind.CLIENT,
        end_on_exit=False,
        context=parent_ctx,
    )
    span = cm.__enter__()
    self._attach_span(agent, span, cm)

    # Required attributes: the type's own semconv set plus name/id.
    semconv_attrs = dict(agent.semantic_convention_attributes())
    semconv_attrs.setdefault(GEN_AI_AGENT_NAME, agent.name)
    semconv_attrs.setdefault(GEN_AI_AGENT_ID, str(agent.run_id))
    _apply_gen_ai_semconv_attributes(span, semconv_attrs)

    # Optional attributes.
    if agent.agent_type:
        span.set_attribute(GEN_AI_AGENT_TYPE, agent.agent_type)
    if agent.framework:
        span.set_attribute("gen_ai.framework", agent.framework)
    if agent.tools:
        span.set_attribute(GEN_AI_AGENT_TOOLS, agent.tools)
    if agent.system_instructions and self._capture_content:
        parts = [{"type": "text", "content": agent.system_instructions}]
        span.set_attribute(
            GenAI.GEN_AI_SYSTEM_INSTRUCTIONS, json.dumps(parts)
        )
    if (
        isinstance(agent, AgentInvocation)
        and agent.input_context
        and self._capture_content
    ):
        message = {
            "role": "user",
            "parts": [{"type": "text", "content": agent.input_context}],
        }
        span.set_attribute(
            "gen_ai.input.messages", json.dumps([message])
        )
    _apply_gen_ai_semconv_attributes(
        span, agent.semantic_convention_attributes()
    )
|
|
540
|
+
|
|
541
|
+
def _finish_agent(self, agent: AgentCreation | AgentInvocation) -> None:
    """Flush final agent attributes, exit its context, end its span."""
    span = agent.span
    if span is None:
        return
    if (
        isinstance(agent, AgentInvocation)
        and agent.output_result
        and self._capture_content
    ):
        # Result rendered as a single assistant message.
        message = {
            "role": "assistant",
            "parts": [{"type": "text", "content": agent.output_result}],
            "finish_reason": "stop",
        }
        span.set_attribute(
            "gen_ai.output.messages", json.dumps([message])
        )
    _apply_gen_ai_semconv_attributes(
        span, agent.semantic_convention_attributes()
    )
    token = agent.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()
|
|
572
|
+
|
|
573
|
+
def _error_agent(
    self, error: Error, agent: AgentCreation | AgentInvocation
) -> None:
    """Mark an agent span as failed and end it."""
    span = agent.span
    if span is None:
        return
    span.set_status(Status(StatusCode.ERROR, error.message))
    if span.is_recording():
        span.set_attribute(
            ErrorAttributes.ERROR_TYPE, error.type.__qualname__
        )
    _apply_gen_ai_semconv_attributes(
        span, agent.semantic_convention_attributes()
    )
    token = agent.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()
|
|
595
|
+
|
|
596
|
+
# ---- Step lifecycle --------------------------------------------------
|
|
597
|
+
def _start_step(self, step: Step) -> None:
    """Open and decorate a step span."""
    parent_span = getattr(step, "parent_span", None)
    parent_ctx = (
        None
        if parent_span is None
        else trace.set_span_in_context(parent_span)
    )
    cm = self._tracer.start_as_current_span(
        f"gen_ai.step {step.name}",
        kind=SpanKind.CLIENT,
        end_on_exit=False,
        context=parent_ctx,
    )
    span = cm.__enter__()
    self._attach_span(step, span, cm)

    span.set_attribute(GEN_AI_STEP_NAME, step.name)
    # Optional descriptive attributes — emitted only when populated.
    for attr_key, attr_value in (
        (GEN_AI_STEP_TYPE, step.step_type),
        (GEN_AI_STEP_OBJECTIVE, step.objective),
        (GEN_AI_STEP_SOURCE, step.source),
        (GEN_AI_STEP_ASSIGNED_AGENT, step.assigned_agent),
        (GEN_AI_STEP_STATUS, step.status),
    ):
        if attr_value:
            span.set_attribute(attr_key, attr_value)
    if step.input_data and self._capture_content:
        message = {
            "role": "user",
            "parts": [{"type": "text", "content": step.input_data}],
        }
        span.set_attribute(
            "gen_ai.input.messages", json.dumps([message])
        )
    _apply_gen_ai_semconv_attributes(
        span, step.semantic_convention_attributes()
    )
|
|
640
|
+
|
|
641
|
+
def _finish_step(self, step: Step) -> None:
    """Flush final step attributes, exit its context, end its span."""
    span = step.span
    if span is None:
        return
    if step.output_data and self._capture_content:
        message = {
            "role": "assistant",
            "parts": [{"type": "text", "content": step.output_data}],
            "finish_reason": "stop",
        }
        span.set_attribute(
            "gen_ai.output.messages", json.dumps([message])
        )
    # Re-emit status in case it changed during execution.
    if step.status:
        span.set_attribute(GEN_AI_STEP_STATUS, step.status)
    _apply_gen_ai_semconv_attributes(
        span, step.semantic_convention_attributes()
    )
    token = step.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()
|
|
671
|
+
|
|
672
|
+
def _error_step(self, error: Error, step: Step) -> None:
    """Mark a step span as failed and end it."""
    span = step.span
    if span is None:
        return
    span.set_status(Status(StatusCode.ERROR, error.message))
    if span.is_recording():
        span.set_attribute(
            ErrorAttributes.ERROR_TYPE, error.type.__qualname__
        )
    # Status transitions to failed regardless of the step's own value.
    span.set_attribute(GEN_AI_STEP_STATUS, "failed")
    _apply_gen_ai_semconv_attributes(
        span, step.semantic_convention_attributes()
    )
    token = step.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()
|
|
694
|
+
|
|
695
|
+
# ---- Embedding lifecycle ---------------------------------------------
|
|
696
|
+
def _start_embedding(self, embedding: EmbeddingInvocation) -> None:
    """Open and decorate an embedding span."""
    parent_span = getattr(embedding, "parent_span", None)
    parent_ctx = (
        None
        if parent_span is None
        else trace.set_span_in_context(parent_span)
    )
    cm = self._tracer.start_as_current_span(
        f"{embedding.operation_name} {embedding.request_model}",
        kind=SpanKind.CLIENT,
        end_on_exit=False,
        context=parent_ctx,
    )
    span = cm.__enter__()
    self._attach_span(embedding, span, cm)
    self._apply_start_attrs(embedding)

    # Embedding-specific start-time attributes.
    if embedding.server_address:
        span.set_attribute(SERVER_ADDRESS, embedding.server_address)
    if embedding.server_port:
        span.set_attribute(SERVER_PORT, embedding.server_port)
    if embedding.encoding_formats:
        span.set_attribute(
            GEN_AI_REQUEST_ENCODING_FORMATS, embedding.encoding_formats
        )
    if self._capture_content and embedding.input_texts:
        # Raw inputs recorded as an array-valued attribute.
        span.set_attribute(
            GEN_AI_EMBEDDINGS_INPUT_TEXTS, embedding.input_texts
        )
|
|
729
|
+
|
|
730
|
+
def _finish_embedding(self, embedding: EmbeddingInvocation) -> None:
    """Apply finish-time embedding attributes, exit context, end the span."""
    span = embedding.span
    if span is None:
        return
    if embedding.dimension_count:
        span.set_attribute(
            GEN_AI_EMBEDDINGS_DIMENSION_COUNT, embedding.dimension_count
        )
    if embedding.input_tokens is not None:
        span.set_attribute(
            GenAI.GEN_AI_USAGE_INPUT_TOKENS, embedding.input_tokens
        )
    token = embedding.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()
|
|
751
|
+
|
|
752
|
+
def _error_embedding(
    self, error: Error, embedding: EmbeddingInvocation
) -> None:
    """Fail an embedding span with error status.

    Bug fix: the span is now ended after exiting the context manager.
    Previously this handler — alone among the _error_*/_finish_* methods —
    never called ``span.end()``, leaking an open span on the embedding
    error path.
    """
    span = embedding.span
    if span is None:
        return
    span.set_status(Status(StatusCode.ERROR, error.message))
    if span.is_recording():
        span.set_attribute(
            ErrorAttributes.ERROR_TYPE, error.type.__qualname__
        )
    # Prefer the error type recorded on the invocation, when available.
    if embedding.error_type:
        span.set_attribute(
            ErrorAttributes.ERROR_TYPE, embedding.error_type
        )
    token = embedding.context_token
    if token is not None and hasattr(token, "__exit__"):
        try:
            token.__exit__(None, None, None)  # type: ignore[misc]
        except Exception:
            pass
    span.end()