genai-otel-instrument 0.1.24__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- genai_otel/__init__.py +132 -0
- genai_otel/__version__.py +34 -0
- genai_otel/auto_instrument.py +602 -0
- genai_otel/cli.py +92 -0
- genai_otel/config.py +333 -0
- genai_otel/cost_calculator.py +467 -0
- genai_otel/cost_enriching_exporter.py +207 -0
- genai_otel/cost_enrichment_processor.py +174 -0
- genai_otel/evaluation/__init__.py +76 -0
- genai_otel/evaluation/bias_detector.py +364 -0
- genai_otel/evaluation/config.py +261 -0
- genai_otel/evaluation/hallucination_detector.py +525 -0
- genai_otel/evaluation/pii_detector.py +356 -0
- genai_otel/evaluation/prompt_injection_detector.py +262 -0
- genai_otel/evaluation/restricted_topics_detector.py +316 -0
- genai_otel/evaluation/span_processor.py +962 -0
- genai_otel/evaluation/toxicity_detector.py +406 -0
- genai_otel/exceptions.py +17 -0
- genai_otel/gpu_metrics.py +516 -0
- genai_otel/instrumentors/__init__.py +71 -0
- genai_otel/instrumentors/anthropic_instrumentor.py +134 -0
- genai_otel/instrumentors/anyscale_instrumentor.py +27 -0
- genai_otel/instrumentors/autogen_instrumentor.py +394 -0
- genai_otel/instrumentors/aws_bedrock_instrumentor.py +94 -0
- genai_otel/instrumentors/azure_openai_instrumentor.py +69 -0
- genai_otel/instrumentors/base.py +919 -0
- genai_otel/instrumentors/bedrock_agents_instrumentor.py +398 -0
- genai_otel/instrumentors/cohere_instrumentor.py +140 -0
- genai_otel/instrumentors/crewai_instrumentor.py +311 -0
- genai_otel/instrumentors/dspy_instrumentor.py +661 -0
- genai_otel/instrumentors/google_ai_instrumentor.py +310 -0
- genai_otel/instrumentors/groq_instrumentor.py +106 -0
- genai_otel/instrumentors/guardrails_ai_instrumentor.py +510 -0
- genai_otel/instrumentors/haystack_instrumentor.py +503 -0
- genai_otel/instrumentors/huggingface_instrumentor.py +399 -0
- genai_otel/instrumentors/hyperbolic_instrumentor.py +236 -0
- genai_otel/instrumentors/instructor_instrumentor.py +425 -0
- genai_otel/instrumentors/langchain_instrumentor.py +340 -0
- genai_otel/instrumentors/langgraph_instrumentor.py +328 -0
- genai_otel/instrumentors/llamaindex_instrumentor.py +36 -0
- genai_otel/instrumentors/mistralai_instrumentor.py +315 -0
- genai_otel/instrumentors/ollama_instrumentor.py +197 -0
- genai_otel/instrumentors/ollama_server_metrics_poller.py +336 -0
- genai_otel/instrumentors/openai_agents_instrumentor.py +291 -0
- genai_otel/instrumentors/openai_instrumentor.py +260 -0
- genai_otel/instrumentors/pydantic_ai_instrumentor.py +362 -0
- genai_otel/instrumentors/replicate_instrumentor.py +87 -0
- genai_otel/instrumentors/sambanova_instrumentor.py +196 -0
- genai_otel/instrumentors/togetherai_instrumentor.py +146 -0
- genai_otel/instrumentors/vertexai_instrumentor.py +106 -0
- genai_otel/llm_pricing.json +1676 -0
- genai_otel/logging_config.py +45 -0
- genai_otel/mcp_instrumentors/__init__.py +14 -0
- genai_otel/mcp_instrumentors/api_instrumentor.py +144 -0
- genai_otel/mcp_instrumentors/base.py +105 -0
- genai_otel/mcp_instrumentors/database_instrumentor.py +336 -0
- genai_otel/mcp_instrumentors/kafka_instrumentor.py +31 -0
- genai_otel/mcp_instrumentors/manager.py +139 -0
- genai_otel/mcp_instrumentors/redis_instrumentor.py +31 -0
- genai_otel/mcp_instrumentors/vector_db_instrumentor.py +265 -0
- genai_otel/metrics.py +148 -0
- genai_otel/py.typed +2 -0
- genai_otel/server_metrics.py +197 -0
- genai_otel_instrument-0.1.24.dist-info/METADATA +1404 -0
- genai_otel_instrument-0.1.24.dist-info/RECORD +69 -0
- genai_otel_instrument-0.1.24.dist-info/WHEEL +5 -0
- genai_otel_instrument-0.1.24.dist-info/entry_points.txt +2 -0
- genai_otel_instrument-0.1.24.dist-info/licenses/LICENSE +680 -0
- genai_otel_instrument-0.1.24.dist-info/top_level.txt +1 -0
genai_otel/instrumentors/guardrails_ai_instrumentor.py
@@ -0,0 +1,510 @@
"""OpenTelemetry instrumentor for Guardrails AI framework.

This instrumentor automatically traces validation guards that detect, quantify,
and mitigate risks in LLM outputs using Guardrails AI's validation framework.

Guardrails AI is a popular validation library for LLMs with input/output guards,
validators, and on-fail policies (reask, fix, filter, refrain).

Requirements:
    pip install guardrails-ai
"""

import logging
from typing import Any, Dict, Optional

from ..config import OTelConfig
from .base import BaseInstrumentor

logger = logging.getLogger(__name__)


class GuardrailsAIInstrumentor(BaseInstrumentor):
    """Instrumentor for Guardrails AI framework"""

    def __init__(self):
        """Initialize the instrumentor."""
        super().__init__()
        self._guardrails_available = False
        self._check_availability()

    def _check_availability(self):
        """Check if Guardrails AI is available."""
        try:
            import guardrails

            self._guardrails_available = True
            logger.debug("Guardrails AI framework detected and available for instrumentation")
        except ImportError:
            logger.debug("Guardrails AI not installed, instrumentation will be skipped")
            self._guardrails_available = False

    def instrument(self, config: OTelConfig):
        """Instrument Guardrails AI if available.

        Args:
            config (OTelConfig): The OpenTelemetry configuration object.
        """
        if not self._guardrails_available:
            logger.debug("Skipping Guardrails AI instrumentation - library not available")
            return

        self.config = config

        try:
            import wrapt

            # Wrap Guard.__call__ for full LLM execution with guards
            wrapt.wrap_function_wrapper(
                "guardrails.guard",
                "Guard.__call__",
                self._wrap_guard_call,
            )

            # Wrap Guard.validate for validation-only operations
            wrapt.wrap_function_wrapper(
                "guardrails.guard",
                "Guard.validate",
                self._wrap_guard_validate,
            )

            # Wrap Guard.parse for parsing LLM outputs
            wrapt.wrap_function_wrapper(
                "guardrails.guard",
                "Guard.parse",
                self._wrap_guard_parse,
            )

            # Wrap Guard.use for adding validators
            try:
                wrapt.wrap_function_wrapper(
                    "guardrails.guard",
                    "Guard.use",
                    self._wrap_guard_use,
                )
            except (ImportError, AttributeError):
                logger.debug("Guard.use not available for instrumentation")

            self._instrumented = True
            logger.info("Guardrails AI instrumentation enabled")

        except Exception as e:
            logger.error("Failed to instrument Guardrails AI: %s", e, exc_info=True)
            if config.fail_on_error:
                raise

    def _wrap_guard_call(self, wrapped, instance, args, kwargs):
        """Wrap Guard.__call__ to trace full LLM execution with guards.

        Args:
            wrapped: The original method
            instance: The Guard instance
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            The result of the wrapped method
        """
        return self.create_span_wrapper(
            span_name="guardrails.guard.call",
            extract_attributes=lambda inst, args, kwargs: self._extract_guard_call_attributes(
                instance, kwargs
            ),
            extract_response_attributes=self._extract_guard_call_response_attributes,
        )(wrapped)(*args, **kwargs)

    def _wrap_guard_validate(self, wrapped, instance, args, kwargs):
        """Wrap Guard.validate to trace validation operations.

        Args:
            wrapped: The original method
            instance: The Guard instance
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            The result of the wrapped method
        """
        return self.create_span_wrapper(
            span_name="guardrails.guard.validate",
            extract_attributes=lambda inst, args, kwargs: self._extract_guard_validate_attributes(
                instance, args, kwargs
            ),
            extract_response_attributes=self._extract_guard_validate_response_attributes,
        )(wrapped)(*args, **kwargs)

    def _wrap_guard_parse(self, wrapped, instance, args, kwargs):
        """Wrap Guard.parse to trace parsing operations.

        Args:
            wrapped: The original method
            instance: The Guard instance
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            The result of the wrapped method
        """
        return self.create_span_wrapper(
            span_name="guardrails.guard.parse",
            extract_attributes=lambda inst, args, kwargs: self._extract_guard_parse_attributes(
                instance, args, kwargs
            ),
            extract_response_attributes=self._extract_guard_parse_response_attributes,
        )(wrapped)(*args, **kwargs)

    def _wrap_guard_use(self, wrapped, instance, args, kwargs):
        """Wrap Guard.use to trace validator additions.

        Args:
            wrapped: The original method
            instance: The Guard instance
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            The result of the wrapped method
        """
        return self.create_span_wrapper(
            span_name="guardrails.guard.use",
            extract_attributes=lambda inst, args, kwargs: self._extract_guard_use_attributes(
                args, kwargs
            ),
        )(wrapped)(*args, **kwargs)

    def _extract_guard_call_attributes(
        self, instance: Any, kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract attributes from Guard.__call__.

        Args:
            instance: The Guard instance
            kwargs: Keyword arguments

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        # Core attributes
        attrs["gen_ai.system"] = "guardrails"
        attrs["gen_ai.operation.name"] = "guard.call"

        try:
            # Extract validators from guard
            if hasattr(instance, "_validators") and instance._validators:
                validator_names = []
                on_fail_actions = []

                for validator in instance._validators[:10]:  # Limit to first 10
                    if hasattr(validator, "__class__"):
                        validator_names.append(validator.__class__.__name__)

                    if hasattr(validator, "on_fail_descriptor"):
                        on_fail = str(validator.on_fail_descriptor)
                        if on_fail not in on_fail_actions:
                            on_fail_actions.append(on_fail)

                if validator_names:
                    attrs["guardrails.validators"] = validator_names
                    attrs["guardrails.validators_count"] = len(instance._validators)

                if on_fail_actions:
                    attrs["guardrails.on_fail_actions"] = on_fail_actions

            # Extract num_reasks
            if "num_reasks" in kwargs:
                attrs["guardrails.num_reasks"] = kwargs["num_reasks"]

            # Extract LLM API if provided
            if "llm_api" in kwargs and kwargs["llm_api"]:
                llm_api = kwargs["llm_api"]
                if hasattr(llm_api, "__name__"):
                    attrs["guardrails.llm_api"] = llm_api.__name__

            # Extract metadata
            if "metadata" in kwargs and kwargs["metadata"]:
                attrs["guardrails.has_metadata"] = True

            # Extract prompt params
            if "prompt_params" in kwargs and kwargs["prompt_params"]:
                attrs["guardrails.has_prompt_params"] = True

            # Extract full_schema_reask flag
            if "full_schema_reask" in kwargs:
                attrs["guardrails.full_schema_reask"] = bool(kwargs["full_schema_reask"])

        except Exception as e:
            logger.debug("Failed to extract guard.__call__ attributes: %s", e)

        return attrs

    def _extract_guard_validate_attributes(
        self, instance: Any, args: Any, kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract attributes from Guard.validate.

        Args:
            instance: The Guard instance
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        # Core attributes
        attrs["gen_ai.system"] = "guardrails"
        attrs["gen_ai.operation.name"] = "guard.validate"

        try:
            # Extract validators
            if hasattr(instance, "_validators") and instance._validators:
                validator_names = [v.__class__.__name__ for v in instance._validators[:10]]
                attrs["guardrails.validators"] = validator_names
                attrs["guardrails.validators_count"] = len(instance._validators)

            # Extract LLM output to validate (first positional arg)
            if args and len(args) > 0:
                llm_output = args[0]
                if isinstance(llm_output, str):
                    attrs["guardrails.llm_output_length"] = len(llm_output)
                    # Truncate output for tracing
                    attrs["guardrails.llm_output_preview"] = llm_output[:200]

        except Exception as e:
            logger.debug("Failed to extract guard.validate attributes: %s", e)

        return attrs

    def _extract_guard_parse_attributes(
        self, instance: Any, args: Any, kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Extract attributes from Guard.parse.

        Args:
            instance: The Guard instance
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        # Core attributes
        attrs["gen_ai.system"] = "guardrails"
        attrs["gen_ai.operation.name"] = "guard.parse"

        try:
            # Extract validators
            if hasattr(instance, "_validators") and instance._validators:
                validator_names = [v.__class__.__name__ for v in instance._validators[:10]]
                attrs["guardrails.validators"] = validator_names
                attrs["guardrails.validators_count"] = len(instance._validators)

            # Extract LLM output to parse
            if args and len(args) > 0:
                llm_output = args[0]
                if isinstance(llm_output, str):
                    attrs["guardrails.llm_output_length"] = len(llm_output)

            # Extract num_reasks
            if "num_reasks" in kwargs:
                attrs["guardrails.num_reasks"] = kwargs["num_reasks"]

            # Extract metadata
            if "metadata" in kwargs and kwargs["metadata"]:
                attrs["guardrails.has_metadata"] = True

        except Exception as e:
            logger.debug("Failed to extract guard.parse attributes: %s", e)

        return attrs

    def _extract_guard_use_attributes(self, args: Any, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """Extract attributes from Guard.use.

        Args:
            args: Positional arguments
            kwargs: Keyword arguments

        Returns:
            Dict[str, Any]: Dictionary of attributes to set on the span.
        """
        attrs = {}

        # Core attributes
        attrs["gen_ai.system"] = "guardrails"
        attrs["gen_ai.operation.name"] = "guard.use"

        try:
            # Extract validator being added
            if args and len(args) > 0:
                validator = args[0]
                if hasattr(validator, "__name__"):
                    attrs["guardrails.validator.name"] = validator.__name__
                elif hasattr(validator, "__class__"):
                    attrs["guardrails.validator.name"] = validator.__class__.__name__

            # Extract on_fail parameter
            if "on_fail" in kwargs:
                attrs["guardrails.validator.on_fail"] = str(kwargs["on_fail"])

        except Exception as e:
            logger.debug("Failed to extract guard.use attributes: %s", e)

        return attrs

    def _extract_guard_call_response_attributes(self, result: Any) -> Dict[str, Any]:
        """Extract response attributes from Guard.__call__.

        Args:
            result: The ValidationOutcome result

        Returns:
            Dict[str, Any]: Dictionary of response attributes.
        """
        attrs = {}

        try:
            # Check if result is ValidationOutcome
            if hasattr(result, "validation_passed"):
                attrs["guardrails.validation.passed"] = bool(result.validation_passed)

            # Extract validated output
            if hasattr(result, "validated_output"):
                validated_output = result.validated_output
                if validated_output is not None:
                    if isinstance(validated_output, str):
                        attrs["guardrails.validated_output_length"] = len(validated_output)
                        attrs["guardrails.validated_output_preview"] = validated_output[:200]
                    else:
                        attrs["guardrails.validated_output_type"] = type(validated_output).__name__

            # Extract reask count
            if hasattr(result, "reasks"):
                attrs["guardrails.reasks_count"] = len(result.reasks) if result.reasks else 0

            # Extract error information
            if hasattr(result, "error") and result.error:
                attrs["guardrails.has_error"] = True
                attrs["guardrails.error_message"] = str(result.error)[:200]

        except Exception as e:
            logger.debug("Failed to extract guard.__call__ response attributes: %s", e)

        return attrs

    def _extract_guard_validate_response_attributes(self, result: Any) -> Dict[str, Any]:
        """Extract response attributes from Guard.validate.

        Args:
            result: The ValidationOutcome result

        Returns:
            Dict[str, Any]: Dictionary of response attributes.
        """
        attrs = {}

        try:
            # Check validation result
            if hasattr(result, "validation_passed"):
                attrs["guardrails.validation.passed"] = bool(result.validation_passed)

            # Extract validator results
            if hasattr(result, "validator_logs") and result.validator_logs:
                passed_validators = []
                failed_validators = []

                for log in result.validator_logs[:10]:  # Limit to first 10
                    if hasattr(log, "validator_name"):
                        validator_name = log.validator_name
                        if hasattr(log, "validation_result") and log.validation_result:
                            passed_validators.append(validator_name)
                        else:
                            failed_validators.append(validator_name)

                if passed_validators:
                    attrs["guardrails.validators.passed"] = passed_validators
                if failed_validators:
                    attrs["guardrails.validators.failed"] = failed_validators

        except Exception as e:
            logger.debug("Failed to extract guard.validate response attributes: %s", e)

        return attrs

    def _extract_guard_parse_response_attributes(self, result: Any) -> Dict[str, Any]:
        """Extract response attributes from Guard.parse.

        Args:
            result: The ValidationOutcome result

        Returns:
            Dict[str, Any]: Dictionary of response attributes.
        """
        attrs = {}

        try:
            # Check validation result
            if hasattr(result, "validation_passed"):
                attrs["guardrails.validation.passed"] = bool(result.validation_passed)

            # Extract validated output
            if hasattr(result, "validated_output"):
                validated_output = result.validated_output
                if validated_output is not None:
                    if isinstance(validated_output, str):
                        attrs["guardrails.validated_output_length"] = len(validated_output)

            # Extract reask count
            if hasattr(result, "reasks"):
                attrs["guardrails.reasks_count"] = len(result.reasks) if result.reasks else 0

        except Exception as e:
            logger.debug("Failed to extract guard.parse response attributes: %s", e)

        return attrs

    def _extract_usage(self, result) -> Optional[Dict[str, int]]:
        """Extract token usage from Guardrails AI result.

        Note: Guardrails AI wraps LLM provider calls.
        Token usage is captured by underlying provider instrumentors.

        Args:
            result: The Guardrails AI operation result.

        Returns:
            Optional[Dict[str, int]]: Dictionary with token counts or None.
        """
        # Token usage is tracked by underlying LLM provider instrumentors
        return None

    def _extract_finish_reason(self, result) -> Optional[str]:
        """Extract finish reason from Guardrails AI result.

        Args:
            result: The Guardrails AI operation result.

        Returns:
            Optional[str]: The finish reason string or None if not available.
        """
        try:
            # Check validation outcome
            if hasattr(result, "validation_passed"):
                if result.validation_passed:
                    return "validated"
                else:
                    return "validation_failed"

            # Check for errors
            if hasattr(result, "error") and result.error:
                return "error"

        except Exception as e:
            logger.debug("Failed to extract finish reason: %s", e)

        return None