lexsi_sdk-0.1.16-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. lexsi_sdk/__init__.py +5 -0
  2. lexsi_sdk/client/__init__.py +0 -0
  3. lexsi_sdk/client/client.py +176 -0
  4. lexsi_sdk/common/__init__.py +0 -0
  5. lexsi_sdk/common/config/.env.prod +3 -0
  6. lexsi_sdk/common/constants.py +143 -0
  7. lexsi_sdk/common/enums.py +8 -0
  8. lexsi_sdk/common/environment.py +49 -0
  9. lexsi_sdk/common/monitoring.py +81 -0
  10. lexsi_sdk/common/trigger.py +75 -0
  11. lexsi_sdk/common/types.py +122 -0
  12. lexsi_sdk/common/utils.py +93 -0
  13. lexsi_sdk/common/validation.py +110 -0
  14. lexsi_sdk/common/xai_uris.py +197 -0
  15. lexsi_sdk/core/__init__.py +0 -0
  16. lexsi_sdk/core/agent.py +62 -0
  17. lexsi_sdk/core/alert.py +56 -0
  18. lexsi_sdk/core/case.py +618 -0
  19. lexsi_sdk/core/dashboard.py +131 -0
  20. lexsi_sdk/core/guardrails/__init__.py +0 -0
  21. lexsi_sdk/core/guardrails/guard_template.py +299 -0
  22. lexsi_sdk/core/guardrails/guardrail_autogen.py +554 -0
  23. lexsi_sdk/core/guardrails/guardrails_langgraph.py +525 -0
  24. lexsi_sdk/core/guardrails/guardrails_openai.py +541 -0
  25. lexsi_sdk/core/guardrails/openai_runner.py +1328 -0
  26. lexsi_sdk/core/model_summary.py +110 -0
  27. lexsi_sdk/core/organization.py +549 -0
  28. lexsi_sdk/core/project.py +5131 -0
  29. lexsi_sdk/core/synthetic.py +387 -0
  30. lexsi_sdk/core/text.py +595 -0
  31. lexsi_sdk/core/tracer.py +208 -0
  32. lexsi_sdk/core/utils.py +36 -0
  33. lexsi_sdk/core/workspace.py +325 -0
  34. lexsi_sdk/core/wrapper.py +766 -0
  35. lexsi_sdk/core/xai.py +306 -0
  36. lexsi_sdk/version.py +34 -0
  37. lexsi_sdk-0.1.16.dist-info/METADATA +100 -0
  38. lexsi_sdk-0.1.16.dist-info/RECORD +40 -0
  39. lexsi_sdk-0.1.16.dist-info/WHEEL +5 -0
  40. lexsi_sdk-0.1.16.dist-info/top_level.txt +1 -0
lexsi_sdk/core/guardrails/guardrails_openai.py
@@ -0,0 +1,541 @@
+from __future__ import annotations
+
+import json
+from datetime import datetime
+from typing import Any, Callable, Dict, List, Optional, TypedDict, Union
+from lexsi_sdk.common.xai_uris import RUN_GUARDRAILS_URI, RUN_GUARDRAILS_PARALLEL_URI
+from lexsi_sdk.core.project import Project
+from opentelemetry import trace
+import time
+import asyncio
+import requests
+from .guard_template import Guard
+from dataclasses import dataclass
+
+from agents import (
+    Agent,
+    RunContextWrapper,
+    TResponseInputItem,
+    input_guardrail,
+    output_guardrail,
+    ModelSettings,
+    ModelTracing,
+)
+
+@dataclass
+class GuardrailFunctionOutput:
+    """The output of a guardrail function."""
+
+    output_info: Any
+    """
+    Optional information about the guardrail's output. For example, the guardrail could include
+    information about the checks it performed and granular results.
+    """
+
+    tripwire_triggered: bool
+    """
+    Whether the tripwire was triggered. If triggered, the agent's execution will be halted.
+    """
+
+    sanitized_content: str
+
+class GuardrailRunResult(TypedDict, total=False):
+    """Normalized response payload for guardrail execution."""
+    success: bool
+    details: Dict[str, Any]
+    validated_output: Any
+    validation_passed: bool
+    sanitized_output: Any
+    duration: float
+    latency: str
+    on_fail_action: str
+    retry_count: int
+    max_retries: int
+    start_time: str
+    end_time: str
+
+class OpenAIAgentsGuardrail:
+    """
+    Utility for creating OpenAI Agents guardrails that call Guardrails HTTP APIs.
+
+    Supports different actions:
+    - "block": raises InputGuardrailTripwireTriggered/OutputGuardrailTripwireTriggered
+    - "retry": attempts to sanitize content using an LLM and passes the sanitized content to the handoff agent
+    - "warn": logs the issue but allows content to pass through
+    """
+
+    def __init__(
+        self,
+        project: Optional[Project],
+        model: Optional[Any] = None,
+    ) -> None:
+        """Initialize the OpenAI Agents guardrail helper with project and model context."""
+        if project is not None:
+            self.client = project.api_client
+            self.project_name = project.project_name
+
+        self.logs: List[Dict[str, Any]] = []
+        self.max_retries = 2
+        self.retry_delay = 1.0
+        self.tracer = trace.get_tracer(__name__)
+        self.model = model
+
+    def create_input_guardrail(
+        self,
+        guards: Union[List[str], List[Dict[str, Any]], str, Dict[str, Any]],
+        action: str = "block",
+        name: str = "input_guardrail"
+    ) -> Callable:
+        """
+        Create an input guardrail function for OpenAI Agents.
+
+        :param guards: List of guard specifications or single guard.
+        :param action: 'block' | 'retry' | 'warn'.
+        :param name: Name for the guardrail function.
+        :return: Callable suitable for OpenAI Agents guardrail hook.
+        """
+        if isinstance(guards, (str, dict)):
+            guards = [guards]
+
+        @input_guardrail
+        async def guardrail_function(
+            ctx: RunContextWrapper[None],
+            agent: Agent,
+            input: str | list[TResponseInputItem]
+        ) -> GuardrailFunctionOutput:
+            """Run configured input guardrails for an agent invocation."""
+            # Convert input to string for processing
+            if isinstance(input, list):
+                # Handle list of input items (messages)
+                input_text = ""
+                for item in input:
+                    if hasattr(item, 'content'):
+                        input_text += str(item.content) + " "
+                    else:
+                        input_text += str(item) + " "
+                input_text = input_text.strip()
+            else:
+                input_text = str(input)
+
+            # Process through all guards in parallel
+            current_content, tripwire_triggered, output_info = await self._apply_guardrail_parallel(
+                content=input_text,
+                guards=guards,
+                guardrail_type="input",
+                action=action,
+                agent_name=agent.name,
+            )
+
+            return GuardrailFunctionOutput(
+                output_info=output_info,
+                tripwire_triggered=tripwire_triggered,
+                sanitized_content=current_content if action == "retry" else input_text
+            )
+
+        # Set function name for debugging
+        guardrail_function.__name__ = name
+        return guardrail_function
+
+    def create_output_guardrail(
+        self,
+        guards: Union[List[str], List[Dict[str, Any]], str, Dict[str, Any]],
+        action: str = "block",
+        name: str = "output_guardrail"
+    ) -> Callable:
+        """
+        Create an output guardrail function for OpenAI Agents.
+
+        :param guards: List of guard specifications or single guard.
+        :param action: 'block' | 'retry' | 'warn'.
+        :param name: Name for the guardrail function.
+        :return: Callable suitable for OpenAI Agents guardrail hook.
+        """
+        if isinstance(guards, (str, dict)):
+            guards = [guards]
+
+        @output_guardrail
+        async def guardrail_function(
+            ctx: RunContextWrapper,
+            agent: Agent,
+            output: Any
+        ) -> GuardrailFunctionOutput:
+            """Run configured output guardrails for an agent response."""
+            # Extract text content from output
+            if hasattr(output, 'response'):
+                output_text = str(output.response)
+            elif hasattr(output, 'content'):
+                output_text = str(output.content)
+            else:
+                output_text = str(output)
+
+            # Process through all guards in parallel
+            current_content, tripwire_triggered, output_info = await self._apply_guardrail_parallel(
+                content=output_text,
+                guards=guards,
+                guardrail_type="output",
+                action=action,
+                agent_name=agent.name,
+            )
+
+            return GuardrailFunctionOutput(
+                output_info=output_info,
+                tripwire_triggered=tripwire_triggered,
+                sanitized_content=current_content if action == "retry" else output_text
+            )
+
+        # Set function name for debugging
+        guardrail_function.__name__ = name
+        return guardrail_function
+
+    async def _apply_guardrail_parallel(
+        self,
+        content: Any,
+        guards: List[Union[str, Dict[str, Any]]],
+        guardrail_type: str,
+        action: str,
+        agent_name: str,
+    ) -> tuple[Any, bool, Dict[str, Any]]:
+        """
+        Run multiple guardrails in parallel using the batch endpoint.
+
+        Returns:
+            tuple: (processed_content, tripwire_triggered, output_info)
+        """
+        current_content = content
+        tripwire_triggered = False
+        output_info = {}
+        retry_count = 0
+
+        try:
+            parent_span = trace.get_current_span()
+            if parent_span is not None:
+                ctx = trace.set_span_in_context(parent_span)
+                with self.tracer.start_as_current_span(f"guardrails:{guardrail_type}", context=ctx) as parent_gr_span:
+                    parent_gr_span.set_attribute("component", str(agent_name))
+                    parent_gr_span.set_attribute("content_type", guardrail_type)
+
+                    while retry_count <= self.max_retries:
+                        # Prepare payload for parallel guardrail execution
+                        guard_specs = [guard if isinstance(guard, dict) else {"name": guard} for guard in guards]
+                        payload = {
+                            "input_data": current_content,
+                            "guards": guard_specs
+                        }
+
+                        # Call parallel guardrail endpoint
+                        start_time = datetime.now()
+                        response = self.client.post(
+                            RUN_GUARDRAILS_PARALLEL_URI,
+                            payload=payload,
+                        )
+                        end_time = datetime.now()
+                        parallel_result = response
+
+                        # Add timing information
+                        parallel_result.update({
+                            "start_time": start_time.isoformat(),
+                            "end_time": end_time.isoformat(),
+                            "duration": (end_time - start_time).total_seconds()
+                        })
+
+                        parent_gr_span.set_attribute("start_time", str(parallel_result.get("start_time", "")))
+                        parent_gr_span.set_attribute("end_time", str(parallel_result.get("end_time", "")))
+                        parent_gr_span.set_attribute("duration", float(parallel_result.get("duration", 0.0)))
+
+                        if not parallel_result.get("success", False):
+                            output_info["parallel_execution_error"] = parallel_result.get("details", {})
+                            return current_content, tripwire_triggered, output_info
+
+                        # Process each guardrail result
+                        detected_issue = False
+                        for guard_result in parallel_result.get("details", []):
+                            guard_name = guard_result.get("name", "unknown")
+                            run_result: GuardrailRunResult = {
+                                "success": parallel_result.get("success"),
+                                "details": guard_result,
+                                "validated_output": guard_result.get("validated_output"),
+                                "validation_passed": guard_result.get("validation_passed", False),
+                                "sanitized_output": guard_result.get("sanitized_output", current_content),
+                                "duration": guard_result.get("duration", 0.0),
+                                "latency": guard_result.get("latency", "0 ms"),
+                                "start_time": parallel_result.get("start_time", ""),
+                                "end_time": parallel_result.get("end_time", ""),
+                                "retry_count": retry_count,
+                                "max_retries": self.max_retries
+                            }
+                            run_result["response"] = guard_result
+                            run_result["input"] = current_content
+
+                            # Log and handle each guard result
+                            current_content, is_triggered = await self._handle_action(
+                                original=current_content,
+                                run_result=run_result,
+                                action=f"retry_{retry_count}" if retry_count > 0 else action,
+                                agent_name=agent_name,
+                                guardrail_type=guardrail_type,
+                                guard_name=guard_name,
+                                parent_span=parent_gr_span
+                            )
+
+                            if is_triggered:
+                                detected_issue = True
+                                tripwire_triggered = True
+                                output_info[f"guard_{guard_name}"] = run_result
+
+                        if detected_issue and action == "retry" and self.model is not None and retry_count < self.max_retries:
+                            # Sanitize content using the LLM
+                            prompt = self._build_sanitize_prompt("combined", current_content, guardrail_type)
+                            try:
+                                sanitized = await self._invoke_llm(prompt)
+                                current_content = sanitized
+                            except Exception:
+                                pass  # Keep original content if sanitization fails
+                            retry_count += 1
+                            await self._async_sleep(self.retry_delay)
+                            continue
+                        else:
+                            # No issues or no retries left
+                            output_info["retry_count"] = retry_count
+                            output_info["final_content"] = current_content
+                            return current_content, tripwire_triggered, output_info
+
+            # Fallback if no parent span
+            output_info["retry_count"] = retry_count
+            output_info["final_content"] = current_content
+            return current_content, tripwire_triggered, output_info
+
+        except Exception as e:
+            output_info["error"] = f"Parallel guardrail execution failed: {str(e)}"
+            return current_content, tripwire_triggered, output_info
+
+    async def _handle_action(
+        self,
+        original: Any,
+        run_result: GuardrailRunResult,
+        action: str,
+        agent_name: str,
+        guardrail_type: str,
+        guard_name: str,
+        parent_span: Optional[Any]
+    ) -> tuple[Any, bool]:
+        """
+        Handle the action for a single guardrail result.
+
+        Returns:
+            tuple: (processed_content, is_triggered)
+        """
+        validation_passed = bool(run_result.get("validation_passed", True))
+        detected_issue = not validation_passed or not run_result.get("success", True)
+
+        if parent_span is not None:
+            try:
+                with self.tracer.start_as_current_span(
+                    f"guard:{guard_name}",
+                    context=trace.set_span_in_context(parent_span)
+                ) as gr_span:
+                    gr_span.set_attribute("component", str(agent_name))
+                    gr_span.set_attribute("guard", str(guard_name))
+                    gr_span.set_attribute("content_type", guardrail_type)
+                    gr_span.set_attribute("detected", detected_issue)
+                    gr_span.set_attribute("action", action)
+                    gr_span.set_attribute("input.value", self._safe_str(run_result.get("input")))
+                    gr_span.set_attribute("output.value", json.dumps(run_result.get("response")))
+                    gr_span.set_attribute("start_time", str(run_result.get("start_time", "")))
+                    gr_span.set_attribute("end_time", str(run_result.get("end_time", "")))
+                    gr_span.set_attribute("duration", float(run_result.get("duration", 0.0)))
+            except Exception:
+                pass
+
+        # Log guardrail result without creating a new span
+        self._log_guardrail_result(
+            run_result=run_result,
+            action=action,
+            agent_name=agent_name,
+            guardrail_type=guardrail_type,
+            guard_name=guard_name,
+        )
+
+        if detected_issue:
+            if action == "block":
+                return original, True
+            elif "retry" in action:
+                return run_result.get("sanitized_output", original), True
+            else:  # warn
+                return original, False
+        return original, False
+
+    async def _call_run_guardrail(
+        self,
+        input_data: Any,
+        guard: Dict[str, Any],
+        guardrail_type: str
+    ) -> GuardrailRunResult:
+        """Call the guardrails HTTP API."""
+        uri = RUN_GUARDRAILS_URI
+        input_text = str(input_data)
+
+        start_time = datetime.now()
+        try:
+            body = {"input_data": input_text, "guard": guard}
+            data = self.client.post(uri, body)
+
+            end_time = datetime.now()
+
+            details = data.get("details", {}) if isinstance(data, dict) else {}
+            result: GuardrailRunResult = {
+                "success": bool(data.get("success", False)) if isinstance(data, dict) else False,
+                "details": details if isinstance(details, dict) else {},
+                "start_time": start_time.isoformat(),
+                "end_time": end_time.isoformat(),
+            }
+
+            if "duration" not in details:
+                result["duration"] = (end_time - start_time).total_seconds()
+
+            if isinstance(details, dict):
+                if "validated_output" in details:
+                    result["validated_output"] = details["validated_output"]
+                if "validation_passed" in details:
+                    result["validation_passed"] = details["validation_passed"]
+                if "sanitized_output" in details:
+                    result["sanitized_output"] = details["sanitized_output"]
+                if "duration" in details:
+                    result["duration"] = details["duration"]
+                if "latency" in details:
+                    result["latency"] = details["latency"]
+
+            result["retry_count"] = 0
+            result["max_retries"] = self.max_retries
+            result["response"] = data
+            result["input"] = input_text
+
+            return result
+
+        except Exception as exc:
+            end_time = datetime.now()
+            raise exc
+
+    def _log_guardrail_result(
+        self,
+        run_result: GuardrailRunResult,
+        action: str,
+        agent_name: str,
+        guardrail_type: str,
+        guard_name: str,
+    ) -> None:
+        """Log guardrail results without creating a new span."""
+        validation_passed = bool(run_result.get("validation_passed", True))
+        detected_issue = not validation_passed or not run_result.get("success", True)
+
+        # Append to the logs list instead of creating a new span
+        log_entry = {
+            "guard_name": guard_name,
+            "guardrail_type": guardrail_type,
+            "agent_name": agent_name,
+            "action": action,
+            "detected_issue": detected_issue,
+            "start_time": run_result.get("start_time", ""),
+            "end_time": run_result.get("end_time", ""),
+            "duration": float(run_result.get("duration", 0.0)),
+            "input": self._safe_str(run_result.get("input")),
+            "output": self._safe_str(run_result.get("response"))
+        }
+        self.logs.append(log_entry)
+
+    def _build_sanitize_prompt(self, guard_name: str, content: Any, guardrail_type: str) -> str:
+        """Build a prompt for the LLM to sanitize the content according to the guardrail type."""
+        instructions = {
+            "Detect PII": "Sanitize the following text by removing or masking any personally identifiable information (PII). Do not change anything else.",
+            "NSFW Text": "Sanitize the following text by removing or masking any not safe for work (NSFW) content. Do not change anything else.",
+            "Ban List": "Sanitize the following text by removing or masking any banned words. Do not change anything else.",
+            "Bias Check": "Sanitize the following text by removing or masking any biased language. Do not change anything else.",
+            "Competitor Check": "Sanitize the following text by removing or masking any competitor names. Do not change anything else.",
+            "Correct Language": "Sanitize the following text by correcting the language to the expected language. Do not change anything else.",
+            "Gibberish Text": "Sanitize the following text by removing or correcting any gibberish. Do not change anything else.",
+            "Profanity Free": "Sanitize the following text by removing or masking any profanity. Do not change anything else.",
+            "Secrets Present": "Sanitize the following text by removing or masking any secrets. Do not change anything else.",
+            "Toxic Language": "Sanitize the following text by removing or masking any toxic language. Do not change anything else.",
+            "Contains String": "Sanitize the following text by removing or masking the specified substring. Do not change anything else.",
+            "Detect Jailbreak": "Sanitize the following text by removing or masking any jailbreak attempts. Do not change anything else.",
+            "Endpoint Is Reachable": "Sanitize the following text by ensuring any mentioned endpoints are reachable. Do not change anything else.",
+            "Ends With": "Sanitize the following text by ensuring it ends with the specified string. Do not change anything else.",
+            "Has Url": "Sanitize the following text by removing or masking any URLs. Do not change anything else.",
+            "Lower Case": "Sanitize the following text by converting it to lower case. Do not change anything else.",
+            "Mentions Drugs": "Sanitize the following text by removing or masking any mentions of drugs. Do not change anything else.",
+            "One Line": "Sanitize the following text by ensuring it is a single line. Do not change anything else.",
+            "Reading Time": "Sanitize the following text by ensuring its reading time matches the specified value. Do not change anything else.",
+            "Redundant Sentences": "Sanitize the following text by removing redundant sentences. Do not change anything else.",
+            "Regex Match": "Sanitize the following text by ensuring it matches the specified regex. Do not change anything else.",
+            "Sql Column Presence": "Sanitize the following text by ensuring specified SQL columns are present. Do not change anything else.",
+            "Two Words": "Sanitize the following text by ensuring it contains only two words. Do not change anything else.",
+            "Upper Case": "Sanitize the following text by converting it to upper case. Do not change anything else.",
+            "Valid Choices": "Sanitize the following text by ensuring it matches one of the valid choices. Do not change anything else.",
+            "Valid Json": "Sanitize the following text by ensuring it is valid JSON. Do not change anything else.",
+            "Valid Length": "Sanitize the following text by ensuring its length is valid. Do not change anything else.",
+            "Valid Range": "Sanitize the following text by ensuring its value is within the valid range. Do not change anything else.",
+            "Valid URL": "Sanitize the following text by ensuring it is a valid URL. Do not change anything else.",
+            "Web Sanitization": "Sanitize the following text by removing any unsafe web content. Do not change anything else.",
+        }
+
+        instruction = instructions.get(guard_name, "Sanitize the following text according to the guardrail requirements. Do not change anything else.")
+        prompt = f"{instruction}\n\nContent:\n{content}"
+        return prompt
+
+    async def _invoke_llm(self, prompt: str) -> str:
+        """Invoke the LLM for content sanitization."""
+        if self.model is None:
+            return prompt  # Return original if no LLM available
+
+        try:
+            data = await self.model.get_response(
+                system_instructions="Based on the input you have to provide the best and accurate results",
+                input=prompt,
+                model_settings=ModelSettings(temperature=0.1),
+                tools=[],
+                output_schema=None,
+                handoffs=[],
+                tracing=ModelTracing.DISABLED,
+                previous_response_id=None
+            )
+            return str(data.output[0].content[0].text)
+        except Exception:
+            return prompt  # Return original on error
+
+    async def _async_sleep(self, seconds: float) -> None:
+        """Async sleep utility."""
+        await asyncio.sleep(seconds)
+
+    @staticmethod
+    def _safe_str(value: Any) -> str:
+        """Safely convert any value to a string for logging."""
+        try:
+            if isinstance(value, (str, int, float, bool)) or value is None:
+                return str(value)
+            if hasattr(value, "content"):
+                return str(getattr(value, "content", ""))
+
+            if isinstance(value, (list, tuple)):
+                parts = []
+                for item in value:
+                    parts.append(OpenAIAgentsGuardrail._safe_str(item))
+                return ", ".join(parts)
+
+            if isinstance(value, dict):
+                safe_dict: Dict[str, Any] = {}
+                for k, v in value.items():
+                    key = str(k)
+                    if isinstance(v, (str, int, float, bool)) or v is None:
+                        safe_dict[key] = v
+                    elif hasattr(v, "content"):
+                        safe_dict[key] = str(getattr(v, "content", ""))
+                    else:
+                        safe_dict[key] = str(v)
+                return json.dumps(safe_dict, ensure_ascii=False)
+
+            return str(value)
+        except Exception:
+            return "<unserializable>"
+
+def create_guardrail(project: Project, model: Optional[Any] = None) -> OpenAIAgentsGuardrail:
+    """Quick factory function to create a guardrail instance with a project."""
+    return OpenAIAgentsGuardrail(project=project, model=model)
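
For reviewers who want to see the new surface in context, here is a minimal usage sketch; it is not part of the package. It assumes an already-configured lexsi_sdk Project instance (the `project` variable below is hypothetical, since project setup is outside this hunk) and uses the OpenAI Agents SDK's Runner and InputGuardrailTripwireTriggered, which the class docstring above says the "block" action relies on.

    import asyncio

    from agents import Agent, Runner, InputGuardrailTripwireTriggered
    from lexsi_sdk.core.guardrails.guardrails_openai import create_guardrail

    # `project` is assumed to be an existing, configured lexsi_sdk Project.
    guardrail = create_guardrail(project=project)

    agent = Agent(
        name="support_agent",  # hypothetical agent, for illustration only
        instructions="Answer customer questions.",
        # Guard names come from the sanitize-prompt table in this diff.
        input_guardrails=[
            guardrail.create_input_guardrail(
                guards=["Detect PII", "Toxic Language"], action="block"
            )
        ],
        output_guardrails=[
            guardrail.create_output_guardrail(guards="NSFW Text", action="warn")
        ],
    )

    async def main() -> None:
        try:
            result = await Runner.run(agent, "My card number is 4111 1111 1111 1111.")
            print(result.final_output)
        except InputGuardrailTripwireTriggered:
            # action="block" surfaces as a tripwire exception from the agents SDK.
            print("Input guardrail tripped; request blocked.")

    asyncio.run(main())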
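Guards may be passed as strings or dicts: _apply_guardrail_parallel normalizes each string to {"name": guard} before posting to RUN_GUARDRAILS_PARALLEL_URI, so the dict form is what the backend receives either way. Continuing the sketch above, a hedged example of the "retry" action, which only re-sanitizes content when the helper was constructed with a model (the `model` variable is hypothetical):

    # Strings and dicts can be mixed; strings become {"name": ...} per the
    # normalization in _apply_guardrail_parallel. The diff shows no dict
    # keys beyond "name", so none are assumed here.
    mixed_guards = [
        "Detect Jailbreak",
        {"name": "Ban List"},
    ]

    # With action="retry", flagged content is rewritten via _invoke_llm and
    # re-checked up to max_retries (2). This requires a model at construction.
    guardrail_with_model = create_guardrail(project=project, model=model)
    output_guard = guardrail_with_model.create_output_guardrail(
        guards=mixed_guards,
        action="retry",
        name="sanitizing_output_guardrail",
    )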
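Besides the OpenTelemetry spans, every guard evaluation is appended to the helper's logs list by _log_guardrail_result, which is useful when reviewing behavior without a tracing backend. A small sketch of reading it back, using the log_entry fields built in that method:

    # Keys match the log_entry dict in _log_guardrail_result.
    for entry in guardrail.logs:
        status = "FLAGGED" if entry["detected_issue"] else "ok"
        print(f'{entry["guardrail_type"]}:{entry["guard_name"]} '
              f'({entry["action"]}) -> {status} in {entry["duration"]:.3f}s')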