lmnr 0.6.16__py3-none-any.whl → 0.7.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +6 -15
- lmnr/cli/__init__.py +270 -0
- lmnr/cli/datasets.py +371 -0
- lmnr/{cli.py → cli/evals.py} +20 -102
- lmnr/cli/rules.py +42 -0
- lmnr/opentelemetry_lib/__init__.py +9 -2
- lmnr/opentelemetry_lib/decorators/__init__.py +274 -168
- lmnr/opentelemetry_lib/litellm/__init__.py +352 -38
- lmnr/opentelemetry_lib/litellm/utils.py +82 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +191 -129
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +126 -41
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +59 -61
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +119 -18
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +124 -25
- lmnr/opentelemetry_lib/tracing/attributes.py +4 -0
- lmnr/opentelemetry_lib/tracing/context.py +200 -0
- lmnr/opentelemetry_lib/tracing/exporter.py +109 -15
- lmnr/opentelemetry_lib/tracing/instruments.py +22 -5
- lmnr/opentelemetry_lib/tracing/processor.py +128 -30
- lmnr/opentelemetry_lib/tracing/span.py +398 -0
- lmnr/opentelemetry_lib/tracing/tracer.py +40 -1
- lmnr/opentelemetry_lib/tracing/utils.py +62 -0
- lmnr/opentelemetry_lib/utils/package_check.py +9 -0
- lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
- lmnr/sdk/browser/background_send_events.py +158 -0
- lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
- lmnr/sdk/browser/browser_use_otel.py +12 -12
- lmnr/sdk/browser/bubus_otel.py +71 -0
- lmnr/sdk/browser/cdp_utils.py +518 -0
- lmnr/sdk/browser/inject_script.js +514 -0
- lmnr/sdk/browser/patchright_otel.py +18 -44
- lmnr/sdk/browser/playwright_otel.py +104 -187
- lmnr/sdk/browser/pw_utils.py +249 -210
- lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
- lmnr/sdk/browser/utils.py +1 -1
- lmnr/sdk/client/asynchronous/async_client.py +47 -15
- lmnr/sdk/client/asynchronous/resources/__init__.py +2 -7
- lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
- lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/asynchronous/resources/evals.py +122 -18
- lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/asynchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/resources/__init__.py +2 -2
- lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/synchronous/resources/evals.py +83 -17
- lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/synchronous/resources/tags.py +4 -10
- lmnr/sdk/client/synchronous/sync_client.py +47 -15
- lmnr/sdk/datasets/__init__.py +94 -0
- lmnr/sdk/datasets/file_utils.py +91 -0
- lmnr/sdk/decorators.py +103 -23
- lmnr/sdk/evaluations.py +122 -33
- lmnr/sdk/laminar.py +816 -333
- lmnr/sdk/log.py +7 -2
- lmnr/sdk/types.py +124 -143
- lmnr/sdk/utils.py +115 -2
- lmnr/version.py +1 -1
- {lmnr-0.6.16.dist-info → lmnr-0.7.26.dist-info}/METADATA +71 -78
- lmnr-0.7.26.dist-info/RECORD +116 -0
- lmnr-0.7.26.dist-info/WHEEL +4 -0
- lmnr-0.7.26.dist-info/entry_points.txt +3 -0
- lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
- lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -98
- lmnr/sdk/client/asynchronous/resources/agent.py +0 -329
- lmnr/sdk/client/synchronous/resources/agent.py +0 -323
- lmnr/sdk/datasets.py +0 -60
- lmnr-0.6.16.dist-info/LICENSE +0 -75
- lmnr-0.6.16.dist-info/RECORD +0 -61
- lmnr-0.6.16.dist-info/WHEEL +0 -4
- lmnr-0.6.16.dist-info/entry_points.txt +0 -3
|
@@ -0,0 +1,332 @@
|
|
|
1
|
+
import asyncio
import functools
import inspect
import json
import logging
import os
import threading
import traceback
from importlib.metadata import version

from opentelemetry import context as context_api
from opentelemetry.semconv_ai import SpanAttributes

from .config import Config
|
|
13
|
+
|
|
14
|
+
# Attribute key and value identifying the instrumented LLM provider.
GEN_AI_SYSTEM = "gen_ai.system"
GEN_AI_SYSTEM_ANTHROPIC = "anthropic"
# Installed pydantic version string; used to choose between the v1 (.dict)
# and v2 (.model_dump) serialization APIs in model_as_dict.
_PYDANTIC_VERSION = version("pydantic")

# Env var name controlling whether prompt/completion content is captured.
LMNR_TRACE_CONTENT = "LMNR_TRACE_CONTENT"
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def set_span_attribute(span, name, value):
    """Set *name* on *span* only when *value* is meaningful (not None, not "")."""
    if value is None or value == "":
        return
    span.set_attribute(name, value)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def should_send_prompts():
    """Return truthy when prompt/completion content should be captured.

    Capture is enabled unless the LMNR_TRACE_CONTENT env var is set to a
    value other than "true"; a per-context override can still re-enable it.
    """
    env_setting = os.getenv(LMNR_TRACE_CONTENT) or "true"
    if env_setting.lower() == "true":
        return True
    return context_api.get_value("override_enable_content_tracing")
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def dont_throw(func):
    """
    Decorator that logs exceptions raised by *func* instead of propagating them.

    Works for both synchronous and asynchronous functions; when the wrapped
    function raises, the wrapper returns None. The exception is logged at
    DEBUG level and forwarded to Config.exception_logger when one is set.
    """
    logger = logging.getLogger(func.__module__)

    def _handle_exception(e, wrapped, log):
        # DEBUG level on purpose: instrumentation failures must never
        # break or spam the user's application.
        log.debug(
            "OpenLLMetry failed to trace in %s, error: %s",
            wrapped.__name__,
            traceback.format_exc(),
        )
        if Config.exception_logger:
            Config.exception_logger(e)

    @functools.wraps(func)
    async def async_wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except Exception as e:
            _handle_exception(e, func, logger)

    @functools.wraps(func)
    def sync_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            _handle_exception(e, func, logger)

    # functools.wraps preserves __name__/__doc__/__wrapped__ so the decorated
    # function remains introspectable (e.g. by logging and debugging tools).
    return async_wrapper if inspect.iscoroutinefunction(func) else sync_wrapper
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
async def _aextract_response_data(response):
|
|
66
|
+
"""Async version of _extract_response_data that can await coroutines."""
|
|
67
|
+
import inspect
|
|
68
|
+
|
|
69
|
+
# If we get a coroutine, await it
|
|
70
|
+
if inspect.iscoroutine(response):
|
|
71
|
+
try:
|
|
72
|
+
response = await response
|
|
73
|
+
except Exception as e:
|
|
74
|
+
import logging
|
|
75
|
+
|
|
76
|
+
logger = logging.getLogger(__name__)
|
|
77
|
+
logger.debug(f"Failed to await coroutine response: {e}")
|
|
78
|
+
return {}
|
|
79
|
+
|
|
80
|
+
if isinstance(response, dict):
|
|
81
|
+
return response
|
|
82
|
+
|
|
83
|
+
# Handle with_raw_response wrapped responses
|
|
84
|
+
if hasattr(response, "parse") and callable(response.parse):
|
|
85
|
+
try:
|
|
86
|
+
# For with_raw_response, parse() gives us the actual response object
|
|
87
|
+
parsed_response = response.parse()
|
|
88
|
+
if not isinstance(parsed_response, dict):
|
|
89
|
+
parsed_response = parsed_response.__dict__
|
|
90
|
+
return parsed_response
|
|
91
|
+
except Exception as e:
|
|
92
|
+
import logging
|
|
93
|
+
|
|
94
|
+
logger = logging.getLogger(__name__)
|
|
95
|
+
logger.debug(
|
|
96
|
+
f"Failed to parse response: {e}, response type: {type(response)}"
|
|
97
|
+
)
|
|
98
|
+
|
|
99
|
+
# Fallback to __dict__ for regular response objects
|
|
100
|
+
if hasattr(response, "__dict__"):
|
|
101
|
+
response_dict = response.__dict__
|
|
102
|
+
return response_dict
|
|
103
|
+
|
|
104
|
+
return {}
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def _extract_response_data(response):
|
|
108
|
+
"""Extract the actual response data from both regular and with_raw_response wrapped responses."""
|
|
109
|
+
import inspect
|
|
110
|
+
|
|
111
|
+
# If we get a coroutine, we cannot process it in sync context
|
|
112
|
+
if inspect.iscoroutine(response):
|
|
113
|
+
import logging
|
|
114
|
+
|
|
115
|
+
logger = logging.getLogger(__name__)
|
|
116
|
+
logger.warning(
|
|
117
|
+
f"_extract_response_data received coroutine {response} - response processing skipped"
|
|
118
|
+
)
|
|
119
|
+
return {}
|
|
120
|
+
|
|
121
|
+
if isinstance(response, dict):
|
|
122
|
+
return response
|
|
123
|
+
|
|
124
|
+
# Handle with_raw_response wrapped responses
|
|
125
|
+
if hasattr(response, "parse") and callable(response.parse):
|
|
126
|
+
try:
|
|
127
|
+
# For with_raw_response, parse() gives us the actual response object
|
|
128
|
+
parsed_response = response.parse()
|
|
129
|
+
if not isinstance(parsed_response, dict):
|
|
130
|
+
parsed_response = parsed_response.__dict__
|
|
131
|
+
return parsed_response
|
|
132
|
+
except Exception as e:
|
|
133
|
+
import logging
|
|
134
|
+
|
|
135
|
+
logger = logging.getLogger(__name__)
|
|
136
|
+
logger.debug(
|
|
137
|
+
f"Failed to parse response: {e}, response type: {type(response)}"
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
# Fallback to __dict__ for regular response objects
|
|
141
|
+
if hasattr(response, "__dict__"):
|
|
142
|
+
response_dict = response.__dict__
|
|
143
|
+
return response_dict
|
|
144
|
+
|
|
145
|
+
return {}
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
@dont_throw
async def ashared_metrics_attributes(response):
    """Async: build the common metric attributes (provider + response model).

    *response* may be a coroutine, a dict (e.g. assembled from streaming),
    a with_raw_response wrapper, or a regular response object; the model is
    read without materializing the whole object.
    """
    logger = logging.getLogger(__name__)

    # If we get a coroutine, await it to obtain the real response.
    if inspect.iscoroutine(response):
        try:
            response = await response
        except Exception as e:
            logger.debug(f"Failed to await coroutine response: {e}")
            response = None

    # If it's already a dict (e.g., from streaming), use it directly
    if isinstance(response, dict):
        model = response.get("model")
    else:
        # Handle with_raw_response wrapped responses first
        if response and hasattr(response, "parse") and callable(response.parse):
            try:
                response = response.parse()
            except Exception as e:
                logger.debug(f"Failed to parse with_raw_response: {e}")
                response = None

        # Safely get model attribute without extracting the whole object
        model = getattr(response, "model", None) if response else None

    common_attributes = Config.get_common_metrics_attributes()

    return {
        **common_attributes,
        GEN_AI_SYSTEM: GEN_AI_SYSTEM_ANTHROPIC,
        SpanAttributes.LLM_RESPONSE_MODEL: model,
    }
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
@dont_throw
def shared_metrics_attributes(response):
    """Build the common metric attributes (provider + response model).

    *response* may be a dict (e.g. assembled from streaming), a
    with_raw_response wrapper, or a regular response object. A coroutine
    cannot be awaited here, so model falls back to None in that case.
    """
    logger = logging.getLogger(__name__)

    # A coroutine cannot be processed in a sync context.
    if inspect.iscoroutine(response):
        logger.warning(
            f"shared_metrics_attributes received coroutine {response} - using None for model"
        )
        response = None

    # If it's already a dict (e.g., from streaming), use it directly
    if isinstance(response, dict):
        model = response.get("model")
    else:
        # Handle with_raw_response wrapped responses first
        if response and hasattr(response, "parse") and callable(response.parse):
            try:
                response = response.parse()
            except Exception as e:
                logger.debug(f"Failed to parse with_raw_response: {e}")
                response = None

        # Safely get model attribute without extracting the whole object
        model = getattr(response, "model", None) if response else None

    common_attributes = Config.get_common_metrics_attributes()

    return {
        **common_attributes,
        GEN_AI_SYSTEM: GEN_AI_SYSTEM_ANTHROPIC,
        SpanAttributes.LLM_RESPONSE_MODEL: model,
    }
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
@dont_throw
def error_metrics_attributes(exception):
    """Build metric attributes describing a failed Anthropic request."""
    attributes = {GEN_AI_SYSTEM: GEN_AI_SYSTEM_ANTHROPIC}
    attributes["error.type"] = type(exception).__name__
    return attributes
|
|
237
|
+
|
|
238
|
+
|
|
239
|
+
@dont_throw
def count_prompt_tokens_from_request(anthropic, request):
    """Best-effort prompt token count via the client's count_tokens helper.

    Counts the legacy "prompt" string when present, otherwise sums the text
    parts of "messages". Returns 0 when the client has no count_tokens.
    """
    if not hasattr(anthropic, "count_tokens"):
        return 0

    prompt = request.get("prompt")
    if prompt:
        return anthropic.count_tokens(prompt)

    total = 0
    for message in request.get("messages") or []:
        content = message.get("content")
        if isinstance(content, str):
            total += anthropic.count_tokens(content)
        elif isinstance(content, list):
            for part in content:
                # TODO: handle image and tool tokens
                if isinstance(part, dict) and part.get("type") == "text":
                    total += anthropic.count_tokens(part.get("text", ""))
    return total
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
@dont_throw
async def acount_prompt_tokens_from_request(anthropic, request):
    """Async best-effort prompt token count (awaits the client's count_tokens).

    Counts the legacy "prompt" string when present, otherwise sums the text
    parts of "messages". Returns 0 when the client has no count_tokens.
    """
    if not hasattr(anthropic, "count_tokens"):
        return 0

    prompt = request.get("prompt")
    if prompt:
        return await anthropic.count_tokens(prompt)

    total = 0
    for message in request.get("messages") or []:
        content = message.get("content")
        if isinstance(content, str):
            total += await anthropic.count_tokens(content)
        elif isinstance(content, list):
            for part in content:
                # TODO: handle image and tool tokens
                if isinstance(part, dict) and part.get("type") == "text":
                    total += await anthropic.count_tokens(part.get("text", ""))
    return total
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
def run_async(method):
    """Run the coroutine *method* to completion from synchronous code.

    Outside a running event loop the coroutine runs via asyncio.run. Inside a
    running loop (asyncio.run would raise there) it is executed on a fresh
    loop in a short-lived helper thread, which is joined before returning.

    Exceptions raised by the coroutine propagate to the caller in both
    branches; previously the thread branch silently dropped them in the
    worker thread while the direct branch raised, which was inconsistent.
    """
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None

    if loop and loop.is_running():
        # Capture any exception from the worker thread so it can be
        # re-raised in the caller, matching the direct-run branch.
        errors = []

        def _target():
            try:
                asyncio.run(method)
            except BaseException as e:  # noqa: BLE001 - re-raised below
                errors.append(e)

        thread = threading.Thread(target=_target)
        thread.start()
        thread.join()
        if errors:
            raise errors[0]
    else:
        asyncio.run(method)
|
|
295
|
+
|
|
296
|
+
|
|
297
|
+
def should_emit_events() -> bool:
    """
    Checks if the instrumentation isn't using the legacy attributes
    and if the event logger is not None.
    """
    if Config.use_legacy_attributes:
        return False
    return True
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize SDK model objects.

    Objects exposing to_json()/model_dump_json() are decoded back into
    Python structures so they embed as nested JSON objects. Previously the
    raw JSON string was returned from default(), which json.dumps then
    re-encoded, producing double-encoded escaped strings.
    """

    def default(self, o):
        if hasattr(o, "to_json"):
            serialized = o.to_json()
            try:
                # Decode so the object nests as JSON rather than as an
                # escaped string.
                return json.loads(serialized)
            except (TypeError, ValueError):
                return serialized

        if hasattr(o, "model_dump_json"):
            serialized = o.model_dump_json()
            try:
                return json.loads(serialized)
            except (TypeError, ValueError):
                return serialized

        try:
            return str(o)
        except Exception:
            logger = logging.getLogger(__name__)
            logger.debug("Failed to serialize object of type: %s", type(o).__name__)
            return ""
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
def model_as_dict(model):
    """Convert a pydantic model (v1 or v2) or mapping-like object to a dict.

    Falls back to dict(model) for other iterables of pairs, and returns the
    object unchanged when no conversion works.
    """
    if isinstance(model, dict):
        return model
    # Compare the major version numerically: the previous lexicographic
    # string compare would misorder versions such as "10.0.0" < "2.0.0".
    try:
        pydantic_is_v1 = int(_PYDANTIC_VERSION.split(".")[0]) < 2
    except ValueError:
        # Unexpected version format; keep the old string compare as fallback.
        pydantic_is_v1 = _PYDANTIC_VERSION < "2.0.0"
    if pydantic_is_v1 and hasattr(model, "dict"):
        return model.dict()
    if hasattr(model, "model_dump"):
        return model.model_dump()
    try:
        return dict(model)
    except Exception:
        return model
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
# Version of this vendored instrumentation package.
__version__ = "0.41.0"
|