lmnr 0.5.1a0__py3-none-any.whl → 0.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. lmnr/__init__.py +2 -10
  2. lmnr/cli.py +10 -8
  3. lmnr/{openllmetry_sdk → opentelemetry_lib}/__init__.py +8 -36
  4. lmnr/{openllmetry_sdk → opentelemetry_lib}/decorators/base.py +27 -20
  5. lmnr/{openllmetry_sdk → opentelemetry_lib}/instruments.py +2 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +454 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +9 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +216 -0
  9. lmnr/opentelemetry_lib/tracing/__init__.py +1 -0
  10. lmnr/opentelemetry_lib/tracing/context_manager.py +13 -0
  11. lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/tracing.py +253 -257
  12. lmnr/sdk/browser/browser_use_otel.py +20 -3
  13. lmnr/sdk/browser/patchright_otel.py +177 -0
  14. lmnr/sdk/browser/playwright_otel.py +55 -62
  15. lmnr/sdk/browser/pw_utils.py +122 -116
  16. lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +98 -0
  17. lmnr/sdk/client/asynchronous/async_client.py +0 -34
  18. lmnr/sdk/client/asynchronous/resources/__init__.py +0 -4
  19. lmnr/sdk/client/asynchronous/resources/agent.py +115 -6
  20. lmnr/sdk/client/synchronous/resources/__init__.py +1 -3
  21. lmnr/sdk/client/synchronous/resources/agent.py +112 -6
  22. lmnr/sdk/client/synchronous/sync_client.py +0 -36
  23. lmnr/sdk/decorators.py +19 -5
  24. lmnr/sdk/eval_control.py +3 -2
  25. lmnr/sdk/evaluations.py +8 -14
  26. lmnr/sdk/laminar.py +10 -10
  27. lmnr/sdk/types.py +86 -170
  28. lmnr/sdk/utils.py +8 -1
  29. lmnr/version.py +1 -1
  30. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/METADATA +58 -58
  31. lmnr-0.5.3.dist-info/RECORD +55 -0
  32. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/WHEEL +1 -1
  33. lmnr/openllmetry_sdk/tracing/__init__.py +0 -0
  34. lmnr/sdk/browser/rrweb/rrweb.min.js +0 -18
  35. lmnr/sdk/client/asynchronous/resources/pipeline.py +0 -89
  36. lmnr/sdk/client/asynchronous/resources/semantic_search.py +0 -60
  37. lmnr/sdk/client/synchronous/resources/pipeline.py +0 -89
  38. lmnr/sdk/client/synchronous/resources/semantic_search.py +0 -60
  39. lmnr-0.5.1a0.dist-info/RECORD +0 -54
  40. /lmnr/{openllmetry_sdk → opentelemetry_lib}/.flake8 +0 -0
  41. /lmnr/{openllmetry_sdk → opentelemetry_lib}/config/__init__.py +0 -0
  42. /lmnr/{openllmetry_sdk → opentelemetry_lib}/decorators/__init__.py +0 -0
  43. /lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/attributes.py +0 -0
  44. /lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/content_allow_list.py +0 -0
  45. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/__init__.py +0 -0
  46. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/in_memory_span_exporter.py +0 -0
  47. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/json_encoder.py +0 -0
  48. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/package_check.py +0 -0
  49. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/LICENSE +0 -0
  50. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/entry_points.txt +0 -0
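
The headline change is the rename of the vendored `openllmetry_sdk` package to `opentelemetry_lib`; other notable changes are the new Google GenAI instrumentation (hunks below), Patchright browser support, the rrweb bundle swap, and the removal of the pipeline and semantic-search client resources. For code that imported lmnr internals directly, a minimal migration sketch (assuming no compatibility alias is shipped for the old name):

# Hypothetical direct import of lmnr internals.
# Before (lmnr 0.5.1a0):
#     from lmnr.openllmetry_sdk.tracing.tracing import TracerWrapper
# After (lmnr 0.5.3):
from lmnr.opentelemetry_lib.tracing.tracing import TracerWrapper
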
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py
@@ -0,0 +1,454 @@
+ """OpenTelemetry Google Generative AI API instrumentation"""
+
+ from collections import defaultdict
+ import logging
+ import os
+ from typing import AsyncGenerator, Callable, Collection, Generator, Optional
+
+ from google.genai import types
+
+ from .config import (
+     Config,
+ )
+ from .utils import (
+     dont_throw,
+     role_from_content_union,
+     set_span_attribute,
+     process_content_union,
+     to_dict,
+     with_tracer_wrapper,
+ )
+ from opentelemetry.trace import Tracer
+ from wrapt import wrap_function_wrapper
+
+ from opentelemetry import context as context_api
+ from opentelemetry.trace import get_tracer, SpanKind, Span
+ from opentelemetry.semconv._incubating.attributes import gen_ai_attributes
+
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
+
+ from opentelemetry.semconv_ai import (
+     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+     SpanAttributes,
+     LLMRequestTypeValues,
+ )
+
+ logger = logging.getLogger(__name__)
+
+ _instruments = ("google-genai >= 1.0.0",)
+
+ WRAPPED_METHODS = [
+     {
+         "package": "google.genai.models",
+         "object": "Models",
+         "method": "generate_content",
+         "span_name": "gemini.generate_content",
+         "is_streaming": False,
+         "is_async": False,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "AsyncModels",
+         "method": "generate_content",
+         "span_name": "gemini.generate_content",
+         "is_streaming": False,
+         "is_async": True,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "Models",
+         "method": "generate_content_stream",
+         "span_name": "gemini.generate_content_stream",
+         "is_streaming": True,
+         "is_async": False,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "AsyncModels",
+         "method": "generate_content_stream",
+         "span_name": "gemini.generate_content_stream",
+         "is_streaming": True,
+         "is_async": True,
+     },
+ ]
+
+
+ def should_send_prompts():
+     return (
+         os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
+     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
+
+
+ @dont_throw
+ def _set_request_attributes(span, args, kwargs):
+     config_dict = to_dict(kwargs.get("config", {}))
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_MODEL, kwargs.get("model")
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE,
+         config_dict.get("temperature"),
+     )
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_TOP_P, config_dict.get("top_p")
+     )
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_TOP_K, config_dict.get("top_k")
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT,
+         config_dict.get("candidate_count"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS,
+         config_dict.get("max_output_tokens"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES,
+         config_dict.get("stop_sequences"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+         config_dict.get("frequency_penalty"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY,
+         config_dict.get("presence_penalty"),
+     )
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_SEED, config_dict.get("seed")
+     )
+
+     tools: list[types.FunctionDeclaration] = []
+     if kwargs.get("tools"):
+         for tool in kwargs.get("tools"):
+             if isinstance(tool, types.Tool):
+                 tools += tool.function_declarations or []
+             elif isinstance(tool, Callable):
+                 tools.append(types.FunctionDeclaration.from_callable(tool))
+     for tool_num, tool in enumerate(tools):
+         set_span_attribute(
+             span,
+             f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.name",
+             to_dict(tool).get("name"),
+         )
+         set_span_attribute(
+             span,
+             f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.description",
+             to_dict(tool).get("description"),
+         )
+         set_span_attribute(
+             span,
+             f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.parameters",
+             to_dict(tool).get("parameters"),
+         )
+
+     if should_send_prompts():
+         i = 0
+         system_instruction: Optional[types.ContentUnion] = config_dict.get(
+             "system_instruction"
+         )
+         if system_instruction:
+             set_span_attribute(
+                 span,
+                 f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
+                 process_content_union(system_instruction),
+             )
+             set_span_attribute(
+                 span, f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role", "system"
+             )
+             i += 1
+         contents = kwargs.get("contents", [])
+         if not isinstance(contents, list):
+             contents = [contents]
+         for content in contents:
+             set_span_attribute(
+                 span,
+                 f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
+                 process_content_union(content),
+             )
+             set_span_attribute(
+                 span,
+                 f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role",
+                 role_from_content_union(content) or "user",
+             )
+             i += 1
+
+
+ @dont_throw
+ def _set_response_attributes(span, response: types.GenerateContentResponse):
+     candidates = response.candidates or []
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_RESPONSE_ID, to_dict(response).get("response_id")
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_RESPONSE_MODEL,
+         to_dict(response).get("model_version"),
+     )
+
+     if response.usage_metadata:
+         usage_dict = to_dict(response.usage_metadata)
+         set_span_attribute(
+             span,
+             gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS,
+             usage_dict.get("prompt_token_count"),
+         )
+         set_span_attribute(
+             span,
+             gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS,
+             usage_dict.get("candidates_token_count"),
+         )
+         set_span_attribute(
+             span,
+             SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+             usage_dict.get("total_token_count"),
+         )
+         set_span_attribute(
+             span,
+             SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
+             usage_dict.get("cached_content_token_count"),
+         )
+
+     if should_send_prompts():
+         if len(candidates) > 1:
+             for i, candidate in enumerate(candidates):
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
+                     process_content_union(candidate.content),
+                 )
+                 set_span_attribute(
+                     span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.role", "assistant"
+                 )
+         else:
+             set_span_attribute(
+                 span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.content", response.text
+             )
+             set_span_attribute(
+                 span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "assistant"
+             )
+
+
+ @dont_throw
+ def _build_from_streaming_response(
+     span: Span, response: Generator[types.GenerateContentResponse, None, None]
+ ) -> Generator[types.GenerateContentResponse, None, None]:
+     final_parts = []
+     role = "model"
+     aggregated_usage_metadata = defaultdict(int)
+     model_version = None
+     for chunk in response:
+         if chunk.model_version:
+             model_version = chunk.model_version
+
+         if chunk.candidates:
+             # Currently gemini throws an error if you pass more than one candidate
+             # with streaming
+             if chunk.candidates and len(chunk.candidates) > 0:
+                 final_parts += chunk.candidates[0].content.parts or []
+                 role = chunk.candidates[0].content.role or role
+         if chunk.usage_metadata:
+             usage_dict = to_dict(chunk.usage_metadata)
+             # prompt token count is sent in every chunk
+             # (and is less by 1 in the last chunk, so we set it once);
+             # total token count in every chunk is greater by prompt token count
+             # than it should be, thus this awkward logic here
+             if aggregated_usage_metadata.get("prompt_token_count") is None:
+                 aggregated_usage_metadata["prompt_token_count"] = (
+                     usage_dict.get("prompt_token_count") or 0
+                 )
+                 aggregated_usage_metadata["total_token_count"] = (
+                     usage_dict.get("total_token_count") or 0
+                 )
+             aggregated_usage_metadata["candidates_token_count"] += (
+                 usage_dict.get("candidates_token_count") or 0
+             )
+             aggregated_usage_metadata["total_token_count"] += (
+                 usage_dict.get("candidates_token_count") or 0
+             )
+         yield chunk
+
+     compound_response = types.GenerateContentResponse(
+         candidates=[
+             {
+                 "content": {
+                     "parts": final_parts,
+                     "role": role,
+                 },
+             }
+         ],
+         usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+             **aggregated_usage_metadata
+         ),
+         model_version=model_version,
+     )
+     if span.is_recording():
+         _set_response_attributes(span, compound_response)
+     span.end()
+
+
+ @dont_throw
+ async def _abuild_from_streaming_response(
+     span: Span, response: AsyncGenerator[types.GenerateContentResponse, None]
+ ) -> AsyncGenerator[types.GenerateContentResponse, None]:
+     final_parts = []
+     role = "model"
+     aggregated_usage_metadata = defaultdict(int)
+     model_version = None
+     async for chunk in response:
+         if chunk.candidates:
+             # Currently gemini throws an error if you pass more than one candidate
+             # with streaming
+             if chunk.candidates and len(chunk.candidates) > 0:
+                 final_parts += chunk.candidates[0].content.parts or []
+                 role = chunk.candidates[0].content.role or role
+         if chunk.model_version:
+             model_version = chunk.model_version
+         if chunk.usage_metadata:
+             usage_dict = to_dict(chunk.usage_metadata)
+             # prompt token count is sent in every chunk
+             # (and is less by 1 in the last chunk, so we set it once);
+             # total token count in every chunk is greater by prompt token count
+             # than it should be, thus this awkward logic here
+             if aggregated_usage_metadata.get("prompt_token_count") is None:
+                 aggregated_usage_metadata["prompt_token_count"] = usage_dict.get(
+                     "prompt_token_count"
+                 )
+                 aggregated_usage_metadata["total_token_count"] = usage_dict.get(
+                     "total_token_count"
+                 )
+             aggregated_usage_metadata["candidates_token_count"] += (
+                 usage_dict.get("candidates_token_count") or 0
+             )
+             aggregated_usage_metadata["total_token_count"] += (
+                 usage_dict.get("candidates_token_count") or 0
+             )
+         yield chunk
+
+     compound_response = types.GenerateContentResponse(
+         candidates=[
+             {
+                 "content": {
+                     "parts": final_parts,
+                     "role": role,
+                 },
+             }
+         ],
+         usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+             **aggregated_usage_metadata
+         ),
+         model_version=model_version,
+     )
+     if span.is_recording():
+         _set_response_attributes(span, compound_response)
+     span.end()
+
+
+ @with_tracer_wrapper
+ def _wrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return wrapped(*args, **kwargs)
+
+     span = tracer.start_span(
+         to_wrap.get("span_name"),
+         kind=SpanKind.CLIENT,
+         attributes={
+             SpanAttributes.LLM_SYSTEM: "gemini",
+             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+         },
+     )
+
+     if span.is_recording():
+         _set_request_attributes(span, args, kwargs)
+
+     if to_wrap.get("is_streaming"):
+         return _build_from_streaming_response(span, wrapped(*args, **kwargs))
+     else:
+         response = wrapped(*args, **kwargs)
+
+     if span.is_recording():
+         _set_response_attributes(span, response)
+
+     span.end()
+     return response
+
+
+ @with_tracer_wrapper
+ async def _awrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return await wrapped(*args, **kwargs)
+
+     span = tracer.start_span(
+         to_wrap.get("span_name"),
+         kind=SpanKind.CLIENT,
+         attributes={
+             SpanAttributes.LLM_SYSTEM: "gemini",
+             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+         },
+     )
+
+     if span.is_recording():
+         _set_request_attributes(span, args, kwargs)
+
+     if to_wrap.get("is_streaming"):
+         return _abuild_from_streaming_response(span, await wrapped(*args, **kwargs))
+     else:
+         response = await wrapped(*args, **kwargs)
+
+     if span.is_recording():
+         _set_response_attributes(span, response)
+
+     span.end()
+     return response
+
+
+ class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
+     """An instrumentor for Google GenAI's client library."""
+
+     def __init__(
+         self,
+         exception_logger=None,
+         upload_base64_image=None,
+         convert_image_to_openai_format=True,
+     ):
+         super().__init__()
+         Config.exception_logger = exception_logger
+         Config.upload_base64_image = upload_base64_image
+         Config.convert_image_to_openai_format = convert_image_to_openai_format
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         tracer_provider = kwargs.get("tracer_provider")
+         tracer = get_tracer(__name__, "0.0.1a0", tracer_provider)
+
+         for wrapped_method in WRAPPED_METHODS:
+             wrap_function_wrapper(
+                 wrapped_method.get("package"),
+                 f"{wrapped_method.get('object')}.{wrapped_method.get('method')}",
+                 (
+                     _awrap(tracer, wrapped_method)
+                     if wrapped_method.get("is_async")
+                     else _wrap(tracer, wrapped_method)
+                 ),
+             )
+
+     def _uninstrument(self, **kwargs):
+         for wrapped_method in WRAPPED_METHODS:
+             unwrap(
+                 f"{wrapped_method.get('package')}.{wrapped_method.get('object')}",
+                 wrapped_method.get("method"),
+             )
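
The instrumentor above wraps the four sync/async, streaming/non-streaming `generate_content` entry points of `google-genai`. A minimal sketch of enabling it standalone (in normal use the lmnr SDK registers it during initialization; the manual wiring below is illustrative):

from opentelemetry.sdk.trace import TracerProvider
from lmnr.opentelemetry_lib.opentelemetry.instrumentation.google_genai import (
    GoogleGenAiSdkInstrumentor,
)

provider = TracerProvider()
GoogleGenAiSdkInstrumentor().instrument(tracer_provider=provider)
# From here on, google.genai generate_content / generate_content_stream calls
# are recorded as "gemini.generate_content*" CLIENT spans on this provider.
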
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py
@@ -0,0 +1,9 @@
+ from typing import Callable, Coroutine, Optional
+
+
+ class Config:
+     exception_logger = None
+     upload_base64_image: Optional[
+         Callable[[str, str, str, str], Coroutine[None, None, str]]
+     ] = None
+     convert_image_to_openai_format: bool = True
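
`upload_base64_image` is typed as an async callable taking four strings and returning a string. A hedged sketch of a conforming hook; the parameter order (trace id, span id, image name, base64 payload) and the URL return value are assumptions, not confirmed by this diff:

# Assumed semantics: (trace_id, span_id, image_name, base64_data) -> stored URL.
async def upload_image(trace_id: str, span_id: str, name: str, data: str) -> str:
    return f"https://images.example.com/{trace_id}/{span_id}/{name}"

Config.upload_base64_image = upload_image
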
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py
@@ -0,0 +1,216 @@
+ import logging
+ import traceback
+ import json
+
+ from .config import (
+     Config,
+ )
+ from google.genai import types
+ from google.genai._common import BaseModel
+ import pydantic
+ from opentelemetry.trace import Span
+ from typing import Any, Optional, Union
+
+
+ def set_span_attribute(span: Span, name: str, value: str):
+     if value is not None:
+         if value != "":
+             span.set_attribute(name, value)
+     return
+
+
+ def dont_throw(func):
+     """
+     A decorator that wraps the passed in function and logs exceptions instead of throwing them.
+
+     @param func: The function to wrap
+     @return: The wrapper function
+     """
+     # Obtain a logger specific to the function's module
+     logger = logging.getLogger(func.__module__)
+
+     def wrapper(*args, **kwargs):
+         try:
+             return func(*args, **kwargs)
+         except Exception as e:
+             logger.debug(
+                 "Laminar failed to trace in %s, error: %s",
+                 func.__name__,
+                 traceback.format_exc(),
+             )
+             if Config.exception_logger:
+                 Config.exception_logger(e)
+
+     return wrapper
+
+
+ def to_dict(obj: Union[BaseModel, pydantic.BaseModel, dict]) -> dict[str, Any]:
+     try:
+         if isinstance(obj, BaseModel):
+             return obj.model_dump()
+         elif isinstance(obj, pydantic.BaseModel):
+             return obj.model_dump()
+         elif isinstance(obj, dict):
+             return obj
+         else:
+             return dict(obj)
+     except Exception:
+         return dict(obj)
+
+
+ def process_content_union(
+     content: Union[types.ContentUnion, types.ContentUnionDict],
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+ ) -> Optional[str]:
+     parts = _process_content_union(content, trace_id, span_id, message_index)
+     if parts is None:
+         return None
+     if isinstance(parts, str):
+         return parts
+     elif isinstance(parts, list):
+         if len(parts) == 1 and isinstance(parts[0], str):
+             return parts[0]
+         return json.dumps(
+             [
+                 {"type": "text", "text": part} if isinstance(part, str) else part
+                 for part in parts
+             ]
+         )
+     else:
+         return None
+
+
+ def _process_content_union(
+     content: Union[types.ContentUnion, types.ContentUnionDict],
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+ ) -> Union[str, list[str], None]:
+     if isinstance(content, types.Content):
+         parts = to_dict(content).get("parts", [])
+         return [_process_part(part) for part in parts]
+     elif isinstance(content, list):
+         return [_process_part_union(item) for item in content]
+     elif isinstance(content, (types.Part, types.File, str)):
+         return _process_part_union(content)
+     elif isinstance(content, dict):
+         if "parts" in content:
+             return [
+                 _process_part_union(
+                     item, trace_id, span_id, message_index, content_index
+                 )
+                 for content_index, item in enumerate(content.get("parts", []))
+             ]
+         else:
+             # Assume it's PartDict
+             return _process_part_union(content, trace_id, span_id, message_index)
+     else:
+         return None
+
+
+ def _process_part_union(
+     content: Union[types.PartDict, types.File, types.Part, str],
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+     content_index: int = 0,
+ ) -> Optional[str]:
+     if isinstance(content, str):
+         return content
+     elif isinstance(content, types.File):
+         content_dict = to_dict(content)
+         name = (
+             content_dict.get("name")
+             or content_dict.get("display_name")
+             or content_dict.get("uri")
+         )
+         return f"files/{name}"
+     elif isinstance(content, (types.Part, dict)):
+         return _process_part(content, trace_id, span_id, message_index, content_index)
+     else:
+         return None
+
+
+ def _process_part(
+     content: types.Part,
+     trace_id: Optional[str] = None,
+     span_id: Optional[str] = None,
+     message_index: int = 0,
+     content_index: int = 0,
+ ) -> Optional[str]:
+     part_dict = to_dict(content)
+     if part_dict.get("text") is not None:
+         return part_dict.get("text")
+     elif part_dict.get("inline_data"):
+         blob = to_dict(part_dict.get("inline_data"))
+         if blob.get("mime_type").startswith("image/"):
+             return _process_image_item(
+                 blob, trace_id, span_id, message_index, content_index
+             )
+         else:
+             # currently, only images are supported
+             return blob.get("mime_type") or "unknown_media"
+     else:
+         return None
+
+
+ def role_from_content_union(
+     content: Union[types.ContentUnion, types.ContentUnionDict],
+ ) -> Optional[str]:
+     if isinstance(content, types.Content):
+         return to_dict(content).get("role")
+     elif isinstance(content, list) and len(content) > 0:
+         return role_from_content_union(content[0])
+     else:
+         return None
+
+
+ def with_tracer_wrapper(func):
+     """Helper for providing tracer for wrapper functions."""
+
+     def _with_tracer(tracer, to_wrap):
+         def wrapper(wrapped, instance, args, kwargs):
+             return func(tracer, to_wrap, wrapped, instance, args, kwargs)
+
+         return wrapper
+
+     return _with_tracer
+
+
+ def _run_async(method):
+     import asyncio
+     import threading
+
+     try:
+         loop = asyncio.get_running_loop()
+     except RuntimeError:
+         loop = None
+
+     if loop and loop.is_running():
+         thread = threading.Thread(target=lambda: asyncio.run(method))
+         thread.start()
+         thread.join()
+     else:
+         asyncio.run(method)
+
+
+ def _process_image_item(
+     blob: dict[str, Any],
+     trace_id: str,
+     span_id: str,
+     message_index: int,
+     content_index: int,
+ ):
+     # Convert to openai format, so backends can handle it
+     return (
+         {
+             "type": "image_url",
+             "image_url": {
+                 "url": f"data:image/{blob.get('mime_type').split('/')[1]};base64,{blob.get('data')}",
+             },
+         }
+         if Config.convert_image_to_openai_format
+         else blob
+     )
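
To make the flattening behavior concrete, here is what `process_content_union` returns for simple inputs, inferred directly from the code above (an illustrative sketch, not part of the package):

from lmnr.opentelemetry_lib.opentelemetry.instrumentation.google_genai.utils import (
    process_content_union,
)

# A bare string, or a single text part, comes back unchanged.
assert process_content_union("Hello") == "Hello"

# Multi-part content is serialized to a JSON list of OpenAI-style blocks:
# [{"type": "text", "text": "a"}, {"type": "text", "text": "b"}]
print(process_content_union(["a", "b"]))
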
lmnr/opentelemetry_lib/tracing/__init__.py
@@ -0,0 +1 @@
+ from lmnr.opentelemetry_lib.tracing.context_manager import get_tracer
lmnr/opentelemetry_lib/tracing/context_manager.py
@@ -0,0 +1,13 @@
+ from contextlib import contextmanager
+
+ from lmnr.opentelemetry_lib.tracing.tracing import TracerWrapper
+
+
+ @contextmanager
+ def get_tracer(flush_on_exit: bool = False):
+     wrapper = TracerWrapper()
+     try:
+         yield wrapper.get_tracer()
+     finally:
+         if flush_on_exit:
+             wrapper.flush()
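
A sketch of how the new context manager is used (assuming the `TracerWrapper` singleton has already been configured, e.g. by `Laminar.initialize()`):

from lmnr.opentelemetry_lib.tracing.context_manager import get_tracer

# flush_on_exit makes the wrapper flush queued spans when the block exits.
with get_tracer(flush_on_exit=True) as tracer:
    with tracer.start_as_current_span("my-operation"):
        ...  # spans created here go through Laminar's tracer provider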