lmnr 0.7.10__py3-none-any.whl → 0.7.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. lmnr/opentelemetry_lib/__init__.py +6 -0
  2. lmnr/opentelemetry_lib/decorators/__init__.py +1 -1
  3. lmnr/opentelemetry_lib/litellm/__init__.py +277 -32
  4. lmnr/opentelemetry_lib/litellm/utils.py +76 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +136 -44
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +93 -6
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +155 -3
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +477 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +14 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +10 -1
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +100 -8
  14. lmnr/opentelemetry_lib/tracing/__init__.py +9 -0
  15. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +56 -3
  16. lmnr/opentelemetry_lib/tracing/exporter.py +24 -9
  17. lmnr/opentelemetry_lib/tracing/instruments.py +8 -0
  18. lmnr/opentelemetry_lib/tracing/processor.py +26 -0
  19. lmnr/sdk/browser/browser_use_cdp_otel.py +12 -7
  20. lmnr/sdk/browser/bubus_otel.py +71 -0
  21. lmnr/sdk/browser/cdp_utils.py +318 -87
  22. lmnr/sdk/evaluations.py +22 -2
  23. lmnr/sdk/laminar.py +17 -3
  24. lmnr/version.py +1 -1
  25. {lmnr-0.7.10.dist-info → lmnr-0.7.12.dist-info}/METADATA +50 -50
  26. {lmnr-0.7.10.dist-info → lmnr-0.7.12.dist-info}/RECORD +28 -24
  27. {lmnr-0.7.10.dist-info → lmnr-0.7.12.dist-info}/WHEEL +0 -0
  28. {lmnr-0.7.10.dist-info → lmnr-0.7.12.dist-info}/entry_points.txt +0 -0
@@ -36,6 +36,7 @@ except ImportError:
     ResponseOutputMessageParam = Dict[str, Any]
     RESPONSES_AVAILABLE = False
 
+from lmnr.opentelemetry_lib.decorators import json_dumps
 from lmnr.opentelemetry_lib.tracing.context import (
     get_current_context,
     get_event_attributes_from_context,
@@ -139,6 +140,10 @@ class TracedData(pydantic.BaseModel):
     request_model: Optional[str] = pydantic.Field(default=None)
     response_model: Optional[str] = pydantic.Field(default=None)
 
+    # Reasoning attributes
+    request_reasoning_summary: Optional[str] = pydantic.Field(default=None)
+    request_reasoning_effort: Optional[str] = pydantic.Field(default=None)
+
 
 responses: dict[str, TracedData] = {}
 
@@ -204,7 +209,28 @@ def set_data_attributes(traced_response: TracedData, span: Span):
             SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
             usage.input_tokens_details.cached_tokens,
         )
-        # TODO: add reasoning tokens in output token details
+
+        reasoning_tokens = None
+        if usage.output_tokens_details:
+            reasoning_tokens = usage.output_tokens_details.reasoning_tokens
+
+        _set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_REASONING_TOKENS,
+            reasoning_tokens or 0,
+        )
+
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_REQUEST_REASONING_SUMMARY}",
+            traced_response.request_reasoning_summary or (),
+        )
+
+        _set_span_attribute(
+            span,
+            f"{SpanAttributes.LLM_REQUEST_REASONING_EFFORT}",
+            traced_response.request_reasoning_effort or (),
+        )
 
     if should_send_prompts():
         prompt_index = 0
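
Note: the new request_reasoning_summary / request_reasoning_effort fields and the reasoning span attributes above mirror the `reasoning` parameter of the OpenAI Responses API. A minimal sketch of the kind of call whose values the instrumented wrapper records; the model name, input, and parameter values are illustrative and not taken from this package:

    from openai import OpenAI

    client = OpenAI()

    # The wrapper reads kwargs["reasoning"]["summary"] and kwargs["reasoning"]["effort"]
    # from calls like this one and stores them on TracedData.
    response = client.responses.create(
        model="o4-mini",  # any reasoning-capable model
        input="Summarize the tradeoffs of batch span export.",
        reasoning={"effort": "medium", "summary": "auto"},
    )
    print(response.output_text)
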
@@ -275,7 +301,9 @@ def set_data_attributes(traced_response: TracedData, span: Span):
                 prompt_index += 1
             elif block_dict.get("type") == "computer_call_output":
                 _set_span_attribute(
-                    span, f"{GEN_AI_PROMPT}.{prompt_index}.role", "computer-call"
+                    span,
+                    f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                    "computer_call_output",
                 )
                 output_image_url = block_dict.get("output", {}).get("image_url")
                 if output_image_url:
@@ -299,16 +327,45 @@ def set_data_attributes(traced_response: TracedData, span: Span):
                 call_content = {}
                 if block_dict.get("id"):
                     call_content["id"] = block_dict.get("id")
-                if block_dict.get("call_id"):
-                    call_content["call_id"] = block_dict.get("call_id")
                 if block_dict.get("action"):
                     call_content["action"] = block_dict.get("action")
                 _set_span_attribute(
                     span,
-                    f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                    f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.arguments",
                     json.dumps(call_content),
                 )
+                _set_span_attribute(
+                    span,
+                    f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.id",
+                    block_dict.get("call_id"),
+                )
+                _set_span_attribute(
+                    span,
+                    f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.name",
+                    "computer_call",
+                )
                 prompt_index += 1
+            elif block_dict.get("type") == "reasoning":
+                reasoning_summary = block_dict.get("summary")
+                if reasoning_summary and isinstance(reasoning_summary, list):
+                    processed_chunks = [
+                        {"type": "text", "text": chunk.get("text")}
+                        for chunk in reasoning_summary
+                        if isinstance(chunk, dict)
+                        and chunk.get("type") == "summary_text"
+                    ]
+                    _set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.reasoning",
+                        json_dumps(processed_chunks),
+                    )
+                _set_span_attribute(
+                    span,
+                    f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                    "assistant",
+                )
+                # reasoning is followed by other content parts in the same messge,
+                # so we don't increment the prompt index
             # TODO: handle other block types
 
         _set_span_attribute(span, f"{GEN_AI_COMPLETION}.0.role", "assistant")
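
Note: a reasoning block comes back from the Responses API as a list of summary_text chunks; the comprehension in the hunk above flattens them into plain text parts before serialization. A standalone illustration of that transformation (the sample block is made up):

    import json

    block_dict = {
        "type": "reasoning",
        "summary": [
            {"type": "summary_text", "text": "Compared batch vs. simple export."},
            {"type": "summary_text", "text": "Chose batch export for throughput."},
        ],
    }

    reasoning_summary = block_dict.get("summary")
    processed_chunks = [
        {"type": "text", "text": chunk.get("text")}
        for chunk in reasoning_summary
        if isinstance(chunk, dict) and chunk.get("type") == "summary_text"
    ]
    print(json.dumps(processed_chunks))
    # [{"type": "text", "text": "Compared batch vs. simple export."}, ...]
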
@@ -381,9 +438,19 @@ def set_data_attributes(traced_response: TracedData, span: Span):
             )
             tool_call_index += 1
         elif block_dict.get("type") == "reasoning":
-            _set_span_attribute(
-                span, f"{GEN_AI_COMPLETION}.0.reasoning", block_dict.get("summary")
-            )
+            reasoning_summary = block_dict.get("summary")
+            if reasoning_summary and isinstance(reasoning_summary, list):
+                processed_chunks = [
+                    {"type": "text", "text": chunk.get("text")}
+                    for chunk in reasoning_summary
+                    if isinstance(chunk, dict)
+                    and chunk.get("type") == "summary_text"
+                ]
+                _set_span_attribute(
+                    span,
+                    "gen_ai.completion.0.reasoning",
+                    json_dumps(processed_chunks),
+                )
         # TODO: handle other block types, in particular other calls
 
 
@@ -423,6 +490,12 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwa
                 "model", existing_data.get("request_model", "")
             ),
             response_model=existing_data.get("response_model", ""),
+            request_reasoning_summary=kwargs.get("reasoning", {}).get(
+                "summary", existing_data.get("request_reasoning_summary")
+            ),
+            request_reasoning_effort=kwargs.get("reasoning", {}).get(
+                "effort", existing_data.get("request_reasoning_effort")
+            ),
         )
     except Exception:
         traced_data = None
@@ -469,9 +542,16 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwa
                 ),
                 request_model=existing_data.get("request_model", kwargs.get("model")),
                 response_model=existing_data.get("response_model", parsed_response.model),
+                request_reasoning_summary=existing_data.get(
+                    "request_reasoning_summary", kwargs.get("reasoning", {}).get("summary")
+                ),
+                request_reasoning_effort=existing_data.get(
+                    "request_reasoning_effort", kwargs.get("reasoning", {}).get("effort")
+                ),
             )
             responses[parsed_response.id] = traced_data
         except Exception:
+            raise
             return response
 
         if parsed_response.status == "completed":
@@ -521,6 +601,12 @@ async def async_responses_get_or_create_wrapper(
             output_text=kwargs.get("output_text", existing_data.get("output_text")),
             request_model=kwargs.get("model", existing_data.get("request_model")),
             response_model=existing_data.get("response_model"),
+            request_reasoning_summary=kwargs.get("reasoning", {}).get(
+                "summary", existing_data.get("request_reasoning_summary")
+            ),
+            request_reasoning_effort=kwargs.get("reasoning", {}).get(
+                "effort", existing_data.get("request_reasoning_effort")
+            ),
         )
     except Exception:
         traced_data = None
@@ -567,6 +653,12 @@ async def async_responses_get_or_create_wrapper(
                 ),
                 request_model=existing_data.get("request_model", kwargs.get("model")),
                 response_model=existing_data.get("response_model", parsed_response.model),
+                request_reasoning_summary=existing_data.get(
+                    "request_reasoning_summary", kwargs.get("reasoning", {}).get("summary")
+                ),
+                request_reasoning_effort=existing_data.get(
+                    "request_reasoning_effort", kwargs.get("reasoning", {}).get("effort")
+                ),
             )
             responses[parsed_response.id] = traced_data
         except Exception:
@@ -261,6 +261,15 @@ class TracerWrapper(object):
             return False
         return self._span_processor.force_flush()
 
+    def force_reinit_processor(self):
+        if isinstance(self._span_processor, LaminarSpanProcessor):
+            self._span_processor.force_flush()
+            self._span_processor.force_reinit()
+        else:
+            self._logger.warning(
+                "Not using LaminarSpanProcessor, cannot force reinit processor"
+            )
+
     @classmethod
     def get_session_recording_options(cls) -> SessionRecordingOptions:
         """Get the session recording options set during initialization."""
@@ -51,9 +51,19 @@ class BedrockInstrumentorInitializer(InstrumentorInitializer):
 
 
 class BrowserUseInstrumentorInitializer(InstrumentorInitializer):
-    def init_instrumentor(
-        self, client, async_client, *args, **kwargs
-    ) -> BaseInstrumentor | None:
+    """Instruments for different versions of browser-use:
+
+    - browser-use < 0.5: BrowserUseLegacyInstrumentor to track agent_step and
+      other structure spans. Session instrumentation is controlled by
+      Instruments.PLAYWRIGHT (or Instruments.PATCHRIGHT for several versions
+      in 0.4.* that used patchright)
+    - browser-use ~= 0.5: Structure spans live in browser_use package itself.
+      Session instrumentation is controlled by Instruments.PLAYWRIGHT
+    - browser-use >= 0.6.0rc1: BubusInstrumentor to keep spans structure.
+      Session instrumentation is controlled by Instruments.BROWSER_USE_SESSION
+    """
+
+    def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
         if not is_package_installed("browser-use"):
             return None
 
@@ -65,6 +75,19 @@ class BrowserUseInstrumentorInitializer(InstrumentorInitializer):
 
             return BrowserUseLegacyInstrumentor()
 
+        return None
+
+
+class BrowserUseSessionInstrumentorInitializer(InstrumentorInitializer):
+    def init_instrumentor(
+        self, client, async_client, *args, **kwargs
+    ) -> BaseInstrumentor | None:
+        if not is_package_installed("browser-use"):
+            return None
+
+        version = get_package_version("browser-use")
+        from packaging.version import parse
+
         if version and parse(version) >= parse("0.6.0rc1"):
             from lmnr.sdk.browser.browser_use_cdp_otel import BrowserUseInstrumentor
 
@@ -73,6 +96,16 @@ class BrowserUseInstrumentorInitializer(InstrumentorInitializer):
 
         return None
 
+class BubusInstrumentorInitializer(InstrumentorInitializer):
+    def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
+        if not is_package_installed("bubus"):
+            return None
+
+        from lmnr.sdk.browser.bubus_otel import BubusInstrumentor
+
+        return BubusInstrumentor()
+
+
 class ChromaInstrumentorInitializer(InstrumentorInitializer):
     def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
         if not is_package_installed("chromadb"):
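
Note: the docstring above describes a three-way split by browser-use version. A hypothetical helper (not part of the package) expressing the same rule with packaging.version, assuming the Instruments members named in the docstring and the instruments module listed in the files-changed table:

    from packaging.version import parse

    from lmnr.opentelemetry_lib.tracing.instruments import Instruments

    def session_instrument_for(browser_use_version: str) -> Instruments:
        # Mirrors the version split documented on BrowserUseInstrumentorInitializer.
        if parse(browser_use_version) >= parse("0.6.0rc1"):
            return Instruments.BROWSER_USE_SESSION
        return Instruments.PLAYWRIGHT
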
@@ -109,6 +142,26 @@ class CrewAIInstrumentorInitializer(InstrumentorInitializer):
         return CrewAiInstrumentor()
 
 
+class CuaAgentInstrumentorInitializer(InstrumentorInitializer):
+    def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
+        if not is_package_installed("cua-agent"):
+            return None
+
+        from ..opentelemetry.instrumentation.cua_agent import CuaAgentInstrumentor
+
+        return CuaAgentInstrumentor()
+
+
+class CuaComputerInstrumentorInitializer(InstrumentorInitializer):
+    def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
+        if not is_package_installed("cua-computer"):
+            return None
+
+        from ..opentelemetry.instrumentation.cua_computer import CuaComputerInstrumentor
+
+        return CuaComputerInstrumentor()
+
+
 class GoogleGenAIInstrumentorInitializer(InstrumentorInitializer):
     def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
         if not is_package_installed("google-genai"):
@@ -5,7 +5,7 @@ from opentelemetry.sdk.trace import ReadableSpan
 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
     OTLPSpanExporter,
 )
-from opentelemetry.exporter.otlp.proto.http import Compression
+from opentelemetry.exporter.otlp.proto.http import Compression as HTTPCompression
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import (
     OTLPSpanExporter as HTTPOTLPSpanExporter,
 )
@@ -15,6 +15,10 @@ from lmnr.sdk.utils import from_env
 
 class LaminarSpanExporter(SpanExporter):
     instance: OTLPSpanExporter | HTTPOTLPSpanExporter
+    endpoint: str
+    headers: dict[str, str]
+    timeout: float
+    force_http: bool
 
     def __init__(
         self,
@@ -34,19 +38,30 @@ class LaminarSpanExporter(SpanExporter):
             port = 443 if force_http else 8443
         final_url = f"{url}:{port or 443}"
         api_key = api_key or from_env("LMNR_PROJECT_API_KEY")
-        if force_http:
+        self.endpoint = final_url
+        self.headers = (
+            {"Authorization": f"Bearer {api_key}"}
+            if force_http
+            else {"authorization": f"Bearer {api_key}"}
+        )
+        self.timeout = timeout_seconds
+        self.force_http = force_http
+        self._init_instance()
+
+    def _init_instance(self):
+        if self.force_http:
             self.instance = HTTPOTLPSpanExporter(
-                endpoint=f"{final_url}/v1/traces",
-                headers={"Authorization": f"Bearer {api_key}"},
-                compression=Compression.Gzip,
-                timeout=timeout_seconds,
+                endpoint=self.endpoint,
+                headers=self.headers,
+                compression=HTTPCompression.Gzip,
+                timeout=self.timeout,
             )
         else:
             self.instance = OTLPSpanExporter(
-                endpoint=final_url,
-                headers={"authorization": f"Bearer {api_key}"},
+                endpoint=self.endpoint,
+                headers=self.headers,
+                timeout=self.timeout,
                 compression=grpc.Compression.Gzip,
-                timeout=timeout_seconds,
             )
 
     def export(self, spans: list[ReadableSpan]) -> SpanExportResult:
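
Note: the exporter now keeps its connection settings (endpoint, headers, timeout, force_http) on the instance, so the underlying OTLP client can be rebuilt from them. A sketch of that flow; the constructor keyword names are inferred from the hunk above, the values are illustrative, and the API key falls back to the LMNR_PROJECT_API_KEY environment variable:

    from lmnr.opentelemetry_lib.tracing.exporter import LaminarSpanExporter

    exporter = LaminarSpanExporter(
        base_url="https://api.lmnr.ai",  # illustrative endpoint
        port=8443,
        timeout_seconds=30,
        force_http=False,
    )

    # Rebuild the gRPC/HTTP OTLP exporter from the stored endpoint, headers, and timeout.
    exporter.instance.shutdown()
    exporter._init_instance()
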
@@ -17,9 +17,13 @@ class Instruments(Enum):
     ANTHROPIC = "anthropic"
     BEDROCK = "bedrock"
     BROWSER_USE = "browser_use"
+    BROWSER_USE_SESSION = "browser_use_session"
+    BUBUS = "bubus"
     CHROMA = "chroma"
     COHERE = "cohere"
     CREWAI = "crewai"
+    CUA_AGENT = "cua_agent"
+    CUA_COMPUTER = "cua_computer"
     GOOGLE_GENAI = "google_genai"
     GROQ = "groq"
     HAYSTACK = "haystack"
@@ -60,9 +64,13 @@ INSTRUMENTATION_INITIALIZERS: dict[
     Instruments.ANTHROPIC: initializers.AnthropicInstrumentorInitializer(),
     Instruments.BEDROCK: initializers.BedrockInstrumentorInitializer(),
     Instruments.BROWSER_USE: initializers.BrowserUseInstrumentorInitializer(),
+    Instruments.BROWSER_USE_SESSION: initializers.BrowserUseSessionInstrumentorInitializer(),
+    Instruments.BUBUS: initializers.BubusInstrumentorInitializer(),
     Instruments.CHROMA: initializers.ChromaInstrumentorInitializer(),
     Instruments.COHERE: initializers.CohereInstrumentorInitializer(),
     Instruments.CREWAI: initializers.CrewAIInstrumentorInitializer(),
+    Instruments.CUA_AGENT: initializers.CuaAgentInstrumentorInitializer(),
+    Instruments.CUA_COMPUTER: initializers.CuaComputerInstrumentorInitializer(),
     Instruments.GOOGLE_GENAI: initializers.GoogleGenAIInstrumentorInitializer(),
     Instruments.GROQ: initializers.GroqInstrumentorInitializer(),
     Instruments.HAYSTACK: initializers.HaystackInstrumentorInitializer(),
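
Note: the new enum members plug into the same opt-in mechanism as the existing ones. A hypothetical initialization, assuming the top-level Instruments re-export and the `instruments` argument of Laminar.initialize from earlier 0.7.x releases:

    from lmnr import Instruments, Laminar

    Laminar.initialize(
        project_api_key="lmnr-project-api-key",  # or the LMNR_PROJECT_API_KEY env var
        instruments={
            Instruments.BROWSER_USE,
            Instruments.BROWSER_USE_SESSION,  # CDP session recording, browser-use >= 0.6.0rc1
            Instruments.BUBUS,                # span structure via the bubus event bus
            Instruments.CUA_AGENT,
            Instruments.CUA_COMPUTER,
        },
    )
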
@@ -1,3 +1,4 @@
+import logging
 import uuid
 
 from opentelemetry.sdk.trace.export import (
@@ -19,13 +20,16 @@ from lmnr.opentelemetry_lib.tracing.attributes import (
     SPAN_SDK_VERSION,
 )
 from lmnr.opentelemetry_lib.tracing.exporter import LaminarSpanExporter
+from lmnr.sdk.log import get_default_logger
 from lmnr.version import PYTHON_VERSION, __version__
 
 
 class LaminarSpanProcessor(SpanProcessor):
     instance: BatchSpanProcessor | SimpleSpanProcessor
+    logger: logging.Logger
     __span_id_to_path: dict[int, list[str]] = {}
     __span_id_lists: dict[int, list[str]] = {}
+    max_export_batch_size: int
 
     def __init__(
         self,
@@ -38,6 +42,8 @@ class LaminarSpanProcessor(SpanProcessor):
         disable_batch: bool = False,
         exporter: SpanExporter | None = None,
     ):
+        self.logger = get_default_logger(__name__)
+        self.max_export_batch_size = max_export_batch_size
         self.exporter = exporter or LaminarSpanExporter(
             base_url=base_url,
             port=port,
@@ -86,6 +92,26 @@ class LaminarSpanProcessor(SpanProcessor):
     def force_flush(self, timeout_millis: int = 30000) -> bool:
         return self.instance.force_flush(timeout_millis)
 
+    def force_reinit(self):
+        if not isinstance(self.exporter, LaminarSpanExporter):
+            self.logger.warning(
+                "LaminarSpanProcessor is not using LaminarSpanExporter, cannot force reinit"
+            )
+            return
+        self.instance.shutdown()
+        disable_batch = isinstance(self.instance, SimpleSpanProcessor)
+        del self.exporter.instance
+        del self.instance
+
+        self.exporter._init_instance()
+        self.instance = (
+            SimpleSpanProcessor(self.exporter)
+            if disable_batch
+            else BatchSpanProcessor(
+                self.exporter, max_export_batch_size=self.max_export_batch_size
+            )
+        )
+
     def shutdown(self):
         self.instance.shutdown()
 
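
Note: TracerWrapper.force_reinit_processor() (earlier in this diff) flushes and then calls force_reinit(), which shuts down and recreates both the exporter and the Batch/SimpleSpanProcessor while keeping the stored configuration. A sketch of direct use; only keyword names visible in the hunks above are used, and the API key is read from LMNR_PROJECT_API_KEY:

    from lmnr.opentelemetry_lib.tracing.processor import LaminarSpanProcessor

    processor = LaminarSpanProcessor(
        base_url="https://api.lmnr.ai",  # illustrative endpoint
        port=8443,
        disable_batch=False,
    )

    processor.force_flush()   # drain pending spans through the current exporter
    processor.force_reinit()  # shut down and rebuild the exporter/processor pair
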
@@ -1,3 +1,6 @@
+import asyncio
+import uuid
+
 from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient
 from lmnr.sdk.browser.utils import with_tracer_and_client_wrapper
 from lmnr.version import __version__
@@ -12,7 +15,6 @@ from opentelemetry.instrumentation.utils import unwrap
 from opentelemetry.trace import get_tracer, Tracer
 from typing import Collection
 from wrapt import wrap_function_wrapper
-import uuid
 
 # Stable versions, e.g. 0.6.0, satisfy this condition too
 _instruments = ("browser-use >= 0.6.0rc1",)
@@ -33,12 +35,7 @@ WRAPPED_METHODS = [
 ]
 
 
-@with_tracer_and_client_wrapper
-async def _wrap(
-    tracer: Tracer, client: AsyncLaminarClient, to_wrap, wrapped, instance, args, kwargs
-):
-    result = await wrapped(*args, **kwargs)
-
+async def process_wrapped_result(result, instance, client, to_wrap):
     if to_wrap.get("action") == "inject_session_recorder":
         is_registered = await is_recorder_present(result)
         if not is_registered:
@@ -50,6 +47,14 @@ async def _wrap(
         cdp_session = await instance.get_or_create_cdp_session(target_id)
         await take_full_snapshot(cdp_session)
 
+
+@with_tracer_and_client_wrapper
+async def _wrap(
+    tracer: Tracer, client: AsyncLaminarClient, to_wrap, wrapped, instance, args, kwargs
+):
+    result = await wrapped(*args, **kwargs)
+    asyncio.create_task(process_wrapped_result(result, instance, client, to_wrap))
+
     return result
 
 
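
Note: the refactor above moves recorder injection and snapshotting into process_wrapped_result and schedules it with asyncio.create_task, so the wrapped call returns without waiting on post-processing. A self-contained illustration of that pattern; all names below are made up:

    import asyncio

    async def post_process(result: str) -> None:
        # Stand-in for recorder injection / snapshotting in the real wrapper.
        await asyncio.sleep(0.05)
        print("post-processed:", result)

    async def wrap(call):
        # Same shape as the refactored _wrap: return the wrapped result immediately
        # and let post-processing run as a background task.
        result = await call()
        asyncio.create_task(post_process(result))
        return result

    async def main():
        result = await wrap(lambda: asyncio.sleep(0, result="page loaded"))
        print("returned:", result)
        await asyncio.sleep(0.1)  # demo only: give the background task time to finish

    asyncio.run(main())
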
@@ -0,0 +1,71 @@
+from typing import Collection
+
+from lmnr import Laminar
+from lmnr.opentelemetry_lib.tracing.context import get_current_context
+from lmnr.sdk.log import get_default_logger
+
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import unwrap
+from opentelemetry.trace import NonRecordingSpan, get_current_span
+from wrapt import wrap_function_wrapper
+
+
+_instruments = ("bubus >= 1.3.0",)
+event_id_to_span_context = {}
+logger = get_default_logger(__name__)
+
+
+def wrap_dispatch(wrapped, instance, args, kwargs):
+    event = args[0] if args and len(args) > 0 else kwargs.get("event", None)
+    if event and hasattr(event, "event_id"):
+        event_id = event.event_id
+        if event_id:
+            span = get_current_span(get_current_context())
+            event_id_to_span_context[event_id] = span.get_span_context()
+    return wrapped(*args, **kwargs)
+
+
+async def wrap_process_event(wrapped, instance, args, kwargs):
+    event = args[0] if args and len(args) > 0 else kwargs.get("event", None)
+    span_context = None
+    if event and hasattr(event, "event_id"):
+        event_id = event.event_id
+        if event_id:
+            span_context = event_id_to_span_context.get(event_id)
+    if not span_context:
+        return await wrapped(*args, **kwargs)
+    if not Laminar.is_initialized():
+        return await wrapped(*args, **kwargs)
+    with Laminar.use_span(NonRecordingSpan(span_context)):
+        return await wrapped(*args, **kwargs)
+
+
+class BubusInstrumentor(BaseInstrumentor):
+    def __init__(self):
+        super().__init__()
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        try:
+            wrap_function_wrapper("bubus.service", "EventBus.dispatch", wrap_dispatch)
+        except (ModuleNotFoundError, ImportError):
+            pass
+        try:
+            wrap_function_wrapper(
+                "bubus.service", "EventBus.process_event", wrap_process_event
+            )
+        except (ModuleNotFoundError, ImportError):
+            pass
+
+    def _uninstrument(self, **kwargs):
+        try:
+            unwrap("bubus.service", "EventBus.dispatch")
+        except (ModuleNotFoundError, ImportError):
+            pass
+        try:
+            unwrap("bubus.service", "EventBus.process_event")
+        except (ModuleNotFoundError, ImportError):
+            pass
+        event_id_to_span_context.clear()
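
Note: outside the initializer machinery, the new instrumentor can also be applied directly through the standard OpenTelemetry BaseInstrumentor interface. A sketch, assuming bubus is installed and the project API key is valid:

    from lmnr import Laminar
    from lmnr.sdk.browser.bubus_otel import BubusInstrumentor

    Laminar.initialize(project_api_key="lmnr-project-api-key")

    # Patches EventBus.dispatch / EventBus.process_event so each handler runs under
    # the span context that was current when its event was dispatched.
    BubusInstrumentor().instrument()
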