lmnr 0.7.0__py3-none-any.whl → 0.7.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- lmnr/opentelemetry_lib/decorators/__init__.py +43 -4
- lmnr/opentelemetry_lib/litellm/__init__.py +5 -2
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +85 -6
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +57 -14
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +106 -6
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +8 -3
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +6 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +139 -10
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +8 -3
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +6 -2
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +6 -3
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +4 -1
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +14 -5
- lmnr/opentelemetry_lib/tracing/context.py +18 -1
- lmnr/sdk/browser/pw_utils.py +43 -122
- lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
- lmnr/sdk/laminar.py +51 -26
- lmnr/sdk/types.py +17 -5
- lmnr/version.py +1 -1
- {lmnr-0.7.0.dist-info → lmnr-0.7.2.dist-info}/METADATA +48 -51
- {lmnr-0.7.0.dist-info → lmnr-0.7.2.dist-info}/RECORD +23 -23
- lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -98
- {lmnr-0.7.0.dist-info → lmnr-0.7.2.dist-info}/WHEEL +0 -0
- {lmnr-0.7.0.dist-info → lmnr-0.7.2.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py
CHANGED
@@ -1,6 +1,7 @@
 import copy
 import json
 import logging
+import threading
 import time
 from functools import singledispatch
 from typing import List, Optional, Union
@@ -39,7 +40,10 @@ from ..utils import (
     should_emit_events,
     should_send_prompts,
 )
-from lmnr.opentelemetry_lib.tracing.context import get_current_context
+from lmnr.opentelemetry_lib.tracing.context import (
+    get_current_context,
+    get_event_attributes_from_context,
+)
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.metrics import Counter, Histogram
 from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
@@ -111,7 +115,8 @@ def chat_wrapper(
             exception_counter.add(1, attributes=attributes)
 
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        attributes = get_event_attributes_from_context()
+        span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
 
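Note: this bare-to-attributed record_exception change recurs across every wrapper in this release; the attributes come from the new get_event_attributes_from_context() helper added in lmnr/opentelemetry_lib/tracing/context.py (see that file's hunks below). A minimal, self-contained sketch of the resulting exception event, using the standard OpenTelemetry span API and hypothetical session/user IDs:

from opentelemetry import trace

tracer = trace.get_tracer("example")
with tracer.start_as_current_span("openai.chat") as span:
    try:
        raise RuntimeError("upstream API error")
    except RuntimeError as e:
        # Same event shape the instrumented wrappers now emit;
        # the IDs below are placeholders, not values from the SDK.
        span.record_exception(
            e,
            attributes={
                "lmnr.event.session_id": "sess-123",
                "lmnr.event.user_id": "user-456",
            },
        )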
@@ -211,7 +216,8 @@ async def achat_wrapper(
             exception_counter.add(1, attributes=attributes)
 
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        attributes = get_event_attributes_from_context()
+        span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
 
@@ -296,6 +302,7 @@ def _handle_response(
     choice_counter=None,
     duration_histogram=None,
     duration=None,
+    is_streaming: bool = False,
 ):
     if is_openai_v1():
         response_dict = model_as_dict(response)
@@ -310,6 +317,7 @@ def _handle_response(
         duration_histogram,
         response_dict,
         duration,
+        is_streaming,
     )
 
     # span attributes
@@ -327,13 +335,19 @@
 
 
 def _set_chat_metrics(
-    instance, token_counter, choice_counter, duration_histogram, response_dict, duration
+    instance,
+    token_counter,
+    choice_counter,
+    duration_histogram,
+    response_dict,
+    duration,
+    is_streaming: bool = False,
 ):
     shared_attributes = metric_shared_attributes(
         response_model=response_dict.get("model") or None,
         operation="chat",
         server_address=_get_openai_base_url(instance),
-        is_streaming=False,
+        is_streaming=is_streaming,
     )
 
     # token metrics
@@ -520,11 +534,9 @@ def _set_completions(span, choices):
 def _set_streaming_token_metrics(
     request_kwargs, complete_response, span, token_counter, shared_attributes
 ):
-    # use tiktoken calculate token usage
     if not should_record_stream_token_usage():
         return
 
-    # kwargs={'model': 'gpt-3.5', 'messages': [{'role': 'user', 'content': '...'}], 'stream': True}
     prompt_usage = -1
     completion_usage = -1
 
@@ -621,11 +633,35 @@ class ChatStream(ObjectProxy):
         self._time_of_first_token = self._start_time
         self._complete_response = {"choices": [], "model": ""}
 
+        # Cleanup state tracking to prevent duplicate operations
+        self._cleanup_completed = False
+        self._cleanup_lock = threading.Lock()
+
+    def __del__(self):
+        """Cleanup when object is garbage collected"""
+        if hasattr(self, "_cleanup_completed") and not self._cleanup_completed:
+            self._ensure_cleanup()
+
     def __enter__(self):
         return self
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        return self.__wrapped__.__exit__(exc_type, exc_val, exc_tb)
+        cleanup_exception = None
+        try:
+            self._ensure_cleanup()
+        except Exception as e:
+            cleanup_exception = e
+            # Don't re-raise to avoid masking original exception
+
+        result = self.__wrapped__.__exit__(exc_type, exc_val, exc_tb)
+
+        if cleanup_exception:
+            # Log cleanup exception but don't affect context manager behavior
+            logger.debug(
+                "Error during ChatStream cleanup in __exit__: %s", cleanup_exception
+            )
+
+        return result
 
     async def __aenter__(self):
         return self
@@ -645,7 +681,12 @@ class ChatStream(ObjectProxy):
         except Exception as e:
             if isinstance(e, StopIteration):
                 self._process_complete_response()
-            raise
+            else:
+                # Handle cleanup for other exceptions during stream iteration
+                self._ensure_cleanup()
+                if self._span and self._span.is_recording():
+                    self._span.set_status(Status(StatusCode.ERROR, str(e)))
+            raise
         else:
             self._process_item(chunk)
             return chunk
@@ -656,7 +697,12 @@ class ChatStream(ObjectProxy):
         except Exception as e:
             if isinstance(e, StopAsyncIteration):
                 self._process_complete_response()
-            raise
+            else:
+                # Handle cleanup for other exceptions during stream iteration
+                self._ensure_cleanup()
+                if self._span and self._span.is_recording():
+                    self._span.set_status(Status(StatusCode.ERROR, str(e)))
+            raise
         else:
             self._process_item(chunk)
             return chunk
@@ -727,6 +773,82 @@ class ChatStream(ObjectProxy):
 
         self._span.set_status(Status(StatusCode.OK))
         self._span.end()
+        self._cleanup_completed = True
+
+    @dont_throw
+    def _ensure_cleanup(self):
+        """Thread-safe cleanup method that handles different cleanup scenarios"""
+        with self._cleanup_lock:
+            if self._cleanup_completed:
+                logger.debug("ChatStream cleanup already completed, skipping")
+                return
+
+            try:
+                logger.debug("Starting ChatStream cleanup")
+
+                # Set span status and close it
+                if self._span and self._span.is_recording():
+                    self._span.set_status(Status(StatusCode.OK))
+                    self._span.end()
+                    logger.debug("ChatStream span closed successfully")
+
+                # Calculate partial metrics based on available data
+                self._record_partial_metrics()
+
+                self._cleanup_completed = True
+                logger.debug("ChatStream cleanup completed successfully")
+
+            except Exception as e:
+                # Log cleanup errors but don't propagate to avoid masking original issues
+                logger.debug("Error during ChatStream cleanup: %s", str(e))
+
+                # Still try to close the span even if metrics recording failed
+                try:
+                    if self._span and self._span.is_recording():
+                        self._span.set_status(
+                            Status(StatusCode.ERROR, "Cleanup failed")
+                        )
+                        self._span.end()
+                    self._cleanup_completed = True
+                except Exception:
+                    # Final fallback - just mark as completed to prevent infinite loops
+                    self._cleanup_completed = True
+
+    @dont_throw
+    def _record_partial_metrics(self):
+        """Record metrics based on available partial data"""
+        # Always record duration if we have start time
+        if (
+            self._start_time
+            and isinstance(self._start_time, (float, int))
+            and self._duration_histogram
+        ):
+            duration = time.time() - self._start_time
+            self._duration_histogram.record(
+                duration, attributes=self._shared_attributes()
+            )
+
+        # Record basic span attributes even without complete response
+        if self._span and self._span.is_recording():
+            _set_response_attributes(self._span, self._complete_response)
+
+        # Record partial token metrics if we have any data
+        if self._complete_response.get("choices") or self._request_kwargs:
+            _set_streaming_token_metrics(
+                self._request_kwargs,
+                self._complete_response,
+                self._span,
+                self._token_counter,
+                self._shared_attributes(),
+            )
+
+        # Record choice metrics if we have any choices processed
+        if self._choice_counter and self._complete_response.get("choices"):
+            _set_choice_counter_metrics(
+                self._choice_counter,
+                self._complete_response.get("choices"),
+                self._shared_attributes(),
+            )
 
 
 # Backward compatibility with OpenAI v0
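Note: the additions above implement an idempotent finalizer: __exit__, __del__, and the error paths in __next__/__anext__ can all call _ensure_cleanup(), but the lock plus the _cleanup_completed flag guarantee the span is ended and metrics are flushed at most once. A condensed, standalone sketch of the pattern (illustrative names, not the library's API):

import threading

class IdempotentStream:
    """Run-once cleanup guarded by a lock, as in ChatStream above."""

    def __init__(self):
        self._cleanup_completed = False
        self._cleanup_lock = threading.Lock()

    def _finalize(self):
        pass  # stand-in for ending the span and recording partial metrics

    def _ensure_cleanup(self):
        with self._cleanup_lock:  # serialize concurrent callers
            if self._cleanup_completed:
                return
            try:
                self._finalize()
            finally:
                # Mark done even on failure so later callers never retry forever.
                self._cleanup_completed = True

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            self._ensure_cleanup()
        except Exception:
            pass  # never mask the exception that ended the with-block
        return False

    def __del__(self):
        # Last-resort path for streams abandoned mid-iteration.
        if not getattr(self, "_cleanup_completed", True):
            self._ensure_cleanup()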
@@ -975,6 +1097,13 @@ def _accumulate_stream_items(item, complete_response):
     complete_response["model"] = item.get("model")
     complete_response["id"] = item.get("id")
 
+    # capture usage information from the last stream chunks
+    if item.get("usage"):
+        complete_response["usage"] = item.get("usage")
+    elif item.get("choices") and item["choices"][0].get("usage"):
+        # Some LLM providers like moonshot mistakenly place token usage information within choices[0], handle this.
+        complete_response["usage"] = item["choices"][0].get("usage")
+
     # prompt filter results
     if item.get("prompt_filter_results"):
         complete_response["prompt_filter_results"] = item.get("prompt_filter_results")
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py
CHANGED
@@ -27,7 +27,10 @@ from ..utils import (
     should_emit_events,
     should_send_prompts,
 )
-from lmnr.opentelemetry_lib.tracing.context import get_current_context
+from lmnr.opentelemetry_lib.tracing.context import (
+    get_current_context,
+    get_event_attributes_from_context,
+)
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
 from opentelemetry.semconv_ai import (
@@ -65,7 +68,8 @@ def completion_wrapper(tracer, wrapped, instance, args, kwargs):
         response = wrapped(*args, **kwargs)
     except Exception as e:
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        attributes = get_event_attributes_from_context()
+        span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
         raise
@@ -100,7 +104,8 @@ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
         response = await wrapped(*args, **kwargs)
     except Exception as e:
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        attributes = get_event_attributes_from_context()
+        span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
         raise
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py
CHANGED
@@ -3,6 +3,8 @@ import time
 from collections.abc import Iterable
 
 from opentelemetry import context as context_api
+
+from lmnr.opentelemetry_lib.tracing.context import get_event_attributes_from_context
 from ..shared import (
     OPENAI_LLM_USAGE_TOKEN_TYPES,
     _get_openai_base_url,
@@ -91,7 +93,8 @@ def embeddings_wrapper(
             exception_counter.add(1, attributes=attributes)
 
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        attributes = get_event_attributes_from_context()
+        span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
 
@@ -156,7 +159,8 @@ async def aembeddings_wrapper(
             exception_counter.add(1, attributes=attributes)
 
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        attributes = get_event_attributes_from_context()
+        span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
 
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py
CHANGED
@@ -17,7 +17,10 @@ from ..utils import (
     dont_throw,
     should_emit_events,
 )
-from lmnr.opentelemetry_lib.tracing.context import get_current_context
+from lmnr.opentelemetry_lib.tracing.context import (
+    get_current_context,
+    get_event_attributes_from_context,
+)
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
 from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
 from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
@@ -132,7 +135,7 @@ def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
 
     if exception := run.get("exception"):
         span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
-        span.record_exception(exception)
+        span.record_exception(exception, attributes=get_event_attributes_from_context())
         span.set_status(Status(StatusCode.ERROR, str(exception)))
         span.end(run.get("end_time"))
 
@@ -316,7 +319,7 @@ def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
         return response
     except Exception as e:
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        span.record_exception(e, attributes=get_event_attributes_from_context())
        span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
         raise
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py
CHANGED
@@ -1,3 +1,4 @@
+from lmnr.opentelemetry_lib.tracing.context import get_event_attributes_from_context
 from ..shared import _set_span_attribute
 from ..shared.event_emitter import emit_event
 from ..shared.event_models import ChoiceEvent
@@ -69,7 +70,9 @@ class EventHandlerWrapper(AssistantEventHandler):
     @override
     def on_exception(self, exception: Exception):
         self._span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
-        self._span.record_exception(exception)
+        self._span.record_exception(
+            exception, attributes=get_event_attributes_from_context()
+        )
         self._span.set_status(Status(StatusCode.ERROR, str(exception)))
         self._original_handler.on_exception(exception)
 
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py
CHANGED
@@ -36,7 +36,10 @@ except ImportError:
     ResponseOutputMessageParam = Dict[str, Any]
     RESPONSES_AVAILABLE = False
 
-from lmnr.opentelemetry_lib.tracing.context import get_current_context
+from lmnr.opentelemetry_lib.tracing.context import (
+    get_current_context,
+    get_event_attributes_from_context,
+)
 from openai._legacy_response import LegacyAPIResponse
 from opentelemetry import context as context_api
 from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
@@ -433,7 +436,7 @@ def responses_get_or_create_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
             context=get_current_context(),
         )
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        span.record_exception(e, attributes=get_event_attributes_from_context())
         span.set_status(StatusCode.ERROR, str(e))
         if traced_data:
             set_data_attributes(traced_data, span)
@@ -529,7 +532,7 @@ async def async_responses_get_or_create_wrapper(
             context=get_current_context(),
         )
         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
-        span.record_exception(e)
+        span.record_exception(e, attributes=get_event_attributes_from_context())
         span.set_status(StatusCode.ERROR, str(e))
         if traced_data:
             set_data_attributes(traced_data, span)
@@ -597,7 +600,10 @@ def responses_cancel_wrapper(tracer: Tracer, wrapped, instance, args, kwargs):
             record_exception=True,
             context=get_current_context(),
         )
-        span.record_exception(Exception("Response cancelled"))
+        span.record_exception(
+            Exception("Response cancelled"),
+            attributes=get_event_attributes_from_context(),
+        )
         set_data_attributes(existing_data, span)
         span.end()
         return response
@@ -624,7 +630,10 @@ async def async_responses_cancel_wrapper(
             record_exception=True,
             context=get_current_context(),
         )
-        span.record_exception(Exception("Response cancelled"))
+        span.record_exception(
+            Exception("Response cancelled"),
+            attributes=get_event_attributes_from_context(),
+        )
         set_data_attributes(existing_data, span)
         span.end()
         return response
lmnr/opentelemetry_lib/tracing/context.py
CHANGED
@@ -2,7 +2,9 @@ import threading
 
 from abc import ABC, abstractmethod
 from contextvars import ContextVar
-from opentelemetry.context import Context, Token
+from opentelemetry.context import Context, Token, create_key, get_value
+
+from lmnr.opentelemetry_lib.tracing.attributes import SESSION_ID, USER_ID
 
 
 class _IsolatedRuntimeContext(ABC):
@@ -107,3 +109,18 @@ def attach_context(context: Context) -> Token[Context]:
 def detach_context(token: Token[Context]) -> None:
     """Detach a context from the isolated runtime context."""
     _ISOLATED_RUNTIME_CONTEXT.detach(token)
+
+
+CONTEXT_USER_ID_KEY = create_key(f"lmnr.{USER_ID}")
+CONTEXT_SESSION_ID_KEY = create_key(f"lmnr.{SESSION_ID}")
+
+
+def get_event_attributes_from_context(context: Context | None = None) -> dict[str, str]:
+    """Get the event attributes from the context."""
+    context = context or get_current_context()
+    attributes = {}
+    if session_id := get_value(CONTEXT_SESSION_ID_KEY, context):
+        attributes["lmnr.event.session_id"] = session_id
+    if user_id := get_value(CONTEXT_USER_ID_KEY, context):
+        attributes["lmnr.event.user_id"] = user_id
+    return attributes
lmnr/sdk/browser/pw_utils.py
CHANGED
@@ -33,7 +33,7 @@ except ImportError as e:
 logger = logging.getLogger(__name__)
 
 current_dir = os.path.dirname(os.path.abspath(__file__))
-with open(os.path.join(current_dir, "rrweb", "rrweb.umd.min.cjs"), "r") as f:
+with open(os.path.join(current_dir, "recorder", "record.umd.min.cjs"), "r") as f:
     RRWEB_CONTENT = f"() => {{ {f.read()} }}"
 
 INJECT_PLACEHOLDER = """
@@ -358,14 +358,6 @@ INJECT_PLACEHOLDER = """
 
     setInterval(sendBatchIfReady, BATCH_TIMEOUT);
 
-    // Add heartbeat events
-    setInterval(() => {
-        window.lmnrRrweb.record.addCustomEvent('heartbeat', {
-            title: document.title,
-            url: document.URL,
-        })
-    }, HEARTBEAT_INTERVAL);
-
     async function bufferToBase64(buffer) {
         const base64url = await new Promise(r => {
             const reader = new FileReader()
@@ -397,63 +389,22 @@ INJECT_PLACEHOLDER = """
         collectFonts: true,
         recordCrossOriginIframes: true
     });
-}
-"""
-
-
-async def send_events_async(
-    page: Page, session_id: str, trace_id: str, client: AsyncLaminarClient
-):
-    """Fetch events from the page and send them to the server"""
-    try:
-        # Check if function exists first
-        events = await page.evaluate(
-            """
-            () => {
-                if (typeof window.lmnrGetAndClearEvents !== 'function') {
-                    return [];
-                }
-                return window.lmnrGetAndClearEvents();
-            }
-            """
-        )
-
-        if not events or len(events) == 0:
-            return
-
-        await client._browser_events.send(session_id, trace_id, events)
-    except Exception as e:
-        if "Page.evaluate: Target page, context or browser has been closed" not in str(
-            e
-        ):
-            logger.debug(f"Could not send events: {e}")
 
+    function heartbeat() {
+        // Add heartbeat events
+        setInterval(() => {
+            window.lmnrRrweb.record.addCustomEvent('heartbeat', {
+                title: document.title,
+                url: document.URL,
+            })
+        }, HEARTBEAT_INTERVAL
+        );
+    }
 
-def send_events_sync(
-    page: SyncPage, session_id: str, trace_id: str, client: LaminarClient
-):
-    """Synchronous version of send_events"""
-    try:
-        events = page.evaluate(
-            """
-            () => {
-                if (typeof window.lmnrGetAndClearEvents !== 'function') {
-                    return [];
-                }
-                return window.lmnrGetAndClearEvents();
-            }
-            """
-        )
-        if not events or len(events) == 0:
-            return
-
-        client._browser_events.send(session_id, trace_id, events)
+    heartbeat();
 
-    except Exception as e:
-        if "Page.evaluate: Target page, context or browser has been closed" not in str(
-            e
-        ):
-            logger.debug(f"Could not send events: {e}")
+}
+"""
 
 
 def inject_session_recorder_sync(page: SyncPage):
@@ -483,10 +434,10 @@ def inject_session_recorder_sync(page: SyncPage):
         ):
             return
 
-
-
-
-
+        try:
+            page.evaluate(INJECT_PLACEHOLDER)
+        except Exception as e:
+            logger.debug(f"Failed to inject session recorder: {e}")
 
     except Exception as e:
         logger.error(f"Error during session recorder injection: {e}")
@@ -519,10 +470,10 @@ async def inject_session_recorder_async(page: Page):
         ):
             return
 
-
-
-
-
+        try:
+            await page.evaluate(INJECT_PLACEHOLDER)
+        except Exception as e:
+            logger.debug(f"Failed to inject session recorder placeholder: {e}")
 
     except Exception as e:
         logger.error(f"Error during session recorder injection: {e}")
@@ -536,30 +487,6 @@ def start_recording_events_sync(page: SyncPage, session_id: str, client: LaminarClient):
     trace_id = format(span.get_span_context().trace_id, "032x")
     span.set_attribute("lmnr.internal.has_browser_session", True)
 
-    try:
-        if page.evaluate("""() => typeof window.lmnrSendEvents !== 'undefined'"""):
-            return
-    except Exception:
-        pass
-
-    def on_load():
-        try:
-            inject_session_recorder_sync(page)
-        except Exception as e:
-            logger.error(f"Error in on_load handler: {e}")
-
-    def on_close():
-        try:
-            send_events_sync(page, session_id, trace_id, client)
-        except Exception:
-            pass
-
-    page.on("load", on_load)
-    page.on("close", on_close)
-
-    inject_session_recorder_sync(page)
-
-    # Expose function to browser so it can call us when events are ready
     def send_events_from_browser(events):
         try:
             if events and len(events) > 0:
@@ -572,6 +499,16 @@ def start_recording_events_sync(page: SyncPage, session_id: str, client: LaminarClient):
     except Exception as e:
         logger.debug(f"Could not expose function: {e}")
 
+    inject_session_recorder_sync(page)
+
+    def on_load(p):
+        try:
+            inject_session_recorder_sync(p)
+        except Exception as e:
+            logger.error(f"Error in on_load handler: {e}")
+
+    page.on("domcontentloaded", on_load)
+
 
 @observe(name="playwright.page", ignore_input=True, ignore_output=True)
 async def start_recording_events_async(
@@ -581,33 +518,7 @@ async def start_recording_events_async(
     span = trace.get_current_span(ctx)
     trace_id = format(span.get_span_context().trace_id, "032x")
     span.set_attribute("lmnr.internal.has_browser_session", True)
-
-    try:
-        if await page.evaluate(
-            """() => typeof window.lmnrSendEvents !== 'undefined'"""
-        ):
-            return
-    except Exception:
-        pass
-
-    async def on_load(p):
-        try:
-            await inject_session_recorder_async(p)
-        except Exception as e:
-            logger.error(f"Error in on_load handler: {e}")
-
-    async def on_close(p):
-        try:
-            # Send any remaining events before closing
-            await send_events_async(p, session_id, trace_id, client)
-        except Exception:
-            pass
-
-    page.on("load", on_load)
-    page.on("close", on_close)
-
-    await inject_session_recorder_async(page)
-
+
     async def send_events_from_browser(events):
         try:
             if events and len(events) > 0:
@@ -620,6 +531,16 @@ async def start_recording_events_async(
     except Exception as e:
         logger.debug(f"Could not expose function: {e}")
 
+    await inject_session_recorder_async(page)
+
+    async def on_load(p):
+        try:
+            await inject_session_recorder_async(p)
+        except Exception as e:
+            logger.error(f"Error in on_load handler: {e}")
+
+    page.on("domcontentloaded", on_load)
+
 
 def take_full_snapshot(page: Page):
     return page.evaluate(