lmnr 0.7.4__py3-none-any.whl → 0.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,733 @@
+ import asyncio
+ import logging
+ import os
+ import time
+ 
+ import orjson
+ from opentelemetry import trace
+ 
+ from lmnr.sdk.decorators import observe
+ from lmnr.sdk.browser.utils import retry_async
+ from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient
+ from lmnr.opentelemetry_lib.tracing.context import get_current_context
+ from lmnr.opentelemetry_lib.tracing import TracerWrapper
+ from lmnr.sdk.types import MaskInputOptions
+ 
+ logger = logging.getLogger(__name__)
+ 
+ # Seconds after which an incomplete chunk buffer is discarded.
+ OLD_BUFFER_TIMEOUT = 60
+ 
+ current_dir = os.path.dirname(os.path.abspath(__file__))
+ # Wrap the bundled rrweb recorder in a zero-argument function so it can be
+ # injected and invoked via Runtime.evaluate.
+ with open(os.path.join(current_dir, "recorder", "record.umd.min.cjs"), "r") as f:
+     RRWEB_CONTENT = f"() => {{ {f.read()} }}"
+ 
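+ # The script below runs inside the page: it starts rrweb, gzip-compresses
+ # every event (in a Web Worker when CSP allows it), base64-encodes the
+ # result, and ships batches back to Python in bounded chunks through the
+ # lmnrSendEvents binding registered in start_recording_events().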
+ INJECT_PLACEHOLDER = """
+ (maskInputOptions) => {
+     const BATCH_TIMEOUT = 2000; // Send events after 2 seconds
+     const MAX_WORKER_PROMISES = 50; // Max concurrent worker promises
+     const HEARTBEAT_INTERVAL = 2000;
+     const CHUNK_SIZE = 256 * 1024; // 256KB chunks
+     const CHUNK_SEND_DELAY = 100; // 100ms delay between chunks
+ 
+     window.lmnrRrwebEventsBatch = [];
+     window.lmnrChunkQueue = [];
+     window.lmnrChunkSequence = 0;
+     window.lmnrCurrentBatchId = null;
+ 
+     // Create a Web Worker for heavy JSON processing with chunked processing
+     const createCompressionWorker = () => {
+         const workerCode = `
+             self.onmessage = async function(e) {
+                 const { jsonString, buffer, id, useBuffer } = e.data;
+                 try {
+                     let uint8Array;
+ 
+                     if (useBuffer && buffer) {
+                         // Use transferred ArrayBuffer (no copying needed!)
+                         uint8Array = new Uint8Array(buffer);
+                     } else {
+                         // Convert JSON string to bytes
+                         const textEncoder = new TextEncoder();
+                         uint8Array = textEncoder.encode(jsonString);
+                     }
+ 
+                     const compressionStream = new CompressionStream('gzip');
+                     const writer = compressionStream.writable.getWriter();
+                     const reader = compressionStream.readable.getReader();
+ 
+                     writer.write(uint8Array);
+                     writer.close();
+ 
+                     const chunks = [];
+                     let totalLength = 0;
+ 
+                     while (true) {
+                         const { done, value } = await reader.read();
+                         if (done) break;
+                         chunks.push(value);
+                         totalLength += value.length;
+                     }
+ 
+                     const compressedData = new Uint8Array(totalLength);
+                     let offset = 0;
+                     for (const chunk of chunks) {
+                         compressedData.set(chunk, offset);
+                         offset += chunk.length;
+                     }
+ 
+                     self.postMessage({ id, success: true, data: compressedData });
+                 } catch (error) {
+                     self.postMessage({ id, success: false, error: error.message });
+                 }
+             };
+         `;
+ 
+         const blob = new Blob([workerCode], { type: 'application/javascript' });
+         return new Worker(URL.createObjectURL(blob));
+     };
+ 
+     let compressionWorker = null;
+     let workerPromises = new Map();
+     let workerId = 0;
+     let workerSupported = null; // null = unknown, true = supported, false = blocked by CSP
+ 
+     // Test if workers are supported (not blocked by CSP)
+     function testWorkerSupport() {
+         if (workerSupported !== null) {
+             return workerSupported;
+         }
+ 
+         try {
+             const testWorker = createCompressionWorker();
+             testWorker.terminate();
+             workerSupported = true;
+             return true;
+         } catch (error) {
+             console.warn('Web Workers blocked by CSP, will use main thread compression:', error);
+             workerSupported = false;
+             return false;
+         }
+     }
+ 
+     // Cleanup function for worker
+     const cleanupWorker = () => {
+         if (compressionWorker) {
+             compressionWorker.terminate();
+             compressionWorker = null;
+         }
+         // Reject anything still pending so awaiting emitters fail fast
+         // instead of hanging past their timeout check.
+         for (const [, promise] of workerPromises) {
+             promise.reject(new Error('Compression worker was cleaned up'));
+         }
+         workerPromises.clear();
+         workerId = 0;
+     };
+ 
+     // Clean up stale promises to prevent memory leaks
+     const cleanupStalePromises = () => {
+         if (workerPromises.size > MAX_WORKER_PROMISES) {
+             const toDelete = [];
+             for (const [id, promise] of workerPromises) {
+                 if (toDelete.length >= workerPromises.size - MAX_WORKER_PROMISES) break;
+                 toDelete.push(id);
+                 promise.reject(new Error('Promise cleaned up due to memory pressure'));
+             }
+             toDelete.forEach(id => workerPromises.delete(id));
+         }
+     };
+ 
+     // Non-blocking JSON.stringify using chunked processing
+     function stringifyNonBlocking(obj, chunkSize = 10000) {
+         return new Promise((resolve, reject) => {
+             try {
+                 // For very large objects, we need to be more careful.
+                 // Use requestIdleCallback if available, otherwise setTimeout.
+                 // Bind to window: calling an unbound requestIdleCallback
+                 // throws "Illegal invocation".
+                 const scheduleWork = window.requestIdleCallback
+                     ? window.requestIdleCallback.bind(window)
+                     : ((cb) => setTimeout(cb, 0));
+ 
+                 let result = '';
+                 let keys = [];
+                 let keyIndex = 0;
+ 
+                 // Pre-process to get all keys if it's an object
+                 if (typeof obj === 'object' && obj !== null && !Array.isArray(obj)) {
+                     keys = Object.keys(obj);
+                 }
+ 
+                 function processChunk() {
+                     try {
+                         if (Array.isArray(obj) || typeof obj !== 'object' || obj === null) {
+                             // For arrays and primitives, just stringify directly
+                             result = JSON.stringify(obj);
+                             resolve(result);
+                             return;
+                         }
+ 
+                         // For objects, process in chunks
+                         const endIndex = Math.min(keyIndex + chunkSize, keys.length);
+ 
+                         if (keyIndex === 0) {
+                             result = '{';
+                         }
+ 
+                         for (let i = keyIndex; i < endIndex; i++) {
+                             const key = keys[i];
+                             // JSON.stringify returns undefined for functions and
+                             // undefined values; emit null to keep the JSON valid.
+                             const encoded = JSON.stringify(obj[key]) ?? 'null';
+ 
+                             if (i > 0) result += ',';
+                             result += JSON.stringify(key) + ':' + encoded;
+                         }
+ 
+                         keyIndex = endIndex;
+ 
+                         if (keyIndex >= keys.length) {
+                             result += '}';
+                             resolve(result);
+                         } else {
+                             // Schedule next chunk
+                             scheduleWork(processChunk);
+                         }
+                     } catch (error) {
+                         reject(error);
+                     }
+                 }
+ 
+                 processChunk();
+             } catch (error) {
+                 reject(error);
+             }
+         });
+     }
+ 
+     // Fast compression for small objects (main thread)
+     async function compressSmallObject(data) {
+         const jsonString = JSON.stringify(data);
+         const textEncoder = new TextEncoder();
+         const uint8Array = textEncoder.encode(jsonString);
+ 
+         const compressionStream = new CompressionStream('gzip');
+         const writer = compressionStream.writable.getWriter();
+         const reader = compressionStream.readable.getReader();
+ 
+         writer.write(uint8Array);
+         writer.close();
+ 
+         const chunks = [];
+         let totalLength = 0;
+ 
+         while (true) {
+             const { done, value } = await reader.read();
+             if (done) break;
+             chunks.push(value);
+             totalLength += value.length;
+         }
+ 
+         const compressedData = new Uint8Array(totalLength);
+         let offset = 0;
+         for (const chunk of chunks) {
+             compressedData.set(chunk, offset);
+             offset += chunk.length;
+         }
+ 
+         return compressedData;
+     }
+ 
+     // Alternative: Use transferable objects for maximum efficiency
+     async function compressLargeObjectTransferable(data) {
+         try {
+             // Check if workers are supported first
+             if (!testWorkerSupport()) {
+                 return compressSmallObject(data);
+             }
+ 
+             // Clean up stale promises first
+             cleanupStalePromises();
+ 
+             // Stringify on main thread but non-blocking
+             const jsonString = await stringifyNonBlocking(data);
+ 
+             // Convert to ArrayBuffer (transferable)
+             const encoder = new TextEncoder();
+             const uint8Array = encoder.encode(jsonString);
+             const buffer = uint8Array.buffer; // Use the original buffer for transfer
+ 
+             return new Promise((resolve, reject) => {
+                 if (!compressionWorker) {
+                     compressionWorker = createCompressionWorker();
+                     compressionWorker.onmessage = (e) => {
+                         const { id, success, data: result, error } = e.data;
+                         const promise = workerPromises.get(id);
+                         if (promise) {
+                             workerPromises.delete(id);
+                             if (success) {
+                                 promise.resolve(result);
+                             } else {
+                                 promise.reject(new Error(error));
+                             }
+                         }
+                     };
+ 
+                     compressionWorker.onerror = (error) => {
+                         console.error('Compression worker error:', error);
+                         cleanupWorker();
+                         compressSmallObject(data).then(resolve, reject);
+                     };
+                 }
+ 
+                 const id = ++workerId;
+                 workerPromises.set(id, { resolve, reject });
+ 
+                 // Set timeout to prevent hanging promises
+                 setTimeout(() => {
+                     if (workerPromises.has(id)) {
+                         workerPromises.delete(id);
+                         reject(new Error('Compression timeout'));
+                     }
+                 }, 10000);
+ 
+                 // Transfer the ArrayBuffer (no copying!)
+                 compressionWorker.postMessage({
+                     buffer,
+                     id,
+                     useBuffer: true
+                 }, [buffer]);
+             });
+         } catch (error) {
+             console.warn('Failed to process large object with transferable:', error);
+             return compressSmallObject(data);
+         }
+     }
+ 
+     // Worker-based compression for large objects
+     async function compressLargeObject(data) {
+         // Check if workers are supported first - if not, use main thread compression
+         if (!testWorkerSupport()) {
+             return await compressSmallObject(data);
+         }
+ 
+         try {
+             // Use transferable objects for better performance
+             return await compressLargeObjectTransferable(data);
+         } catch (error) {
+             console.warn('Transferable failed, falling back to string method:', error);
+             try {
+                 // Fallback to string method with worker
+                 const jsonString = await stringifyNonBlocking(data);
+ 
+                 return new Promise((resolve, reject) => {
+                     if (!compressionWorker) {
+                         compressionWorker = createCompressionWorker();
+                         compressionWorker.onmessage = (e) => {
+                             const { id, success, data: result, error } = e.data;
+                             const promise = workerPromises.get(id);
+                             if (promise) {
+                                 workerPromises.delete(id);
+                                 if (success) {
+                                     promise.resolve(result);
+                                 } else {
+                                     promise.reject(new Error(error));
+                                 }
+                             }
+                         };
+ 
+                         compressionWorker.onerror = (error) => {
+                             console.error('Compression worker error:', error);
+                             cleanupWorker();
+                         };
+                     }
+ 
+                     const id = ++workerId;
+                     workerPromises.set(id, { resolve, reject });
+ 
+                     // Set timeout to prevent hanging promises
+                     setTimeout(() => {
+                         if (workerPromises.has(id)) {
+                             workerPromises.delete(id);
+                             reject(new Error('Compression timeout'));
+                         }
+                     }, 10000);
+ 
+                     compressionWorker.postMessage({ jsonString, id });
+                 });
+             } catch (workerError) {
+                 console.warn('Worker creation failed, falling back to main thread compression:', workerError);
+                 // Final fallback: compress on main thread (may block UI but will work)
+                 return await compressSmallObject(data);
+             }
+         }
+     }
+ 
+     // Recycle the worker periodically to bound memory, but only when idle
+     // so in-flight compressions are not cut off mid-batch.
+     setInterval(() => {
+         if (workerPromises.size === 0) {
+             cleanupWorker();
+         }
+     }, 5000);
+ 
+     function isLargeEvent(type) {
+         const LARGE_EVENT_TYPES = [
+             2, // FullSnapshot
+         ];
+ 
+         return LARGE_EVENT_TYPES.includes(type);
+     }
+ 
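+     // Each chunk passed to lmnrSendEvents is a JSON string of the form
+     //   { batchId, chunkIndex, totalChunks, data, isFinal }
+     // The Python side buffers chunks by batchId and reassembles the batch
+     // once all totalChunks pieces have arrived.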
+     // Create chunks from a string with metadata
+     function createChunks(str, batchId) {
+         const chunks = [];
+         const totalChunks = Math.ceil(str.length / CHUNK_SIZE);
+ 
+         for (let i = 0; i < str.length; i += CHUNK_SIZE) {
+             const chunk = str.slice(i, i + CHUNK_SIZE);
+             chunks.push({
+                 batchId: batchId,
+                 chunkIndex: chunks.length,
+                 totalChunks: totalChunks,
+                 data: chunk,
+                 isFinal: chunks.length === totalChunks - 1
+             });
+         }
+ 
+         return chunks;
+     }
+ 
+     // Send chunks with flow control
+     async function sendChunks(chunks) {
+         if (typeof window.lmnrSendEvents !== 'function') {
+             return;
+         }
+ 
+         window.lmnrChunkQueue.push(...chunks);
+ 
+         // Process queue
+         while (window.lmnrChunkQueue.length > 0) {
+             const chunk = window.lmnrChunkQueue.shift();
+             try {
+                 window.lmnrSendEvents(JSON.stringify(chunk));
+                 // Small delay between chunks to avoid overwhelming CDP
+                 await new Promise(resolve => setTimeout(resolve, CHUNK_SEND_DELAY));
+             } catch (error) {
+                 console.error('Failed to send chunk:', error);
+                 // On error, drop the failed chunk's batch from the queue
+                 window.lmnrChunkQueue = window.lmnrChunkQueue.filter(c => c.batchId !== chunk.batchId);
+                 break;
+             }
+         }
+     }
+ 
+     async function sendBatchIfReady() {
+         if (window.lmnrRrwebEventsBatch.length > 0 && typeof window.lmnrSendEvents === 'function') {
+             const events = window.lmnrRrwebEventsBatch;
+             window.lmnrRrwebEventsBatch = [];
+ 
+             try {
+                 // Generate unique batch ID
+                 const batchId = `${Date.now()}_${window.lmnrChunkSequence++}`;
+                 window.lmnrCurrentBatchId = batchId;
+ 
+                 // Stringify the entire batch
+                 const batchString = JSON.stringify(events);
+ 
+                 // Check size and chunk if necessary
+                 if (batchString.length <= CHUNK_SIZE) {
+                     // Small enough to send as a single chunk
+                     const chunk = {
+                         batchId: batchId,
+                         chunkIndex: 0,
+                         totalChunks: 1,
+                         data: batchString,
+                         isFinal: true
+                     };
+                     window.lmnrSendEvents(JSON.stringify(chunk));
+                 } else {
+                     // Need to chunk
+                     const chunks = createChunks(batchString, batchId);
+                     await sendChunks(chunks);
+                 }
+             } catch (error) {
+                 console.error('Failed to send events:', error);
+                 // Clear batch to prevent memory buildup
+                 window.lmnrRrwebEventsBatch = [];
+             }
+         }
+     }
+ 
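+     // Flush the batch on a fixed cadence; anything larger than CHUNK_SIZE
+     // is split by createChunks() and streamed with flow control.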
+     setInterval(sendBatchIfReady, BATCH_TIMEOUT);
+ 
+     async function bufferToBase64(buffer) {
+         const base64url = await new Promise(r => {
+             const reader = new FileReader();
+             reader.onload = () => r(reader.result);
+             reader.readAsDataURL(new Blob([buffer]));
+         });
+         return base64url.slice(base64url.indexOf(',') + 1);
+     }
+ 
+     window.lmnrRrweb.record({
+         async emit(event) {
+             try {
+                 const isLarge = isLargeEvent(event.type);
+                 const compressedResult = isLarge ?
+                     await compressLargeObject(event.data) :
+                     await compressSmallObject(event.data);
+ 
+                 const base64Data = await bufferToBase64(compressedResult);
+                 const eventToSend = {
+                     ...event,
+                     data: base64Data,
+                 };
+                 window.lmnrRrwebEventsBatch.push(eventToSend);
+             } catch (error) {
+                 console.warn('Failed to push event to batch', error);
+             }
+         },
+         recordCanvas: true,
+         collectFonts: true,
+         recordCrossOriginIframes: true,
+         maskInputOptions: {
+             password: true,
+             textarea: maskInputOptions.textarea || false,
+             text: maskInputOptions.text || false,
+             number: maskInputOptions.number || false,
+             select: maskInputOptions.select || false,
+             email: maskInputOptions.email || false,
+             tel: maskInputOptions.tel || false,
+         }
+     });
+ 
+     function heartbeat() {
+         // Emit periodic heartbeat events so the recording stays alive
+         setInterval(() => {
+             window.lmnrRrweb.record.addCustomEvent('heartbeat', {
+                 title: document.title,
+                 url: document.URL,
+             });
+         }, HEARTBEAT_INTERVAL);
+     }
+ 
+     heartbeat();
+ }
+ """
+ 
+ 
+ def get_mask_input_setting() -> MaskInputOptions:
+     """Get the mask_input setting from the session recording configuration."""
+     try:
+         config = TracerWrapper.get_session_recording_options()
+         return config.get(
+             "mask_input_options",
+             MaskInputOptions(
+                 textarea=False,
+                 text=False,
+                 number=False,
+                 select=False,
+                 email=False,
+                 tel=False,
+             ),
+         )
+     except Exception:
+         # Fallback to the default configuration if TracerWrapper is not
+         # initialized.
+         return MaskInputOptions(
+             textarea=False,
+             text=False,
+             number=False,
+             select=False,
+             email=False,
+             tel=False,
+         )
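+ # With the defaults above this serializes to roughly:
+ # {"textarea": false, "text": false, "number": false,
+ #  "select": false, "email": false, "tel": false}
+ # Passwords are always masked by the recorder configuration itself.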
+ 
+ 
+ # browser_use.browser.session.CDPSession (browser-use >= 1.0.0)
+ async def inject_session_recorder(cdp_session):
+     cdp_client = cdp_session.cdp_client
+     try:
+         try:
+             is_loaded = await is_recorder_present(cdp_session)
+         except Exception as e:
+             logger.debug(f"Failed to check if session recorder is loaded: {e}")
+             is_loaded = False
+ 
+         if is_loaded:
+             return
+ 
+         async def load_session_recorder():
+             try:
+                 await cdp_client.send.Runtime.evaluate(
+                     {
+                         "expression": f"({RRWEB_CONTENT})()",
+                         "awaitPromise": True,
+                     },
+                     session_id=cdp_session.session_id,
+                 )
+                 return True
+             except Exception as e:
+                 logger.error(f"Failed to load session recorder: {e}")
+                 return False
+ 
+         if not await retry_async(
+             load_session_recorder,
+             delay=1,
+             error_message="Failed to load session recorder",
+         ):
+             return
+ 
+         try:
+             mask_options = orjson.dumps(get_mask_input_setting()).decode("utf-8")
+             await cdp_client.send.Runtime.evaluate(
+                 {
+                     "expression": f"({INJECT_PLACEHOLDER})({mask_options})",
+                 },
+                 session_id=cdp_session.session_id,
+             )
+         except Exception as e:
+             logger.debug(f"Failed to inject session recorder placeholder: {e}")
+ 
+     except Exception as e:
+         logger.error(f"Error during session recorder injection: {e}")
+ 
+ 
+ # browser_use.browser.session.CDPSession (browser-use >= 1.0.0)
+ @observe(name="cdp_use.session", ignore_input=True, ignore_output=True)
+ async def start_recording_events(
+     cdp_session,
+     lmnr_session_id: str,
+     client: AsyncLaminarClient,
+ ):
+     cdp_client = cdp_session.cdp_client
+ 
+     ctx = get_current_context()
+     span = trace.get_current_span(ctx)
+     trace_id = format(span.get_span_context().trace_id, "032x")
+     span.set_attribute("lmnr.internal.has_browser_session", True)
+ 
+     await inject_session_recorder(cdp_session)
+ 
+     # Buffer for reassembling chunks, keyed by batch ID:
+     # {batch_id: {"chunks": {index: data}, "total": int, "timestamp": float}}
+     chunk_buffers = {}
+ 
+     async def send_events_from_browser(chunk):
+         try:
+             # Handle chunked data
+             batch_id = chunk["batchId"]
+             chunk_index = chunk["chunkIndex"]
+             total_chunks = chunk["totalChunks"]
+             data = chunk["data"]
+ 
+             # Initialize buffer for this batch if needed
+             if batch_id not in chunk_buffers:
+                 chunk_buffers[batch_id] = {
+                     "chunks": {},
+                     "total": total_chunks,
+                     "timestamp": time.time(),
+                 }
+ 
+             # Store chunk
+             chunk_buffers[batch_id]["chunks"][chunk_index] = data
+ 
+             # Once all chunks have arrived, reassemble and ship the batch
+             if len(chunk_buffers[batch_id]["chunks"]) == total_chunks:
+                 full_data = "".join(
+                     chunk_buffers[batch_id]["chunks"][i] for i in range(total_chunks)
+                 )
+ 
+                 # Parse the JSON
+                 events = orjson.loads(full_data)
+ 
+                 # Send to server
+                 if events:
+                     await client._browser_events.send(lmnr_session_id, trace_id, events)
+ 
+                 # Clean up buffer
+                 del chunk_buffers[batch_id]
+ 
+             # Clean up old incomplete buffers
+             current_time = time.time()
+             to_delete = [
+                 bid
+                 for bid, buffer in chunk_buffers.items()
+                 if current_time - buffer["timestamp"] > OLD_BUFFER_TIMEOUT
+             ]
+             for bid in to_delete:
+                 logger.debug(f"Cleaning up incomplete chunk buffer: {bid}")
+                 del chunk_buffers[bid]
+ 
+         except Exception as e:
+             logger.debug(f"Could not send events: {e}")
+ 
+     # cdp_use.cdp.runtime.events.BindingCalledEvent
+     async def send_events_callback(event, cdp_session_id: str | None = None):
+         if event["name"] != "lmnrSendEvents":
+             return
+         await send_events_from_browser(orjson.loads(event["payload"]))
+ 
+     await cdp_client.send.Runtime.addBinding(
+         {
+             "name": "lmnrSendEvents",
+         },
+         session_id=cdp_session.session_id,
+     )
+     cdp_client.register.Runtime.bindingCalled(send_events_callback)
+ 
+     await enable_target_discovery(cdp_session)
+     register_on_target_created(cdp_session, lmnr_session_id, client)
+ 
+ 
+ # browser_use.browser.session.CDPSession (browser-use >= 1.0.0)
+ async def enable_target_discovery(cdp_session):
+     cdp_client = cdp_session.cdp_client
+     await cdp_client.send.Target.setDiscoverTargets(
+         {
+             "discover": True,
+         },
+         session_id=cdp_session.session_id,
+     )
+ 
+ 
+ # browser_use.browser.session.CDPSession (browser-use >= 1.0.0)
+ def register_on_target_created(
+     cdp_session, lmnr_session_id: str, client: AsyncLaminarClient
+ ):
+     # cdp_use.cdp.target.events.TargetCreatedEvent
+     def on_target_created(event, cdp_session_id: str | None = None):
+         target_info = event["targetInfo"]
+         if target_info["type"] == "page":
+             # Re-inject the recorder into any newly created page target
+             asyncio.create_task(inject_session_recorder(cdp_session=cdp_session))
+ 
+     cdp_session.cdp_client.register.Target.targetCreated(on_target_created)
+ 
+ 
+ # browser_use.browser.session.CDPSession (browser-use >= 1.0.0)
+ async def is_recorder_present(cdp_session) -> bool:
+     cdp_client = cdp_session.cdp_client
+ 
+     result = await cdp_client.send.Runtime.evaluate(
+         {
+             "expression": """(() => {
+                 return typeof window.lmnrRrweb !== 'undefined';
+             })()""",
+         },
+         session_id=cdp_session.session_id,
+     )
+     if result and "result" in result and "value" in result["result"]:
+         return result["result"]["value"]
+     return False
+ 
+ 
+ async def take_full_snapshot(cdp_session):
+     cdp_client = cdp_session.cdp_client
+     result = await cdp_client.send.Runtime.evaluate(
+         {
+             "expression": """(() => {
+                 if (window.lmnrRrweb) {
+                     try {
+                         window.lmnrRrweb.record.takeFullSnapshot();
+                         return true;
+                     } catch (e) {
+                         console.error("Error taking full snapshot:", e);
+                         return false;
+                     }
+                 }
+                 return false;
+             })()""",
+         },
+         session_id=cdp_session.session_id,
+     )
+     if result and "result" in result and "value" in result["result"]:
+         return result["result"]["value"]
+     return False
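
A minimal wiring sketch for the module above, assuming an already-connected browser-use (>= 1.0.0) CDPSession and an AsyncLaminarClient that resolves its project API key from the environment; the session ID string is illustrative:

    import asyncio

    from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient

    async def record(cdp_session):
        client = AsyncLaminarClient()  # credentials from env (assumption)
        # Registers the lmnrSendEvents binding, injects rrweb, and starts
        # streaming compressed event chunks to Laminar.
        await start_recording_events(cdp_session, "session-id", client)
        # Optionally force a fresh FullSnapshot, e.g. after a navigation.
        await take_full_snapshot(cdp_session)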